diff --git a/Makefile b/Makefile index 57ad62ba..8eb9a3b5 100644 --- a/Makefile +++ b/Makefile @@ -3,11 +3,6 @@ SHELL := /bin/bash # Use bash syntax # Set up variables GO111MODULE=on -AWS_SDK_GO_VERSION="$(shell echo $(shell go list -m -f '{{.Version}}' github.com/aws/aws-sdk-go))" -AWS_SDK_GO_VERSIONED_PATH="$(shell echo github.com/aws/aws-sdk-go@$(AWS_SDK_GO_VERSION))" -ELASTICACHE_API_PATH="$(shell echo $(shell go env GOPATH))/pkg/mod/$(AWS_SDK_GO_VERSIONED_PATH)/service/elasticache/elasticacheiface" -SERVICE_CONTROLLER_SRC_PATH="$(shell pwd)" - # Build ldflags VERSION ?= "v0.0.0" GITCOMMIT=$(shell git rev-parse HEAD) @@ -16,34 +11,22 @@ GO_LDFLAGS=-ldflags "-X main.version=$(VERSION) \ -X main.buildHash=$(GITCOMMIT) \ -X main.buildDate=$(BUILDDATE)" -.PHONY: all test local-test clean-mocks mocks +.PHONY: all test local-test all: test -test: | mocks ## Run code tests - go test -v ./... +local-run-controller: ## Run a controller image locally for SERVICE + @go run ./cmd/controller/main.go \ + --aws-region=us-west-2 \ + --enable-development-logging \ + --log-level=debug -test-cover: | mocks ## Run code tests with resources coverage - go test -coverpkg=./pkg/resource/... -covermode=count -coverprofile=coverage.out ./... - go tool cover -func=coverage.out +test: ## Run code tests + go test -v ./... -local-test: | mocks ## Run code tests using go.local.mod file +local-test: ## Run code tests using go.local.mod file go test -modfile=go.local.mod -v ./... -clean-mocks: ## Remove mocks directory - rm -rf mocks - -install-mockery: - @scripts/install-mockery.sh - -mocks: install-mockery ## Build mocks - go get -d $(AWS_SDK_GO_VERSIONED_PATH) - @echo "building mocks for $(ELASTICACHE_API_PATH) ... " - @pushd $(ELASTICACHE_API_PATH) 1>/dev/null; \ - $(SERVICE_CONTROLLER_SRC_PATH)/bin/mockery --all --dir=. --output=$(SERVICE_CONTROLLER_SRC_PATH)/mocks/aws-sdk-go/elasticache/ ; \ - popd 1>/dev/null; - @echo "ok." - help: ## Show this help. 
@grep -F -h "##" $(MAKEFILE_LIST) | grep -F -v grep | sed -e 's/\\$$//' \ - | awk -F'[:#]' '{print $$1 = sprintf("%-30s", $$1), $$4}' + | awk -F'[:#]' '{print $$1 = sprintf("%-30s", $$1), $$4}' \ No newline at end of file diff --git a/OWNERS b/OWNERS index 5f7c991a..ceb3426c 100644 --- a/OWNERS +++ b/OWNERS @@ -1,5 +1,4 @@ # See the OWNERS docs at https://go.k8s.io/owners approvers: - - core-ack-team - - service-team \ No newline at end of file + - core-ack-team \ No newline at end of file diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index 127083e5..53d9a0a2 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -1,14 +1,17 @@ # See the OWNERS docs at https://go.k8s.io/owners#owners_aliases aliases: - # Always allow the core ACK maintainers to have access to your repository core-ack-team: - - jaypipes - - mhausenblas - a-hilaly - - RedbackThomson - - vijtrip2 - # TODO: Add your team members to your team controller alias - service-team: - - nmvk - - kumargauravsharma + - jlbutler + - michaelhtm + - rushmash91 + - knottnt + # emeritus-core-ack-team: + # - TiberiuGC + # - jaypipes + # - jljaco + # - mhausenblas + # - RedbackThomson + # - vijtrip2 + # - ivelichkovich \ No newline at end of file diff --git a/README.md b/README.md index dda164d2..430800b2 100644 --- a/README.md +++ b/README.md @@ -17,6 +17,9 @@ The ACK service controller for Amazon ElastiCache supports the following Amazon - [x] User Group - [x] Snapshot - [x] Cache Parameter Group +- [x] Serverless Cache +- [x] Serverless Cache Snapshot +- [x] Cache Cluster ## Contributing diff --git a/apis/v1alpha1/ack-generate-metadata.yaml b/apis/v1alpha1/ack-generate-metadata.yaml index db386813..2094eb12 100755 --- a/apis/v1alpha1/ack-generate-metadata.yaml +++ b/apis/v1alpha1/ack-generate-metadata.yaml @@ -1,13 +1,13 @@ ack_generate_info: - build_date: "2023-02-09T23:07:38Z" - build_hash: d0f3d78cbea8061f822cbceac3786128f091efe6 - go_version: go1.19 - version: v0.24.2 -api_directory_checksum: 885f952f7ca2ce7a676b9bbf8eb262de71de6238 + build_date: "2025-09-19T17:10:48Z" + build_hash: 6b4211163dcc34776b01da9a18217bac0f4103fd + go_version: go1.24.6 + version: v0.52.0 +api_directory_checksum: 65127f2f0a24a801fad4e043be37857f0e6bcfb9 api_version: v1alpha1 -aws_sdk_go_version: v1.44.93 +aws_sdk_go_version: v1.32.6 generator_config_info: - file_checksum: 2461349a2827ec98ae7440fbff3301c1effcfaf4 + file_checksum: 64095ac8a26b456580bc303aceec26581b1b6f6a original_file_name: generator.yaml last_modification: reason: API generation diff --git a/apis/v1alpha1/cache_cluster.go b/apis/v1alpha1/cache_cluster.go new file mode 100644 index 00000000..c754be47 --- /dev/null +++ b/apis/v1alpha1/cache_cluster.go @@ -0,0 +1,388 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT.
+ +package v1alpha1 + +import ( + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// CacheClusterSpec defines the desired state of CacheCluster. +// +// Contains all of the attributes of a specific cluster. +type CacheClusterSpec struct { + + // Specifies whether the nodes in this Memcached cluster are created in a single + // Availability Zone or created across multiple Availability Zones in the cluster's + // region. + // + // This parameter is only supported for Memcached clusters. + // + // If the AZMode and PreferredAvailabilityZones are not specified, ElastiCache + // assumes single-az mode. + AZMode *string `json:"azMode,omitempty"` + // Reserved parameter. The password used to access a password protected server. + // + // Password constraints: + // + // - Must be only printable ASCII characters. + // + // - Must be at least 16 characters and no more than 128 characters in length. + AuthToken *ackv1alpha1.SecretKeyReference `json:"authToken,omitempty"` + // If you are running Valkey 7.2 and above or Redis OSS engine version 6.0 and + // above, set this parameter to yes to opt-in to the next auto minor version + // upgrade campaign. This parameter is disabled for previous versions. + AutoMinorVersionUpgrade *bool `json:"autoMinorVersionUpgrade,omitempty"` + // The node group (shard) identifier. This parameter is stored as a lowercase + // string. + // + // Constraints: + // + // - A name must contain from 1 to 50 alphanumeric characters or hyphens. + // + // - The first character must be a letter. + // + // - A name cannot end with a hyphen or contain two consecutive hyphens. + // + // +kubebuilder:validation:Required + CacheClusterID *string `json:"cacheClusterID"` + // The compute and memory capacity of the nodes in the node group (shard). + // + // The following node types are supported by ElastiCache. Generally speaking, + // the current generation types provide more memory and computational power + // at lower cost when compared to their equivalent previous generation counterparts. + // + // - General purpose: Current generation: M7g node types: cache.m7g.large, + // cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, + // cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported + // Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + // M6g node types (available only for Redis OSS engine version 5.0.6 onward + // and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, + // cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, + // cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, + // cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: + // cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge + // T4g node types (available only for Redis OSS engine version 5.0.6 onward + // and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, + // cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium + // T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous + // generation: (not recommended. Existing clusters are still supported but + // creation of new clusters is not supported for these types.) 
T1 node types: + // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + // cache.m3.2xlarge + // + // - Compute optimized: Previous generation: (not recommended. Existing clusters + // are still supported but creation of new clusters is not supported for + // these types.) C1 node types: cache.c1.xlarge + // + // - Memory optimized: Current generation: R7g node types: cache.r7g.large, + // cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, + // cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported + // Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + // R6g node types (available only for Redis OSS engine version 5.0.6 onward + // and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, + // cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, + // cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, + // cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: + // cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, + // cache.r4.16xlarge Previous generation: (not recommended. Existing clusters + // are still supported but creation of new clusters is not supported for + // these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // cache.r3.8xlarge + // + // Additional node type info + // + // - All current generation instance types are created in Amazon VPC by default. + // + // - Valkey or Redis OSS append-only files (AOF) are not supported for T1 + // or T2 instances. + // + // - Valkey or Redis OSS Multi-AZ with automatic failover is not supported + // on T1 instances. + // + // - The configuration variables appendonly and appendfsync are not supported + // on Valkey, or on Redis OSS version 2.8.22 and later. + CacheNodeType *string `json:"cacheNodeType,omitempty"` + // The name of the parameter group to associate with this cluster. If this argument + // is omitted, the default parameter group for the specified engine is used. + // You cannot use any parameter group which has cluster-enabled='yes' when creating + // a cluster. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable once set" + CacheParameterGroupName *string `json:"cacheParameterGroupName,omitempty"` + CacheParameterGroupRef *ackv1alpha1.AWSResourceReferenceWrapper `json:"cacheParameterGroupRef,omitempty"` + // A list of security group names to associate with this cluster. + // + // Use this parameter only when you are creating a cluster outside of an Amazon + // Virtual Private Cloud (Amazon VPC). + CacheSecurityGroupNames []*string `json:"cacheSecurityGroupNames,omitempty"` + // The name of the subnet group to be used for the cluster. + // + // Use this parameter only when you are creating a cluster in an Amazon Virtual + // Private Cloud (Amazon VPC). + // + // If you're going to launch your cluster in an Amazon VPC, you need to create + // a subnet group before you start creating a cluster. For more information, + // see Subnets and Subnet Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/SubnetGroups.html). 
+ CacheSubnetGroupName *string `json:"cacheSubnetGroupName,omitempty"` + CacheSubnetGroupRef *ackv1alpha1.AWSResourceReferenceWrapper `json:"cacheSubnetGroupRef,omitempty"` + // The name of the cache engine to be used for this cluster. + // + // Valid values for this parameter are: memcached | redis + Engine *string `json:"engine,omitempty"` + // The version number of the cache engine to be used for this cluster. To view + // the supported cache engine versions, use the DescribeCacheEngineVersions + // operation. + // + // Important: You can upgrade to a newer engine version (see Selecting a Cache + // Engine and Version (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/SelectEngine.html#VersionManagement)), + // but you cannot downgrade to an earlier engine version. If you want to use + // an earlier engine version, you must delete the existing cluster or replication + // group and create it anew with the earlier engine version. + EngineVersion *string `json:"engineVersion,omitempty"` + // The network type you choose when modifying a cluster, either ipv4 | ipv6. + // IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine + // version 6.2 and above or Memcached engine version 1.6.6 and above on all + // instances built on the Nitro system (http://aws.amazon.com/ec2/nitro/). + IPDiscovery *string `json:"ipDiscovery,omitempty"` + // Specifies the destination, format and type of the logs. + LogDeliveryConfigurations []*LogDeliveryConfigurationRequest `json:"logDeliveryConfigurations,omitempty"` + // Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads + // using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached + // engine version 1.6.6 and above on all instances built on the Nitro system + // (http://aws.amazon.com/ec2/nitro/). + NetworkType *string `json:"networkType,omitempty"` + // The Amazon Resource Name (ARN) of the Amazon Simple Notification Service + // (SNS) topic to which notifications are sent. + // + // The Amazon SNS topic owner must be the same as the cluster owner. + NotificationTopicARN *string `json:"notificationTopicARN,omitempty"` + NotificationTopicRef *ackv1alpha1.AWSResourceReferenceWrapper `json:"notificationTopicRef,omitempty"` + // The initial number of cache nodes that the cluster has. + // + // For clusters running Valkey or Redis OSS, this value must be 1. For clusters + // running Memcached, this value must be between 1 and 40. + // + // If you need more than 40 nodes for your Memcached cluster, please fill out + // the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/ + // (http://aws.amazon.com/contact-us/elasticache-node-limit-request/). + NumCacheNodes *int64 `json:"numCacheNodes,omitempty"` + // Specifies whether the nodes in the cluster are created in a single outpost + // or across multiple outposts. + OutpostMode *string `json:"outpostMode,omitempty"` + // The port number on which each of the cache nodes accepts connections. + Port *int64 `json:"port,omitempty"` + // The EC2 Availability Zone in which the cluster is created. + // + // All nodes belonging to this cluster are placed in the preferred Availability + // Zone. If you want to create your nodes across multiple Availability Zones, + // use PreferredAvailabilityZones. + // + // Default: System chosen Availability Zone. + PreferredAvailabilityZone *string `json:"preferredAvailabilityZone,omitempty"` + // A list of the Availability Zones in which cache nodes are created. 
The order + // of the zones in the list is not important. + // + // This option is only supported on Memcached. + // + // If you are creating your cluster in an Amazon VPC (recommended) you can only + // locate nodes in Availability Zones that are associated with the subnets in + // the selected subnet group. + // + // The number of Availability Zones listed must equal the value of NumCacheNodes. + // + // If you want all the nodes in the same Availability Zone, use PreferredAvailabilityZone + // instead, or repeat the Availability Zone multiple times in the list. + // + // Default: System chosen Availability Zones. + PreferredAvailabilityZones []*string `json:"preferredAvailabilityZones,omitempty"` + // Specifies the weekly time range during which maintenance on the cluster is + // performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi + // (24H Clock UTC). The minimum maintenance window is a 60 minute period. + PreferredMaintenanceWindow *string `json:"preferredMaintenanceWindow,omitempty"` + // The outpost ARN in which the cache cluster is created. + PreferredOutpostARN *string `json:"preferredOutpostARN,omitempty"` + // The outpost ARNs in which the cache cluster is created. + PreferredOutpostARNs []*string `json:"preferredOutpostARNs,omitempty"` + // The ID of the replication group to which this cluster should belong. If this + // parameter is specified, the cluster is added to the specified replication + // group as a read replica; otherwise, the cluster is a standalone primary that + // is not part of any replication group. + // + // If the specified replication group is Multi-AZ enabled and the Availability + // Zone is not specified, the cluster is created in Availability Zones that + // provide the best spread of read replicas across Availability Zones. + // + // This parameter is only valid if the Engine parameter is redis. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable once set" + ReplicationGroupID *string `json:"replicationGroupID,omitempty"` + ReplicationGroupRef *ackv1alpha1.AWSResourceReferenceWrapper `json:"replicationGroupRef,omitempty"` + // One or more VPC security groups associated with the cluster. + // + // Use this parameter only when you are creating a cluster in an Amazon Virtual + // Private Cloud (Amazon VPC). + SecurityGroupIDs []*string `json:"securityGroupIDs,omitempty"` + SecurityGroupRefs []*ackv1alpha1.AWSResourceReferenceWrapper `json:"securityGroupRefs,omitempty"` + // A single-element string list containing an Amazon Resource Name (ARN) that + // uniquely identifies a Valkey or Redis OSS RDB snapshot file stored in Amazon + // S3. The snapshot file is used to populate the node group (shard). The Amazon + // S3 object name in the ARN cannot contain any commas. + // + // This parameter is only valid if the Engine parameter is redis. + // + // Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb + SnapshotARNs []*string `json:"snapshotARNs,omitempty"` + // The name of a Valkey or Redis OSS snapshot from which to restore data into + // the new node group (shard). The snapshot status changes to restoring while + // the new node group (shard) is being created. + // + // This parameter is only valid if the Engine parameter is redis. 
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable once set" + SnapshotName *string `json:"snapshotName,omitempty"` + SnapshotRef *ackv1alpha1.AWSResourceReferenceWrapper `json:"snapshotRef,omitempty"` + // The number of days for which ElastiCache retains automatic snapshots before + // deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot + // taken today is retained for 5 days before being deleted. + // + // This parameter is only valid if the Engine parameter is redis. + // + // Default: 0 (i.e., automatic backups are disabled for this cache cluster). + SnapshotRetentionLimit *int64 `json:"snapshotRetentionLimit,omitempty"` + // The daily time range (in UTC) during which ElastiCache begins taking a daily + // snapshot of your node group (shard). + // + // Example: 05:00-09:00 + // + // If you do not specify this parameter, ElastiCache automatically chooses an + // appropriate time range. + // + // This parameter is only valid if the Engine parameter is redis. + SnapshotWindow *string `json:"snapshotWindow,omitempty"` + // A list of tags to be added to this resource. + Tags []*Tag `json:"tags,omitempty"` + // A flag that enables in-transit encryption when set to true. + TransitEncryptionEnabled *bool `json:"transitEncryptionEnabled,omitempty"` +} + +// CacheClusterStatus defines the observed state of CacheCluster +type CacheClusterStatus struct { + // All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + // that is used to contain resource sync state, account ownership, + // constructed ARN for the resource + // +kubebuilder:validation:Optional + ACKResourceMetadata *ackv1alpha1.ResourceMetadata `json:"ackResourceMetadata"` + // All CRs managed by ACK have a common `Status.Conditions` member that + // contains a collection of `ackv1alpha1.Condition` objects that describe + // the various terminal states of the CR and its backend AWS service API + // resource + // +kubebuilder:validation:Optional + Conditions []*ackv1alpha1.Condition `json:"conditions"` + // A flag that enables encryption at-rest when set to true. + // + // You cannot modify the value of AtRestEncryptionEnabled after the cluster + // is created. To enable at-rest encryption on a cluster you must set AtRestEncryptionEnabled + // to true when you create a cluster. + // + // Required: Only available when creating a replication group in an Amazon VPC + // using Redis OSS version 3.2.6, 4.x or later. + // + // Default: false + // +kubebuilder:validation:Optional + AtRestEncryptionEnabled *bool `json:"atRestEncryptionEnabled,omitempty"` + // A flag that enables using an AuthToken (password) when issuing Valkey or + // Redis OSS commands. + // + // Default: false + // +kubebuilder:validation:Optional + AuthTokenEnabled *bool `json:"authTokenEnabled,omitempty"` + // The date the auth token was last modified + // +kubebuilder:validation:Optional + AuthTokenLastModifiedDate *metav1.Time `json:"authTokenLastModifiedDate,omitempty"` + // The date and time when the cluster was created. + // +kubebuilder:validation:Optional + CacheClusterCreateTime *metav1.Time `json:"cacheClusterCreateTime,omitempty"` + // The current state of this cluster, one of the following values: available, + // creating, deleted, deleting, incompatible-network, modifying, rebooting cluster + // nodes, restore-failed, or snapshotting. 
+ // +kubebuilder:validation:Optional + CacheClusterStatus *string `json:"cacheClusterStatus,omitempty"` + // A list of cache nodes that are members of the cluster. + // +kubebuilder:validation:Optional + CacheNodes []*CacheNode `json:"cacheNodes,omitempty"` + // Status of the cache parameter group. + // +kubebuilder:validation:Optional + CacheParameterGroup *CacheParameterGroupStatus_SDK `json:"cacheParameterGroup,omitempty"` + // A list of cache security group elements, composed of name and status sub-elements. + // +kubebuilder:validation:Optional + CacheSecurityGroups []*CacheSecurityGroupMembership `json:"cacheSecurityGroups,omitempty"` + // The URL of the web page where you can download the latest ElastiCache client + // library. + // +kubebuilder:validation:Optional + ClientDownloadLandingPage *string `json:"clientDownloadLandingPage,omitempty"` + // Represents a Memcached cluster endpoint which can be used by an application + // to connect to any node in the cluster. The configuration endpoint will always + // have .cfg in it. + // + // Example: mem-3.9dvc4r.cfg.usw2.cache.amazonaws.com:11211 + // +kubebuilder:validation:Optional + ConfigurationEndpoint *Endpoint `json:"configurationEndpoint,omitempty"` + // Describes a notification topic and its status. Notification topics are used + // for publishing ElastiCache events to subscribers using Amazon Simple Notification + // Service (SNS). + // +kubebuilder:validation:Optional + NotificationConfiguration *NotificationConfiguration `json:"notificationConfiguration,omitempty"` + // +kubebuilder:validation:Optional + PendingModifiedValues *PendingModifiedValues `json:"pendingModifiedValues,omitempty"` + // A boolean value indicating whether log delivery is enabled for the replication + // group. + // +kubebuilder:validation:Optional + ReplicationGroupLogDeliveryEnabled *bool `json:"replicationGroupLogDeliveryEnabled,omitempty"` + // A list of VPC Security Groups associated with the cluster. + // +kubebuilder:validation:Optional + SecurityGroups []*SecurityGroupMembership `json:"securityGroups,omitempty"` + // A setting that allows you to migrate your clients to use in-transit encryption, + // with no downtime. 
+ // +kubebuilder:validation:Optional + TransitEncryptionMode *string `json:"transitEncryptionMode,omitempty"` +} + +// CacheCluster is the Schema for the CacheClusters API +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="VERSION",type=string,priority=0,JSONPath=`.spec.engineVersion` +// +kubebuilder:printcolumn:name="STATUS",type=string,priority=0,JSONPath=`.status.cacheClusterStatus` +// +kubebuilder:printcolumn:name="ENDPOINT",type=string,priority=1,JSONPath=`.status.configurationEndpoint.address` +// +kubebuilder:printcolumn:name="Synced",type="string",priority=0,JSONPath=".status.conditions[?(@.type==\"ACK.ResourceSynced\")].status" +// +kubebuilder:printcolumn:name="Age",type="date",priority=0,JSONPath=".metadata.creationTimestamp" +type CacheCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec CacheClusterSpec `json:"spec,omitempty"` + Status CacheClusterStatus `json:"status,omitempty"` +} + +// CacheClusterList contains a list of CacheCluster +// +kubebuilder:object:root=true +type CacheClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CacheCluster `json:"items"` +} + +func init() { + SchemeBuilder.Register(&CacheCluster{}, &CacheClusterList{}) +} diff --git a/apis/v1alpha1/cache_parameter_group.go b/apis/v1alpha1/cache_parameter_group.go index a74dd72c..2f6fb5fe 100644 --- a/apis/v1alpha1/cache_parameter_group.go +++ b/apis/v1alpha1/cache_parameter_group.go @@ -29,7 +29,7 @@ type CacheParameterGroupSpec struct { // can be used with. // // Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | - // redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x + // redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x | redis7 // +kubebuilder:validation:Required CacheParameterGroupFamily *string `json:"cacheParameterGroupFamily"` // A user-specified name for the cache parameter group. @@ -54,7 +54,7 @@ type CacheParameterGroupStatus struct { // constructed ARN for the resource // +kubebuilder:validation:Optional ACKResourceMetadata *ackv1alpha1.ResourceMetadata `json:"ackResourceMetadata"` - // All CRS managed by ACK have a common `Status.Conditions` member that + // All CRs managed by ACK have a common `Status.Conditions` member that // contains a collection of `ackv1alpha1.Condition` objects that describe // the various terminal states of the CR and its backend AWS service API // resource diff --git a/apis/v1alpha1/cache_subnet_group.go b/apis/v1alpha1/cache_subnet_group.go index 813498e8..5f69bd7b 100644 --- a/apis/v1alpha1/cache_subnet_group.go +++ b/apis/v1alpha1/cache_subnet_group.go @@ -40,8 +40,8 @@ type CacheSubnetGroupSpec struct { // +kubebuilder:validation:Required CacheSubnetGroupName *string `json:"cacheSubnetGroupName"` // A list of VPC subnet IDs for the cache subnet group. - // +kubebuilder:validation:Required - SubnetIDs []*string `json:"subnetIDs"` + SubnetIDs []*string `json:"subnetIDs,omitempty"` + SubnetRefs []*ackv1alpha1.AWSResourceReferenceWrapper `json:"subnetRefs,omitempty"` // A list of tags to be added to this resource. A tag is a key-value pair. A // tag key must be accompanied by a tag value, although null is accepted. 
Tags []*Tag `json:"tags,omitempty"` @@ -54,7 +54,7 @@ type CacheSubnetGroupStatus struct { // constructed ARN for the resource // +kubebuilder:validation:Optional ACKResourceMetadata *ackv1alpha1.ResourceMetadata `json:"ackResourceMetadata"` - // All CRS managed by ACK have a common `Status.Conditions` member that + // All CRs managed by ACK have a common `Status.Conditions` member that // contains a collection of `ackv1alpha1.Condition` objects that describe // the various terminal states of the CR and its backend AWS service API // resource diff --git a/apis/v1alpha1/enums.go b/apis/v1alpha1/enums.go index d2b6b994..39ec4363 100644 --- a/apis/v1alpha1/enums.go +++ b/apis/v1alpha1/enums.go @@ -18,39 +18,40 @@ package v1alpha1 type AZMode string const ( - AZMode_single_az AZMode = "single-az" AZMode_cross_az AZMode = "cross-az" + AZMode_single_az AZMode = "single-az" ) type AuthTokenUpdateStatus string const ( - AuthTokenUpdateStatus_SETTING AuthTokenUpdateStatus = "SETTING" AuthTokenUpdateStatus_ROTATING AuthTokenUpdateStatus = "ROTATING" + AuthTokenUpdateStatus_SETTING AuthTokenUpdateStatus = "SETTING" ) type AuthTokenUpdateStrategyType string const ( - AuthTokenUpdateStrategyType_SET AuthTokenUpdateStrategyType = "SET" - AuthTokenUpdateStrategyType_ROTATE AuthTokenUpdateStrategyType = "ROTATE" AuthTokenUpdateStrategyType_DELETE AuthTokenUpdateStrategyType = "DELETE" + AuthTokenUpdateStrategyType_ROTATE AuthTokenUpdateStrategyType = "ROTATE" + AuthTokenUpdateStrategyType_SET AuthTokenUpdateStrategyType = "SET" ) type AuthenticationType string const ( - AuthenticationType_password AuthenticationType = "password" + AuthenticationType_iam AuthenticationType = "iam" AuthenticationType_no_password AuthenticationType = "no-password" + AuthenticationType_password AuthenticationType = "password" ) type AutomaticFailoverStatus string const ( - AutomaticFailoverStatus_enabled AutomaticFailoverStatus = "enabled" AutomaticFailoverStatus_disabled AutomaticFailoverStatus = "disabled" - AutomaticFailoverStatus_enabling AutomaticFailoverStatus = "enabling" AutomaticFailoverStatus_disabling AutomaticFailoverStatus = "disabling" + AutomaticFailoverStatus_enabled AutomaticFailoverStatus = "enabled" + AutomaticFailoverStatus_enabling AutomaticFailoverStatus = "enabling" ) type ChangeType string @@ -60,11 +61,25 @@ const ( ChangeType_requires_reboot ChangeType = "requires-reboot" ) +type ClusterMode string + +const ( + ClusterMode_compatible ClusterMode = "compatible" + ClusterMode_disabled ClusterMode = "disabled" + ClusterMode_enabled ClusterMode = "enabled" +) + +type DataStorageUnit string + +const ( + DataStorageUnit_GB DataStorageUnit = "GB" +) + type DataTieringStatus string const ( - DataTieringStatus_enabled DataTieringStatus = "enabled" DataTieringStatus_disabled DataTieringStatus = "disabled" + DataTieringStatus_enabled DataTieringStatus = "enabled" ) type DestinationType string @@ -74,67 +89,90 @@ const ( DestinationType_kinesis_firehose DestinationType = "kinesis-firehose" ) +type IPDiscovery string + +const ( + IPDiscovery_ipv4 IPDiscovery = "ipv4" + IPDiscovery_ipv6 IPDiscovery = "ipv6" +) + +type InputAuthenticationType string + +const ( + InputAuthenticationType_iam InputAuthenticationType = "iam" + InputAuthenticationType_no_password_required InputAuthenticationType = "no-password-required" + InputAuthenticationType_password InputAuthenticationType = "password" +) + type LogDeliveryConfigurationStatus string const ( LogDeliveryConfigurationStatus_active LogDeliveryConfigurationStatus = 
"active" - LogDeliveryConfigurationStatus_enabling LogDeliveryConfigurationStatus = "enabling" - LogDeliveryConfigurationStatus_modifying LogDeliveryConfigurationStatus = "modifying" LogDeliveryConfigurationStatus_disabling LogDeliveryConfigurationStatus = "disabling" + LogDeliveryConfigurationStatus_enabling LogDeliveryConfigurationStatus = "enabling" LogDeliveryConfigurationStatus_error LogDeliveryConfigurationStatus = "error" + LogDeliveryConfigurationStatus_modifying LogDeliveryConfigurationStatus = "modifying" ) type LogFormat string const ( - LogFormat_text LogFormat = "text" LogFormat_json LogFormat = "json" + LogFormat_text LogFormat = "text" ) type LogType string const ( - LogType_slow_log LogType = "slow-log" LogType_engine_log LogType = "engine-log" + LogType_slow_log LogType = "slow-log" ) type MultiAZStatus string const ( - MultiAZStatus_enabled MultiAZStatus = "enabled" MultiAZStatus_disabled MultiAZStatus = "disabled" + MultiAZStatus_enabled MultiAZStatus = "enabled" +) + +type NetworkType string + +const ( + NetworkType_dual_stack NetworkType = "dual_stack" + NetworkType_ipv4 NetworkType = "ipv4" + NetworkType_ipv6 NetworkType = "ipv6" ) type NodeUpdateInitiatedBy string const ( - NodeUpdateInitiatedBy_system NodeUpdateInitiatedBy = "system" NodeUpdateInitiatedBy_customer NodeUpdateInitiatedBy = "customer" + NodeUpdateInitiatedBy_system NodeUpdateInitiatedBy = "system" ) type NodeUpdateStatus string const ( - NodeUpdateStatus_not_applied NodeUpdateStatus = "not-applied" - NodeUpdateStatus_waiting_to_start NodeUpdateStatus = "waiting-to-start" + NodeUpdateStatus_complete NodeUpdateStatus = "complete" NodeUpdateStatus_in_progress NodeUpdateStatus = "in-progress" - NodeUpdateStatus_stopping NodeUpdateStatus = "stopping" + NodeUpdateStatus_not_applied NodeUpdateStatus = "not-applied" NodeUpdateStatus_stopped NodeUpdateStatus = "stopped" - NodeUpdateStatus_complete NodeUpdateStatus = "complete" + NodeUpdateStatus_stopping NodeUpdateStatus = "stopping" + NodeUpdateStatus_waiting_to_start NodeUpdateStatus = "waiting-to-start" ) type OutpostMode string const ( - OutpostMode_single_outpost OutpostMode = "single-outpost" OutpostMode_cross_outpost OutpostMode = "cross-outpost" + OutpostMode_single_outpost OutpostMode = "single-outpost" ) type PendingAutomaticFailoverStatus string const ( - PendingAutomaticFailoverStatus_enabled PendingAutomaticFailoverStatus = "enabled" PendingAutomaticFailoverStatus_disabled PendingAutomaticFailoverStatus = "disabled" + PendingAutomaticFailoverStatus_enabled PendingAutomaticFailoverStatus = "enabled" ) type ServiceUpdateSeverity string @@ -142,8 +180,8 @@ type ServiceUpdateSeverity string const ( ServiceUpdateSeverity_critical ServiceUpdateSeverity = "critical" ServiceUpdateSeverity_important ServiceUpdateSeverity = "important" - ServiceUpdateSeverity_medium ServiceUpdateSeverity = "medium" ServiceUpdateSeverity_low ServiceUpdateSeverity = "low" + ServiceUpdateSeverity_medium ServiceUpdateSeverity = "medium" ) type ServiceUpdateStatus string @@ -163,33 +201,42 @@ const ( type SlaMet string const ( - SlaMet_yes SlaMet = "yes" - SlaMet_no SlaMet = "no" SlaMet_n_a SlaMet = "n/a" + SlaMet_no SlaMet = "no" + SlaMet_yes SlaMet = "yes" ) type SourceType string const ( - SourceType_cache_cluster SourceType = "cache-cluster" - SourceType_cache_parameter_group SourceType = "cache-parameter-group" - SourceType_cache_security_group SourceType = "cache-security-group" - SourceType_cache_subnet_group SourceType = "cache-subnet-group" - SourceType_replication_group 
SourceType = "replication-group" - SourceType_user SourceType = "user" - SourceType_user_group SourceType = "user-group" + SourceType_cache_cluster SourceType = "cache-cluster" + SourceType_cache_parameter_group SourceType = "cache-parameter-group" + SourceType_cache_security_group SourceType = "cache-security-group" + SourceType_cache_subnet_group SourceType = "cache-subnet-group" + SourceType_replication_group SourceType = "replication-group" + SourceType_serverless_cache SourceType = "serverless-cache" + SourceType_serverless_cache_snapshot SourceType = "serverless-cache-snapshot" + SourceType_user SourceType = "user" + SourceType_user_group SourceType = "user-group" +) + +type TransitEncryptionMode string + +const ( + TransitEncryptionMode_preferred TransitEncryptionMode = "preferred" + TransitEncryptionMode_required TransitEncryptionMode = "required" ) type UpdateActionStatus string const ( - UpdateActionStatus_not_applied UpdateActionStatus = "not-applied" - UpdateActionStatus_waiting_to_start UpdateActionStatus = "waiting-to-start" - UpdateActionStatus_in_progress UpdateActionStatus = "in-progress" - UpdateActionStatus_stopping UpdateActionStatus = "stopping" - UpdateActionStatus_stopped UpdateActionStatus = "stopped" UpdateActionStatus_complete UpdateActionStatus = "complete" - UpdateActionStatus_scheduling UpdateActionStatus = "scheduling" - UpdateActionStatus_scheduled UpdateActionStatus = "scheduled" + UpdateActionStatus_in_progress UpdateActionStatus = "in-progress" UpdateActionStatus_not_applicable UpdateActionStatus = "not-applicable" + UpdateActionStatus_not_applied UpdateActionStatus = "not-applied" + UpdateActionStatus_scheduled UpdateActionStatus = "scheduled" + UpdateActionStatus_scheduling UpdateActionStatus = "scheduling" + UpdateActionStatus_stopped UpdateActionStatus = "stopped" + UpdateActionStatus_stopping UpdateActionStatus = "stopping" + UpdateActionStatus_waiting_to_start UpdateActionStatus = "waiting-to-start" ) diff --git a/apis/v1alpha1/generator.yaml b/apis/v1alpha1/generator.yaml index 98f9babf..d8f2d78e 100644 --- a/apis/v1alpha1/generator.yaml +++ b/apis/v1alpha1/generator.yaml @@ -1,4 +1,96 @@ resources: + CacheCluster: + fields: + CacheSubnetGroupName: + references: + resource: CacheSubnetGroup + path: Spec.CacheSubnetGroupName + CacheParameterGroupName: + references: + resource: CacheParameterGroup + path: Spec.CacheParameterGroupName + is_immutable: true + ReplicationGroupID: + references: + resource: ReplicationGroup + path: Spec.ReplicationGroupID + is_immutable: true + SnapshotName: + references: + resource: Snapshot + path: Spec.SnapshotName + is_immutable: true + NotificationTopicARN: + references: + service_name: sns + resource: Topic + path: Status.ACKResourceMetadata.ARN + SecurityGroupIDs: + references: + resource: SecurityGroup + service_name: ec2 + path: Status.ID + AuthToken: + is_secret: true + PreferredAvailabilityZone: + late_initialize: {} + PreferredAvailabilityZones: + compare: + is_ignored: true + print: + add_age_column: true + add_synced_column: true + order_by: index + additional_columns: + - name: VERSION + json_path: .spec.engineVersion + type: string + index: 10 + - name: STATUS + json_path: .status.cacheClusterStatus + type: string + index: 20 + - name: ENDPOINT + json_path: .status.configurationEndpoint.address + type: string + index: 30 + priority: 1 + exceptions: + errors: + 404: + code: CacheClusterNotFound + terminal_codes: + - ReplicationGroupNotFoundFault + - InvalidReplicationGroupStateFault + - 
CacheClusterAlreadyExistsFault + - InsufficientCacheClusterCapacityFault + - CacheSecurityGroupNotFoundFault + - CacheSubnetGroupNotFoundFault + - ClusterQuotaForCustomerExceededFault + - NodeQuotaForClusterExceededFault + - NodeQuotaForCustomerExceededFault + - CacheParameterGroupNotFoundFault + - InvalidVPCNetworkStateFault + - TagQuotaPerResource + - InvalidParameterValue + - InvalidParameterCombination + hooks: + sdk_create_post_set_output: + template_path: hooks/cache_cluster/sdk_create_post_set_output.go.tpl + sdk_delete_pre_build_request: + template_path: hooks/cache_cluster/sdk_delete_pre_build_request.go.tpl + sdk_read_many_post_build_request: + template_path: hooks/cache_cluster/sdk_read_many_post_build_request.go.tpl + sdk_read_many_post_set_output: + template_path: hooks/cache_cluster/sdk_read_many_post_set_output.go.tpl + sdk_update_pre_build_request: + template_path: hooks/cache_cluster/sdk_update_pre_build_request.go.tpl + sdk_update_post_build_request: + template_path: hooks/cache_cluster/sdk_update_post_build_request.go.tpl + sdk_update_post_set_output: + template_path: hooks/cache_cluster/sdk_update_post_set_output.go.tpl + delta_post_compare: + code: "modifyDelta(delta, a, b)" CacheSubnetGroup: exceptions: errors: @@ -13,11 +105,19 @@ resources: - InvalidParameterValue - InvalidParameterCombination fields: + SubnetIDs: + references: + service_name: ec2 + resource: Subnet + path: Status.SubnetID Events: is_read_only: true from: operation: DescribeEvents path: Events + hooks: + sdk_read_many_post_set_output: + template_path: hooks/cache_subnet_group/sdk_read_many_post_set_output.go.tpl ReplicationGroup: exceptions: terminal_codes: @@ -47,6 +147,19 @@ resources: AutomaticFailoverEnabled: compare: is_ignored: true + CacheParameterGroupName: + references: + resource: CacheParameterGroup + path: Spec.CacheParameterGroupName + CacheSubnetGroupName: + references: + resource: CacheSubnetGroup + path: Spec.CacheSubnetGroupName + SecurityGroupIDs: + references: + resource: SecurityGroup + service_name: ec2 + path: Status.ID Events: is_read_only: true from: @@ -67,6 +180,8 @@ resources: PrimaryClusterId: # note: "PrimaryClusterID" will not function properly compare: is_ignored: true + NetworkType: + is_immutable: true hooks: sdk_read_many_post_set_output: template_path: hooks/replication_group/sdk_read_many_post_set_output.go.tpl @@ -74,6 +189,8 @@ resources: template_path: hooks/replication_group/sdk_delete_pre_build_request.go.tpl sdk_delete_post_request: template_path: hooks/replication_group/sdk_delete_post_request.go.tpl + sdk_update_pre_build_request: + template_path: hooks/replication_group/sdk_update_pre_build_request.go.tpl sdk_update_post_build_request: template_path: hooks/replication_group/sdk_update_post_build_request.go.tpl delta_post_compare: @@ -81,7 +198,7 @@ resources: sdk_file_end: template_path: hooks/replication_group/sdk_file_end.go.tpl sdk_file_end_set_output_post_populate: - code: "rm.customSetOutput(obj, ko) // custom set output from obj" + code: "rm.customSetOutput(ctx, *obj, ko) // custom set output from obj" renames: operations: CreateReplicationGroup: @@ -112,7 +229,6 @@ resources: terminal_codes: - CacheParameterGroupAlreadyExists - CacheParameterGroupQuotaExceeded - - InvalidCacheParameterGroupState - InvalidGlobalReplicationGroupState - InvalidParameterCombination - InvalidParameterValue @@ -145,7 +261,6 @@ resources: - InvalidParameterValue - InvalidParameterCombination - InvalidUserState - - DefaultUserAssociatedToUserGroup fields: 
LastRequestedAccessString: is_read_only: true @@ -184,7 +299,105 @@ resources: - TagQuotaPerResourceExceeded update_operation: custom_method_name: customUpdateUserGroup + ServerlessCache: + update_operation: + custom_method_name: customUpdateServerlessCache + fields: + SecurityGroupIDs: + references: + resource: SecurityGroup + service_name: ec2 + path: Status.ID + SubnetIDs: + references: + service_name: ec2 + resource: Subnet + path: Status.SubnetID + UserGroupIDs: + references: + resource: UserGroup + path: Spec.UserGroupID + synced: + when: + - path: Status.Status + in: + - available + - create_failed + exceptions: + terminal_codes: + - ServerlessCacheAlreadyExistsFault + - ServerlessCacheQuotaForCustomerExceededFault + - InvalidParameterValue + - InvalidParameterCombination + - InvalidVPCNetworkStateFault + - TagQuotaPerResourceExceeded + - InvalidKMSKeyFault + hooks: + sdk_read_many_post_set_output: + template_path: hooks/serverless_cache/sdk_read_many_post_set_output.go.tpl + print: + add_age_column: true + add_synced_column: true + order_by: index + additional_columns: + - name: STATUS + json_path: .status.status + type: string + index: 10 + - name: ENDPOINT + json_path: .status.endpoint.address + type: string + index: 20 + ServerlessCacheSnapshot: + update_operation: + custom_method_name: customUpdateServerlessCacheSnapshot + fields: + ServerlessCacheName: + is_immutable: true + references: + resource: ServerlessCache + path: Spec.ServerlessCacheName + ServerlessCacheSnapshotName: + is_immutable: true + kmsKeyId: + is_immutable: true + references: + service_name: kms + resource: Key + path: Status.ACKResourceMetadata.ARN + exceptions: + errors: + 404: + code: ServerlessCacheSnapshotNotFoundFault + terminal_codes: + - ServerlessCacheSnapshotAlreadyExistsFault + - InvalidParameterValueException + hooks: + sdk_create_post_set_output: + template_path: hooks/serverless_cache_snapshot/sdk_create_post_set_output.go.tpl + sdk_read_many_post_set_output: + template_path: hooks/serverless_cache_snapshot/sdk_read_many_post_set_output.go.tpl + print: + add_age_column: true + add_synced_column: true + order_by: index + additional_columns: + - name: STATUS + json_path: .status.status + type: string + index: 10 + synced: + when: + - path: Status.Status + in: + - available + - create_failed operations: + DescribeServerlessCaches: + operation_type: + - List + resource_name: + ServerlessCache DescribeCacheSubnetGroups: set_output_custom_method_name: CustomDescribeCacheSubnetGroupsSetOutput DescribeReplicationGroups: @@ -195,7 +408,7 @@ operations: custom_implementation: CustomModifyReplicationGroup set_output_custom_method_name: CustomModifyReplicationGroupSetOutput override_values: - ApplyImmediately: true + ApplyImmediately: aws.Bool(true) CreateSnapshot: custom_implementation: CustomCreateSnapshot set_output_custom_method_name: CustomCreateSnapshotSetOutput @@ -214,10 +427,15 @@ operations: set_output_custom_method_name: CustomCreateUserGroupSetOutput DescribeUserGroups: set_output_custom_method_name: CustomDescribeUserGroupsSetOutput + CreateCacheCluster: + set_output_custom_method_name: customCreateCacheClusterSetOutput + ModifyCacheCluster: + set_output_custom_method_name: customModifyCacheClusterSetOutput + override_values: + ApplyImmediately: aws.Bool(true) ignore: resource_names: - GlobalReplicationGroup - - CacheCluster - CacheSecurityGroup field_paths: - DescribeSnapshotsInput.CacheClusterId @@ -232,3 +450,27 @@ ignore: - CreateReplicationGroupInput.GlobalReplicationGroupId - 
CreateReplicationGroupInput.AutoMinorVersionUpgrade - CreateReplicationGroupInput.NumCacheClusters + - CacheCluster.LogDeliveryConfigurations + - PendingModifiedValues.LogDeliveryConfigurations + - CreateUserInput.AuthenticationMode + - ModifyUserInput.AuthenticationMode + - CreateCacheSubnetGroupOutput.CacheSubnetGroup.SupportedNetworkTypes + - CreateCacheSubnetGroupOutput.CacheSubnetGroup.Subnets.SupportedNetworkTypes + - ModifyCacheSubnetGroupOutput.CacheSubnetGroup.Subnets.SupportedNetworkTypes + - CreateUserGroupOutput.ServerlessCaches + - CreateReplicationGroupOutput.ReplicationGroup.PendingModifiedValues.TransitEncryptionEnabled + - ModifyReplicationGroupOutput.ReplicationGroup.PendingModifiedValues.TransitEncryptionEnabled + # - CreateReplicationGroupOutput.ReplicationGroup.TransitEncryptionEnabled + # - ModifyReplicationGroupInput.TransitEncryptionEnabled + # - CreateReplicationGroupInput.TransitEncryptionEnabled + - CreateReplicationGroupOutput.ReplicationGroup.PendingModifiedValues.TransitEncryptionMode + - CreateReplicationGroupOutput.ReplicationGroup.TransitEncryptionMode + - CreateReplicationGroupInput.TransitEncryptionMode + - ModifyReplicationGroupOutput.ReplicationGroup.PendingModifiedValues.TransitEncryptionMode + - CreateReplicationGroupOutput.ReplicationGroup.ClusterMode + - CreateReplicationGroupOutput.ReplicationGroup.PendingModifiedValues.ClusterMode + - CreateReplicationGroupInput.ClusterMode + - ModifyReplicationGroupOutput.ReplicationGroup.ClusterMode + - ModifyReplicationGroupOutput.ReplicationGroup.PendingModifiedValues.ClusterMode + - Subnet.SupportedNetworkTypes + - CreateReplicationGroupInput.ServerlessCacheSnapshotName \ No newline at end of file diff --git a/apis/v1alpha1/replication_group.go b/apis/v1alpha1/replication_group.go index c0cb2757..7e12b451 100644 --- a/apis/v1alpha1/replication_group.go +++ b/apis/v1alpha1/replication_group.go @@ -22,7 +22,8 @@ import ( // ReplicationGroupSpec defines the desired state of ReplicationGroup. // -// Contains all of the attributes of a specific Redis replication group. +// Contains all of the attributes of a specific Valkey or Redis OSS replication +// group. type ReplicationGroupSpec struct { // A flag that enables encryption at rest when set to true. @@ -33,7 +34,7 @@ type ReplicationGroupSpec struct { // group. // // Required: Only available when creating a replication group in an Amazon VPC - // using redis version 3.2.6, 4.x or later. + // using Redis OSS version 3.2.6, 4.x or later. // // Default: false AtRestEncryptionEnabled *bool `json:"atRestEncryptionEnabled,omitempty"` @@ -50,19 +51,12 @@ type ReplicationGroupSpec struct { // - Must be only printable ASCII characters. // // - Must be at least 16 characters and no more than 128 characters in length. - // - // - The only permitted printable special characters are !, &, #, $, ^, <, - // >, and -. Other printable special characters cannot be used in the AUTH - // token. - // - // For more information, see AUTH password (http://redis.io/commands/AUTH) at - // http://redis.io/commands/AUTH. AuthToken *ackv1alpha1.SecretKeyReference `json:"authToken,omitempty"` // Specifies whether a read-only replica is automatically promoted to read/write // primary if the existing primary fails. // - // AutomaticFailoverEnabled must be enabled for Redis (cluster mode enabled) - // replication groups. + // AutomaticFailoverEnabled must be enabled for Valkey or Redis OSS (cluster + // mode enabled) replication groups. 
// // Default: false AutomaticFailoverEnabled *bool `json:"automaticFailoverEnabled,omitempty"` @@ -72,114 +66,132 @@ type ReplicationGroupSpec struct { // the current generation types provide more memory and computational power // at lower cost when compared to their equivalent previous generation counterparts. // - // - General purpose: Current generation: M6g node types (available only - // for Redis engine version 5.0.6 onward and for Memcached engine version - // 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, - // cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge - // For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - // M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, - // cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, - // cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available - // only for Redis engine version 5.0.6 onward and Memcached engine version - // 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 - // node types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: - // cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not - // recommended. Existing clusters are still supported but creation of new - // clusters is not supported for these types.) T1 node types: cache.t1.micro - // M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge - // M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // - General purpose: Current generation: M7g node types: cache.m7g.large, + // cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, + // cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported + // Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + // M6g node types (available only for Redis OSS engine version 5.0.6 onward + // and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, + // cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, + // cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, + // cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: + // cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge + // T4g node types (available only for Redis OSS engine version 5.0.6 onward + // and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, + // cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium + // T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous + // generation: (not recommended. Existing clusters are still supported but + // creation of new clusters is not supported for these types.) T1 node types: + // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + // cache.m3.2xlarge // // - Compute optimized: Previous generation: (not recommended. Existing clusters // are still supported but creation of new clusters is not supported for // these types.) 
C1 node types: cache.c1.xlarge // - // - Memory optimized with data tiering: Current generation: R6gd node types - // (available only for Redis engine version 6.2 onward). cache.r6gd.xlarge, - // cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, - // cache.r6gd.16xlarge - // - // - Memory optimized: Current generation: R6g node types (available only - // for Redis engine version 5.0.6 onward and for Memcached engine version - // 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, - // cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge - // For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - // R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, - // cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, - // cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge - // Previous generation: (not recommended. Existing clusters are still supported - // but creation of new clusters is not supported for these types.) M2 node - // types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: - // cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge + // - Memory optimized: Current generation: R7g node types: cache.r7g.large, + // cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, + // cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported + // Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + // R6g node types (available only for Redis OSS engine version 5.0.6 onward + // and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, + // cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, + // cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, + // cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: + // cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, + // cache.r4.16xlarge Previous generation: (not recommended. Existing clusters + // are still supported but creation of new clusters is not supported for + // these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // cache.r3.8xlarge // // Additional node type info // // - All current generation instance types are created in Amazon VPC by default. // - // - Redis append-only files (AOF) are not supported for T1 or T2 instances. + // - Valkey or Redis OSS append-only files (AOF) are not supported for T1 + // or T2 instances. // - // - Redis Multi-AZ with automatic failover is not supported on T1 instances. + // - Valkey or Redis OSS Multi-AZ with automatic failover is not supported + // on T1 instances. // - // - Redis configuration variables appendonly and appendfsync are not supported - // on Redis version 2.8.22 and later. + // - The configuration variables appendonly and appendfsync are not supported + // on Valkey, or on Redis OSS version 2.8.22 and later. CacheNodeType *string `json:"cacheNodeType,omitempty"` // The name of the parameter group to associate with this replication group. 
// If this argument is omitted, the default cache parameter group for the specified // engine is used. // - // If you are running Redis version 3.2.4 or later, only one node group (shard), - // and want to use a default parameter group, we recommend that you specify - // the parameter group by name. + // If you are running Valkey or Redis OSS version 3.2.4 or later, only one node + // group (shard), and want to use a default parameter group, we recommend that + // you specify the parameter group by name. // - // - To create a Redis (cluster mode disabled) replication group, use CacheParameterGroupName=default.redis3.2. + // - To create a Valkey or Redis OSS (cluster mode disabled) replication + // group, use CacheParameterGroupName=default.redis3.2. // - // - To create a Redis (cluster mode enabled) replication group, use CacheParameterGroupName=default.redis3.2.cluster.on. - CacheParameterGroupName *string `json:"cacheParameterGroupName,omitempty"` + // - To create a Valkey or Redis OSS (cluster mode enabled) replication group, + // use CacheParameterGroupName=default.redis3.2.cluster.on. + CacheParameterGroupName *string `json:"cacheParameterGroupName,omitempty"` + CacheParameterGroupRef *ackv1alpha1.AWSResourceReferenceWrapper `json:"cacheParameterGroupRef,omitempty"` // A list of cache security group names to associate with this replication group. CacheSecurityGroupNames []*string `json:"cacheSecurityGroupNames,omitempty"` // The name of the cache subnet group to be used for the replication group. // // If you're going to launch your cluster in an Amazon VPC, you need to create // a subnet group before you start creating a cluster. For more information, - // see Subnets and Subnet Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SubnetGroups.html). - CacheSubnetGroupName *string `json:"cacheSubnetGroupName,omitempty"` + // see Subnets and Subnet Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/SubnetGroups.html). + CacheSubnetGroupName *string `json:"cacheSubnetGroupName,omitempty"` + CacheSubnetGroupRef *ackv1alpha1.AWSResourceReferenceWrapper `json:"cacheSubnetGroupRef,omitempty"` // Enables data tiering. Data tiering is only supported for replication groups // using the r6gd node type. This parameter must be set to true when using r6gd - // nodes. For more information, see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/data-tiering.html). + // nodes. For more information, see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/data-tiering.html). DataTieringEnabled *bool `json:"dataTieringEnabled,omitempty"` // A user-created description for the replication group. // +kubebuilder:validation:Required Description *string `json:"description"` // The name of the cache engine to be used for the clusters in this replication - // group. Must be Redis. + // group. The value must be set to Redis. Engine *string `json:"engine,omitempty"` // The version number of the cache engine to be used for the clusters in this // replication group. To view the supported cache engine versions, use the DescribeCacheEngineVersions // operation. 
// // Important: You can upgrade to a newer engine version (see Selecting a Cache - // Engine and Version (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement)) + // Engine and Version (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/SelectEngine.html#VersionManagement)) // in the ElastiCache User Guide, but you cannot downgrade to an earlier engine // version. If you want to use an earlier engine version, you must delete the // existing cluster or replication group and create it anew with the earlier // engine version. EngineVersion *string `json:"engineVersion,omitempty"` + // The network type you choose when creating a replication group, either ipv4 + // | ipv6. IPv6 is supported for workloads using Valkey 7.2 and above, Redis + // OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above + // on all instances built on the Nitro system (http://aws.amazon.com/ec2/nitro/). + IPDiscovery *string `json:"ipDiscovery,omitempty"` // The ID of the KMS key used to encrypt the disk in the cluster. KMSKeyID *string `json:"kmsKeyID,omitempty"` // Specifies the destination, format and type of the logs. LogDeliveryConfigurations []*LogDeliveryConfigurationRequest `json:"logDeliveryConfigurations,omitempty"` // A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. - // For more information, see Minimizing Downtime: Multi-AZ (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/AutoFailover.html). + // For more information, see Minimizing Downtime: Multi-AZ (http://docs.aws.amazon.com/AmazonElastiCache/latest/dg/AutoFailover.html). MultiAZEnabled *bool `json:"multiAZEnabled,omitempty"` + // Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads + // using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached + // engine version 1.6.6 and above on all instances built on the Nitro system + // (http://aws.amazon.com/ec2/nitro/). + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable once set" + NetworkType *string `json:"networkType,omitempty"` // A list of node group (shard) configuration options. Each node group (shard) // configuration has the following members: PrimaryAvailabilityZone, ReplicaAvailabilityZones, // ReplicaCount, and Slots. // - // If you're creating a Redis (cluster mode disabled) or a Redis (cluster mode - // enabled) replication group, you can use this parameter to individually configure - // each node group (shard), or you can omit this parameter. However, it is required - // when seeding a Redis (cluster mode enabled) cluster from a S3 rdb file. You - // must configure each node group (shard) using this parameter because you must - // specify the slots for each node group. + // If you're creating a Valkey or Redis OSS (cluster mode disabled) or a Valkey + // or Redis OSS (cluster mode enabled) replication group, you can use this parameter + // to individually configure each node group (shard), or you can omit this parameter. + // However, it is required when seeding a Valkey or Redis OSS (cluster mode + // enabled) cluster from a S3 rdb file. You must configure each node group (shard) + // using this parameter because you must specify the slots for each node group. NodeGroupConfiguration []*NodeGroupConfiguration `json:"nodeGroupConfiguration,omitempty"` // The Amazon Resource Name (ARN) of the Amazon Simple Notification Service // (SNS) topic to which notifications are sent. 
@@ -187,8 +199,9 @@ type ReplicationGroupSpec struct { // The Amazon SNS topic owner must be the same as the cluster owner. NotificationTopicARN *string `json:"notificationTopicARN,omitempty"` // An optional parameter that specifies the number of node groups (shards) for - // this Redis (cluster mode enabled) replication group. For Redis (cluster mode - // disabled) either omit this parameter or set it to 1. + // this Valkey or Redis OSS (cluster mode enabled) replication group. For Valkey + // or Redis OSS (cluster mode disabled) either omit this parameter or set it + // to 1. // // Default: 1 NumNodeGroups *int64 `json:"numNodeGroups,omitempty"` @@ -212,11 +225,6 @@ type ReplicationGroupSpec struct { PreferredCacheClusterAZs []*string `json:"preferredCacheClusterAZs,omitempty"` // Specifies the weekly time range during which maintenance on the cluster is // performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi - // (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid - // values for ddd are: - // - // Specifies the weekly time range during which maintenance on the cluster is - // performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi // (24H Clock UTC). The minimum maintenance window is a 60 minute period. // // Valid values for ddd are: @@ -263,14 +271,15 @@ type ReplicationGroupSpec struct { // // Use this parameter only when you are creating a replication group in an Amazon // Virtual Private Cloud (Amazon VPC). - SecurityGroupIDs []*string `json:"securityGroupIDs,omitempty"` - // A list of Amazon Resource Names (ARN) that uniquely identify the Redis RDB - // snapshot files stored in Amazon S3. The snapshot files are used to populate - // the new replication group. The Amazon S3 object name in the ARN cannot contain - // any commas. The new replication group will have the number of node groups - // (console: shards) specified by the parameter NumNodeGroups or the number - // of node groups configured by NodeGroupConfiguration regardless of the number - // of ARNs specified here. + SecurityGroupIDs []*string `json:"securityGroupIDs,omitempty"` + SecurityGroupRefs []*ackv1alpha1.AWSResourceReferenceWrapper `json:"securityGroupRefs,omitempty"` + // A list of Amazon Resource Names (ARN) that uniquely identify the Valkey or + // Redis OSS RDB snapshot files stored in Amazon S3. The snapshot files are + // used to populate the new replication group. The Amazon S3 object name in + // the ARN cannot contain any commas. The new replication group will have the + // number of node groups (console: shards) specified by the parameter NumNodeGroups + // or the number of node groups configured by NodeGroupConfiguration regardless + // of the number of ARNs specified here. // // Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb SnapshotARNs []*string `json:"snapshotARNs,omitempty"` @@ -299,10 +308,6 @@ type ReplicationGroupSpec struct { Tags []*Tag `json:"tags,omitempty"` // A flag that enables in-transit encryption when set to true. // - // You cannot modify the value of TransitEncryptionEnabled after the cluster - // is created. To enable in-transit encryption on a cluster you must set TransitEncryptionEnabled - // to true when you create a cluster. - // // This parameter is valid only if the Engine parameter is redis, the EngineVersion // parameter is 3.2.6, 4.x or later, and the cluster is being created in an // Amazon VPC. 
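For orientation, here is a minimal Go sketch of a ReplicationGroupSpec that exercises the fields touched by this change: the new NetworkType and IPDiscovery fields and a CacheSubnetGroupRef resource reference used in place of a plain CacheSubnetGroupName. This is illustrative only; the controller module path, the pointer helpers, and all concrete values are assumptions, and the from/name shape of AWSResourceReferenceWrapper follows the ACK runtime package already imported by these generated files. Other spec fields (including identifiers not shown in this diff) are omitted.

package main

import (
	"fmt"

	ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1"

	// Assumed module path for the generated types in this repository.
	"github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1"
)

func strPtr(s string) *string { return &s }
func int64Ptr(i int64) *int64 { return &i }

// exampleReplicationGroupSpec uses only fields that appear in this diff.
func exampleReplicationGroupSpec() v1alpha1.ReplicationGroupSpec {
	return v1alpha1.ReplicationGroupSpec{
		Description:   strPtr("illustrative replication group"),
		Engine:        strPtr("redis"),
		CacheNodeType: strPtr("cache.r7g.large"),
		NumNodeGroups: int64Ptr(1),
		// networkType is validated as immutable once set (self == oldSelf).
		NetworkType: strPtr("ipv4"),
		IPDiscovery: strPtr("ipv4"),
		// Reference an ACK-managed CacheSubnetGroup by name instead of
		// setting cacheSubnetGroupName directly.
		CacheSubnetGroupRef: &ackv1alpha1.AWSResourceReferenceWrapper{
			From: &ackv1alpha1.AWSResourceReference{
				Name: strPtr("example-subnet-group"),
			},
		},
	}
}

func main() {
	spec := exampleReplicationGroupSpec()
	fmt.Println(*spec.Engine, *spec.NetworkType)
}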
@@ -310,7 +315,7 @@ type ReplicationGroupSpec struct { // If you enable in-transit encryption, you must also specify a value for CacheSubnetGroup. // // Required: Only available when creating a replication group in an Amazon VPC - // using redis version 3.2.6, 4.x or later. + // using Redis OSS version 3.2.6, 4.x or later. // // Default: false // @@ -328,27 +333,28 @@ type ReplicationGroupStatus struct { // constructed ARN for the resource // +kubebuilder:validation:Optional ACKResourceMetadata *ackv1alpha1.ResourceMetadata `json:"ackResourceMetadata"` - // All CRS managed by ACK have a common `Status.Conditions` member that + // All CRs managed by ACK have a common `Status.Conditions` member that // contains a collection of `ackv1alpha1.Condition` objects that describe // the various terminal states of the CR and its backend AWS service API // resource // +kubebuilder:validation:Optional Conditions []*ackv1alpha1.Condition `json:"conditions"` // A string list, each element of which specifies a cache node type which you - // can use to scale your cluster or replication group. When scaling down a Redis - // cluster or replication group using ModifyCacheCluster or ModifyReplicationGroup, + // can use to scale your cluster or replication group. When scaling down a Valkey + // or Redis OSS cluster or replication group using ModifyCacheCluster or ModifyReplicationGroup, // use a value from this list for the CacheNodeType parameter. // +kubebuilder:validation:Optional AllowedScaleDownModifications []*string `json:"allowedScaleDownModifications,omitempty"` // A string list, each element of which specifies a cache node type which you // can use to scale your cluster or replication group. // - // When scaling up a Redis cluster or replication group using ModifyCacheCluster - // or ModifyReplicationGroup, use a value from this list for the CacheNodeType - // parameter. + // When scaling up a Valkey or Redis OSS cluster or replication group using + // ModifyCacheCluster or ModifyReplicationGroup, use a value from this list + // for the CacheNodeType parameter. // +kubebuilder:validation:Optional AllowedScaleUpModifications []*string `json:"allowedScaleUpModifications,omitempty"` - // A flag that enables using an AuthToken (password) when issuing Redis commands. + // A flag that enables using an AuthToken (password) when issuing Valkey or + // Redis OSS commands. // // Default: false // +kubebuilder:validation:Optional @@ -356,12 +362,13 @@ type ReplicationGroupStatus struct { // The date the auth token was last modified // +kubebuilder:validation:Optional AuthTokenLastModifiedDate *metav1.Time `json:"authTokenLastModifiedDate,omitempty"` - // If you are running Redis engine version 6.0 or later, set this parameter - // to yes if you want to opt-in to the next auto minor version upgrade campaign. - // This parameter is disabled for previous versions. + // If you are running Valkey 7.2 and above, or Redis OSS engine version 6.0 + // and above, set this parameter to yes if you want to opt-in to the next auto + // minor version upgrade campaign. This parameter is disabled for previous versions. // +kubebuilder:validation:Optional AutoMinorVersionUpgrade *bool `json:"autoMinorVersionUpgrade,omitempty"` - // Indicates the status of automatic failover for this Redis replication group. + // Indicates the status of automatic failover for this Valkey or Redis OSS replication + // group. 
// +kubebuilder:validation:Optional AutomaticFailover *string `json:"automaticFailover,omitempty"` // A flag indicating whether or not this replication group is cluster enabled; @@ -377,7 +384,7 @@ type ReplicationGroupStatus struct { ConfigurationEndpoint *Endpoint `json:"configurationEndpoint,omitempty"` // Enables data tiering. Data tiering is only supported for replication groups // using the r6gd node type. This parameter must be set to true when using r6gd - // nodes. For more information, see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/data-tiering.html). + // nodes. For more information, see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/data-tiering.html). // +kubebuilder:validation:Optional DataTiering *string `json:"dataTiering,omitempty"` // A list of events. Each element in the list contains detailed information @@ -398,13 +405,13 @@ type ReplicationGroupStatus struct { // +kubebuilder:validation:Optional MemberClustersOutpostARNs []*string `json:"memberClustersOutpostARNs,omitempty"` // A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. - // For more information, see Minimizing Downtime: Multi-AZ (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/AutoFailover.html) + // For more information, see Minimizing Downtime: Multi-AZ (http://docs.aws.amazon.com/AmazonElastiCache/latest/dg/AutoFailover.html) // +kubebuilder:validation:Optional MultiAZ *string `json:"multiAZ,omitempty"` - // A list of node groups in this replication group. For Redis (cluster mode - // disabled) replication groups, this is a single-element list. For Redis (cluster - // mode enabled) replication groups, the list contains an entry for each node - // group (shard). + // A list of node groups in this replication group. For Valkey or Redis OSS + // (cluster mode disabled) replication groups, this is a single-element list. + // For Valkey or Redis OSS (cluster mode enabled) replication groups, the list + // contains an entry for each node group (shard). // +kubebuilder:validation:Optional NodeGroups []*NodeGroup `json:"nodeGroups,omitempty"` // A group of settings to be applied to the replication group, either immediately diff --git a/apis/v1alpha1/serverless_cache.go b/apis/v1alpha1/serverless_cache.go new file mode 100644 index 00000000..f994cbed --- /dev/null +++ b/apis/v1alpha1/serverless_cache.go @@ -0,0 +1,133 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package v1alpha1 + +import ( + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ServerlessCacheSpec defines the desired state of ServerlessCache. +// +// The resource representing a serverless cache. +type ServerlessCacheSpec struct { + + // Sets the cache usage limits for storage and ElastiCache Processing Units + // for the cache. 
+ CacheUsageLimits *CacheUsageLimits `json:"cacheUsageLimits,omitempty"` + // The daily time that snapshots will be created from the new serverless cache. + // By default this number is populated with 0, i.e. no snapshots will be created + // on an automatic daily basis. Available for Valkey, Redis OSS and Serverless + // Memcached only. + DailySnapshotTime *string `json:"dailySnapshotTime,omitempty"` + // User-provided description for the serverless cache. The default is NULL, + // i.e. if no description is provided then an empty string will be returned. + // The maximum length is 255 characters. + Description *string `json:"description,omitempty"` + // The name of the cache engine to be used for creating the serverless cache. + // +kubebuilder:validation:Required + Engine *string `json:"engine"` + // ARN of the customer managed key for encrypting the data at rest. If no KMS + // key is provided, a default service key is used. + KMSKeyID *string `json:"kmsKeyID,omitempty"` + // The version of the cache engine that will be used to create the serverless + // cache. + MajorEngineVersion *string `json:"majorEngineVersion,omitempty"` + // A list of the one or more VPC security groups to be associated with the serverless + // cache. The security group will authorize traffic access for the VPC end-point + // (private-link). If no other information is given this will be the VPC’s + // Default Security Group that is associated with the cluster VPC end-point. + SecurityGroupIDs []*string `json:"securityGroupIDs,omitempty"` + SecurityGroupRefs []*ackv1alpha1.AWSResourceReferenceWrapper `json:"securityGroupRefs,omitempty"` + // User-provided identifier for the serverless cache. This parameter is stored + // as a lowercase string. + // +kubebuilder:validation:Required + ServerlessCacheName *string `json:"serverlessCacheName"` + // The ARN(s) of the snapshot that the new serverless cache will be created + // from. Available for Valkey, Redis OSS and Serverless Memcached only. + SnapshotARNsToRestore []*string `json:"snapshotARNsToRestore,omitempty"` + // The number of snapshots that will be retained for the serverless cache that + // is being created. As new snapshots beyond this limit are added, the oldest + // snapshots will be deleted on a rolling basis. Available for Valkey, Redis + // OSS and Serverless Memcached only. + SnapshotRetentionLimit *int64 `json:"snapshotRetentionLimit,omitempty"` + // A list of the identifiers of the subnets where the VPC endpoint for the serverless + // cache will be deployed. All the subnetIds must belong to the same VPC. + SubnetIDs []*string `json:"subnetIDs,omitempty"` + SubnetRefs []*ackv1alpha1.AWSResourceReferenceWrapper `json:"subnetRefs,omitempty"` + // The list of tags (key, value) pairs to be added to the serverless cache resource. + // Default is NULL. + Tags []*Tag `json:"tags,omitempty"` + // The identifier of the UserGroup to be associated with the serverless cache. + // Available for Valkey and Redis OSS only. Default is NULL. 
+ UserGroupID *string `json:"userGroupID,omitempty"` +} + +// ServerlessCacheStatus defines the observed state of ServerlessCache +type ServerlessCacheStatus struct { + // All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + // that is used to contain resource sync state, account ownership, + // constructed ARN for the resource + // +kubebuilder:validation:Optional + ACKResourceMetadata *ackv1alpha1.ResourceMetadata `json:"ackResourceMetadata"` + // All CRs managed by ACK have a common `Status.Conditions` member that + // contains a collection of `ackv1alpha1.Condition` objects that describe + // the various terminal states of the CR and its backend AWS service API + // resource + // +kubebuilder:validation:Optional + Conditions []*ackv1alpha1.Condition `json:"conditions"` + // When the serverless cache was created. + // +kubebuilder:validation:Optional + CreateTime *metav1.Time `json:"createTime,omitempty"` + // +kubebuilder:validation:Optional + Endpoint *Endpoint `json:"endpoint,omitempty"` + // The name and version number of the engine the serverless cache is compatible + // with. + // +kubebuilder:validation:Optional + FullEngineVersion *string `json:"fullEngineVersion,omitempty"` + // +kubebuilder:validation:Optional + ReaderEndpoint *Endpoint `json:"readerEndpoint,omitempty"` + // The current status of the serverless cache. The allowed values are CREATING, + // AVAILABLE, DELETING, CREATE-FAILED and MODIFYING. + // +kubebuilder:validation:Optional + Status *string `json:"status,omitempty"` +} + +// ServerlessCache is the Schema for the ServerlessCaches API +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="STATUS",type=string,priority=0,JSONPath=`.status.status` +// +kubebuilder:printcolumn:name="ENDPOINT",type=string,priority=0,JSONPath=`.status.endpoint.address` +// +kubebuilder:printcolumn:name="Synced",type="string",priority=0,JSONPath=".status.conditions[?(@.type==\"ACK.ResourceSynced\")].status" +// +kubebuilder:printcolumn:name="Age",type="date",priority=0,JSONPath=".metadata.creationTimestamp" +type ServerlessCache struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ServerlessCacheSpec `json:"spec,omitempty"` + Status ServerlessCacheStatus `json:"status,omitempty"` +} + +// ServerlessCacheList contains a list of ServerlessCache +// +kubebuilder:object:root=true +type ServerlessCacheList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ServerlessCache `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ServerlessCache{}, &ServerlessCacheList{}) +} diff --git a/apis/v1alpha1/serverless_cache_snapshot.go b/apis/v1alpha1/serverless_cache_snapshot.go new file mode 100644 index 00000000..14e8908c --- /dev/null +++ b/apis/v1alpha1/serverless_cache_snapshot.go @@ -0,0 +1,113 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. 
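A brief illustrative sketch of how the new ServerlessCacheSpec defined above might be filled in from Go. Engine and ServerlessCacheName are the two required fields; every concrete value, the controller module path, and the "GB" storage unit are assumptions, and the usage-limit shapes come from the CacheUsageLimits, DataStorage, and ECPUPerSecond types added to types.go later in this change.

package main

import (
	"fmt"

	// Assumed module path for the generated types in this repository.
	"github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1"
)

func strPtr(s string) *string { return &s }
func int64Ptr(i int64) *int64 { return &i }

func main() {
	spec := v1alpha1.ServerlessCacheSpec{
		// The two required fields.
		Engine:              strPtr("valkey"),
		ServerlessCacheName: strPtr("example-serverless-cache"),
		MajorEngineVersion:  strPtr("8"),
		Description:         strPtr("illustrative serverless cache"),
		// Optional usage limits; values here are placeholders.
		CacheUsageLimits: &v1alpha1.CacheUsageLimits{
			DataStorage:   &v1alpha1.DataStorage{Maximum: int64Ptr(10), Unit: strPtr("GB")},
			ECPUPerSecond: &v1alpha1.ECPUPerSecond{Maximum: int64Ptr(5000)},
		},
		SnapshotRetentionLimit: int64Ptr(7),
		DailySnapshotTime:      strPtr("04:00"),
	}
	fmt.Println(*spec.ServerlessCacheName, *spec.Engine)
}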
+ +package v1alpha1 + +import ( + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ServerlessCacheSnapshotSpec defines the desired state of ServerlessCacheSnapshot. +// +// The resource representing a serverless cache snapshot. Available for Valkey, +// Redis OSS and Serverless Memcached only. +type ServerlessCacheSnapshotSpec struct { + + // The ID of the KMS key used to encrypt the snapshot. Available for Valkey, + // Redis OSS and Serverless Memcached only. Default: NULL + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable once set" + KMSKeyID *string `json:"kmsKeyID,omitempty"` + KMSKeyRef *ackv1alpha1.AWSResourceReferenceWrapper `json:"kmsKeyRef,omitempty"` + // The name of an existing serverless cache. The snapshot is created from this + // cache. Available for Valkey, Redis OSS and Serverless Memcached only. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable once set" + ServerlessCacheName *string `json:"serverlessCacheName,omitempty"` + ServerlessCacheRef *ackv1alpha1.AWSResourceReferenceWrapper `json:"serverlessCacheRef,omitempty"` + // The name for the snapshot being created. Must be unique for the customer + // account. Available for Valkey, Redis OSS and Serverless Memcached only. Must + // be between 1 and 255 characters. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable once set" + // +kubebuilder:validation:Required + ServerlessCacheSnapshotName *string `json:"serverlessCacheSnapshotName"` + // A list of tags to be added to the snapshot resource. A tag is a key-value + // pair. Available for Valkey, Redis OSS and Serverless Memcached only. + Tags []*Tag `json:"tags,omitempty"` +} + +// ServerlessCacheSnapshotStatus defines the observed state of ServerlessCacheSnapshot +type ServerlessCacheSnapshotStatus struct { + // All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + // that is used to contain resource sync state, account ownership, + // constructed ARN for the resource + // +kubebuilder:validation:Optional + ACKResourceMetadata *ackv1alpha1.ResourceMetadata `json:"ackResourceMetadata"` + // All CRs managed by ACK have a common `Status.Conditions` member that + // contains a collection of `ackv1alpha1.Condition` objects that describe + // the various terminal states of the CR and its backend AWS service API + // resource + // +kubebuilder:validation:Optional + Conditions []*ackv1alpha1.Condition `json:"conditions"` + // The total size of a serverless cache snapshot, in bytes. Available for Valkey, + // Redis OSS and Serverless Memcached only. + // +kubebuilder:validation:Optional + BytesUsedForCache *string `json:"bytesUsedForCache,omitempty"` + // The date and time that the source serverless cache's metadata and cache data + // set was obtained for the snapshot. Available for Valkey, Redis OSS and Serverless + // Memcached only. + // +kubebuilder:validation:Optional + CreateTime *metav1.Time `json:"createTime,omitempty"` + // The time that the serverless cache snapshot will expire. Available for Valkey, + // Redis OSS and Serverless Memcached only. + // +kubebuilder:validation:Optional + ExpiryTime *metav1.Time `json:"expiryTime,omitempty"` + // The configuration of the serverless cache, at the time the snapshot was taken. + // Available for Valkey, Redis OSS and Serverless Memcached only. 
+ // +kubebuilder:validation:Optional + ServerlessCacheConfiguration *ServerlessCacheConfiguration `json:"serverlessCacheConfiguration,omitempty"` + // The type of snapshot of serverless cache. Available for Valkey, Redis OSS + // and Serverless Memcached only. + // +kubebuilder:validation:Optional + SnapshotType *string `json:"snapshotType,omitempty"` + // The current status of the serverless cache. Available for Valkey, Redis OSS + // and Serverless Memcached only. + // +kubebuilder:validation:Optional + Status *string `json:"status,omitempty"` +} + +// ServerlessCacheSnapshot is the Schema for the ServerlessCacheSnapshots API +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="STATUS",type=string,priority=0,JSONPath=`.status.status` +// +kubebuilder:printcolumn:name="Synced",type="string",priority=0,JSONPath=".status.conditions[?(@.type==\"ACK.ResourceSynced\")].status" +// +kubebuilder:printcolumn:name="Age",type="date",priority=0,JSONPath=".metadata.creationTimestamp" +type ServerlessCacheSnapshot struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ServerlessCacheSnapshotSpec `json:"spec,omitempty"` + Status ServerlessCacheSnapshotStatus `json:"status,omitempty"` +} + +// ServerlessCacheSnapshotList contains a list of ServerlessCacheSnapshot +// +kubebuilder:object:root=true +type ServerlessCacheSnapshotList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ServerlessCacheSnapshot `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ServerlessCacheSnapshot{}, &ServerlessCacheSnapshotList{}) +} diff --git a/apis/v1alpha1/snapshot.go b/apis/v1alpha1/snapshot.go index 8b29231e..d45b4708 100644 --- a/apis/v1alpha1/snapshot.go +++ b/apis/v1alpha1/snapshot.go @@ -22,8 +22,8 @@ import ( // SnapshotSpec defines the desired state of Snapshot. // -// Represents a copy of an entire Redis cluster as of the time when the snapshot -// was taken. +// Represents a copy of an entire Valkey or Redis OSS cluster as of the time +// when the snapshot was taken. type SnapshotSpec struct { // The identifier of an existing cluster. The snapshot is created from this @@ -51,19 +51,19 @@ type SnapshotStatus struct { // constructed ARN for the resource // +kubebuilder:validation:Optional ACKResourceMetadata *ackv1alpha1.ResourceMetadata `json:"ackResourceMetadata"` - // All CRS managed by ACK have a common `Status.Conditions` member that + // All CRs managed by ACK have a common `Status.Conditions` member that // contains a collection of `ackv1alpha1.Condition` objects that describe // the various terminal states of the CR and its backend AWS service API // resource // +kubebuilder:validation:Optional Conditions []*ackv1alpha1.Condition `json:"conditions"` - // If you are running Redis engine version 6.0 or later, set this parameter - // to yes if you want to opt-in to the next auto minor version upgrade campaign. - // This parameter is disabled for previous versions. + // If you are running Valkey 7.2 and above or Redis OSS engine version 6.0 and + // above, set this parameter to yes if you want to opt-in to the next auto minor + // version upgrade campaign. This parameter is disabled for previous versions. // +kubebuilder:validation:Optional AutoMinorVersionUpgrade *bool `json:"autoMinorVersionUpgrade,omitempty"` - // Indicates the status of automatic failover for the source Redis replication - // group. 
+ // Indicates the status of automatic failover for the source Valkey or Redis + // OSS replication group. // +kubebuilder:validation:Optional AutomaticFailover *string `json:"automaticFailover,omitempty"` // The date and time when the source cluster was created. @@ -75,56 +75,58 @@ type SnapshotStatus struct { // the current generation types provide more memory and computational power // at lower cost when compared to their equivalent previous generation counterparts. // - // * General purpose: Current generation: M6g node types (available only - // for Redis engine version 5.0.6 onward and for Memcached engine version - // 1.5.16 onward). cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, - // cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge - // For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - // M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, - // cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, - // cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available - // only for Redis engine version 5.0.6 onward and Memcached engine version - // 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 - // node types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: - // cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not - // recommended. Existing clusters are still supported but creation of new - // clusters is not supported for these types.) T1 node types: cache.t1.micro - // M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge - // M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // * General purpose: Current generation: M7g node types: cache.m7g.large, + // cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, + // cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported + // Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + // M6g node types (available only for Redis OSS engine version 5.0.6 onward + // and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, + // cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, + // cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, + // cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: + // cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge + // T4g node types (available only for Redis OSS engine version 5.0.6 onward + // and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, + // cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium + // T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous + // generation: (not recommended. Existing clusters are still supported but + // creation of new clusters is not supported for these types.) T1 node types: + // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + // cache.m3.2xlarge // // * Compute optimized: Previous generation: (not recommended. 
Existing clusters // are still supported but creation of new clusters is not supported for // these types.) C1 node types: cache.c1.xlarge // - // * Memory optimized with data tiering: Current generation: R6gd node types - // (available only for Redis engine version 6.2 onward). cache.r6gd.xlarge, - // cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, - // cache.r6gd.16xlarge - // - // * Memory optimized: Current generation: R6g node types (available only - // for Redis engine version 5.0.6 onward and for Memcached engine version - // 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, - // cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge - // For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - // For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - // R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, - // cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, - // cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge - // Previous generation: (not recommended. Existing clusters are still supported - // but creation of new clusters is not supported for these types.) M2 node - // types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: - // cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge + // * Memory optimized: Current generation: R7g node types: cache.r7g.large, + // cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, + // cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported + // Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + // R6g node types (available only for Redis OSS engine version 5.0.6 onward + // and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, + // cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, + // cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, + // cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: + // cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, + // cache.r4.16xlarge Previous generation: (not recommended. Existing clusters + // are still supported but creation of new clusters is not supported for + // these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // cache.r3.8xlarge // // Additional node type info // // * All current generation instance types are created in Amazon VPC by default. // - // * Redis append-only files (AOF) are not supported for T1 or T2 instances. + // * Valkey or Redis OSS append-only files (AOF) are not supported for T1 + // or T2 instances. // - // * Redis Multi-AZ with automatic failover is not supported on T1 instances. + // * Valkey or Redis OSS Multi-AZ with automatic failover is not supported + // on T1 instances. // - // * Redis configuration variables appendonly and appendfsync are not supported - // on Redis version 2.8.22 and later. 
+ // * The configuration variables appendonly and appendfsync are not supported + // on Valkey, or on Redis OSS version 2.8.22 and later. // +kubebuilder:validation:Optional CacheNodeType *string `json:"cacheNodeType,omitempty"` // The cache parameter group that is associated with the source cluster. @@ -135,7 +137,7 @@ type SnapshotStatus struct { CacheSubnetGroupName *string `json:"cacheSubnetGroupName,omitempty"` // Enables data tiering. Data tiering is only supported for replication groups // using the r6gd node type. This parameter must be set to true when using r6gd - // nodes. For more information, see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/data-tiering.html). + // nodes. For more information, see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/data-tiering.html). // +kubebuilder:validation:Optional DataTiering *string `json:"dataTiering,omitempty"` // The name of the cache engine (memcached or redis) used by the source cluster. @@ -149,8 +151,8 @@ type SnapshotStatus struct { NodeSnapshots []*NodeSnapshot `json:"nodeSnapshots,omitempty"` // The number of cache nodes in the source cluster. // - // For clusters running Redis, this value must be 1. For clusters running Memcached, - // this value must be between 1 and 40. + // For clusters running Valkey or Redis OSS, this value must be 1. For clusters + // running Memcached, this value must be between 1 and 40. // +kubebuilder:validation:Optional NumCacheNodes *int64 `json:"numCacheNodes,omitempty"` // The number of node groups (shards) in this snapshot. When restoring from diff --git a/apis/v1alpha1/types.go b/apis/v1alpha1/types.go index ea28aaa0..fd121ec4 100644 --- a/apis/v1alpha1/types.go +++ b/apis/v1alpha1/types.go @@ -34,13 +34,18 @@ type Authentication struct { Type *string `json:"type_,omitempty"` } +// Specifies the authentication mode to use. +type AuthenticationMode struct { + Passwords []*string `json:"passwords,omitempty"` +} + // Describes an Availability Zone in which the cluster is launched. type AvailabilityZone struct { Name *string `json:"name,omitempty"` } // Contains all of the attributes of a specific cluster. -type CacheCluster struct { +type CacheCluster_SDK struct { ARN *string `json:"arn,omitempty"` AtRestEncryptionEnabled *bool `json:"atRestEncryptionEnabled,omitempty"` AuthTokenEnabled *bool `json:"authTokenEnabled,omitempty"` @@ -50,23 +55,37 @@ type CacheCluster struct { CacheClusterID *string `json:"cacheClusterID,omitempty"` CacheClusterStatus *string `json:"cacheClusterStatus,omitempty"` CacheNodeType *string `json:"cacheNodeType,omitempty"` - CacheSubnetGroupName *string `json:"cacheSubnetGroupName,omitempty"` - ClientDownloadLandingPage *string `json:"clientDownloadLandingPage,omitempty"` + CacheNodes []*CacheNode `json:"cacheNodes,omitempty"` + // Status of the cache parameter group. + CacheParameterGroup *CacheParameterGroupStatus_SDK `json:"cacheParameterGroup,omitempty"` + CacheSecurityGroups []*CacheSecurityGroupMembership `json:"cacheSecurityGroups,omitempty"` + CacheSubnetGroupName *string `json:"cacheSubnetGroupName,omitempty"` + ClientDownloadLandingPage *string `json:"clientDownloadLandingPage,omitempty"` // Represents the information required for client programs to connect to a cache - // node. 
- ConfigurationEndpoint *Endpoint `json:"configurationEndpoint,omitempty"` - Engine *string `json:"engine,omitempty"` - EngineVersion *string `json:"engineVersion,omitempty"` - LogDeliveryConfigurations []*LogDeliveryConfiguration `json:"logDeliveryConfigurations,omitempty"` - NumCacheNodes *int64 `json:"numCacheNodes,omitempty"` - PreferredAvailabilityZone *string `json:"preferredAvailabilityZone,omitempty"` - PreferredMaintenanceWindow *string `json:"preferredMaintenanceWindow,omitempty"` - PreferredOutpostARN *string `json:"preferredOutpostARN,omitempty"` - ReplicationGroupID *string `json:"replicationGroupID,omitempty"` - ReplicationGroupLogDeliveryEnabled *bool `json:"replicationGroupLogDeliveryEnabled,omitempty"` - SnapshotRetentionLimit *int64 `json:"snapshotRetentionLimit,omitempty"` - SnapshotWindow *string `json:"snapshotWindow,omitempty"` - TransitEncryptionEnabled *bool `json:"transitEncryptionEnabled,omitempty"` + // node. This value is read-only. + ConfigurationEndpoint *Endpoint `json:"configurationEndpoint,omitempty"` + Engine *string `json:"engine,omitempty"` + EngineVersion *string `json:"engineVersion,omitempty"` + IPDiscovery *string `json:"ipDiscovery,omitempty"` + NetworkType *string `json:"networkType,omitempty"` + // Describes a notification topic and its status. Notification topics are used + // for publishing ElastiCache events to subscribers using Amazon Simple Notification + // Service (SNS). + NotificationConfiguration *NotificationConfiguration `json:"notificationConfiguration,omitempty"` + NumCacheNodes *int64 `json:"numCacheNodes,omitempty"` + // A group of settings that are applied to the cluster in the future, or that + // are currently being applied. + PendingModifiedValues *PendingModifiedValues `json:"pendingModifiedValues,omitempty"` + PreferredAvailabilityZone *string `json:"preferredAvailabilityZone,omitempty"` + PreferredMaintenanceWindow *string `json:"preferredMaintenanceWindow,omitempty"` + PreferredOutpostARN *string `json:"preferredOutpostARN,omitempty"` + ReplicationGroupID *string `json:"replicationGroupID,omitempty"` + ReplicationGroupLogDeliveryEnabled *bool `json:"replicationGroupLogDeliveryEnabled,omitempty"` + SecurityGroups []*SecurityGroupMembership `json:"securityGroups,omitempty"` + SnapshotRetentionLimit *int64 `json:"snapshotRetentionLimit,omitempty"` + SnapshotWindow *string `json:"snapshotWindow,omitempty"` + TransitEncryptionEnabled *bool `json:"transitEncryptionEnabled,omitempty"` + TransitEncryptionMode *string `json:"transitEncryptionMode,omitempty"` } // Provides all of the details about a particular cache engine version. @@ -80,61 +99,64 @@ type CacheEngineVersion struct { // Represents an individual cache node within a cluster. Each cache node runs // its own instance of the cluster's protocol-compliant caching software - either -// Memcached or Redis. +// Memcached, Valkey or Redis OSS. // // The following node types are supported by ElastiCache. Generally speaking, // the current generation types provide more memory and computational power // at lower cost when compared to their equivalent previous generation counterparts. 
// -// - General purpose: Current generation: M6g node types: (available only -// for Redis engine version 5.0.6 onward and for Memcached engine version -// 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, -// cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge -// For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) -// M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, -// cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, -// cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available -// only for Redis engine version 5.0.6 onward and for Memcached engine version -// 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 -// node types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: -// cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not -// recommended. Existing clusters are still supported but creation of new -// clusters is not supported for these types.) T1 node types: cache.t1.micro -// M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge -// M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge +// - General purpose: Current generation: M7g node types: cache.m7g.large, +// cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, +// cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported +// Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) +// M6g node types (available only for Redis OSS engine version 5.0.6 onward +// and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, +// cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, +// cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, +// cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: +// cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge +// T4g node types (available only for Redis OSS engine version 5.0.6 onward +// and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, +// cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium +// T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous +// generation: (not recommended. Existing clusters are still supported but +// creation of new clusters is not supported for these types.) T1 node types: +// cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, +// cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, +// cache.m3.2xlarge // // - Compute optimized: Previous generation: (not recommended. Existing clusters // are still supported but creation of new clusters is not supported for // these types.) C1 node types: cache.c1.xlarge // -// - Memory optimized with data tiering: Current generation: R6gd node types -// (available only for Redis engine version 6.2 onward). 
cache.r6gd.xlarge, -// cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, -// cache.r6gd.16xlarge -// -// - Memory optimized: Current generation: R6g node types (available only -// for Redis engine version 5.0.6 onward and for Memcached engine version -// 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, -// cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge -// For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) -// R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, -// cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, -// cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge -// Previous generation: (not recommended. Existing clusters are still supported -// but creation of new clusters is not supported for these types.) M2 node -// types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: -// cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge +// - Memory optimized: Current generation: R7g node types: cache.r7g.large, +// cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, +// cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported +// Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) +// R6g node types (available only for Redis OSS engine version 5.0.6 onward +// and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, +// cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, +// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, +// cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: +// cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, +// cache.r4.16xlarge Previous generation: (not recommended. Existing clusters +// are still supported but creation of new clusters is not supported for +// these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge +// R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, +// cache.r3.8xlarge // // Additional node type info // // - All current generation instance types are created in Amazon VPC by default. // -// - Redis append-only files (AOF) are not supported for T1 or T2 instances. +// - Valkey or Redis OSS append-only files (AOF) are not supported for T1 +// or T2 instances. // -// - Redis Multi-AZ with automatic failover is not supported on T1 instances. +// - Valkey or Redis OSS Multi-AZ with automatic failover is not supported +// on T1 instances. // -// - Redis configuration variables appendonly and appendfsync are not supported -// on Redis version 2.8.22 and later. +// - The configuration variables appendonly and appendfsync are not supported +// on Valkey, or on Redis OSS version 2.8.22 and later. 
type CacheNode struct { CacheNodeCreateTime *metav1.Time `json:"cacheNodeCreateTime,omitempty"` CacheNodeID *string `json:"cacheNodeID,omitempty"` @@ -142,15 +164,15 @@ type CacheNode struct { CustomerAvailabilityZone *string `json:"customerAvailabilityZone,omitempty"` CustomerOutpostARN *string `json:"customerOutpostARN,omitempty"` // Represents the information required for client programs to connect to a cache - // node. + // node. This value is read-only. Endpoint *Endpoint `json:"endpoint,omitempty"` ParameterGroupStatus *string `json:"parameterGroupStatus,omitempty"` SourceCacheNodeID *string `json:"sourceCacheNodeID,omitempty"` } // A parameter that has a different value for each cache node type it is applied -// to. For example, in a Redis cluster, a cache.m1.large cache node type would -// have a larger maxmemory value than a cache.m1.small type. +// to. For example, in a Valkey or Redis OSS cluster, a cache.m1.large cache +// node type would have a larger maxmemory value than a cache.m1.small type. type CacheNodeTypeSpecificParameter struct { AllowedValues *string `json:"allowedValues,omitempty"` ChangeType *string `json:"changeType,omitempty"` @@ -180,8 +202,9 @@ type CacheNodeUpdateStatus struct { // Status of the cache parameter group. type CacheParameterGroupStatus_SDK struct { - CacheParameterGroupName *string `json:"cacheParameterGroupName,omitempty"` - ParameterApplyStatus *string `json:"parameterApplyStatus,omitempty"` + CacheNodeIDsToReboot []*string `json:"cacheNodeIDsToReboot,omitempty"` + CacheParameterGroupName *string `json:"cacheParameterGroupName,omitempty"` + ParameterApplyStatus *string `json:"parameterApplyStatus,omitempty"` } // Represents the output of a CreateCacheParameterGroup operation. @@ -226,6 +249,15 @@ type CacheSubnetGroup_SDK struct { VPCID *string `json:"vpcID,omitempty"` } +// The usage limits for storage and ElastiCache Processing Units for the cache. +type CacheUsageLimits struct { + // The data storage limit. + DataStorage *DataStorage `json:"dataStorage,omitempty"` + // The configuration for the number of ElastiCache Processing Units (ECPU) the + // cache can consume per second. + ECPUPerSecond *ECPUPerSecond `json:"eCPUPerSecond,omitempty"` +} + // The configuration details of the CloudWatch Logs destination. type CloudWatchLogsDestinationDetails struct { LogGroup *string `json:"logGroup,omitempty"` @@ -235,8 +267,10 @@ type CloudWatchLogsDestinationDetails struct { // Each node group (shard) configuration has the following members: NodeGroupId, // NewReplicaCount, and PreferredAvailabilityZones. type ConfigureShard struct { - NewReplicaCount *int64 `json:"newReplicaCount,omitempty"` - NodeGroupID *string `json:"nodeGroupID,omitempty"` + NewReplicaCount *int64 `json:"newReplicaCount,omitempty"` + NodeGroupID *string `json:"nodeGroupID,omitempty"` + PreferredAvailabilityZones []*string `json:"preferredAvailabilityZones,omitempty"` + PreferredOutpostARNs []*string `json:"preferredOutpostARNs,omitempty"` } // The endpoint from which data should be migrated. @@ -245,6 +279,13 @@ type CustomerNodeEndpoint struct { Port *int64 `json:"port,omitempty"` } +// The data storage limit. +type DataStorage struct { + Maximum *int64 `json:"maximum,omitempty"` + Minimum *int64 `json:"minimum,omitempty"` + Unit *string `json:"unit,omitempty"` +} + // Configuration details of either a CloudWatch Logs destination or Kinesis // Data Firehose destination. 
type DestinationDetails struct { @@ -261,8 +302,15 @@ type EC2SecurityGroup struct { Status *string `json:"status,omitempty"` } +// The configuration for the number of ElastiCache Processing Units (ECPU) the +// cache can consume per second. +type ECPUPerSecond struct { + Maximum *int64 `json:"maximum,omitempty"` + Minimum *int64 `json:"minimum,omitempty"` +} + // Represents the information required for client programs to connect to a cache -// node. +// node. This value is read-only. type Endpoint struct { Address *string `json:"address,omitempty"` Port *int64 `json:"port,omitempty"` @@ -370,10 +418,10 @@ type NodeGroup struct { NodeGroupID *string `json:"nodeGroupID,omitempty"` NodeGroupMembers []*NodeGroupMember `json:"nodeGroupMembers,omitempty"` // Represents the information required for client programs to connect to a cache - // node. + // node. This value is read-only. PrimaryEndpoint *Endpoint `json:"primaryEndpoint,omitempty"` // Represents the information required for client programs to connect to a cache - // node. + // node. This value is read-only. ReaderEndpoint *Endpoint `json:"readerEndpoint,omitempty"` Slots *string `json:"slots,omitempty"` Status *string `json:"status,omitempty"` @@ -400,7 +448,7 @@ type NodeGroupMember struct { PreferredAvailabilityZone *string `json:"preferredAvailabilityZone,omitempty"` PreferredOutpostARN *string `json:"preferredOutpostARN,omitempty"` // Represents the information required for client programs to connect to a cache - // node. + // node. This value is read-only. ReadEndpoint *Endpoint `json:"readEndpoint,omitempty"` } @@ -475,11 +523,13 @@ type PendingLogDeliveryConfiguration struct { // A group of settings that are applied to the cluster in the future, or that // are currently being applied. type PendingModifiedValues struct { - AuthTokenStatus *string `json:"authTokenStatus,omitempty"` - CacheNodeType *string `json:"cacheNodeType,omitempty"` - EngineVersion *string `json:"engineVersion,omitempty"` - LogDeliveryConfigurations []*PendingLogDeliveryConfiguration `json:"logDeliveryConfigurations,omitempty"` - NumCacheNodes *int64 `json:"numCacheNodes,omitempty"` + AuthTokenStatus *string `json:"authTokenStatus,omitempty"` + CacheNodeIDsToRemove []*string `json:"cacheNodeIDsToRemove,omitempty"` + CacheNodeType *string `json:"cacheNodeType,omitempty"` + EngineVersion *string `json:"engineVersion,omitempty"` + NumCacheNodes *int64 `json:"numCacheNodes,omitempty"` + TransitEncryptionEnabled *bool `json:"transitEncryptionEnabled,omitempty"` + TransitEncryptionMode *string `json:"transitEncryptionMode,omitempty"` } // Update action that has been processed for the corresponding apply/stop request @@ -502,8 +552,8 @@ type RegionalConfiguration struct { ReplicationGroupRegion *string `json:"replicationGroupRegion,omitempty"` } -// The settings to be applied to the Redis replication group, either immediately -// or during the next maintenance window. +// The settings to be applied to the Valkey or Redis OSS replication group, +// either immediately or during the next maintenance window. type ReplicationGroupPendingModifiedValues struct { AuthTokenStatus *string `json:"authTokenStatus,omitempty"` AutomaticFailoverStatus *string `json:"automaticFailoverStatus,omitempty"` @@ -515,7 +565,8 @@ type ReplicationGroupPendingModifiedValues struct { UserGroups *UserGroupsUpdateStatus `json:"userGroups,omitempty"` } -// Contains all of the attributes of a specific Redis replication group. 
+// Contains all of the attributes of a specific Valkey or Redis OSS replication +// group. type ReplicationGroup_SDK struct { ARN *string `json:"arn,omitempty"` AtRestEncryptionEnabled *bool `json:"atRestEncryptionEnabled,omitempty"` @@ -526,21 +577,24 @@ type ReplicationGroup_SDK struct { CacheNodeType *string `json:"cacheNodeType,omitempty"` ClusterEnabled *bool `json:"clusterEnabled,omitempty"` // Represents the information required for client programs to connect to a cache - // node. + // node. This value is read-only. ConfigurationEndpoint *Endpoint `json:"configurationEndpoint,omitempty"` DataTiering *string `json:"dataTiering,omitempty"` Description *string `json:"description,omitempty"` + Engine *string `json:"engine,omitempty"` // The name of the Global datastore and role of this replication group in the // Global datastore. GlobalReplicationGroupInfo *GlobalReplicationGroupInfo `json:"globalReplicationGroupInfo,omitempty"` + IPDiscovery *string `json:"ipDiscovery,omitempty"` KMSKeyID *string `json:"kmsKeyID,omitempty"` LogDeliveryConfigurations []*LogDeliveryConfiguration `json:"logDeliveryConfigurations,omitempty"` MemberClusters []*string `json:"memberClusters,omitempty"` MemberClustersOutpostARNs []*string `json:"memberClustersOutpostARNs,omitempty"` MultiAZ *string `json:"multiAZ,omitempty"` + NetworkType *string `json:"networkType,omitempty"` NodeGroups []*NodeGroup `json:"nodeGroups,omitempty"` - // The settings to be applied to the Redis replication group, either immediately - // or during the next maintenance window. + // The settings to be applied to the Valkey or Redis OSS replication group, + // either immediately or during the next maintenance window. PendingModifiedValues *ReplicationGroupPendingModifiedValues `json:"pendingModifiedValues,omitempty"` ReplicationGroupCreateTime *metav1.Time `json:"replicationGroupCreateTime,omitempty"` ReplicationGroupID *string `json:"replicationGroupID,omitempty"` @@ -598,7 +652,55 @@ type SecurityGroupMembership struct { Status *string `json:"status,omitempty"` } -// An update that you can apply to your Redis clusters. +// The configuration settings for a specific serverless cache. +type ServerlessCacheConfiguration struct { + Engine *string `json:"engine,omitempty"` + MajorEngineVersion *string `json:"majorEngineVersion,omitempty"` + ServerlessCacheName *string `json:"serverlessCacheName,omitempty"` +} + +// The resource representing a serverless cache snapshot. Available for Valkey, +// Redis OSS and Serverless Memcached only. +type ServerlessCacheSnapshot_SDK struct { + ARN *string `json:"arn,omitempty"` + BytesUsedForCache *string `json:"bytesUsedForCache,omitempty"` + CreateTime *metav1.Time `json:"createTime,omitempty"` + ExpiryTime *metav1.Time `json:"expiryTime,omitempty"` + KMSKeyID *string `json:"kmsKeyID,omitempty"` + // The configuration settings for a specific serverless cache. + ServerlessCacheConfiguration *ServerlessCacheConfiguration `json:"serverlessCacheConfiguration,omitempty"` + ServerlessCacheSnapshotName *string `json:"serverlessCacheSnapshotName,omitempty"` + SnapshotType *string `json:"snapshotType,omitempty"` + Status *string `json:"status,omitempty"` +} + +// The resource representing a serverless cache. +type ServerlessCache_SDK struct { + ARN *string `json:"arn,omitempty"` + // The usage limits for storage and ElastiCache Processing Units for the cache. 
+ CacheUsageLimits *CacheUsageLimits `json:"cacheUsageLimits,omitempty"` + CreateTime *metav1.Time `json:"createTime,omitempty"` + DailySnapshotTime *string `json:"dailySnapshotTime,omitempty"` + Description *string `json:"description,omitempty"` + // Represents the information required for client programs to connect to a cache + // node. This value is read-only. + Endpoint *Endpoint `json:"endpoint,omitempty"` + Engine *string `json:"engine,omitempty"` + FullEngineVersion *string `json:"fullEngineVersion,omitempty"` + KMSKeyID *string `json:"kmsKeyID,omitempty"` + MajorEngineVersion *string `json:"majorEngineVersion,omitempty"` + // Represents the information required for client programs to connect to a cache + // node. This value is read-only. + ReaderEndpoint *Endpoint `json:"readerEndpoint,omitempty"` + SecurityGroupIDs []*string `json:"securityGroupIDs,omitempty"` + ServerlessCacheName *string `json:"serverlessCacheName,omitempty"` + SnapshotRetentionLimit *int64 `json:"snapshotRetentionLimit,omitempty"` + Status *string `json:"status,omitempty"` + SubnetIDs []*string `json:"subnetIDs,omitempty"` + UserGroupID *string `json:"userGroupID,omitempty"` +} + +// An update that you can apply to your Valkey or Redis OSS clusters. type ServiceUpdate struct { AutoUpdateAfterRecommendedApplyByDate *bool `json:"autoUpdateAfterRecommendedApplyByDate,omitempty"` Engine *string `json:"engine,omitempty"` @@ -616,8 +718,8 @@ type SlotMigration struct { ProgressPercentage *float64 `json:"progressPercentage,omitempty"` } -// Represents a copy of an entire Redis cluster as of the time when the snapshot -// was taken. +// Represents a copy of an entire Valkey or Redis OSS cluster as of the time +// when the snapshot was taken. type Snapshot_SDK struct { ARN *string `json:"arn,omitempty"` AutoMinorVersionUpgrade *bool `json:"autoMinorVersionUpgrade,omitempty"` @@ -720,6 +822,7 @@ type UserGroup_SDK struct { // Returns the updates being applied to the user group. PendingChanges *UserGroupPendingChanges `json:"pendingChanges,omitempty"` ReplicationGroups []*string `json:"replicationGroups,omitempty"` + ServerlessCaches []*string `json:"serverlessCaches,omitempty"` Status *string `json:"status,omitempty"` UserGroupID *string `json:"userGroupID,omitempty"` UserIDs []*string `json:"userIDs,omitempty"` diff --git a/apis/v1alpha1/user.go b/apis/v1alpha1/user.go index 1be62ed7..62aec14f 100644 --- a/apis/v1alpha1/user.go +++ b/apis/v1alpha1/user.go @@ -26,9 +26,13 @@ import ( type UserSpec struct { // Access permissions string used for this user. + // + // Regex Pattern: `\S` // +kubebuilder:validation:Required AccessString *string `json:"accessString"` // The current supported value is Redis. + // + // Regex Pattern: `^[a-zA-Z]*$` // +kubebuilder:validation:Required Engine *string `json:"engine"` // Indicates a password is not required for this user. @@ -40,6 +44,8 @@ type UserSpec struct { // tag key must be accompanied by a tag value, although null is accepted. Tags []*Tag `json:"tags,omitempty"` // The ID of the user. + // + // Regex Pattern: `^[a-zA-Z][a-zA-Z0-9\-]*$` // +kubebuilder:validation:Required UserID *string `json:"userID"` // The username of the user. 
@@ -54,7 +60,7 @@ type UserStatus struct { // constructed ARN for the resource // +kubebuilder:validation:Optional ACKResourceMetadata *ackv1alpha1.ResourceMetadata `json:"ackResourceMetadata"` - // All CRS managed by ACK have a common `Status.Conditions` member that + // All CRs managed by ACK have a common `Status.Conditions` member that // contains a collection of `ackv1alpha1.Condition` objects that describe // the various terminal states of the CR and its backend AWS service API // resource @@ -69,7 +75,7 @@ type UserStatus struct { // Access permissions string used for this user. // +kubebuilder:validation:Optional LastRequestedAccessString *string `json:"lastRequestedAccessString,omitempty"` - // The minimum engine version required, which is Redis 6.0 + // The minimum engine version required, which is Redis OSS 6.0 // +kubebuilder:validation:Optional MinimumEngineVersion *string `json:"minimumEngineVersion,omitempty"` // Indicates the user status. Can be "active", "modifying" or "deleting". diff --git a/apis/v1alpha1/user_group.go b/apis/v1alpha1/user_group.go index 1cd3b17f..61a9f667 100644 --- a/apis/v1alpha1/user_group.go +++ b/apis/v1alpha1/user_group.go @@ -25,11 +25,14 @@ import ( type UserGroupSpec struct { - // The current supported value is Redis. + // The current supported value is Redis user. + // + // Regex Pattern: `^[a-zA-Z]*$` // +kubebuilder:validation:Required Engine *string `json:"engine"` // A list of tags to be added to this resource. A tag is a key-value pair. A - // tag key must be accompanied by a tag value, although null is accepted. + // tag key must be accompanied by a tag value, although null is accepted. Available + // for Valkey and Redis OSS only. Tags []*Tag `json:"tags,omitempty"` // The ID of the user group. // +kubebuilder:validation:Required @@ -45,13 +48,13 @@ type UserGroupStatus struct { // constructed ARN for the resource // +kubebuilder:validation:Optional ACKResourceMetadata *ackv1alpha1.ResourceMetadata `json:"ackResourceMetadata"` - // All CRS managed by ACK have a common `Status.Conditions` member that + // All CRs managed by ACK have a common `Status.Conditions` member that // contains a collection of `ackv1alpha1.Condition` objects that describe // the various terminal states of the CR and its backend AWS service API // resource // +kubebuilder:validation:Optional Conditions []*ackv1alpha1.Condition `json:"conditions"` - // The minimum engine version required, which is Redis 6.0 + // The minimum engine version required, which is Redis OSS 6.0 // +kubebuilder:validation:Optional MinimumEngineVersion *string `json:"minimumEngineVersion,omitempty"` // A list of updates being applied to the user group. diff --git a/apis/v1alpha1/zz_generated.deepcopy.go b/apis/v1alpha1/zz_generated.deepcopy.go index bbe5ee1e..f4b04d0d 100644 --- a/apis/v1alpha1/zz_generated.deepcopy.go +++ b/apis/v1alpha1/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. // @@ -50,6 +49,32 @@ func (in *Authentication) DeepCopy() *Authentication { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthenticationMode) DeepCopyInto(out *AuthenticationMode) { + *out = *in + if in.Passwords != nil { + in, out := &in.Passwords, &out.Passwords + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationMode. +func (in *AuthenticationMode) DeepCopy() *AuthenticationMode { + if in == nil { + return nil + } + out := new(AuthenticationMode) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AvailabilityZone) DeepCopyInto(out *AvailabilityZone) { *out = *in @@ -73,62 +98,119 @@ func (in *AvailabilityZone) DeepCopy() *AvailabilityZone { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CacheCluster) DeepCopyInto(out *CacheCluster) { *out = *in - if in.ARN != nil { - in, out := &in.ARN, &out.ARN - *out = new(string) - **out = **in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CacheCluster. +func (in *CacheCluster) DeepCopy() *CacheCluster { + if in == nil { + return nil } - if in.AtRestEncryptionEnabled != nil { - in, out := &in.AtRestEncryptionEnabled, &out.AtRestEncryptionEnabled - *out = new(bool) - **out = **in + out := new(CacheCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CacheCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c } - if in.AuthTokenEnabled != nil { - in, out := &in.AuthTokenEnabled, &out.AuthTokenEnabled - *out = new(bool) + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CacheClusterList) DeepCopyInto(out *CacheClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CacheCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CacheClusterList. +func (in *CacheClusterList) DeepCopy() *CacheClusterList { + if in == nil { + return nil + } + out := new(CacheClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CacheClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CacheClusterSpec) DeepCopyInto(out *CacheClusterSpec) { + *out = *in + if in.AZMode != nil { + in, out := &in.AZMode, &out.AZMode + *out = new(string) **out = **in } - if in.AuthTokenLastModifiedDate != nil { - in, out := &in.AuthTokenLastModifiedDate, &out.AuthTokenLastModifiedDate - *out = (*in).DeepCopy() + if in.AuthToken != nil { + in, out := &in.AuthToken, &out.AuthToken + *out = new(corev1alpha1.SecretKeyReference) + **out = **in } if in.AutoMinorVersionUpgrade != nil { in, out := &in.AutoMinorVersionUpgrade, &out.AutoMinorVersionUpgrade *out = new(bool) **out = **in } - if in.CacheClusterCreateTime != nil { - in, out := &in.CacheClusterCreateTime, &out.CacheClusterCreateTime - *out = (*in).DeepCopy() - } if in.CacheClusterID != nil { in, out := &in.CacheClusterID, &out.CacheClusterID *out = new(string) **out = **in } - if in.CacheClusterStatus != nil { - in, out := &in.CacheClusterStatus, &out.CacheClusterStatus - *out = new(string) - **out = **in - } if in.CacheNodeType != nil { in, out := &in.CacheNodeType, &out.CacheNodeType *out = new(string) **out = **in } - if in.CacheSubnetGroupName != nil { - in, out := &in.CacheSubnetGroupName, &out.CacheSubnetGroupName + if in.CacheParameterGroupName != nil { + in, out := &in.CacheParameterGroupName, &out.CacheParameterGroupName *out = new(string) **out = **in } - if in.ClientDownloadLandingPage != nil { - in, out := &in.ClientDownloadLandingPage, &out.ClientDownloadLandingPage + if in.CacheParameterGroupRef != nil { + in, out := &in.CacheParameterGroupRef, &out.CacheParameterGroupRef + *out = new(corev1alpha1.AWSResourceReferenceWrapper) + (*in).DeepCopyInto(*out) + } + if in.CacheSecurityGroupNames != nil { + in, out := &in.CacheSecurityGroupNames, &out.CacheSecurityGroupNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CacheSubnetGroupName != nil { + in, out := &in.CacheSubnetGroupName, &out.CacheSubnetGroupName *out = new(string) **out = **in } - if in.ConfigurationEndpoint != nil { - in, out := &in.ConfigurationEndpoint, &out.ConfigurationEndpoint - *out = new(Endpoint) + if in.CacheSubnetGroupRef != nil { + in, out := &in.CacheSubnetGroupRef, &out.CacheSubnetGroupRef + *out = new(corev1alpha1.AWSResourceReferenceWrapper) (*in).DeepCopyInto(*out) } if in.Engine != nil { @@ -141,27 +223,68 @@ func (in *CacheCluster) DeepCopyInto(out *CacheCluster) { *out = new(string) **out = **in } + if in.IPDiscovery != nil { + in, out := &in.IPDiscovery, &out.IPDiscovery + *out = new(string) + **out = **in + } if in.LogDeliveryConfigurations != nil { in, out := &in.LogDeliveryConfigurations, &out.LogDeliveryConfigurations - *out = make([]*LogDeliveryConfiguration, len(*in)) + *out = make([]*LogDeliveryConfigurationRequest, len(*in)) for i := range *in { if (*in)[i] != nil { in, out := &(*in)[i], &(*out)[i] - *out = new(LogDeliveryConfiguration) + *out = new(LogDeliveryConfigurationRequest) (*in).DeepCopyInto(*out) } } } + if in.NetworkType != nil { + in, out := &in.NetworkType, &out.NetworkType + *out = new(string) + **out = **in + } + if in.NotificationTopicARN != nil { + in, out := &in.NotificationTopicARN, &out.NotificationTopicARN + *out = new(string) + **out = **in + } + if in.NotificationTopicRef != nil { + in, out := &in.NotificationTopicRef, &out.NotificationTopicRef + *out = new(corev1alpha1.AWSResourceReferenceWrapper) + (*in).DeepCopyInto(*out) + } if in.NumCacheNodes != nil { in, out := 
&in.NumCacheNodes, &out.NumCacheNodes *out = new(int64) **out = **in } + if in.OutpostMode != nil { + in, out := &in.OutpostMode, &out.OutpostMode + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int64) + **out = **in + } if in.PreferredAvailabilityZone != nil { in, out := &in.PreferredAvailabilityZone, &out.PreferredAvailabilityZone *out = new(string) **out = **in } + if in.PreferredAvailabilityZones != nil { + in, out := &in.PreferredAvailabilityZones, &out.PreferredAvailabilityZones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.PreferredMaintenanceWindow != nil { in, out := &in.PreferredMaintenanceWindow, &out.PreferredMaintenanceWindow *out = new(string) @@ -172,16 +295,70 @@ func (in *CacheCluster) DeepCopyInto(out *CacheCluster) { *out = new(string) **out = **in } + if in.PreferredOutpostARNs != nil { + in, out := &in.PreferredOutpostARNs, &out.PreferredOutpostARNs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.ReplicationGroupID != nil { in, out := &in.ReplicationGroupID, &out.ReplicationGroupID *out = new(string) **out = **in } - if in.ReplicationGroupLogDeliveryEnabled != nil { - in, out := &in.ReplicationGroupLogDeliveryEnabled, &out.ReplicationGroupLogDeliveryEnabled - *out = new(bool) + if in.ReplicationGroupRef != nil { + in, out := &in.ReplicationGroupRef, &out.ReplicationGroupRef + *out = new(corev1alpha1.AWSResourceReferenceWrapper) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIDs != nil { + in, out := &in.SecurityGroupIDs, &out.SecurityGroupIDs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupRefs != nil { + in, out := &in.SecurityGroupRefs, &out.SecurityGroupRefs + *out = make([]*corev1alpha1.AWSResourceReferenceWrapper, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(corev1alpha1.AWSResourceReferenceWrapper) + (*in).DeepCopyInto(*out) + } + } + } + if in.SnapshotARNs != nil { + in, out := &in.SnapshotARNs, &out.SnapshotARNs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SnapshotName != nil { + in, out := &in.SnapshotName, &out.SnapshotName + *out = new(string) **out = **in } + if in.SnapshotRef != nil { + in, out := &in.SnapshotRef, &out.SnapshotRef + *out = new(corev1alpha1.AWSResourceReferenceWrapper) + (*in).DeepCopyInto(*out) + } if in.SnapshotRetentionLimit != nil { in, out := &in.SnapshotRetentionLimit, &out.SnapshotRetentionLimit *out = new(int64) @@ -192,6 +369,17 @@ func (in *CacheCluster) DeepCopyInto(out *CacheCluster) { *out = new(string) **out = **in } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]*Tag, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Tag) + (*in).DeepCopyInto(*out) + } + } + } if in.TransitEncryptionEnabled != nil { in, out := &in.TransitEncryptionEnabled, &out.TransitEncryptionEnabled *out = new(bool) @@ -199,37 +387,350 @@ func (in *CacheCluster) DeepCopyInto(out *CacheCluster) { } } -// DeepCopy is an autogenerated deepcopy function, copying 
the receiver, creating a new CacheCluster. -func (in *CacheCluster) DeepCopy() *CacheCluster { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CacheClusterSpec. +func (in *CacheClusterSpec) DeepCopy() *CacheClusterSpec { if in == nil { return nil } - out := new(CacheCluster) + out := new(CacheClusterSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CacheEngineVersion) DeepCopyInto(out *CacheEngineVersion) { +func (in *CacheClusterStatus) DeepCopyInto(out *CacheClusterStatus) { *out = *in - if in.CacheEngineDescription != nil { - in, out := &in.CacheEngineDescription, &out.CacheEngineDescription - *out = new(string) - **out = **in + if in.ACKResourceMetadata != nil { + in, out := &in.ACKResourceMetadata, &out.ACKResourceMetadata + *out = new(corev1alpha1.ResourceMetadata) + (*in).DeepCopyInto(*out) } - if in.CacheEngineVersionDescription != nil { - in, out := &in.CacheEngineVersionDescription, &out.CacheEngineVersionDescription - *out = new(string) - **out = **in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]*corev1alpha1.Condition, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(corev1alpha1.Condition) + (*in).DeepCopyInto(*out) + } + } } - if in.CacheParameterGroupFamily != nil { - in, out := &in.CacheParameterGroupFamily, &out.CacheParameterGroupFamily - *out = new(string) + if in.AtRestEncryptionEnabled != nil { + in, out := &in.AtRestEncryptionEnabled, &out.AtRestEncryptionEnabled + *out = new(bool) **out = **in } - if in.Engine != nil { - in, out := &in.Engine, &out.Engine - *out = new(string) + if in.AuthTokenEnabled != nil { + in, out := &in.AuthTokenEnabled, &out.AuthTokenEnabled + *out = new(bool) + **out = **in + } + if in.AuthTokenLastModifiedDate != nil { + in, out := &in.AuthTokenLastModifiedDate, &out.AuthTokenLastModifiedDate + *out = (*in).DeepCopy() + } + if in.CacheClusterCreateTime != nil { + in, out := &in.CacheClusterCreateTime, &out.CacheClusterCreateTime + *out = (*in).DeepCopy() + } + if in.CacheClusterStatus != nil { + in, out := &in.CacheClusterStatus, &out.CacheClusterStatus + *out = new(string) + **out = **in + } + if in.CacheNodes != nil { + in, out := &in.CacheNodes, &out.CacheNodes + *out = make([]*CacheNode, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(CacheNode) + (*in).DeepCopyInto(*out) + } + } + } + if in.CacheParameterGroup != nil { + in, out := &in.CacheParameterGroup, &out.CacheParameterGroup + *out = new(CacheParameterGroupStatus_SDK) + (*in).DeepCopyInto(*out) + } + if in.CacheSecurityGroups != nil { + in, out := &in.CacheSecurityGroups, &out.CacheSecurityGroups + *out = make([]*CacheSecurityGroupMembership, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(CacheSecurityGroupMembership) + (*in).DeepCopyInto(*out) + } + } + } + if in.ClientDownloadLandingPage != nil { + in, out := &in.ClientDownloadLandingPage, &out.ClientDownloadLandingPage + *out = new(string) + **out = **in + } + if in.ConfigurationEndpoint != nil { + in, out := &in.ConfigurationEndpoint, &out.ConfigurationEndpoint + *out = new(Endpoint) + (*in).DeepCopyInto(*out) + } + if in.NotificationConfiguration != nil { + in, out := &in.NotificationConfiguration, &out.NotificationConfiguration + *out = 
new(NotificationConfiguration) + (*in).DeepCopyInto(*out) + } + if in.PendingModifiedValues != nil { + in, out := &in.PendingModifiedValues, &out.PendingModifiedValues + *out = new(PendingModifiedValues) + (*in).DeepCopyInto(*out) + } + if in.ReplicationGroupLogDeliveryEnabled != nil { + in, out := &in.ReplicationGroupLogDeliveryEnabled, &out.ReplicationGroupLogDeliveryEnabled + *out = new(bool) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*SecurityGroupMembership, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(SecurityGroupMembership) + (*in).DeepCopyInto(*out) + } + } + } + if in.TransitEncryptionMode != nil { + in, out := &in.TransitEncryptionMode, &out.TransitEncryptionMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CacheClusterStatus. +func (in *CacheClusterStatus) DeepCopy() *CacheClusterStatus { + if in == nil { + return nil + } + out := new(CacheClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CacheCluster_SDK) DeepCopyInto(out *CacheCluster_SDK) { + *out = *in + if in.ARN != nil { + in, out := &in.ARN, &out.ARN + *out = new(string) + **out = **in + } + if in.AtRestEncryptionEnabled != nil { + in, out := &in.AtRestEncryptionEnabled, &out.AtRestEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.AuthTokenEnabled != nil { + in, out := &in.AuthTokenEnabled, &out.AuthTokenEnabled + *out = new(bool) + **out = **in + } + if in.AuthTokenLastModifiedDate != nil { + in, out := &in.AuthTokenLastModifiedDate, &out.AuthTokenLastModifiedDate + *out = (*in).DeepCopy() + } + if in.AutoMinorVersionUpgrade != nil { + in, out := &in.AutoMinorVersionUpgrade, &out.AutoMinorVersionUpgrade + *out = new(bool) + **out = **in + } + if in.CacheClusterCreateTime != nil { + in, out := &in.CacheClusterCreateTime, &out.CacheClusterCreateTime + *out = (*in).DeepCopy() + } + if in.CacheClusterID != nil { + in, out := &in.CacheClusterID, &out.CacheClusterID + *out = new(string) + **out = **in + } + if in.CacheClusterStatus != nil { + in, out := &in.CacheClusterStatus, &out.CacheClusterStatus + *out = new(string) + **out = **in + } + if in.CacheNodeType != nil { + in, out := &in.CacheNodeType, &out.CacheNodeType + *out = new(string) + **out = **in + } + if in.CacheNodes != nil { + in, out := &in.CacheNodes, &out.CacheNodes + *out = make([]*CacheNode, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(CacheNode) + (*in).DeepCopyInto(*out) + } + } + } + if in.CacheParameterGroup != nil { + in, out := &in.CacheParameterGroup, &out.CacheParameterGroup + *out = new(CacheParameterGroupStatus_SDK) + (*in).DeepCopyInto(*out) + } + if in.CacheSecurityGroups != nil { + in, out := &in.CacheSecurityGroups, &out.CacheSecurityGroups + *out = make([]*CacheSecurityGroupMembership, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(CacheSecurityGroupMembership) + (*in).DeepCopyInto(*out) + } + } + } + if in.CacheSubnetGroupName != nil { + in, out := &in.CacheSubnetGroupName, &out.CacheSubnetGroupName + *out = new(string) + **out = **in + } + if in.ClientDownloadLandingPage != nil { + in, out := &in.ClientDownloadLandingPage, &out.ClientDownloadLandingPage + *out = 
new(string) + **out = **in + } + if in.ConfigurationEndpoint != nil { + in, out := &in.ConfigurationEndpoint, &out.ConfigurationEndpoint + *out = new(Endpoint) + (*in).DeepCopyInto(*out) + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.EngineVersion != nil { + in, out := &in.EngineVersion, &out.EngineVersion + *out = new(string) + **out = **in + } + if in.IPDiscovery != nil { + in, out := &in.IPDiscovery, &out.IPDiscovery + *out = new(string) + **out = **in + } + if in.NetworkType != nil { + in, out := &in.NetworkType, &out.NetworkType + *out = new(string) + **out = **in + } + if in.NotificationConfiguration != nil { + in, out := &in.NotificationConfiguration, &out.NotificationConfiguration + *out = new(NotificationConfiguration) + (*in).DeepCopyInto(*out) + } + if in.NumCacheNodes != nil { + in, out := &in.NumCacheNodes, &out.NumCacheNodes + *out = new(int64) + **out = **in + } + if in.PendingModifiedValues != nil { + in, out := &in.PendingModifiedValues, &out.PendingModifiedValues + *out = new(PendingModifiedValues) + (*in).DeepCopyInto(*out) + } + if in.PreferredAvailabilityZone != nil { + in, out := &in.PreferredAvailabilityZone, &out.PreferredAvailabilityZone + *out = new(string) + **out = **in + } + if in.PreferredMaintenanceWindow != nil { + in, out := &in.PreferredMaintenanceWindow, &out.PreferredMaintenanceWindow + *out = new(string) + **out = **in + } + if in.PreferredOutpostARN != nil { + in, out := &in.PreferredOutpostARN, &out.PreferredOutpostARN + *out = new(string) + **out = **in + } + if in.ReplicationGroupID != nil { + in, out := &in.ReplicationGroupID, &out.ReplicationGroupID + *out = new(string) + **out = **in + } + if in.ReplicationGroupLogDeliveryEnabled != nil { + in, out := &in.ReplicationGroupLogDeliveryEnabled, &out.ReplicationGroupLogDeliveryEnabled + *out = new(bool) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]*SecurityGroupMembership, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(SecurityGroupMembership) + (*in).DeepCopyInto(*out) + } + } + } + if in.SnapshotRetentionLimit != nil { + in, out := &in.SnapshotRetentionLimit, &out.SnapshotRetentionLimit + *out = new(int64) + **out = **in + } + if in.SnapshotWindow != nil { + in, out := &in.SnapshotWindow, &out.SnapshotWindow + *out = new(string) + **out = **in + } + if in.TransitEncryptionEnabled != nil { + in, out := &in.TransitEncryptionEnabled, &out.TransitEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.TransitEncryptionMode != nil { + in, out := &in.TransitEncryptionMode, &out.TransitEncryptionMode + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CacheCluster_SDK. +func (in *CacheCluster_SDK) DeepCopy() *CacheCluster_SDK { + if in == nil { + return nil + } + out := new(CacheCluster_SDK) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CacheEngineVersion) DeepCopyInto(out *CacheEngineVersion) { + *out = *in + if in.CacheEngineDescription != nil { + in, out := &in.CacheEngineDescription, &out.CacheEngineDescription + *out = new(string) + **out = **in + } + if in.CacheEngineVersionDescription != nil { + in, out := &in.CacheEngineVersionDescription, &out.CacheEngineVersionDescription + *out = new(string) + **out = **in + } + if in.CacheParameterGroupFamily != nil { + in, out := &in.CacheParameterGroupFamily, &out.CacheParameterGroupFamily + *out = new(string) + **out = **in + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) **out = **in } if in.EngineVersion != nil { @@ -595,8 +1096,19 @@ func (in *CacheParameterGroupStatus) DeepCopy() *CacheParameterGroupStatus { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CacheParameterGroupStatus_SDK) DeepCopyInto(out *CacheParameterGroupStatus_SDK) { *out = *in - if in.CacheParameterGroupName != nil { - in, out := &in.CacheParameterGroupName, &out.CacheParameterGroupName + if in.CacheNodeIDsToReboot != nil { + in, out := &in.CacheNodeIDsToReboot, &out.CacheNodeIDsToReboot + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.CacheParameterGroupName != nil { + in, out := &in.CacheParameterGroupName, &out.CacheParameterGroupName *out = new(string) **out = **in } @@ -800,6 +1312,17 @@ func (in *CacheSubnetGroupSpec) DeepCopyInto(out *CacheSubnetGroupSpec) { } } } + if in.SubnetRefs != nil { + in, out := &in.SubnetRefs, &out.SubnetRefs + *out = make([]*corev1alpha1.AWSResourceReferenceWrapper, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(corev1alpha1.AWSResourceReferenceWrapper) + (*in).DeepCopyInto(*out) + } + } + } if in.Tags != nil { in, out := &in.Tags, &out.Tags *out = make([]*Tag, len(*in)) @@ -927,6 +1450,31 @@ func (in *CacheSubnetGroup_SDK) DeepCopy() *CacheSubnetGroup_SDK { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CacheUsageLimits) DeepCopyInto(out *CacheUsageLimits) { + *out = *in + if in.DataStorage != nil { + in, out := &in.DataStorage, &out.DataStorage + *out = new(DataStorage) + (*in).DeepCopyInto(*out) + } + if in.ECPUPerSecond != nil { + in, out := &in.ECPUPerSecond, &out.ECPUPerSecond + *out = new(ECPUPerSecond) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CacheUsageLimits. +func (in *CacheUsageLimits) DeepCopy() *CacheUsageLimits { + if in == nil { + return nil + } + out := new(CacheUsageLimits) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CloudWatchLogsDestinationDetails) DeepCopyInto(out *CloudWatchLogsDestinationDetails) { *out = *in @@ -960,6 +1508,28 @@ func (in *ConfigureShard) DeepCopyInto(out *ConfigureShard) { *out = new(string) **out = **in } + if in.PreferredAvailabilityZones != nil { + in, out := &in.PreferredAvailabilityZones, &out.PreferredAvailabilityZones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PreferredOutpostARNs != nil { + in, out := &in.PreferredOutpostARNs, &out.PreferredOutpostARNs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigureShard. @@ -997,6 +1567,36 @@ func (in *CustomerNodeEndpoint) DeepCopy() *CustomerNodeEndpoint { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataStorage) DeepCopyInto(out *DataStorage) { + *out = *in + if in.Maximum != nil { + in, out := &in.Maximum, &out.Maximum + *out = new(int64) + **out = **in + } + if in.Minimum != nil { + in, out := &in.Minimum, &out.Minimum + *out = new(int64) + **out = **in + } + if in.Unit != nil { + in, out := &in.Unit, &out.Unit + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataStorage. +func (in *DataStorage) DeepCopy() *DataStorage { + if in == nil { + return nil + } + out := new(DataStorage) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DestinationDetails) DeepCopyInto(out *DestinationDetails) { *out = *in @@ -1052,6 +1652,31 @@ func (in *EC2SecurityGroup) DeepCopy() *EC2SecurityGroup { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ECPUPerSecond) DeepCopyInto(out *ECPUPerSecond) { + *out = *in + if in.Maximum != nil { + in, out := &in.Maximum, &out.Maximum + *out = new(int64) + **out = **in + } + if in.Minimum != nil { + in, out := &in.Minimum, &out.Minimum + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ECPUPerSecond. +func (in *ECPUPerSecond) DeepCopy() *ECPUPerSecond { + if in == nil { + return nil + } + out := new(ECPUPerSecond) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Endpoint) DeepCopyInto(out *Endpoint) { *out = *in @@ -1867,6 +2492,17 @@ func (in *PendingModifiedValues) DeepCopyInto(out *PendingModifiedValues) { *out = new(string) **out = **in } + if in.CacheNodeIDsToRemove != nil { + in, out := &in.CacheNodeIDsToRemove, &out.CacheNodeIDsToRemove + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.CacheNodeType != nil { in, out := &in.CacheNodeType, &out.CacheNodeType *out = new(string) @@ -1877,22 +2513,21 @@ func (in *PendingModifiedValues) DeepCopyInto(out *PendingModifiedValues) { *out = new(string) **out = **in } - if in.LogDeliveryConfigurations != nil { - in, out := &in.LogDeliveryConfigurations, &out.LogDeliveryConfigurations - *out = make([]*PendingLogDeliveryConfiguration, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(PendingLogDeliveryConfiguration) - (*in).DeepCopyInto(*out) - } - } - } if in.NumCacheNodes != nil { in, out := &in.NumCacheNodes, &out.NumCacheNodes *out = new(int64) **out = **in } + if in.TransitEncryptionEnabled != nil { + in, out := &in.TransitEncryptionEnabled, &out.TransitEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.TransitEncryptionMode != nil { + in, out := &in.TransitEncryptionMode, &out.TransitEncryptionMode + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PendingModifiedValues. @@ -2123,6 +2758,11 @@ func (in *ReplicationGroupSpec) DeepCopyInto(out *ReplicationGroupSpec) { *out = new(string) **out = **in } + if in.CacheParameterGroupRef != nil { + in, out := &in.CacheParameterGroupRef, &out.CacheParameterGroupRef + *out = new(corev1alpha1.AWSResourceReferenceWrapper) + (*in).DeepCopyInto(*out) + } if in.CacheSecurityGroupNames != nil { in, out := &in.CacheSecurityGroupNames, &out.CacheSecurityGroupNames *out = make([]*string, len(*in)) @@ -2139,6 +2779,11 @@ func (in *ReplicationGroupSpec) DeepCopyInto(out *ReplicationGroupSpec) { *out = new(string) **out = **in } + if in.CacheSubnetGroupRef != nil { + in, out := &in.CacheSubnetGroupRef, &out.CacheSubnetGroupRef + *out = new(corev1alpha1.AWSResourceReferenceWrapper) + (*in).DeepCopyInto(*out) + } if in.DataTieringEnabled != nil { in, out := &in.DataTieringEnabled, &out.DataTieringEnabled *out = new(bool) @@ -2159,6 +2804,11 @@ func (in *ReplicationGroupSpec) DeepCopyInto(out *ReplicationGroupSpec) { *out = new(string) **out = **in } + if in.IPDiscovery != nil { + in, out := &in.IPDiscovery, &out.IPDiscovery + *out = new(string) + **out = **in + } if in.KMSKeyID != nil { in, out := &in.KMSKeyID, &out.KMSKeyID *out = new(string) @@ -2180,6 +2830,11 @@ func (in *ReplicationGroupSpec) DeepCopyInto(out *ReplicationGroupSpec) { *out = new(bool) **out = **in } + if in.NetworkType != nil { + in, out := &in.NetworkType, &out.NetworkType + *out = new(string) + **out = **in + } if in.NodeGroupConfiguration != nil { in, out := &in.NodeGroupConfiguration, &out.NodeGroupConfiguration *out = make([]*NodeGroupConfiguration, len(*in)) @@ -2248,6 +2903,17 @@ func (in *ReplicationGroupSpec) DeepCopyInto(out *ReplicationGroupSpec) { } } } + if in.SecurityGroupRefs != nil { + in, out := &in.SecurityGroupRefs, &out.SecurityGroupRefs + *out = make([]*corev1alpha1.AWSResourceReferenceWrapper, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = 
new(corev1alpha1.AWSResourceReferenceWrapper) + (*in).DeepCopyInto(*out) + } + } + } if in.SnapshotARNs != nil { in, out := &in.SnapshotARNs, &out.SnapshotARNs *out = make([]*string, len(*in)) @@ -2541,11 +3207,21 @@ func (in *ReplicationGroup_SDK) DeepCopyInto(out *ReplicationGroup_SDK) { *out = new(string) **out = **in } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } if in.GlobalReplicationGroupInfo != nil { in, out := &in.GlobalReplicationGroupInfo, &out.GlobalReplicationGroupInfo *out = new(GlobalReplicationGroupInfo) (*in).DeepCopyInto(*out) } + if in.IPDiscovery != nil { + in, out := &in.IPDiscovery, &out.IPDiscovery + *out = new(string) + **out = **in + } if in.KMSKeyID != nil { in, out := &in.KMSKeyID, &out.KMSKeyID *out = new(string) @@ -2589,6 +3265,11 @@ func (in *ReplicationGroup_SDK) DeepCopyInto(out *ReplicationGroup_SDK) { *out = new(string) **out = **in } + if in.NetworkType != nil { + in, out := &in.NetworkType, &out.NetworkType + *out = new(string) + **out = **in + } if in.NodeGroups != nil { in, out := &in.NodeGroups, &out.NodeGroups *out = make([]*NodeGroup, len(*in)) @@ -2634,13 +3315,628 @@ func (in *ReplicationGroup_SDK) DeepCopyInto(out *ReplicationGroup_SDK) { *out = new(string) **out = **in } - if in.TransitEncryptionEnabled != nil { - in, out := &in.TransitEncryptionEnabled, &out.TransitEncryptionEnabled - *out = new(bool) + if in.TransitEncryptionEnabled != nil { + in, out := &in.TransitEncryptionEnabled, &out.TransitEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.UserGroupIDs != nil { + in, out := &in.UserGroupIDs, &out.UserGroupIDs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationGroup_SDK. +func (in *ReplicationGroup_SDK) DeepCopy() *ReplicationGroup_SDK { + if in == nil { + return nil + } + out := new(ReplicationGroup_SDK) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReservedCacheNode) DeepCopyInto(out *ReservedCacheNode) { + *out = *in + if in.CacheNodeCount != nil { + in, out := &in.CacheNodeCount, &out.CacheNodeCount + *out = new(int64) + **out = **in + } + if in.CacheNodeType != nil { + in, out := &in.CacheNodeType, &out.CacheNodeType + *out = new(string) + **out = **in + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(int64) + **out = **in + } + if in.FixedPrice != nil { + in, out := &in.FixedPrice, &out.FixedPrice + *out = new(float64) + **out = **in + } + if in.OfferingType != nil { + in, out := &in.OfferingType, &out.OfferingType + *out = new(string) + **out = **in + } + if in.ProductDescription != nil { + in, out := &in.ProductDescription, &out.ProductDescription + *out = new(string) + **out = **in + } + if in.ReservationARN != nil { + in, out := &in.ReservationARN, &out.ReservationARN + *out = new(string) + **out = **in + } + if in.ReservedCacheNodeID != nil { + in, out := &in.ReservedCacheNodeID, &out.ReservedCacheNodeID + *out = new(string) + **out = **in + } + if in.ReservedCacheNodesOfferingID != nil { + in, out := &in.ReservedCacheNodesOfferingID, &out.ReservedCacheNodesOfferingID + *out = new(string) + **out = **in + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = (*in).DeepCopy() + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.UsagePrice != nil { + in, out := &in.UsagePrice, &out.UsagePrice + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReservedCacheNode. +func (in *ReservedCacheNode) DeepCopy() *ReservedCacheNode { + if in == nil { + return nil + } + out := new(ReservedCacheNode) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReservedCacheNodesOffering) DeepCopyInto(out *ReservedCacheNodesOffering) { + *out = *in + if in.CacheNodeType != nil { + in, out := &in.CacheNodeType, &out.CacheNodeType + *out = new(string) + **out = **in + } + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(int64) + **out = **in + } + if in.FixedPrice != nil { + in, out := &in.FixedPrice, &out.FixedPrice + *out = new(float64) + **out = **in + } + if in.OfferingType != nil { + in, out := &in.OfferingType, &out.OfferingType + *out = new(string) + **out = **in + } + if in.ProductDescription != nil { + in, out := &in.ProductDescription, &out.ProductDescription + *out = new(string) + **out = **in + } + if in.ReservedCacheNodesOfferingID != nil { + in, out := &in.ReservedCacheNodesOfferingID, &out.ReservedCacheNodesOfferingID + *out = new(string) + **out = **in + } + if in.UsagePrice != nil { + in, out := &in.UsagePrice, &out.UsagePrice + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReservedCacheNodesOffering. +func (in *ReservedCacheNodesOffering) DeepCopy() *ReservedCacheNodesOffering { + if in == nil { + return nil + } + out := new(ReservedCacheNodesOffering) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReshardingConfiguration) DeepCopyInto(out *ReshardingConfiguration) { + *out = *in + if in.NodeGroupID != nil { + in, out := &in.NodeGroupID, &out.NodeGroupID + *out = new(string) + **out = **in + } + if in.PreferredAvailabilityZones != nil { + in, out := &in.PreferredAvailabilityZones, &out.PreferredAvailabilityZones + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReshardingConfiguration. +func (in *ReshardingConfiguration) DeepCopy() *ReshardingConfiguration { + if in == nil { + return nil + } + out := new(ReshardingConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReshardingStatus) DeepCopyInto(out *ReshardingStatus) { + *out = *in + if in.SlotMigration != nil { + in, out := &in.SlotMigration, &out.SlotMigration + *out = new(SlotMigration) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReshardingStatus. +func (in *ReshardingStatus) DeepCopy() *ReshardingStatus { + if in == nil { + return nil + } + out := new(ReshardingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityGroupMembership) DeepCopyInto(out *SecurityGroupMembership) { + *out = *in + if in.SecurityGroupID != nil { + in, out := &in.SecurityGroupID, &out.SecurityGroupID + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroupMembership. +func (in *SecurityGroupMembership) DeepCopy() *SecurityGroupMembership { + if in == nil { + return nil + } + out := new(SecurityGroupMembership) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerlessCache) DeepCopyInto(out *ServerlessCache) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessCache. +func (in *ServerlessCache) DeepCopy() *ServerlessCache { + if in == nil { + return nil + } + out := new(ServerlessCache) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServerlessCache) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerlessCacheConfiguration) DeepCopyInto(out *ServerlessCacheConfiguration) { + *out = *in + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.MajorEngineVersion != nil { + in, out := &in.MajorEngineVersion, &out.MajorEngineVersion + *out = new(string) + **out = **in + } + if in.ServerlessCacheName != nil { + in, out := &in.ServerlessCacheName, &out.ServerlessCacheName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessCacheConfiguration. +func (in *ServerlessCacheConfiguration) DeepCopy() *ServerlessCacheConfiguration { + if in == nil { + return nil + } + out := new(ServerlessCacheConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerlessCacheList) DeepCopyInto(out *ServerlessCacheList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServerlessCache, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessCacheList. +func (in *ServerlessCacheList) DeepCopy() *ServerlessCacheList { + if in == nil { + return nil + } + out := new(ServerlessCacheList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServerlessCacheList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerlessCacheSnapshot) DeepCopyInto(out *ServerlessCacheSnapshot) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessCacheSnapshot. +func (in *ServerlessCacheSnapshot) DeepCopy() *ServerlessCacheSnapshot { + if in == nil { + return nil + } + out := new(ServerlessCacheSnapshot) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServerlessCacheSnapshot) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerlessCacheSnapshotList) DeepCopyInto(out *ServerlessCacheSnapshotList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServerlessCacheSnapshot, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessCacheSnapshotList. 
+func (in *ServerlessCacheSnapshotList) DeepCopy() *ServerlessCacheSnapshotList { + if in == nil { + return nil + } + out := new(ServerlessCacheSnapshotList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServerlessCacheSnapshotList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerlessCacheSnapshotSpec) DeepCopyInto(out *ServerlessCacheSnapshotSpec) { + *out = *in + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.KMSKeyRef != nil { + in, out := &in.KMSKeyRef, &out.KMSKeyRef + *out = new(corev1alpha1.AWSResourceReferenceWrapper) + (*in).DeepCopyInto(*out) + } + if in.ServerlessCacheName != nil { + in, out := &in.ServerlessCacheName, &out.ServerlessCacheName + *out = new(string) + **out = **in + } + if in.ServerlessCacheRef != nil { + in, out := &in.ServerlessCacheRef, &out.ServerlessCacheRef + *out = new(corev1alpha1.AWSResourceReferenceWrapper) + (*in).DeepCopyInto(*out) + } + if in.ServerlessCacheSnapshotName != nil { + in, out := &in.ServerlessCacheSnapshotName, &out.ServerlessCacheSnapshotName + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]*Tag, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Tag) + (*in).DeepCopyInto(*out) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessCacheSnapshotSpec. +func (in *ServerlessCacheSnapshotSpec) DeepCopy() *ServerlessCacheSnapshotSpec { + if in == nil { + return nil + } + out := new(ServerlessCacheSnapshotSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerlessCacheSnapshotStatus) DeepCopyInto(out *ServerlessCacheSnapshotStatus) { + *out = *in + if in.ACKResourceMetadata != nil { + in, out := &in.ACKResourceMetadata, &out.ACKResourceMetadata + *out = new(corev1alpha1.ResourceMetadata) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]*corev1alpha1.Condition, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(corev1alpha1.Condition) + (*in).DeepCopyInto(*out) + } + } + } + if in.BytesUsedForCache != nil { + in, out := &in.BytesUsedForCache, &out.BytesUsedForCache + *out = new(string) + **out = **in + } + if in.CreateTime != nil { + in, out := &in.CreateTime, &out.CreateTime + *out = (*in).DeepCopy() + } + if in.ExpiryTime != nil { + in, out := &in.ExpiryTime, &out.ExpiryTime + *out = (*in).DeepCopy() + } + if in.ServerlessCacheConfiguration != nil { + in, out := &in.ServerlessCacheConfiguration, &out.ServerlessCacheConfiguration + *out = new(ServerlessCacheConfiguration) + (*in).DeepCopyInto(*out) + } + if in.SnapshotType != nil { + in, out := &in.SnapshotType, &out.SnapshotType + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessCacheSnapshotStatus. 
+func (in *ServerlessCacheSnapshotStatus) DeepCopy() *ServerlessCacheSnapshotStatus { + if in == nil { + return nil + } + out := new(ServerlessCacheSnapshotStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerlessCacheSnapshot_SDK) DeepCopyInto(out *ServerlessCacheSnapshot_SDK) { + *out = *in + if in.ARN != nil { + in, out := &in.ARN, &out.ARN + *out = new(string) + **out = **in + } + if in.BytesUsedForCache != nil { + in, out := &in.BytesUsedForCache, &out.BytesUsedForCache + *out = new(string) + **out = **in + } + if in.CreateTime != nil { + in, out := &in.CreateTime, &out.CreateTime + *out = (*in).DeepCopy() + } + if in.ExpiryTime != nil { + in, out := &in.ExpiryTime, &out.ExpiryTime + *out = (*in).DeepCopy() + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.ServerlessCacheConfiguration != nil { + in, out := &in.ServerlessCacheConfiguration, &out.ServerlessCacheConfiguration + *out = new(ServerlessCacheConfiguration) + (*in).DeepCopyInto(*out) + } + if in.ServerlessCacheSnapshotName != nil { + in, out := &in.ServerlessCacheSnapshotName, &out.ServerlessCacheSnapshotName + *out = new(string) + **out = **in + } + if in.SnapshotType != nil { + in, out := &in.SnapshotType, &out.SnapshotType + *out = new(string) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessCacheSnapshot_SDK. +func (in *ServerlessCacheSnapshot_SDK) DeepCopy() *ServerlessCacheSnapshot_SDK { + if in == nil { + return nil + } + out := new(ServerlessCacheSnapshot_SDK) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServerlessCacheSpec) DeepCopyInto(out *ServerlessCacheSpec) { + *out = *in + if in.CacheUsageLimits != nil { + in, out := &in.CacheUsageLimits, &out.CacheUsageLimits + *out = new(CacheUsageLimits) + (*in).DeepCopyInto(*out) + } + if in.DailySnapshotTime != nil { + in, out := &in.DailySnapshotTime, &out.DailySnapshotTime + *out = new(string) + **out = **in + } + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine + *out = new(string) + **out = **in + } + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in + } + if in.MajorEngineVersion != nil { + in, out := &in.MajorEngineVersion, &out.MajorEngineVersion + *out = new(string) + **out = **in + } + if in.SecurityGroupIDs != nil { + in, out := &in.SecurityGroupIDs, &out.SecurityGroupIDs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SecurityGroupRefs != nil { + in, out := &in.SecurityGroupRefs, &out.SecurityGroupRefs + *out = make([]*corev1alpha1.AWSResourceReferenceWrapper, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(corev1alpha1.AWSResourceReferenceWrapper) + (*in).DeepCopyInto(*out) + } + } + } + if in.ServerlessCacheName != nil { + in, out := &in.ServerlessCacheName, &out.ServerlessCacheName + *out = new(string) + **out = **in + } + if in.SnapshotARNsToRestore != nil { + in, out := &in.SnapshotARNsToRestore, &out.SnapshotARNsToRestore + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.SnapshotRetentionLimit != nil { + in, out := &in.SnapshotRetentionLimit, &out.SnapshotRetentionLimit + *out = new(int64) **out = **in } - if in.UserGroupIDs != nil { - in, out := &in.UserGroupIDs, &out.UserGroupIDs + if in.SubnetIDs != nil { + in, out := &in.SubnetIDs, &out.SubnetIDs *out = make([]*string, len(*in)) for i := range *in { if (*in)[i] != nil { @@ -2650,152 +3946,159 @@ func (in *ReplicationGroup_SDK) DeepCopyInto(out *ReplicationGroup_SDK) { } } } + if in.SubnetRefs != nil { + in, out := &in.SubnetRefs, &out.SubnetRefs + *out = make([]*corev1alpha1.AWSResourceReferenceWrapper, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(corev1alpha1.AWSResourceReferenceWrapper) + (*in).DeepCopyInto(*out) + } + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]*Tag, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Tag) + (*in).DeepCopyInto(*out) + } + } + } + if in.UserGroupID != nil { + in, out := &in.UserGroupID, &out.UserGroupID + *out = new(string) + **out = **in + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationGroup_SDK. -func (in *ReplicationGroup_SDK) DeepCopy() *ReplicationGroup_SDK { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessCacheSpec. +func (in *ServerlessCacheSpec) DeepCopy() *ServerlessCacheSpec { if in == nil { return nil } - out := new(ReplicationGroup_SDK) + out := new(ServerlessCacheSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *ReservedCacheNode) DeepCopyInto(out *ReservedCacheNode) { +func (in *ServerlessCacheStatus) DeepCopyInto(out *ServerlessCacheStatus) { *out = *in - if in.CacheNodeCount != nil { - in, out := &in.CacheNodeCount, &out.CacheNodeCount - *out = new(int64) - **out = **in - } - if in.CacheNodeType != nil { - in, out := &in.CacheNodeType, &out.CacheNodeType - *out = new(string) - **out = **in - } - if in.Duration != nil { - in, out := &in.Duration, &out.Duration - *out = new(int64) - **out = **in - } - if in.FixedPrice != nil { - in, out := &in.FixedPrice, &out.FixedPrice - *out = new(float64) - **out = **in - } - if in.OfferingType != nil { - in, out := &in.OfferingType, &out.OfferingType - *out = new(string) - **out = **in + if in.ACKResourceMetadata != nil { + in, out := &in.ACKResourceMetadata, &out.ACKResourceMetadata + *out = new(corev1alpha1.ResourceMetadata) + (*in).DeepCopyInto(*out) } - if in.ProductDescription != nil { - in, out := &in.ProductDescription, &out.ProductDescription - *out = new(string) - **out = **in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]*corev1alpha1.Condition, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(corev1alpha1.Condition) + (*in).DeepCopyInto(*out) + } + } } - if in.ReservationARN != nil { - in, out := &in.ReservationARN, &out.ReservationARN - *out = new(string) - **out = **in + if in.CreateTime != nil { + in, out := &in.CreateTime, &out.CreateTime + *out = (*in).DeepCopy() } - if in.ReservedCacheNodeID != nil { - in, out := &in.ReservedCacheNodeID, &out.ReservedCacheNodeID - *out = new(string) - **out = **in + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(Endpoint) + (*in).DeepCopyInto(*out) } - if in.ReservedCacheNodesOfferingID != nil { - in, out := &in.ReservedCacheNodesOfferingID, &out.ReservedCacheNodesOfferingID + if in.FullEngineVersion != nil { + in, out := &in.FullEngineVersion, &out.FullEngineVersion *out = new(string) **out = **in } - if in.StartTime != nil { - in, out := &in.StartTime, &out.StartTime - *out = (*in).DeepCopy() + if in.ReaderEndpoint != nil { + in, out := &in.ReaderEndpoint, &out.ReaderEndpoint + *out = new(Endpoint) + (*in).DeepCopyInto(*out) } - if in.State != nil { - in, out := &in.State, &out.State + if in.Status != nil { + in, out := &in.Status, &out.Status *out = new(string) **out = **in } - if in.UsagePrice != nil { - in, out := &in.UsagePrice, &out.UsagePrice - *out = new(float64) - **out = **in - } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReservedCacheNode. -func (in *ReservedCacheNode) DeepCopy() *ReservedCacheNode { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessCacheStatus. +func (in *ServerlessCacheStatus) DeepCopy() *ServerlessCacheStatus { if in == nil { return nil } - out := new(ReservedCacheNode) + out := new(ServerlessCacheStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ReservedCacheNodesOffering) DeepCopyInto(out *ReservedCacheNodesOffering) { +func (in *ServerlessCache_SDK) DeepCopyInto(out *ServerlessCache_SDK) { *out = *in - if in.CacheNodeType != nil { - in, out := &in.CacheNodeType, &out.CacheNodeType + if in.ARN != nil { + in, out := &in.ARN, &out.ARN *out = new(string) **out = **in } - if in.Duration != nil { - in, out := &in.Duration, &out.Duration - *out = new(int64) - **out = **in + if in.CacheUsageLimits != nil { + in, out := &in.CacheUsageLimits, &out.CacheUsageLimits + *out = new(CacheUsageLimits) + (*in).DeepCopyInto(*out) } - if in.FixedPrice != nil { - in, out := &in.FixedPrice, &out.FixedPrice - *out = new(float64) - **out = **in + if in.CreateTime != nil { + in, out := &in.CreateTime, &out.CreateTime + *out = (*in).DeepCopy() } - if in.OfferingType != nil { - in, out := &in.OfferingType, &out.OfferingType + if in.DailySnapshotTime != nil { + in, out := &in.DailySnapshotTime, &out.DailySnapshotTime *out = new(string) **out = **in } - if in.ProductDescription != nil { - in, out := &in.ProductDescription, &out.ProductDescription + if in.Description != nil { + in, out := &in.Description, &out.Description *out = new(string) **out = **in } - if in.ReservedCacheNodesOfferingID != nil { - in, out := &in.ReservedCacheNodesOfferingID, &out.ReservedCacheNodesOfferingID + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(Endpoint) + (*in).DeepCopyInto(*out) + } + if in.Engine != nil { + in, out := &in.Engine, &out.Engine *out = new(string) **out = **in } - if in.UsagePrice != nil { - in, out := &in.UsagePrice, &out.UsagePrice - *out = new(float64) + if in.FullEngineVersion != nil { + in, out := &in.FullEngineVersion, &out.FullEngineVersion + *out = new(string) **out = **in } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReservedCacheNodesOffering. -func (in *ReservedCacheNodesOffering) DeepCopy() *ReservedCacheNodesOffering { - if in == nil { - return nil + if in.KMSKeyID != nil { + in, out := &in.KMSKeyID, &out.KMSKeyID + *out = new(string) + **out = **in } - out := new(ReservedCacheNodesOffering) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReshardingConfiguration) DeepCopyInto(out *ReshardingConfiguration) { - *out = *in - if in.NodeGroupID != nil { - in, out := &in.NodeGroupID, &out.NodeGroupID + if in.MajorEngineVersion != nil { + in, out := &in.MajorEngineVersion, &out.MajorEngineVersion *out = new(string) **out = **in } - if in.PreferredAvailabilityZones != nil { - in, out := &in.PreferredAvailabilityZones, &out.PreferredAvailabilityZones + if in.ReaderEndpoint != nil { + in, out := &in.ReaderEndpoint, &out.ReaderEndpoint + *out = new(Endpoint) + (*in).DeepCopyInto(*out) + } + if in.SecurityGroupIDs != nil { + in, out := &in.SecurityGroupIDs, &out.SecurityGroupIDs *out = make([]*string, len(*in)) for i := range *in { if (*in)[i] != nil { @@ -2805,59 +4108,45 @@ func (in *ReshardingConfiguration) DeepCopyInto(out *ReshardingConfiguration) { } } } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReshardingConfiguration. 
-func (in *ReshardingConfiguration) DeepCopy() *ReshardingConfiguration { - if in == nil { - return nil - } - out := new(ReshardingConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReshardingStatus) DeepCopyInto(out *ReshardingStatus) { - *out = *in - if in.SlotMigration != nil { - in, out := &in.SlotMigration, &out.SlotMigration - *out = new(SlotMigration) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReshardingStatus. -func (in *ReshardingStatus) DeepCopy() *ReshardingStatus { - if in == nil { - return nil - } - out := new(ReshardingStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecurityGroupMembership) DeepCopyInto(out *SecurityGroupMembership) { - *out = *in - if in.SecurityGroupID != nil { - in, out := &in.SecurityGroupID, &out.SecurityGroupID + if in.ServerlessCacheName != nil { + in, out := &in.ServerlessCacheName, &out.ServerlessCacheName *out = new(string) **out = **in } + if in.SnapshotRetentionLimit != nil { + in, out := &in.SnapshotRetentionLimit, &out.SnapshotRetentionLimit + *out = new(int64) + **out = **in + } if in.Status != nil { in, out := &in.Status, &out.Status *out = new(string) **out = **in } + if in.SubnetIDs != nil { + in, out := &in.SubnetIDs, &out.SubnetIDs + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.UserGroupID != nil { + in, out := &in.UserGroupID, &out.UserGroupID + *out = new(string) + **out = **in + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroupMembership. -func (in *SecurityGroupMembership) DeepCopy() *SecurityGroupMembership { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerlessCache_SDK. 
+func (in *ServerlessCache_SDK) DeepCopy() *ServerlessCache_SDK { if in == nil { return nil } - out := new(SecurityGroupMembership) + out := new(ServerlessCache_SDK) in.DeepCopyInto(out) return out } @@ -3820,6 +5109,17 @@ func (in *UserGroup_SDK) DeepCopyInto(out *UserGroup_SDK) { } } } + if in.ServerlessCaches != nil { + in, out := &in.ServerlessCaches, &out.ServerlessCaches + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } if in.Status != nil { in, out := &in.Status, &out.Status *out = new(string) diff --git a/cmd/controller/main.go b/cmd/controller/main.go index 5aac861e..cd7c4ada 100644 --- a/cmd/controller/main.go +++ b/cmd/controller/main.go @@ -16,27 +16,38 @@ package main import ( + "context" "os" + ec2apitypes "github.com/aws-controllers-k8s/ec2-controller/apis/v1alpha1" + kmsapitypes "github.com/aws-controllers-k8s/kms-controller/apis/v1alpha1" ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" ackcfg "github.com/aws-controllers-k8s/runtime/pkg/config" ackrt "github.com/aws-controllers-k8s/runtime/pkg/runtime" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" ackrtutil "github.com/aws-controllers-k8s/runtime/pkg/util" ackrtwebhook "github.com/aws-controllers-k8s/runtime/pkg/webhook" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" + snsapitypes "github.com/aws-controllers-k8s/sns-controller/apis/v1alpha1" flag "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" clientgoscheme "k8s.io/client-go/kubernetes/scheme" ctrlrt "sigs.k8s.io/controller-runtime" + ctrlrtcache "sigs.k8s.io/controller-runtime/pkg/cache" + ctrlrthealthz "sigs.k8s.io/controller-runtime/pkg/healthz" ctrlrtmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + ctrlrtwebhook "sigs.k8s.io/controller-runtime/pkg/webhook" svctypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" svcresource "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource" + _ "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource/cache_cluster" _ "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource/cache_parameter_group" _ "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource/cache_subnet_group" _ "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource/replication_group" + _ "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource/serverless_cache" + _ "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource/serverless_cache_snapshot" _ "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource/snapshot" _ "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource/user" _ "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource/user_group" @@ -45,11 +56,10 @@ import ( ) var ( - awsServiceAPIGroup = "elasticache.services.k8s.aws" - awsServiceAlias = "elasticache" - awsServiceEndpointsID = svcsdk.EndpointsID - scheme = runtime.NewScheme() - setupLog = ctrlrt.Log.WithName("setup") + awsServiceAPIGroup = "elasticache.services.k8s.aws" + awsServiceAlias = "elasticache" + scheme = runtime.NewScheme() + setupLog = ctrlrt.Log.WithName("setup") ) func init() { @@ -57,6 +67,9 @@ func init() { _ = svctypes.AddToScheme(scheme) _ = ackv1alpha1.AddToScheme(scheme) + _ = ec2apitypes.AddToScheme(scheme) + _ = kmsapitypes.AddToScheme(scheme) + _ = 
snsapitypes.AddToScheme(scheme) } func main() { @@ -65,7 +78,14 @@ func main() { flag.Parse() ackCfg.SetupLogger() - if err := ackCfg.Validate(); err != nil { + managerFactories := svcresource.GetManagerFactories() + resourceGVKs := make([]schema.GroupVersionKind, 0, len(managerFactories)) + for _, mf := range managerFactories { + resourceGVKs = append(resourceGVKs, mf.ResourceDescriptor().GroupVersionKind()) + } + + ctx := context.Background() + if err := ackCfg.Validate(ctx, ackcfg.WithGVKs(resourceGVKs)); err != nil { setupLog.Error( err, "Unable to create controller manager", "aws.service", awsServiceAlias, @@ -82,14 +102,47 @@ func main() { os.Exit(1) } + watchNamespaces := make(map[string]ctrlrtcache.Config, 0) + namespaces, err := ackCfg.GetWatchNamespaces() + if err != nil { + setupLog.Error( + err, "Unable to parse watch namespaces.", + "aws.service", ackCfg.WatchNamespace, + ) + os.Exit(1) + } + + for _, namespace := range namespaces { + watchNamespaces[namespace] = ctrlrtcache.Config{} + } + watchSelectors, err := ackCfg.ParseWatchSelectors() + if err != nil { + setupLog.Error( + err, "Unable to parse watch selectors.", + "aws.service", awsServiceAlias, + ) + os.Exit(1) + } mgr, err := ctrlrt.NewManager(ctrlrt.GetConfigOrDie(), ctrlrt.Options{ - Scheme: scheme, - Port: port, - Host: host, - MetricsBindAddress: ackCfg.MetricsAddr, - LeaderElection: ackCfg.EnableLeaderElection, - LeaderElectionID: awsServiceAPIGroup, - Namespace: ackCfg.WatchNamespace, + Scheme: scheme, + Cache: ctrlrtcache.Options{ + Scheme: scheme, + DefaultNamespaces: watchNamespaces, + DefaultLabelSelector: watchSelectors, + }, + WebhookServer: &ctrlrtwebhook.DefaultServer{ + Options: ctrlrtwebhook.Options{ + Port: port, + Host: host, + }, + }, + Metrics: metricsserver.Options{BindAddress: ackCfg.MetricsAddr}, + LeaderElection: ackCfg.EnableLeaderElection, + LeaderElectionID: "ack-" + awsServiceAPIGroup, + LeaderElectionNamespace: ackCfg.LeaderElectionNamespace, + HealthProbeBindAddress: ackCfg.HealthzAddr, + LivenessEndpointName: "/healthz", + ReadinessEndpointName: "/readyz", }) if err != nil { setupLog.Error( @@ -106,7 +159,7 @@ func main() { "aws.service", awsServiceAlias, ) sc := ackrt.NewServiceController( - awsServiceAlias, awsServiceAPIGroup, awsServiceEndpointsID, + awsServiceAlias, awsServiceAPIGroup, acktypes.VersionInfo{ version.GitCommit, version.GitVersion, @@ -128,7 +181,6 @@ func main() { err, "unable to register webhook "+webhook.UID(), "aws.service", awsServiceAlias, ) - } } } @@ -141,6 +193,21 @@ func main() { os.Exit(1) } + if err = mgr.AddHealthzCheck("health", ctrlrthealthz.Ping); err != nil { + setupLog.Error( + err, "unable to set up health check", + "aws.service", awsServiceAlias, + ) + os.Exit(1) + } + if err = mgr.AddReadyzCheck("check", ctrlrthealthz.Ping); err != nil { + setupLog.Error( + err, "unable to set up ready check", + "aws.service", awsServiceAlias, + ) + os.Exit(1) + } + setupLog.Info( "starting manager", "aws.service", awsServiceAlias, diff --git a/config/controller/deployment.yaml b/config/controller/deployment.yaml index 3b47cc8d..8380edc4 100644 --- a/config/controller/deployment.yaml +++ b/config/controller/deployment.yaml @@ -29,14 +29,20 @@ spec: - "$(AWS_REGION)" - --aws-endpoint-url - "$(AWS_ENDPOINT_URL)" - - --enable-development-logging - - "$(ACK_ENABLE_DEVELOPMENT_LOGGING)" + - --enable-development-logging=$(ACK_ENABLE_DEVELOPMENT_LOGGING) - --log-level - "$(ACK_LOG_LEVEL)" - --resource-tags - "$(ACK_RESOURCE_TAGS)" - --watch-namespace - 
"$(ACK_WATCH_NAMESPACE)" + - --enable-leader-election=$(ENABLE_LEADER_ELECTION) + - --leader-election-namespace + - "$(LEADER_ELECTION_NAMESPACE)" + - --reconcile-default-max-concurrent-syncs + - "$(RECONCILE_DEFAULT_MAX_CONCURRENT_SYNCS)" + - --feature-gates + - "$(FEATURE_GATES)" image: controller:latest name: controller ports: @@ -66,6 +72,14 @@ spec: value: "info" - name: ACK_RESOURCE_TAGS value: "services.k8s.aws/controller-version=%CONTROLLER_SERVICE%-%CONTROLLER_VERSION%,services.k8s.aws/namespace=%K8S_NAMESPACE%" + - name: ENABLE_LEADER_ELECTION + value: "false" + - name: LEADER_ELECTION_NAMESPACE + value: "ack-system" + - name: "RECONCILE_DEFAULT_MAX_CONCURRENT_SYNCS" + value: "1" + - name: "FEATURE_GATES" + value: "" securityContext: allowPrivilegeEscalation: false privileged: false @@ -73,8 +87,24 @@ spec: capabilities: drop: - ALL + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + securityContext: + seccompProfile: + type: RuntimeDefault terminationGracePeriodSeconds: 10 serviceAccountName: ack-elasticache-controller hostIPC: false - hostNetwork: false hostPID: false + hostNetwork: false + dnsPolicy: ClusterFirst diff --git a/config/controller/kustomization.yaml b/config/controller/kustomization.yaml index 82e8f285..367a1f22 100644 --- a/config/controller/kustomization.yaml +++ b/config/controller/kustomization.yaml @@ -6,4 +6,4 @@ kind: Kustomization images: - name: controller newName: public.ecr.aws/aws-controllers-k8s/elasticache-controller - newTag: v0.0.22 + newTag: 1.2.3 diff --git a/config/controller/service.yaml b/config/controller/service.yaml index f85a3d13..51c0e987 100644 --- a/config/controller/service.yaml +++ b/config/controller/service.yaml @@ -11,4 +11,4 @@ spec: port: 8080 targetPort: http protocol: TCP - type: NodePort + type: ClusterIP diff --git a/config/crd/bases/elasticache.services.k8s.aws_cacheclusters.yaml b/config/crd/bases/elasticache.services.k8s.aws_cacheclusters.yaml new file mode 100644 index 00000000..4c3bc982 --- /dev/null +++ b/config/crd/bases/elasticache.services.k8s.aws_cacheclusters.yaml @@ -0,0 +1,846 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.2 + name: cacheclusters.elasticache.services.k8s.aws +spec: + group: elasticache.services.k8s.aws + names: + kind: CacheCluster + listKind: CacheClusterList + plural: cacheclusters + singular: cachecluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.engineVersion + name: VERSION + type: string + - jsonPath: .status.cacheClusterStatus + name: STATUS + type: string + - jsonPath: .status.configurationEndpoint.address + name: ENDPOINT + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="ACK.ResourceSynced")].status + name: Synced + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: CacheCluster is the Schema for the CacheClusters API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + CacheClusterSpec defines the desired state of CacheCluster. + + Contains all of the attributes of a specific cluster. + properties: + authToken: + description: |- + Reserved parameter. The password used to access a password protected server. + + Password constraints: + + * Must be only printable ASCII characters. + + * Must be at least 16 characters and no more than 128 characters in length. + properties: + key: + description: Key is the key within the secret + type: string + name: + description: name is unique within a namespace to reference a + secret resource. + type: string + namespace: + description: namespace defines the space within which the secret + name must be unique. + type: string + required: + - key + type: object + x-kubernetes-map-type: atomic + autoMinorVersionUpgrade: + description: |- + If you are running Valkey 7.2 and above or Redis OSS engine version 6.0 and + above, set this parameter to yes to opt-in to the next auto minor version + upgrade campaign. This parameter is disabled for previous versions. + type: boolean + azMode: + description: |- + Specifies whether the nodes in this Memcached cluster are created in a single + Availability Zone or created across multiple Availability Zones in the cluster's + region. + + This parameter is only supported for Memcached clusters. + + If the AZMode and PreferredAvailabilityZones are not specified, ElastiCache + assumes single-az mode. + type: string + cacheClusterID: + description: |- + The node group (shard) identifier. This parameter is stored as a lowercase + string. + + Constraints: + + * A name must contain from 1 to 50 alphanumeric characters or hyphens. + + * The first character must be a letter. + + * A name cannot end with a hyphen or contain two consecutive hyphens. + type: string + cacheNodeType: + description: |- + The compute and memory capacity of the nodes in the node group (shard). + + The following node types are supported by ElastiCache. Generally speaking, + the current generation types provide more memory and computational power + at lower cost when compared to their equivalent previous generation counterparts. 
+ + * General purpose: Current generation: M7g node types: cache.m7g.large, + cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, + cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + M6g node types (available only for Redis OSS engine version 5.0.6 onward + and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, + cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, + cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, + cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: + cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge + T4g node types (available only for Redis OSS engine version 5.0.6 onward + and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, + cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium + T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous + generation: (not recommended. Existing clusters are still supported but + creation of new clusters is not supported for these types.) T1 node types: + cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + cache.m3.2xlarge + + * Compute optimized: Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for + these types.) C1 node types: cache.c1.xlarge + + * Memory optimized: Current generation: R7g node types: cache.r7g.large, + cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, + cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + R6g node types (available only for Redis OSS engine version 5.0.6 onward + and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, + cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, + cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, + cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: + cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, + cache.r4.16xlarge Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for + these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + cache.r3.8xlarge + + Additional node type info + + * All current generation instance types are created in Amazon VPC by default. + + * Valkey or Redis OSS append-only files (AOF) are not supported for T1 + or T2 instances. + + * Valkey or Redis OSS Multi-AZ with automatic failover is not supported + on T1 instances. + + * The configuration variables appendonly and appendfsync are not supported + on Valkey, or on Redis OSS version 2.8.22 and later. + type: string + cacheParameterGroupName: + description: |- + The name of the parameter group to associate with this cluster. If this argument + is omitted, the default parameter group for the specified engine is used. 
+ You cannot use any parameter group which has cluster-enabled='yes' when creating + a cluster. + type: string + x-kubernetes-validations: + - message: Value is immutable once set + rule: self == oldSelf + cacheParameterGroupRef: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + namespace: + type: string + type: object + type: object + cacheSecurityGroupNames: + description: |- + A list of security group names to associate with this cluster. + + Use this parameter only when you are creating a cluster outside of an Amazon + Virtual Private Cloud (Amazon VPC). + items: + type: string + type: array + cacheSubnetGroupName: + description: |- + The name of the subnet group to be used for the cluster. + + Use this parameter only when you are creating a cluster in an Amazon Virtual + Private Cloud (Amazon VPC). + + If you're going to launch your cluster in an Amazon VPC, you need to create + a subnet group before you start creating a cluster. For more information, + see Subnets and Subnet Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/SubnetGroups.html). + type: string + cacheSubnetGroupRef: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + namespace: + type: string + type: object + type: object + engine: + description: |- + The name of the cache engine to be used for this cluster. + + Valid values for this parameter are: memcached | redis + type: string + engineVersion: + description: |- + The version number of the cache engine to be used for this cluster. To view + the supported cache engine versions, use the DescribeCacheEngineVersions + operation. + + Important: You can upgrade to a newer engine version (see Selecting a Cache + Engine and Version (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/SelectEngine.html#VersionManagement)), + but you cannot downgrade to an earlier engine version. If you want to use + an earlier engine version, you must delete the existing cluster or replication + group and create it anew with the earlier engine version. + type: string + ipDiscovery: + description: |- + The network type you choose when modifying a cluster, either ipv4 | ipv6. + IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine + version 6.2 and above or Memcached engine version 1.6.6 and above on all + instances built on the Nitro system (http://aws.amazon.com/ec2/nitro/). + type: string + logDeliveryConfigurations: + description: Specifies the destination, format and type of the logs. + items: + description: Specifies the destination, format and type of the logs. + properties: + destinationDetails: + description: |- + Configuration details of either a CloudWatch Logs destination or Kinesis + Data Firehose destination. 
+ properties: + cloudWatchLogsDetails: + description: The configuration details of the CloudWatch + Logs destination. + properties: + logGroup: + type: string + type: object + kinesisFirehoseDetails: + description: The configuration details of the Kinesis Data + Firehose destination. + properties: + deliveryStream: + type: string + type: object + type: object + destinationType: + type: string + enabled: + type: boolean + logFormat: + type: string + logType: + type: string + type: object + type: array + networkType: + description: |- + Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads + using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached + engine version 1.6.6 and above on all instances built on the Nitro system + (http://aws.amazon.com/ec2/nitro/). + type: string + notificationTopicARN: + description: |- + The Amazon Resource Name (ARN) of the Amazon Simple Notification Service + (SNS) topic to which notifications are sent. + + The Amazon SNS topic owner must be the same as the cluster owner. + type: string + notificationTopicRef: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + namespace: + type: string + type: object + type: object + numCacheNodes: + description: |- + The initial number of cache nodes that the cluster has. + + For clusters running Valkey or Redis OSS, this value must be 1. For clusters + running Memcached, this value must be between 1 and 40. + + If you need more than 40 nodes for your Memcached cluster, please fill out + the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/ + (http://aws.amazon.com/contact-us/elasticache-node-limit-request/). + format: int64 + type: integer + outpostMode: + description: |- + Specifies whether the nodes in the cluster are created in a single outpost + or across multiple outposts. + type: string + port: + description: The port number on which each of the cache nodes accepts + connections. + format: int64 + type: integer + preferredAvailabilityZone: + description: |- + The EC2 Availability Zone in which the cluster is created. + + All nodes belonging to this cluster are placed in the preferred Availability + Zone. If you want to create your nodes across multiple Availability Zones, + use PreferredAvailabilityZones. + + Default: System chosen Availability Zone. + type: string + preferredAvailabilityZones: + description: |- + A list of the Availability Zones in which cache nodes are created. The order + of the zones in the list is not important. + + This option is only supported on Memcached. + + If you are creating your cluster in an Amazon VPC (recommended) you can only + locate nodes in Availability Zones that are associated with the subnets in + the selected subnet group. + + The number of Availability Zones listed must equal the value of NumCacheNodes. + + If you want all the nodes in the same Availability Zone, use PreferredAvailabilityZone + instead, or repeat the Availability Zone multiple times in the list. + + Default: System chosen Availability Zones. 
+ items: + type: string + type: array + preferredMaintenanceWindow: + description: |- + Specifies the weekly time range during which maintenance on the cluster is + performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi + (24H Clock UTC). The minimum maintenance window is a 60 minute period. + type: string + preferredOutpostARN: + description: The outpost ARN in which the cache cluster is created. + type: string + preferredOutpostARNs: + description: The outpost ARNs in which the cache cluster is created. + items: + type: string + type: array + replicationGroupID: + description: |- + The ID of the replication group to which this cluster should belong. If this + parameter is specified, the cluster is added to the specified replication + group as a read replica; otherwise, the cluster is a standalone primary that + is not part of any replication group. + + If the specified replication group is Multi-AZ enabled and the Availability + Zone is not specified, the cluster is created in Availability Zones that + provide the best spread of read replicas across Availability Zones. + + This parameter is only valid if the Engine parameter is redis. + type: string + x-kubernetes-validations: + - message: Value is immutable once set + rule: self == oldSelf + replicationGroupRef: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + namespace: + type: string + type: object + type: object + securityGroupIDs: + description: |- + One or more VPC security groups associated with the cluster. + + Use this parameter only when you are creating a cluster in an Amazon Virtual + Private Cloud (Amazon VPC). + items: + type: string + type: array + securityGroupRefs: + items: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + namespace: + type: string + type: object + type: object + type: array + snapshotARNs: + description: |- + A single-element string list containing an Amazon Resource Name (ARN) that + uniquely identifies a Valkey or Redis OSS RDB snapshot file stored in Amazon + S3. The snapshot file is used to populate the node group (shard). The Amazon + S3 object name in the ARN cannot contain any commas. + + This parameter is only valid if the Engine parameter is redis. + + Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb + items: + type: string + type: array + snapshotName: + description: |- + The name of a Valkey or Redis OSS snapshot from which to restore data into + the new node group (shard). The snapshot status changes to restoring while + the new node group (shard) is being created. + + This parameter is only valid if the Engine parameter is redis. 
+ type: string + x-kubernetes-validations: + - message: Value is immutable once set + rule: self == oldSelf + snapshotRef: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + namespace: + type: string + type: object + type: object + snapshotRetentionLimit: + description: |- + The number of days for which ElastiCache retains automatic snapshots before + deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot + taken today is retained for 5 days before being deleted. + + This parameter is only valid if the Engine parameter is redis. + + Default: 0 (i.e., automatic backups are disabled for this cache cluster). + format: int64 + type: integer + snapshotWindow: + description: |- + The daily time range (in UTC) during which ElastiCache begins taking a daily + snapshot of your node group (shard). + + Example: 05:00-09:00 + + If you do not specify this parameter, ElastiCache automatically chooses an + appropriate time range. + + This parameter is only valid if the Engine parameter is redis. + type: string + tags: + description: A list of tags to be added to this resource. + items: + description: |- + A tag that can be added to an ElastiCache cluster or replication group. Tags + are composed of a Key/Value pair. You can use tags to categorize and track + all your ElastiCache resources, with the exception of global replication + group. When you add or remove tags on replication groups, those actions will + be replicated to all nodes in the replication group. A tag with a null Value + is permitted. + properties: + key: + type: string + value: + type: string + type: object + type: array + transitEncryptionEnabled: + description: A flag that enables in-transit encryption when set to + true. + type: boolean + required: + - cacheClusterID + type: object + status: + description: CacheClusterStatus defines the observed state of CacheCluster + properties: + ackResourceMetadata: + description: |- + All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + that is used to contain resource sync state, account ownership, + constructed ARN for the resource + properties: + arn: + description: |- + ARN is the Amazon Resource Name for the resource. This is a + globally-unique identifier and is set only by the ACK service controller + once the controller has orchestrated the creation of the resource OR + when it has verified that an "adopted" resource (a resource where the + ARN annotation was set by the Kubernetes user on the CR) exists and + matches the supplied CR's Spec field values. + https://github.com/aws/aws-controllers-k8s/issues/270 + type: string + ownerAccountID: + description: |- + OwnerAccountID is the AWS Account ID of the account that owns the + backend AWS service API resource. + type: string + region: + description: Region is the AWS region in which the resource exists + or will exist. + type: string + required: + - ownerAccountID + - region + type: object + atRestEncryptionEnabled: + description: |- + A flag that enables encryption at-rest when set to true. + + You cannot modify the value of AtRestEncryptionEnabled after the cluster + is created. 
To enable at-rest encryption on a cluster you must set AtRestEncryptionEnabled + to true when you create a cluster. + + Required: Only available when creating a replication group in an Amazon VPC + using Redis OSS version 3.2.6, 4.x or later. + + Default: false + type: boolean + authTokenEnabled: + description: |- + A flag that enables using an AuthToken (password) when issuing Valkey or + Redis OSS commands. + + Default: false + type: boolean + authTokenLastModifiedDate: + description: The date the auth token was last modified + format: date-time + type: string + cacheClusterCreateTime: + description: The date and time when the cluster was created. + format: date-time + type: string + cacheClusterStatus: + description: |- + The current state of this cluster, one of the following values: available, + creating, deleted, deleting, incompatible-network, modifying, rebooting cluster + nodes, restore-failed, or snapshotting. + type: string + cacheNodes: + description: A list of cache nodes that are members of the cluster. + items: + description: |- + Represents an individual cache node within a cluster. Each cache node runs + its own instance of the cluster's protocol-compliant caching software - either + Memcached, Valkey or Redis OSS. + + The following node types are supported by ElastiCache. Generally speaking, + the current generation types provide more memory and computational power + at lower cost when compared to their equivalent previous generation counterparts. + + * General purpose: Current generation: M7g node types: cache.m7g.large, + cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, + cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + M6g node types (available only for Redis OSS engine version 5.0.6 onward + and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, + cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, + cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, + cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: + cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge + T4g node types (available only for Redis OSS engine version 5.0.6 onward + and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, + cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium + T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous + generation: (not recommended. Existing clusters are still supported but + creation of new clusters is not supported for these types.) T1 node types: + cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + cache.m3.2xlarge + + * Compute optimized: Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for + these types.) 
C1 node types: cache.c1.xlarge + + * Memory optimized: Current generation: R7g node types: cache.r7g.large, + cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, + cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + R6g node types (available only for Redis OSS engine version 5.0.6 onward + and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, + cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, + cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, + cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: + cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, + cache.r4.16xlarge Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for + these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + cache.r3.8xlarge + + Additional node type info + + * All current generation instance types are created in Amazon VPC by default. + + * Valkey or Redis OSS append-only files (AOF) are not supported for T1 + or T2 instances. + + * Valkey or Redis OSS Multi-AZ with automatic failover is not supported + on T1 instances. + + * The configuration variables appendonly and appendfsync are not supported + on Valkey, or on Redis OSS version 2.8.22 and later. + properties: + cacheNodeCreateTime: + format: date-time + type: string + cacheNodeID: + type: string + cacheNodeStatus: + type: string + customerAvailabilityZone: + type: string + customerOutpostARN: + type: string + endpoint: + description: |- + Represents the information required for client programs to connect to a cache + node. This value is read-only. + properties: + address: + type: string + port: + format: int64 + type: integer + type: object + parameterGroupStatus: + type: string + sourceCacheNodeID: + type: string + type: object + type: array + cacheParameterGroup: + description: Status of the cache parameter group. + properties: + cacheNodeIDsToReboot: + items: + type: string + type: array + cacheParameterGroupName: + type: string + parameterApplyStatus: + type: string + type: object + cacheSecurityGroups: + description: A list of cache security group elements, composed of + name and status sub-elements. + items: + description: Represents a cluster's status within a particular cache + security group. + properties: + cacheSecurityGroupName: + type: string + status: + type: string + type: object + type: array + clientDownloadLandingPage: + description: |- + The URL of the web page where you can download the latest ElastiCache client + library. + type: string + conditions: + description: |- + All CRs managed by ACK have a common `Status.Conditions` member that + contains a collection of `ackv1alpha1.Condition` objects that describe + the various terminal states of the CR and its backend AWS service API + resource + items: + description: |- + Condition is the common struct used by all CRDs managed by ACK service + controllers to indicate terminal states of the CR and its backend AWS + service API resource + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. 
+ format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type is the type of the Condition + type: string + required: + - status + - type + type: object + type: array + configurationEndpoint: + description: |- + Represents a Memcached cluster endpoint which can be used by an application + to connect to any node in the cluster. The configuration endpoint will always + have .cfg in it. + + Example: mem-3.9dvc4r.cfg.usw2.cache.amazonaws.com:11211 + properties: + address: + type: string + port: + format: int64 + type: integer + type: object + notificationConfiguration: + description: |- + Describes a notification topic and its status. Notification topics are used + for publishing ElastiCache events to subscribers using Amazon Simple Notification + Service (SNS). + properties: + topicARN: + type: string + topicStatus: + type: string + type: object + pendingModifiedValues: + description: |- + A group of settings that are applied to the cluster in the future, or that + are currently being applied. + properties: + authTokenStatus: + type: string + cacheNodeIDsToRemove: + items: + type: string + type: array + cacheNodeType: + type: string + engineVersion: + type: string + numCacheNodes: + format: int64 + type: integer + transitEncryptionEnabled: + type: boolean + transitEncryptionMode: + type: string + type: object + replicationGroupLogDeliveryEnabled: + description: |- + A boolean value indicating whether log delivery is enabled for the replication + group. + type: boolean + securityGroups: + description: A list of VPC Security Groups associated with the cluster. + items: + description: Represents a single cache security group and its status. + properties: + securityGroupID: + type: string + status: + type: string + type: object + type: array + transitEncryptionMode: + description: |- + A setting that allows you to migrate your clients to use in-transit encryption, + with no downtime. + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/elasticache.services.k8s.aws_cacheparametergroups.yaml b/config/crd/bases/elasticache.services.k8s.aws_cacheparametergroups.yaml index c1129618..07453f3b 100644 --- a/config/crd/bases/elasticache.services.k8s.aws_cacheparametergroups.yaml +++ b/config/crd/bases/elasticache.services.k8s.aws_cacheparametergroups.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.2 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.2 name: cacheparametergroups.elasticache.services.k8s.aws spec: group: elasticache.services.k8s.aws @@ -22,26 +21,35 @@ spec: API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: "CacheParameterGroupSpec defines the desired state of CacheParameterGroup. - \n Represents the output of a CreateCacheParameterGroup operation." + description: |- + CacheParameterGroupSpec defines the desired state of CacheParameterGroup. + + Represents the output of a CreateCacheParameterGroup operation. properties: cacheParameterGroupFamily: - description: "The name of the cache parameter group family that the - cache parameter group can be used with. \n Valid values are: memcached1.4 - | memcached1.5 | memcached1.6 | redis2.6 | redis2.8 | redis3.2 | - redis4.0 | redis5.0 | redis6.x" + description: |- + The name of the cache parameter group family that the cache parameter group + can be used with. + + Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | + redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x | redis7 type: string cacheParameterGroupName: description: A user-specified name for the cache parameter group. @@ -51,10 +59,10 @@ spec: group. type: string parameterNameValues: - description: An array of parameter names and values for the parameter - update. You must supply at least one parameter name and value; subsequent - arguments are optional. A maximum of 20 parameters may be modified - per request. + description: |- + An array of parameter names and values for the parameter update. You must + supply at least one parameter name and value; subsequent arguments are optional. + A maximum of 20 parameters may be modified per request. items: description: Describes a name-value pair that is used to update the value of a parameter. @@ -66,16 +74,16 @@ spec: type: object type: array tags: - description: A list of tags to be added to this resource. A tag is - a key-value pair. A tag key must be accompanied by a tag value, - although null is accepted. + description: |- + A list of tags to be added to this resource. A tag is a key-value pair. A + tag key must be accompanied by a tag value, although null is accepted. items: - description: A tag that can be added to an ElastiCache cluster or - replication group. Tags are composed of a Key/Value pair. You - can use tags to categorize and track all your ElastiCache resources, - with the exception of global replication group. When you add or - remove tags on replication groups, those actions will be replicated - to all nodes in the replication group. A tag with a null Value + description: |- + A tag that can be added to an ElastiCache cluster or replication group. Tags + are composed of a Key/Value pair. You can use tags to categorize and track + all your ElastiCache resources, with the exception of global replication + group. 
When you add or remove tags on replication groups, those actions will + be replicated to all nodes in the replication group. A tag with a null Value is permitted. properties: key: @@ -93,24 +101,25 @@ spec: description: CacheParameterGroupStatus defines the observed state of CacheParameterGroup properties: ackResourceMetadata: - description: All CRs managed by ACK have a common `Status.ACKResourceMetadata` - member that is used to contain resource sync state, account ownership, + description: |- + All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + that is used to contain resource sync state, account ownership, constructed ARN for the resource properties: arn: - description: 'ARN is the Amazon Resource Name for the resource. - This is a globally-unique identifier and is set only by the - ACK service controller once the controller has orchestrated - the creation of the resource OR when it has verified that an - "adopted" resource (a resource where the ARN annotation was - set by the Kubernetes user on the CR) exists and matches the - supplied CR''s Spec field values. TODO(vijat@): Find a better - strategy for resources that do not have ARN in CreateOutputResponse - https://github.com/aws/aws-controllers-k8s/issues/270' + description: |- + ARN is the Amazon Resource Name for the resource. This is a + globally-unique identifier and is set only by the ACK service controller + once the controller has orchestrated the creation of the resource OR + when it has verified that an "adopted" resource (a resource where the + ARN annotation was set by the Kubernetes user on the CR) exists and + matches the supplied CR's Spec field values. + https://github.com/aws/aws-controllers-k8s/issues/270 type: string ownerAccountID: - description: OwnerAccountID is the AWS Account ID of the account - that owns the backend AWS service API resource. + description: |- + OwnerAccountID is the AWS Account ID of the account that owns the + backend AWS service API resource. type: string region: description: Region is the AWS region in which the resource exists @@ -121,14 +130,16 @@ spec: - region type: object conditions: - description: All CRS managed by ACK have a common `Status.Conditions` - member that contains a collection of `ackv1alpha1.Condition` objects - that describe the various terminal states of the CR and its backend - AWS service API resource + description: |- + All CRs managed by ACK have a common `Status.Conditions` member that + contains a collection of `ackv1alpha1.Condition` objects that describe + the various terminal states of the CR and its backend AWS service API + resource items: - description: Condition is the common struct used by all CRDs managed - by ACK service controllers to indicate terminal states of the - CR and its backend AWS service API resource + description: |- + Condition is the common struct used by all CRDs managed by ACK service + controllers to indicate terminal states of the CR and its backend AWS + service API resource properties: lastTransitionTime: description: Last time the condition transitioned from one status @@ -154,12 +165,14 @@ spec: type: object type: array events: - description: A list of events. Each element in the list contains detailed - information about one event. + description: |- + A list of events. Each element in the list contains detailed information + about one event. items: - description: Represents a single occurrence of something interesting - within the system. 
Some examples of events are creating a cluster, - adding or removing a cache node, or rebooting a node. + description: |- + Represents a single occurrence of something interesting within the system. + Some examples of events are creating a cluster, adding or removing a cache + node, or rebooting a node. properties: date: format: date-time @@ -179,8 +192,9 @@ spec: parameters: description: A list of Parameter instances. items: - description: Describes an individual setting that controls some - aspect of ElastiCache behavior. + description: |- + Describes an individual setting that controls some aspect of ElastiCache + behavior. properties: allowedValues: type: string diff --git a/config/crd/bases/elasticache.services.k8s.aws_cachesubnetgroups.yaml b/config/crd/bases/elasticache.services.k8s.aws_cachesubnetgroups.yaml index 239672d1..e92d339a 100644 --- a/config/crd/bases/elasticache.services.k8s.aws_cachesubnetgroups.yaml +++ b/config/crd/bases/elasticache.services.k8s.aws_cachesubnetgroups.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.2 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.2 name: cachesubnetgroups.elasticache.services.k8s.aws spec: group: elasticache.services.k8s.aws @@ -21,46 +20,78 @@ spec: description: CacheSubnetGroup is the Schema for the CacheSubnetGroups API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: "CacheSubnetGroupSpec defines the desired state of CacheSubnetGroup. - \n Represents the output of one of the following operations: \n * CreateCacheSubnetGroup - \n * ModifyCacheSubnetGroup" + description: |- + CacheSubnetGroupSpec defines the desired state of CacheSubnetGroup. + + Represents the output of one of the following operations: + + * CreateCacheSubnetGroup + + * ModifyCacheSubnetGroup properties: cacheSubnetGroupDescription: description: A description for the cache subnet group. type: string cacheSubnetGroupName: - description: "A name for the cache subnet group. This value is stored - as a lowercase string. \n Constraints: Must contain no more than - 255 alphanumeric characters or hyphens. 
\n Example: mysubnetgroup" + description: |- + A name for the cache subnet group. This value is stored as a lowercase string. + + Constraints: Must contain no more than 255 alphanumeric characters or hyphens. + + Example: mysubnetgroup type: string subnetIDs: description: A list of VPC subnet IDs for the cache subnet group. items: type: string type: array + subnetRefs: + items: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + namespace: + type: string + type: object + type: object + type: array tags: - description: A list of tags to be added to this resource. A tag is - a key-value pair. A tag key must be accompanied by a tag value, - although null is accepted. + description: |- + A list of tags to be added to this resource. A tag is a key-value pair. A + tag key must be accompanied by a tag value, although null is accepted. items: - description: A tag that can be added to an ElastiCache cluster or - replication group. Tags are composed of a Key/Value pair. You - can use tags to categorize and track all your ElastiCache resources, - with the exception of global replication group. When you add or - remove tags on replication groups, those actions will be replicated - to all nodes in the replication group. A tag with a null Value + description: |- + A tag that can be added to an ElastiCache cluster or replication group. Tags + are composed of a Key/Value pair. You can use tags to categorize and track + all your ElastiCache resources, with the exception of global replication + group. When you add or remove tags on replication groups, those actions will + be replicated to all nodes in the replication group. A tag with a null Value is permitted. properties: key: @@ -72,30 +103,30 @@ spec: required: - cacheSubnetGroupDescription - cacheSubnetGroupName - - subnetIDs type: object status: description: CacheSubnetGroupStatus defines the observed state of CacheSubnetGroup properties: ackResourceMetadata: - description: All CRs managed by ACK have a common `Status.ACKResourceMetadata` - member that is used to contain resource sync state, account ownership, + description: |- + All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + that is used to contain resource sync state, account ownership, constructed ARN for the resource properties: arn: - description: 'ARN is the Amazon Resource Name for the resource. - This is a globally-unique identifier and is set only by the - ACK service controller once the controller has orchestrated - the creation of the resource OR when it has verified that an - "adopted" resource (a resource where the ARN annotation was - set by the Kubernetes user on the CR) exists and matches the - supplied CR''s Spec field values. TODO(vijat@): Find a better - strategy for resources that do not have ARN in CreateOutputResponse - https://github.com/aws/aws-controllers-k8s/issues/270' + description: |- + ARN is the Amazon Resource Name for the resource. 
This is a + globally-unique identifier and is set only by the ACK service controller + once the controller has orchestrated the creation of the resource OR + when it has verified that an "adopted" resource (a resource where the + ARN annotation was set by the Kubernetes user on the CR) exists and + matches the supplied CR's Spec field values. + https://github.com/aws/aws-controllers-k8s/issues/270 type: string ownerAccountID: - description: OwnerAccountID is the AWS Account ID of the account - that owns the backend AWS service API resource. + description: |- + OwnerAccountID is the AWS Account ID of the account that owns the + backend AWS service API resource. type: string region: description: Region is the AWS region in which the resource exists @@ -106,14 +137,16 @@ spec: - region type: object conditions: - description: All CRS managed by ACK have a common `Status.Conditions` - member that contains a collection of `ackv1alpha1.Condition` objects - that describe the various terminal states of the CR and its backend - AWS service API resource + description: |- + All CRs managed by ACK have a common `Status.Conditions` member that + contains a collection of `ackv1alpha1.Condition` objects that describe + the various terminal states of the CR and its backend AWS service API + resource items: - description: Condition is the common struct used by all CRDs managed - by ACK service controllers to indicate terminal states of the - CR and its backend AWS service API resource + description: |- + Condition is the common struct used by all CRDs managed by ACK service + controllers to indicate terminal states of the CR and its backend AWS + service API resource properties: lastTransitionTime: description: Last time the condition transitioned from one status @@ -139,12 +172,14 @@ spec: type: object type: array events: - description: A list of events. Each element in the list contains detailed - information about one event. + description: |- + A list of events. Each element in the list contains detailed information + about one event. items: - description: Represents a single occurrence of something interesting - within the system. Some examples of events are creating a cluster, - adding or removing a cache node, or rebooting a node. + description: |- + Represents a single occurrence of something interesting within the system. + Some examples of events are creating a cluster, adding or removing a cache + node, or rebooting a node. properties: date: format: date-time @@ -160,9 +195,10 @@ spec: subnets: description: A list of subnets associated with the cache subnet group. items: - description: Represents the subnet associated with a cluster. This - parameter refers to subnets defined in Amazon Virtual Private - Cloud (Amazon VPC) and used with ElastiCache. + description: |- + Represents the subnet associated with a cluster. This parameter refers to + subnets defined in Amazon Virtual Private Cloud (Amazon VPC) and used with + ElastiCache. properties: subnetAvailabilityZone: description: Describes an Availability Zone in which the cluster @@ -182,8 +218,9 @@ spec: type: object type: array vpcID: - description: The Amazon Virtual Private Cloud identifier (VPC ID) - of the cache subnet group. + description: |- + The Amazon Virtual Private Cloud identifier (VPC ID) of the cache subnet + group. 
type: string type: object type: object diff --git a/config/crd/bases/elasticache.services.k8s.aws_replicationgroups.yaml b/config/crd/bases/elasticache.services.k8s.aws_replicationgroups.yaml index 6bc450eb..ff5c41bc 100644 --- a/config/crd/bases/elasticache.services.k8s.aws_replicationgroups.yaml +++ b/config/crd/bases/elasticache.services.k8s.aws_replicationgroups.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.2 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.2 name: replicationgroups.elasticache.services.k8s.aws spec: group: elasticache.services.k8s.aws @@ -21,52 +20,68 @@ spec: description: ReplicationGroup is the Schema for the ReplicationGroups API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: "ReplicationGroupSpec defines the desired state of ReplicationGroup. - \n Contains all of the attributes of a specific Redis replication group." + description: |- + ReplicationGroupSpec defines the desired state of ReplicationGroup. + + Contains all of the attributes of a specific Valkey or Redis OSS replication + group. properties: atRestEncryptionEnabled: - description: "A flag that enables encryption at rest when set to true. - \n You cannot modify the value of AtRestEncryptionEnabled after - the replication group is created. To enable encryption at rest on - a replication group you must set AtRestEncryptionEnabled to true - when you create the replication group. \n Required: Only available - when creating a replication group in an Amazon VPC using redis version - 3.2.6, 4.x or later. \n Default: false" + description: |- + A flag that enables encryption at rest when set to true. + + You cannot modify the value of AtRestEncryptionEnabled after the replication + group is created. To enable encryption at rest on a replication group you + must set AtRestEncryptionEnabled to true when you create the replication + group. + + Required: Only available when creating a replication group in an Amazon VPC + using Redis OSS version 3.2.6, 4.x or later. + + Default: false type: boolean authToken: - description: "Reserved parameter. 
The password used to access a password - protected server. \n AuthToken can be specified only on replication - groups where TransitEncryptionEnabled is true. \n For HIPAA compliance, - you must specify TransitEncryptionEnabled as true, an AuthToken, - and a CacheSubnetGroup. \n Password constraints: \n * Must be only - printable ASCII characters. \n * Must be at least 16 characters - and no more than 128 characters in length. \n * The only permitted - printable special characters are !, &, #, $, ^, <, >, and -. Other - printable special characters cannot be used in the AUTH token. \n - For more information, see AUTH password (http://redis.io/commands/AUTH) - at http://redis.io/commands/AUTH." + description: |- + Reserved parameter. The password used to access a password protected server. + + AuthToken can be specified only on replication groups where TransitEncryptionEnabled + is true. + + For HIPAA compliance, you must specify TransitEncryptionEnabled as true, + an AuthToken, and a CacheSubnetGroup. + + Password constraints: + + * Must be only printable ASCII characters. + + * Must be at least 16 characters and no more than 128 characters in length. properties: key: description: Key is the key within the secret type: string name: - description: Name is unique within a namespace to reference a + description: name is unique within a namespace to reference a secret resource. type: string namespace: - description: Namespace defines the space within which the secret + description: namespace defines the space within which the secret name must be unique. type: string required: @@ -74,72 +89,109 @@ spec: type: object x-kubernetes-map-type: atomic automaticFailoverEnabled: - description: "Specifies whether a read-only replica is automatically - promoted to read/write primary if the existing primary fails. \n - AutomaticFailoverEnabled must be enabled for Redis (cluster mode - enabled) replication groups. \n Default: false" + description: |- + Specifies whether a read-only replica is automatically promoted to read/write + primary if the existing primary fails. + + AutomaticFailoverEnabled must be enabled for Valkey or Redis OSS (cluster + mode enabled) replication groups. + + Default: false type: boolean cacheNodeType: - description: "The compute and memory capacity of the nodes in the - node group (shard). \n The following node types are supported by - ElastiCache. Generally speaking, the current generation types provide - more memory and computational power at lower cost when compared - to their equivalent previous generation counterparts. 
\n * General - purpose: Current generation: M6g node types (available only for - Redis engine version 5.0.6 onward and for Memcached engine version - 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, - cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge - For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, - cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: - cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, - cache.m4.10xlarge T4g node types (available only for Redis engine - version 5.0.6 onward and Memcached engine version 1.5.16 onward): - cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: - cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, - cache.t2.small, cache.t2.medium Previous generation: (not recommended. - Existing clusters are still supported but creation of new clusters - is not supported for these types.) T1 node types: cache.t1.micro - M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, - cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, - cache.m3.xlarge, cache.m3.2xlarge \n * Compute optimized: Previous - generation: (not recommended. Existing clusters are still supported - but creation of new clusters is not supported for these types.) - C1 node types: cache.c1.xlarge \n * Memory optimized with data tiering: - Current generation: R6gd node types (available only for Redis engine - version 6.2 onward). cache.r6gd.xlarge, cache.r6gd.2xlarge, cache.r6gd.4xlarge, - cache.r6gd.8xlarge, cache.r6gd.12xlarge, cache.r6gd.16xlarge \n - * Memory optimized: Current generation: R6g node types (available - only for Redis engine version 5.0.6 onward and for Memcached engine - version 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, - cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge - For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, - cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: - cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, - cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. - Existing clusters are still supported but creation of new clusters - is not supported for these types.) M2 node types: cache.m2.xlarge, - cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, - cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge - \n Additional node type info \n * All current generation instance - types are created in Amazon VPC by default. \n * Redis append-only - files (AOF) are not supported for T1 or T2 instances. \n * Redis - Multi-AZ with automatic failover is not supported on T1 instances. - \n * Redis configuration variables appendonly and appendfsync are - not supported on Redis version 2.8.22 and later." + description: |- + The compute and memory capacity of the nodes in the node group (shard). + + The following node types are supported by ElastiCache. 
Generally speaking, + the current generation types provide more memory and computational power + at lower cost when compared to their equivalent previous generation counterparts. + + * General purpose: Current generation: M7g node types: cache.m7g.large, + cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, + cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + M6g node types (available only for Redis OSS engine version 5.0.6 onward + and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, + cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, + cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, + cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: + cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge + T4g node types (available only for Redis OSS engine version 5.0.6 onward + and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, + cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium + T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous + generation: (not recommended. Existing clusters are still supported but + creation of new clusters is not supported for these types.) T1 node types: + cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + cache.m3.2xlarge + + * Compute optimized: Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for + these types.) C1 node types: cache.c1.xlarge + + * Memory optimized: Current generation: R7g node types: cache.r7g.large, + cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, + cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + R6g node types (available only for Redis OSS engine version 5.0.6 onward + and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, + cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, + cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, + cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: + cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, + cache.r4.16xlarge Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for + these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + cache.r3.8xlarge + + Additional node type info + + * All current generation instance types are created in Amazon VPC by default. + + * Valkey or Redis OSS append-only files (AOF) are not supported for T1 + or T2 instances. + + * Valkey or Redis OSS Multi-AZ with automatic failover is not supported + on T1 instances. + + * The configuration variables appendonly and appendfsync are not supported + on Valkey, or on Redis OSS version 2.8.22 and later. 
type: string cacheParameterGroupName: - description: "The name of the parameter group to associate with this - replication group. If this argument is omitted, the default cache - parameter group for the specified engine is used. \n If you are - running Redis version 3.2.4 or later, only one node group (shard), - and want to use a default parameter group, we recommend that you - specify the parameter group by name. \n * To create a Redis (cluster - mode disabled) replication group, use CacheParameterGroupName=default.redis3.2. - \n * To create a Redis (cluster mode enabled) replication group, - use CacheParameterGroupName=default.redis3.2.cluster.on." + description: |- + The name of the parameter group to associate with this replication group. + If this argument is omitted, the default cache parameter group for the specified + engine is used. + + If you are running Valkey or Redis OSS version 3.2.4 or later, only one node + group (shard), and want to use a default parameter group, we recommend that + you specify the parameter group by name. + + * To create a Valkey or Redis OSS (cluster mode disabled) replication + group, use CacheParameterGroupName=default.redis3.2. + + * To create a Valkey or Redis OSS (cluster mode enabled) replication group, + use CacheParameterGroupName=default.redis3.2.cluster.on. type: string + cacheParameterGroupRef: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + namespace: + type: string + type: object + type: object cacheSecurityGroupNames: description: A list of cache security group names to associate with this replication group. @@ -147,35 +199,63 @@ spec: type: string type: array cacheSubnetGroupName: - description: "The name of the cache subnet group to be used for the - replication group. \n If you're going to launch your cluster in - an Amazon VPC, you need to create a subnet group before you start - creating a cluster. For more information, see Subnets and Subnet - Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SubnetGroups.html)." + description: |- + The name of the cache subnet group to be used for the replication group. + + If you're going to launch your cluster in an Amazon VPC, you need to create + a subnet group before you start creating a cluster. For more information, + see Subnets and Subnet Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/SubnetGroups.html). type: string + cacheSubnetGroupRef: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + namespace: + type: string + type: object + type: object dataTieringEnabled: - description: Enables data tiering. Data tiering is only supported - for replication groups using the r6gd node type. This parameter - must be set to true when using r6gd nodes. 
For more information, - see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/data-tiering.html). + description: |- + Enables data tiering. Data tiering is only supported for replication groups + using the r6gd node type. This parameter must be set to true when using r6gd + nodes. For more information, see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/data-tiering.html). type: boolean description: description: A user-created description for the replication group. type: string engine: - description: The name of the cache engine to be used for the clusters - in this replication group. Must be Redis. + description: |- + The name of the cache engine to be used for the clusters in this replication + group. The value must be set to Redis. type: string engineVersion: - description: "The version number of the cache engine to be used for - the clusters in this replication group. To view the supported cache - engine versions, use the DescribeCacheEngineVersions operation. - \n Important: You can upgrade to a newer engine version (see Selecting - a Cache Engine and Version (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement)) - in the ElastiCache User Guide, but you cannot downgrade to an earlier - engine version. If you want to use an earlier engine version, you - must delete the existing cluster or replication group and create - it anew with the earlier engine version." + description: |- + The version number of the cache engine to be used for the clusters in this + replication group. To view the supported cache engine versions, use the DescribeCacheEngineVersions + operation. + + Important: You can upgrade to a newer engine version (see Selecting a Cache + Engine and Version (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/SelectEngine.html#VersionManagement)) + in the ElastiCache User Guide, but you cannot downgrade to an earlier engine + version. If you want to use an earlier engine version, you must delete the + existing cluster or replication group and create it anew with the earlier + engine version. + type: string + ipDiscovery: + description: |- + The network type you choose when creating a replication group, either ipv4 + | ipv6. IPv6 is supported for workloads using Valkey 7.2 and above, Redis + OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above + on all instances built on the Nitro system (http://aws.amazon.com/ec2/nitro/). type: string kmsKeyID: description: The ID of the KMS key used to encrypt the disk in the @@ -187,8 +267,9 @@ spec: description: Specifies the destination, format and type of the logs. properties: destinationDetails: - description: Configuration details of either a CloudWatch Logs - destination or Kinesis Data Firehose destination. + description: |- + Configuration details of either a CloudWatch Logs destination or Kinesis + Data Firehose destination. properties: cloudWatchLogsDetails: description: The configuration details of the CloudWatch @@ -216,25 +297,37 @@ spec: type: object type: array multiAZEnabled: - description: 'A flag indicating if you have Multi-AZ enabled to enhance - fault tolerance. For more information, see Minimizing Downtime: - Multi-AZ (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/AutoFailover.html).' + description: |- + A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. 
+ For more information, see Minimizing Downtime: Multi-AZ (http://docs.aws.amazon.com/AmazonElastiCache/latest/dg/AutoFailover.html). type: boolean + networkType: + description: |- + Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads + using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached + engine version 1.6.6 and above on all instances built on the Nitro system + (http://aws.amazon.com/ec2/nitro/). + type: string + x-kubernetes-validations: + - message: Value is immutable once set + rule: self == oldSelf nodeGroupConfiguration: - description: "A list of node group (shard) configuration options. - Each node group (shard) configuration has the following members: - PrimaryAvailabilityZone, ReplicaAvailabilityZones, ReplicaCount, - and Slots. \n If you're creating a Redis (cluster mode disabled) - or a Redis (cluster mode enabled) replication group, you can use - this parameter to individually configure each node group (shard), - or you can omit this parameter. However, it is required when seeding - a Redis (cluster mode enabled) cluster from a S3 rdb file. You must - configure each node group (shard) using this parameter because you - must specify the slots for each node group." + description: |- + A list of node group (shard) configuration options. Each node group (shard) + configuration has the following members: PrimaryAvailabilityZone, ReplicaAvailabilityZones, + ReplicaCount, and Slots. + + If you're creating a Valkey or Redis OSS (cluster mode disabled) or a Valkey + or Redis OSS (cluster mode enabled) replication group, you can use this parameter + to individually configure each node group (shard), or you can omit this parameter. + However, it is required when seeding a Valkey or Redis OSS (cluster mode + enabled) cluster from a S3 rdb file. You must configure each node group (shard) + using this parameter because you must specify the slots for each node group. items: - description: 'Node group (shard) configuration options. Each node - group (shard) configuration has the following: Slots, PrimaryAvailabilityZone, - ReplicaAvailabilityZones, ReplicaCount.' + description: |- + Node group (shard) configuration options. Each node group (shard) configuration + has the following: Slots, PrimaryAvailabilityZone, ReplicaAvailabilityZones, + ReplicaCount. properties: nodeGroupID: type: string @@ -258,15 +351,20 @@ spec: type: object type: array notificationTopicARN: - description: "The Amazon Resource Name (ARN) of the Amazon Simple - Notification Service (SNS) topic to which notifications are sent. - \n The Amazon SNS topic owner must be the same as the cluster owner." + description: |- + The Amazon Resource Name (ARN) of the Amazon Simple Notification Service + (SNS) topic to which notifications are sent. + + The Amazon SNS topic owner must be the same as the cluster owner. type: string numNodeGroups: - description: "An optional parameter that specifies the number of node - groups (shards) for this Redis (cluster mode enabled) replication - group. For Redis (cluster mode disabled) either omit this parameter - or set it to 1. \n Default: 1" + description: |- + An optional parameter that specifies the number of node groups (shards) for + this Valkey or Redis OSS (cluster mode enabled) replication group. For Valkey + or Redis OSS (cluster mode disabled) either omit this parameter or set it + to 1. 
+ + Default: 1 format: int64 type: integer port: @@ -275,101 +373,156 @@ spec: format: int64 type: integer preferredCacheClusterAZs: - description: "A list of EC2 Availability Zones in which the replication - group's clusters are created. The order of the Availability Zones - in the list is the order in which clusters are allocated. The primary - cluster is created in the first AZ in the list. \n This parameter - is not used if there is more than one node group (shard). You should - use NodeGroupConfiguration instead. \n If you are creating your - replication group in an Amazon VPC (recommended), you can only locate - clusters in Availability Zones associated with the subnets in the - selected subnet group. \n The number of Availability Zones listed - must equal the value of NumCacheClusters. \n Default: system chosen - Availability Zones." + description: |- + A list of EC2 Availability Zones in which the replication group's clusters + are created. The order of the Availability Zones in the list is the order + in which clusters are allocated. The primary cluster is created in the first + AZ in the list. + + This parameter is not used if there is more than one node group (shard). + You should use NodeGroupConfiguration instead. + + If you are creating your replication group in an Amazon VPC (recommended), + you can only locate clusters in Availability Zones associated with the subnets + in the selected subnet group. + + The number of Availability Zones listed must equal the value of NumCacheClusters. + + Default: system chosen Availability Zones. items: type: string type: array preferredMaintenanceWindow: - description: "Specifies the weekly time range during which maintenance - on the cluster is performed. It is specified as a range in the format - ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance - window is a 60 minute period. Valid values for ddd are: \n Specifies - the weekly time range during which maintenance on the cluster is + description: |- + Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. - \n Valid values for ddd are: \n * sun \n * mon \n * tue \n * wed - \n * thu \n * fri \n * sat \n Example: sun:23:00-mon:01:30" + + Valid values for ddd are: + + * sun + + * mon + + * tue + + * wed + + * thu + + * fri + + * sat + + Example: sun:23:00-mon:01:30 type: string primaryClusterID: - description: "The identifier of the cluster that serves as the primary - for this replication group. This cluster must already exist and - have a status of available. \n This parameter is not required if - NumCacheClusters, NumNodeGroups, or ReplicasPerNodeGroup is specified." + description: |- + The identifier of the cluster that serves as the primary for this replication + group. This cluster must already exist and have a status of available. + + This parameter is not required if NumCacheClusters, NumNodeGroups, or ReplicasPerNodeGroup + is specified. type: string replicasPerNodeGroup: - description: An optional parameter that specifies the number of replica - nodes in each node group (shard). Valid values are 0 to 5. + description: |- + An optional parameter that specifies the number of replica nodes in each + node group (shard). Valid values are 0 to 5. format: int64 type: integer replicationGroupID: - description: "The replication group identifier. This parameter is - stored as a lowercase string. 
\n Constraints: \n * A name must contain - from 1 to 40 alphanumeric characters or hyphens. \n * The first - character must be a letter. \n * A name cannot end with a hyphen - or contain two consecutive hyphens." + description: |- + The replication group identifier. This parameter is stored as a lowercase + string. + + Constraints: + + * A name must contain from 1 to 40 alphanumeric characters or hyphens. + + * The first character must be a letter. + + * A name cannot end with a hyphen or contain two consecutive hyphens. type: string securityGroupIDs: - description: "One or more Amazon VPC security groups associated with - this replication group. \n Use this parameter only when you are - creating a replication group in an Amazon Virtual Private Cloud - (Amazon VPC)." + description: |- + One or more Amazon VPC security groups associated with this replication group. + + Use this parameter only when you are creating a replication group in an Amazon + Virtual Private Cloud (Amazon VPC). items: type: string type: array + securityGroupRefs: + items: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + namespace: + type: string + type: object + type: object + type: array snapshotARNs: - description: "A list of Amazon Resource Names (ARN) that uniquely - identify the Redis RDB snapshot files stored in Amazon S3. The snapshot - files are used to populate the new replication group. The Amazon - S3 object name in the ARN cannot contain any commas. The new replication - group will have the number of node groups (console: shards) specified - by the parameter NumNodeGroups or the number of node groups configured - by NodeGroupConfiguration regardless of the number of ARNs specified - here. \n Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb" + description: |- + A list of Amazon Resource Names (ARN) that uniquely identify the Valkey or + Redis OSS RDB snapshot files stored in Amazon S3. The snapshot files are + used to populate the new replication group. The Amazon S3 object name in + the ARN cannot contain any commas. The new replication group will have the + number of node groups (console: shards) specified by the parameter NumNodeGroups + or the number of node groups configured by NodeGroupConfiguration regardless + of the number of ARNs specified here. + + Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb items: type: string type: array snapshotName: - description: The name of a snapshot from which to restore data into - the new replication group. The snapshot status changes to restoring - while the new replication group is being created. + description: |- + The name of a snapshot from which to restore data into the new replication + group. The snapshot status changes to restoring while the new replication + group is being created. type: string snapshotRetentionLimit: - description: "The number of days for which ElastiCache retains automatic - snapshots before deleting them. For example, if you set SnapshotRetentionLimit - to 5, a snapshot that was taken today is retained for 5 days before - being deleted. \n Default: 0 (i.e., automatic backups are disabled - for this cluster)." 
+ description: |- + The number of days for which ElastiCache retains automatic snapshots before + deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot + that was taken today is retained for 5 days before being deleted. + + Default: 0 (i.e., automatic backups are disabled for this cluster). format: int64 type: integer snapshotWindow: - description: "The daily time range (in UTC) during which ElastiCache - begins taking a daily snapshot of your node group (shard). \n Example: - 05:00-09:00 \n If you do not specify this parameter, ElastiCache - automatically chooses an appropriate time range." + description: |- + The daily time range (in UTC) during which ElastiCache begins taking a daily + snapshot of your node group (shard). + + Example: 05:00-09:00 + + If you do not specify this parameter, ElastiCache automatically chooses an + appropriate time range. type: string tags: - description: 'A list of tags to be added to this resource. Tags are - comma-separated key,value pairs (e.g. Key=myKey, Value=myKeyValue. - You can include multiple tags as shown following: Key=myKey, Value=myKeyValue - Key=mySecondKey, Value=mySecondKeyValue. Tags on replication groups - will be replicated to all nodes.' + description: |- + A list of tags to be added to this resource. Tags are comma-separated key,value + pairs (e.g. Key=myKey, Value=myKeyValue. You can include multiple tags as + shown following: Key=myKey, Value=myKeyValue Key=mySecondKey, Value=mySecondKeyValue. + Tags on replication groups will be replicated to all nodes. items: - description: A tag that can be added to an ElastiCache cluster or - replication group. Tags are composed of a Key/Value pair. You - can use tags to categorize and track all your ElastiCache resources, - with the exception of global replication group. When you add or - remove tags on replication groups, those actions will be replicated - to all nodes in the replication group. A tag with a null Value + description: |- + A tag that can be added to an ElastiCache cluster or replication group. Tags + are composed of a Key/Value pair. You can use tags to categorize and track + all your ElastiCache resources, with the exception of global replication + group. When you add or remove tags on replication groups, those actions will + be replicated to all nodes in the replication group. A tag with a null Value is permitted. properties: key: @@ -379,18 +532,22 @@ spec: type: object type: array transitEncryptionEnabled: - description: "A flag that enables in-transit encryption when set to - true. \n You cannot modify the value of TransitEncryptionEnabled - after the cluster is created. To enable in-transit encryption on - a cluster you must set TransitEncryptionEnabled to true when you - create a cluster. \n This parameter is valid only if the Engine - parameter is redis, the EngineVersion parameter is 3.2.6, 4.x or - later, and the cluster is being created in an Amazon VPC. \n If - you enable in-transit encryption, you must also specify a value - for CacheSubnetGroup. \n Required: Only available when creating - a replication group in an Amazon VPC using redis version 3.2.6, - 4.x or later. \n Default: false \n For HIPAA compliance, you must - specify TransitEncryptionEnabled as true, an AuthToken, and a CacheSubnetGroup." + description: |- + A flag that enables in-transit encryption when set to true. 
+ + This parameter is valid only if the Engine parameter is redis, the EngineVersion + parameter is 3.2.6, 4.x or later, and the cluster is being created in an + Amazon VPC. + + If you enable in-transit encryption, you must also specify a value for CacheSubnetGroup. + + Required: Only available when creating a replication group in an Amazon VPC + using Redis OSS version 3.2.6, 4.x or later. + + Default: false + + For HIPAA compliance, you must specify TransitEncryptionEnabled as true, + an AuthToken, and a CacheSubnetGroup. type: boolean userGroupIDs: description: The user group to associate with the replication group. @@ -405,24 +562,25 @@ spec: description: ReplicationGroupStatus defines the observed state of ReplicationGroup properties: ackResourceMetadata: - description: All CRs managed by ACK have a common `Status.ACKResourceMetadata` - member that is used to contain resource sync state, account ownership, + description: |- + All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + that is used to contain resource sync state, account ownership, constructed ARN for the resource properties: arn: - description: 'ARN is the Amazon Resource Name for the resource. - This is a globally-unique identifier and is set only by the - ACK service controller once the controller has orchestrated - the creation of the resource OR when it has verified that an - "adopted" resource (a resource where the ARN annotation was - set by the Kubernetes user on the CR) exists and matches the - supplied CR''s Spec field values. TODO(vijat@): Find a better - strategy for resources that do not have ARN in CreateOutputResponse - https://github.com/aws/aws-controllers-k8s/issues/270' + description: |- + ARN is the Amazon Resource Name for the resource. This is a + globally-unique identifier and is set only by the ACK service controller + once the controller has orchestrated the creation of the resource OR + when it has verified that an "adopted" resource (a resource where the + ARN annotation was set by the Kubernetes user on the CR) exists and + matches the supplied CR's Spec field values. + https://github.com/aws/aws-controllers-k8s/issues/270 type: string ownerAccountID: - description: OwnerAccountID is the AWS Account ID of the account - that owns the backend AWS service API resource. + description: |- + OwnerAccountID is the AWS Account ID of the account that owns the + backend AWS service API resource. type: string region: description: Region is the AWS region in which the resource exists @@ -433,56 +591,66 @@ spec: - region type: object allowedScaleDownModifications: - description: A string list, each element of which specifies a cache - node type which you can use to scale your cluster or replication - group. When scaling down a Redis cluster or replication group using - ModifyCacheCluster or ModifyReplicationGroup, use a value from this - list for the CacheNodeType parameter. + description: |- + A string list, each element of which specifies a cache node type which you + can use to scale your cluster or replication group. When scaling down a Valkey + or Redis OSS cluster or replication group using ModifyCacheCluster or ModifyReplicationGroup, + use a value from this list for the CacheNodeType parameter. items: type: string type: array allowedScaleUpModifications: - description: "A string list, each element of which specifies a cache - node type which you can use to scale your cluster or replication - group. 
\n When scaling up a Redis cluster or replication group using - ModifyCacheCluster or ModifyReplicationGroup, use a value from this - list for the CacheNodeType parameter." + description: |- + A string list, each element of which specifies a cache node type which you + can use to scale your cluster or replication group. + + When scaling up a Valkey or Redis OSS cluster or replication group using + ModifyCacheCluster or ModifyReplicationGroup, use a value from this list + for the CacheNodeType parameter. items: type: string type: array authTokenEnabled: - description: "A flag that enables using an AuthToken (password) when - issuing Redis commands. \n Default: false" + description: |- + A flag that enables using an AuthToken (password) when issuing Valkey or + Redis OSS commands. + + Default: false type: boolean authTokenLastModifiedDate: description: The date the auth token was last modified format: date-time type: string autoMinorVersionUpgrade: - description: If you are running Redis engine version 6.0 or later, - set this parameter to yes if you want to opt-in to the next auto - minor version upgrade campaign. This parameter is disabled for previous - versions. + description: |- + If you are running Valkey 7.2 and above, or Redis OSS engine version 6.0 + and above, set this parameter to yes if you want to opt-in to the next auto + minor version upgrade campaign. This parameter is disabled for previous versions. type: boolean automaticFailover: - description: Indicates the status of automatic failover for this Redis - replication group. + description: |- + Indicates the status of automatic failover for this Valkey or Redis OSS replication + group. type: string clusterEnabled: - description: "A flag indicating whether or not this replication group - is cluster enabled; i.e., whether its data can be partitioned across - multiple shards (API/CLI: node groups). \n Valid values: true | - false" + description: |- + A flag indicating whether or not this replication group is cluster enabled; + i.e., whether its data can be partitioned across multiple shards (API/CLI: + node groups). + + Valid values: true | false type: boolean conditions: - description: All CRS managed by ACK have a common `Status.Conditions` - member that contains a collection of `ackv1alpha1.Condition` objects - that describe the various terminal states of the CR and its backend - AWS service API resource + description: |- + All CRs managed by ACK have a common `Status.Conditions` member that + contains a collection of `ackv1alpha1.Condition` objects that describe + the various terminal states of the CR and its backend AWS service API + resource items: - description: Condition is the common struct used by all CRDs managed - by ACK service controllers to indicate terminal states of the - CR and its backend AWS service API resource + description: |- + Condition is the common struct used by all CRDs managed by ACK service + controllers to indicate terminal states of the CR and its backend AWS + service API resource properties: lastTransitionTime: description: Last time the condition transitioned from one status @@ -508,8 +676,9 @@ spec: type: object type: array configurationEndpoint: - description: The configuration endpoint for this replication group. - Use the configuration endpoint to connect to this replication group. + description: |- + The configuration endpoint for this replication group. Use the configuration + endpoint to connect to this replication group. 
properties: address: type: string @@ -518,18 +687,20 @@ spec: type: integer type: object dataTiering: - description: Enables data tiering. Data tiering is only supported - for replication groups using the r6gd node type. This parameter - must be set to true when using r6gd nodes. For more information, - see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/data-tiering.html). + description: |- + Enables data tiering. Data tiering is only supported for replication groups + using the r6gd node type. This parameter must be set to true when using r6gd + nodes. For more information, see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/data-tiering.html). type: string events: - description: A list of events. Each element in the list contains detailed - information about one event. + description: |- + A list of events. Each element in the list contains detailed information + about one event. items: - description: Represents a single occurrence of something interesting - within the system. Some examples of events are creating a cluster, - adding or removing a cache node, or rebooting a node. + description: |- + Represents a single occurrence of something interesting within the system. + Some examples of events are creating a cluster, adding or removing a cache + node, or rebooting a node. properties: date: format: date-time @@ -543,8 +714,9 @@ spec: type: object type: array globalReplicationGroupInfo: - description: The name of the Global datastore and role of this replication - group in the Global datastore. + description: |- + The name of the Global datastore and role of this replication group in the + Global datastore. properties: globalReplicationGroupID: type: string @@ -557,8 +729,9 @@ spec: description: Returns the destination, format and type of the logs. properties: destinationDetails: - description: Configuration details of either a CloudWatch Logs - destination or Kinesis Data Firehose destination. + description: |- + Configuration details of either a CloudWatch Logs destination or Kinesis + Data Firehose destination. properties: cloudWatchLogsDetails: description: The configuration details of the CloudWatch @@ -599,19 +772,21 @@ spec: type: string type: array multiAZ: - description: 'A flag indicating if you have Multi-AZ enabled to enhance - fault tolerance. For more information, see Minimizing Downtime: - Multi-AZ (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/AutoFailover.html)' + description: |- + A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. + For more information, see Minimizing Downtime: Multi-AZ (http://docs.aws.amazon.com/AmazonElastiCache/latest/dg/AutoFailover.html) type: string nodeGroups: - description: A list of node groups in this replication group. For - Redis (cluster mode disabled) replication groups, this is a single-element - list. For Redis (cluster mode enabled) replication groups, the list + description: |- + A list of node groups in this replication group. For Valkey or Redis OSS + (cluster mode disabled) replication groups, this is a single-element list. + For Valkey or Redis OSS (cluster mode enabled) replication groups, the list contains an entry for each node group (shard). items: - description: Represents a collection of cache nodes in a replication - group. One node in the node group is the read/write primary node. - All the other nodes are read-only Replica nodes. + description: |- + Represents a collection of cache nodes in a replication group. 
One node in + the node group is the read/write primary node. All the other nodes are read-only + Replica nodes. properties: nodeGroupID: type: string @@ -631,8 +806,9 @@ spec: preferredOutpostARN: type: string readEndpoint: - description: Represents the information required for client - programs to connect to a cache node. + description: |- + Represents the information required for client programs to connect to a cache + node. This value is read-only. properties: address: type: string @@ -643,8 +819,9 @@ spec: type: object type: array primaryEndpoint: - description: Represents the information required for client - programs to connect to a cache node. + description: |- + Represents the information required for client programs to connect to a cache + node. This value is read-only. properties: address: type: string @@ -653,8 +830,9 @@ spec: type: integer type: object readerEndpoint: - description: Represents the information required for client - programs to connect to a cache node. + description: |- + Represents the information required for client programs to connect to a cache + node. This value is read-only. properties: address: type: string @@ -669,8 +847,9 @@ spec: type: object type: array pendingModifiedValues: - description: A group of settings to be applied to the replication - group, either immediately or during the next maintenance window. + description: |- + A group of settings to be applied to the replication group, either immediately + or during the next maintenance window. properties: authTokenStatus: type: string @@ -681,8 +860,9 @@ spec: description: The log delivery configurations being modified properties: destinationDetails: - description: Configuration details of either a CloudWatch - Logs destination or Kinesis Data Firehose destination. + description: |- + Configuration details of either a CloudWatch Logs destination or Kinesis + Data Firehose destination. properties: cloudWatchLogsDetails: description: The configuration details of the CloudWatch @@ -738,12 +918,14 @@ spec: format: date-time type: string snapshottingClusterID: - description: The cluster ID that is used as the daily snapshot source - for the replication group. + description: |- + The cluster ID that is used as the daily snapshot source for the replication + group. type: string status: - description: The current state of this replication group - creating, - available, modifying, deleting, create-failed, snapshotting. + description: |- + The current state of this replication group - creating, available, modifying, + deleting, create-failed, snapshotting. 
type: string type: object type: object diff --git a/config/crd/bases/elasticache.services.k8s.aws_serverlesscaches.yaml b/config/crd/bases/elasticache.services.k8s.aws_serverlesscaches.yaml new file mode 100644 index 00000000..0024598f --- /dev/null +++ b/config/crd/bases/elasticache.services.k8s.aws_serverlesscaches.yaml @@ -0,0 +1,325 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.2 + name: serverlesscaches.elasticache.services.k8s.aws +spec: + group: elasticache.services.k8s.aws + names: + kind: ServerlessCache + listKind: ServerlessCacheList + plural: serverlesscaches + singular: serverlesscache + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.status + name: STATUS + type: string + - jsonPath: .status.endpoint.address + name: ENDPOINT + type: string + - jsonPath: .status.conditions[?(@.type=="ACK.ResourceSynced")].status + name: Synced + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ServerlessCache is the Schema for the ServerlessCaches API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + ServerlessCacheSpec defines the desired state of ServerlessCache. + + The resource representing a serverless cache. + properties: + cacheUsageLimits: + description: |- + Sets the cache usage limits for storage and ElastiCache Processing Units + for the cache. + properties: + dataStorage: + description: The data storage limit. + properties: + maximum: + format: int64 + type: integer + minimum: + format: int64 + type: integer + unit: + type: string + type: object + eCPUPerSecond: + description: |- + The configuration for the number of ElastiCache Processing Units (ECPU) the + cache can consume per second. + properties: + maximum: + format: int64 + type: integer + minimum: + format: int64 + type: integer + type: object + type: object + dailySnapshotTime: + description: |- + The daily time that snapshots will be created from the new serverless cache. + By default this number is populated with 0, i.e. no snapshots will be created + on an automatic daily basis. Available for Valkey, Redis OSS and Serverless + Memcached only. + type: string + description: + description: |- + User-provided description for the serverless cache. The default is NULL, + i.e. if no description is provided then an empty string will be returned. + The maximum length is 255 characters. + type: string + engine: + description: The name of the cache engine to be used for creating + the serverless cache. + type: string + kmsKeyID: + description: |- + ARN of the customer managed key for encrypting the data at rest. If no KMS + key is provided, a default service key is used. 
+ type: string + majorEngineVersion: + description: |- + The version of the cache engine that will be used to create the serverless + cache. + type: string + securityGroupIDs: + description: |- + A list of the one or more VPC security groups to be associated with the serverless + cache. The security group will authorize traffic access for the VPC end-point + (private-link). If no other information is given this will be the VPC’s + Default Security Group that is associated with the cluster VPC end-point. + items: + type: string + type: array + securityGroupRefs: + items: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + namespace: + type: string + type: object + type: object + type: array + serverlessCacheName: + description: |- + User-provided identifier for the serverless cache. This parameter is stored + as a lowercase string. + type: string + snapshotARNsToRestore: + description: |- + The ARN(s) of the snapshot that the new serverless cache will be created + from. Available for Valkey, Redis OSS and Serverless Memcached only. + items: + type: string + type: array + snapshotRetentionLimit: + description: |- + The number of snapshots that will be retained for the serverless cache that + is being created. As new snapshots beyond this limit are added, the oldest + snapshots will be deleted on a rolling basis. Available for Valkey, Redis + OSS and Serverless Memcached only. + format: int64 + type: integer + subnetIDs: + description: |- + A list of the identifiers of the subnets where the VPC endpoint for the serverless + cache will be deployed. All the subnetIds must belong to the same VPC. + items: + type: string + type: array + subnetRefs: + items: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + namespace: + type: string + type: object + type: object + type: array + tags: + description: |- + The list of tags (key, value) pairs to be added to the serverless cache resource. + Default is NULL. + items: + description: |- + A tag that can be added to an ElastiCache cluster or replication group. Tags + are composed of a Key/Value pair. You can use tags to categorize and track + all your ElastiCache resources, with the exception of global replication + group. When you add or remove tags on replication groups, those actions will + be replicated to all nodes in the replication group. A tag with a null Value + is permitted. + properties: + key: + type: string + value: + type: string + type: object + type: array + userGroupID: + description: |- + The identifier of the UserGroup to be associated with the serverless cache. + Available for Valkey and Redis OSS only. Default is NULL. 
+ type: string + required: + - engine + - serverlessCacheName + type: object + status: + description: ServerlessCacheStatus defines the observed state of ServerlessCache + properties: + ackResourceMetadata: + description: |- + All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + that is used to contain resource sync state, account ownership, + constructed ARN for the resource + properties: + arn: + description: |- + ARN is the Amazon Resource Name for the resource. This is a + globally-unique identifier and is set only by the ACK service controller + once the controller has orchestrated the creation of the resource OR + when it has verified that an "adopted" resource (a resource where the + ARN annotation was set by the Kubernetes user on the CR) exists and + matches the supplied CR's Spec field values. + https://github.com/aws/aws-controllers-k8s/issues/270 + type: string + ownerAccountID: + description: |- + OwnerAccountID is the AWS Account ID of the account that owns the + backend AWS service API resource. + type: string + region: + description: Region is the AWS region in which the resource exists + or will exist. + type: string + required: + - ownerAccountID + - region + type: object + conditions: + description: |- + All CRs managed by ACK have a common `Status.Conditions` member that + contains a collection of `ackv1alpha1.Condition` objects that describe + the various terminal states of the CR and its backend AWS service API + resource + items: + description: |- + Condition is the common struct used by all CRDs managed by ACK service + controllers to indicate terminal states of the CR and its backend AWS + service API resource + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type is the type of the Condition + type: string + required: + - status + - type + type: object + type: array + createTime: + description: When the serverless cache was created. + format: date-time + type: string + endpoint: + description: |- + Represents the information required for client programs to connect to a cache + node. This value is read-only. + properties: + address: + type: string + port: + format: int64 + type: integer + type: object + fullEngineVersion: + description: |- + The name and version number of the engine the serverless cache is compatible + with. + type: string + readerEndpoint: + description: |- + Represents the information required for client programs to connect to a cache + node. This value is read-only. + properties: + address: + type: string + port: + format: int64 + type: integer + type: object + status: + description: |- + The current status of the serverless cache. The allowed values are CREATING, + AVAILABLE, DELETING, CREATE-FAILED and MODIFYING. 
+ type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/elasticache.services.k8s.aws_serverlesscachesnapshots.yaml b/config/crd/bases/elasticache.services.k8s.aws_serverlesscachesnapshots.yaml new file mode 100644 index 00000000..d75a44de --- /dev/null +++ b/config/crd/bases/elasticache.services.k8s.aws_serverlesscachesnapshots.yaml @@ -0,0 +1,251 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.2 + name: serverlesscachesnapshots.elasticache.services.k8s.aws +spec: + group: elasticache.services.k8s.aws + names: + kind: ServerlessCacheSnapshot + listKind: ServerlessCacheSnapshotList + plural: serverlesscachesnapshots + singular: serverlesscachesnapshot + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.status + name: STATUS + type: string + - jsonPath: .status.conditions[?(@.type=="ACK.ResourceSynced")].status + name: Synced + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ServerlessCacheSnapshot is the Schema for the ServerlessCacheSnapshots + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + ServerlessCacheSnapshotSpec defines the desired state of ServerlessCacheSnapshot. + + The resource representing a serverless cache snapshot. Available for Valkey, + Redis OSS and Serverless Memcached only. + properties: + kmsKeyID: + description: |- + The ID of the KMS key used to encrypt the snapshot. Available for Valkey, + Redis OSS and Serverless Memcached only. Default: NULL + type: string + x-kubernetes-validations: + - message: Value is immutable once set + rule: self == oldSelf + kmsKeyRef: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + namespace: + type: string + type: object + type: object + serverlessCacheName: + description: |- + The name of an existing serverless cache. The snapshot is created from this + cache. Available for Valkey, Redis OSS and Serverless Memcached only. 
+ type: string + x-kubernetes-validations: + - message: Value is immutable once set + rule: self == oldSelf + serverlessCacheRef: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + namespace: + type: string + type: object + type: object + serverlessCacheSnapshotName: + description: |- + The name for the snapshot being created. Must be unique for the customer + account. Available for Valkey, Redis OSS and Serverless Memcached only. Must + be between 1 and 255 characters. + type: string + x-kubernetes-validations: + - message: Value is immutable once set + rule: self == oldSelf + tags: + description: |- + A list of tags to be added to the snapshot resource. A tag is a key-value + pair. Available for Valkey, Redis OSS and Serverless Memcached only. + items: + description: |- + A tag that can be added to an ElastiCache cluster or replication group. Tags + are composed of a Key/Value pair. You can use tags to categorize and track + all your ElastiCache resources, with the exception of global replication + group. When you add or remove tags on replication groups, those actions will + be replicated to all nodes in the replication group. A tag with a null Value + is permitted. + properties: + key: + type: string + value: + type: string + type: object + type: array + required: + - serverlessCacheSnapshotName + type: object + status: + description: ServerlessCacheSnapshotStatus defines the observed state + of ServerlessCacheSnapshot + properties: + ackResourceMetadata: + description: |- + All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + that is used to contain resource sync state, account ownership, + constructed ARN for the resource + properties: + arn: + description: |- + ARN is the Amazon Resource Name for the resource. This is a + globally-unique identifier and is set only by the ACK service controller + once the controller has orchestrated the creation of the resource OR + when it has verified that an "adopted" resource (a resource where the + ARN annotation was set by the Kubernetes user on the CR) exists and + matches the supplied CR's Spec field values. + https://github.com/aws/aws-controllers-k8s/issues/270 + type: string + ownerAccountID: + description: |- + OwnerAccountID is the AWS Account ID of the account that owns the + backend AWS service API resource. + type: string + region: + description: Region is the AWS region in which the resource exists + or will exist. + type: string + required: + - ownerAccountID + - region + type: object + bytesUsedForCache: + description: |- + The total size of a serverless cache snapshot, in bytes. Available for Valkey, + Redis OSS and Serverless Memcached only. 
+ type: string + conditions: + description: |- + All CRs managed by ACK have a common `Status.Conditions` member that + contains a collection of `ackv1alpha1.Condition` objects that describe + the various terminal states of the CR and its backend AWS service API + resource + items: + description: |- + Condition is the common struct used by all CRDs managed by ACK service + controllers to indicate terminal states of the CR and its backend AWS + service API resource + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type is the type of the Condition + type: string + required: + - status + - type + type: object + type: array + createTime: + description: |- + The date and time that the source serverless cache's metadata and cache data + set was obtained for the snapshot. Available for Valkey, Redis OSS and Serverless + Memcached only. + format: date-time + type: string + expiryTime: + description: |- + The time that the serverless cache snapshot will expire. Available for Valkey, + Redis OSS and Serverless Memcached only. + format: date-time + type: string + serverlessCacheConfiguration: + description: |- + The configuration of the serverless cache, at the time the snapshot was taken. + Available for Valkey, Redis OSS and Serverless Memcached only. + properties: + engine: + type: string + majorEngineVersion: + type: string + serverlessCacheName: + type: string + type: object + snapshotType: + description: |- + The type of snapshot of serverless cache. Available for Valkey, Redis OSS + and Serverless Memcached only. + type: string + status: + description: |- + The current status of the serverless cache. Available for Valkey, Redis OSS + and Serverless Memcached only. + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/elasticache.services.k8s.aws_snapshots.yaml b/config/crd/bases/elasticache.services.k8s.aws_snapshots.yaml index 1f17abbe..fb2c9b28 100644 --- a/config/crd/bases/elasticache.services.k8s.aws_snapshots.yaml +++ b/config/crd/bases/elasticache.services.k8s.aws_snapshots.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.2 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.2 name: snapshots.elasticache.services.k8s.aws spec: group: elasticache.services.k8s.aws @@ -21,32 +20,41 @@ spec: description: Snapshot is the Schema for the Snapshots API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: "SnapshotSpec defines the desired state of Snapshot. \n Represents - a copy of an entire Redis cluster as of the time when the snapshot was - taken." + description: |- + SnapshotSpec defines the desired state of Snapshot. + + Represents a copy of an entire Valkey or Redis OSS cluster as of the time + when the snapshot was taken. properties: cacheClusterID: - description: The identifier of an existing cluster. The snapshot is - created from this cluster. + description: |- + The identifier of an existing cluster. The snapshot is created from this + cluster. type: string kmsKeyID: description: The ID of the KMS key used to encrypt the snapshot. type: string replicationGroupID: - description: The identifier of an existing replication group. The - snapshot is created from this replication group. + description: |- + The identifier of an existing replication group. The snapshot is created + from this replication group. type: string snapshotName: description: A name for the snapshot being created. @@ -56,16 +64,16 @@ spec: copy. type: string tags: - description: A list of tags to be added to this resource. A tag is - a key-value pair. A tag key must be accompanied by a tag value, - although null is accepted. + description: |- + A list of tags to be added to this resource. A tag is a key-value pair. A + tag key must be accompanied by a tag value, although null is accepted. items: - description: A tag that can be added to an ElastiCache cluster or - replication group. Tags are composed of a Key/Value pair. You - can use tags to categorize and track all your ElastiCache resources, - with the exception of global replication group. When you add or - remove tags on replication groups, those actions will be replicated - to all nodes in the replication group. A tag with a null Value + description: |- + A tag that can be added to an ElastiCache cluster or replication group. Tags + are composed of a Key/Value pair. You can use tags to categorize and track + all your ElastiCache resources, with the exception of global replication + group. When you add or remove tags on replication groups, those actions will + be replicated to all nodes in the replication group. A tag with a null Value is permitted. 
properties: key: @@ -81,24 +89,25 @@ spec: description: SnapshotStatus defines the observed state of Snapshot properties: ackResourceMetadata: - description: All CRs managed by ACK have a common `Status.ACKResourceMetadata` - member that is used to contain resource sync state, account ownership, + description: |- + All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + that is used to contain resource sync state, account ownership, constructed ARN for the resource properties: arn: - description: 'ARN is the Amazon Resource Name for the resource. - This is a globally-unique identifier and is set only by the - ACK service controller once the controller has orchestrated - the creation of the resource OR when it has verified that an - "adopted" resource (a resource where the ARN annotation was - set by the Kubernetes user on the CR) exists and matches the - supplied CR''s Spec field values. TODO(vijat@): Find a better - strategy for resources that do not have ARN in CreateOutputResponse - https://github.com/aws/aws-controllers-k8s/issues/270' + description: |- + ARN is the Amazon Resource Name for the resource. This is a + globally-unique identifier and is set only by the ACK service controller + once the controller has orchestrated the creation of the resource OR + when it has verified that an "adopted" resource (a resource where the + ARN annotation was set by the Kubernetes user on the CR) exists and + matches the supplied CR's Spec field values. + https://github.com/aws/aws-controllers-k8s/issues/270 type: string ownerAccountID: - description: OwnerAccountID is the AWS Account ID of the account - that owns the backend AWS service API resource. + description: |- + OwnerAccountID is the AWS Account ID of the account that owns the + backend AWS service API resource. type: string region: description: Region is the AWS region in which the resource exists @@ -109,69 +118,80 @@ spec: - region type: object autoMinorVersionUpgrade: - description: If you are running Redis engine version 6.0 or later, - set this parameter to yes if you want to opt-in to the next auto - minor version upgrade campaign. This parameter is disabled for previous - versions. + description: |- + If you are running Valkey 7.2 and above or Redis OSS engine version 6.0 and + above, set this parameter to yes if you want to opt-in to the next auto minor + version upgrade campaign. This parameter is disabled for previous versions. type: boolean automaticFailover: - description: Indicates the status of automatic failover for the source - Redis replication group. + description: |- + Indicates the status of automatic failover for the source Valkey or Redis + OSS replication group. type: string cacheClusterCreateTime: description: The date and time when the source cluster was created. format: date-time type: string cacheNodeType: - description: "The name of the compute and memory capacity node type - for the source cluster. \n The following node types are supported - by ElastiCache. Generally speaking, the current generation types - provide more memory and computational power at lower cost when compared - to their equivalent previous generation counterparts. \n * General - purpose: Current generation: M6g node types (available only for - Redis engine version 5.0.6 onward and for Memcached engine version - 1.5.16 onward). 
cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, - cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge - For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, - cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: - cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, - cache.m4.10xlarge T4g node types (available only for Redis engine - version 5.0.6 onward and Memcached engine version 1.5.16 onward): - cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: - cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, - cache.t2.small, cache.t2.medium Previous generation: (not recommended. - Existing clusters are still supported but creation of new clusters - is not supported for these types.) T1 node types: cache.t1.micro - M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, - cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, - cache.m3.xlarge, cache.m3.2xlarge \n * Compute optimized: Previous - generation: (not recommended. Existing clusters are still supported - but creation of new clusters is not supported for these types.) - C1 node types: cache.c1.xlarge \n * Memory optimized with data tiering: - Current generation: R6gd node types (available only for Redis engine - version 6.2 onward). cache.r6gd.xlarge, cache.r6gd.2xlarge, cache.r6gd.4xlarge, - cache.r6gd.8xlarge, cache.r6gd.12xlarge, cache.r6gd.16xlarge \n - * Memory optimized: Current generation: R6g node types (available - only for Redis engine version 5.0.6 onward and for Memcached engine - version 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, - cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge - For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, - cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: - cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, - cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. - Existing clusters are still supported but creation of new clusters - is not supported for these types.) M2 node types: cache.m2.xlarge, - cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, - cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge - \n Additional node type info \n * All current generation instance - types are created in Amazon VPC by default. \n * Redis append-only - files (AOF) are not supported for T1 or T2 instances. \n * Redis - Multi-AZ with automatic failover is not supported on T1 instances. - \n * Redis configuration variables appendonly and appendfsync are - not supported on Redis version 2.8.22 and later." + description: |- + The name of the compute and memory capacity node type for the source cluster. + + The following node types are supported by ElastiCache. 
Generally speaking, + the current generation types provide more memory and computational power + at lower cost when compared to their equivalent previous generation counterparts. + + * General purpose: Current generation: M7g node types: cache.m7g.large, + cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, + cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + M6g node types (available only for Redis OSS engine version 5.0.6 onward + and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, + cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, + cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, + cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: + cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge + T4g node types (available only for Redis OSS engine version 5.0.6 onward + and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, + cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium + T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous + generation: (not recommended. Existing clusters are still supported but + creation of new clusters is not supported for these types.) T1 node types: + cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + cache.m3.2xlarge + + * Compute optimized: Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for + these types.) C1 node types: cache.c1.xlarge + + * Memory optimized: Current generation: R7g node types: cache.r7g.large, + cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, + cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + R6g node types (available only for Redis OSS engine version 5.0.6 onward + and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, + cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, + cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, + cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: + cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, + cache.r4.16xlarge Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for + these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + cache.r3.8xlarge + + Additional node type info + + * All current generation instance types are created in Amazon VPC by default. + + * Valkey or Redis OSS append-only files (AOF) are not supported for T1 + or T2 instances. + + * Valkey or Redis OSS Multi-AZ with automatic failover is not supported + on T1 instances. + + * The configuration variables appendonly and appendfsync are not supported + on Valkey, or on Redis OSS version 2.8.22 and later. 
type: string cacheParameterGroupName: description: The cache parameter group that is associated with the @@ -182,14 +202,16 @@ spec: source cluster. type: string conditions: - description: All CRS managed by ACK have a common `Status.Conditions` - member that contains a collection of `ackv1alpha1.Condition` objects - that describe the various terminal states of the CR and its backend - AWS service API resource + description: |- + All CRs managed by ACK have a common `Status.Conditions` member that + contains a collection of `ackv1alpha1.Condition` objects that describe + the various terminal states of the CR and its backend AWS service API + resource items: - description: Condition is the common struct used by all CRDs managed - by ACK service controllers to indicate terminal states of the - CR and its backend AWS service API resource + description: |- + Condition is the common struct used by all CRDs managed by ACK service + controllers to indicate terminal states of the CR and its backend AWS + service API resource properties: lastTransitionTime: description: Last time the condition transitioned from one status @@ -215,10 +237,10 @@ spec: type: object type: array dataTiering: - description: Enables data tiering. Data tiering is only supported - for replication groups using the r6gd node type. This parameter - must be set to true when using r6gd nodes. For more information, - see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/data-tiering.html). + description: |- + Enables data tiering. Data tiering is only supported for replication groups + using the r6gd node type. This parameter must be set to true when using r6gd + nodes. For more information, see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/data-tiering.html). type: string engine: description: The name of the cache engine (memcached or redis) used @@ -244,9 +266,10 @@ spec: cacheSize: type: string nodeGroupConfiguration: - description: 'Node group (shard) configuration options. Each - node group (shard) configuration has the following: Slots, - PrimaryAvailabilityZone, ReplicaAvailabilityZones, ReplicaCount.' + description: |- + Node group (shard) configuration options. Each node group (shard) configuration + has the following: Slots, PrimaryAvailabilityZone, ReplicaAvailabilityZones, + ReplicaCount. properties: nodeGroupID: type: string @@ -276,16 +299,18 @@ spec: type: object type: array numCacheNodes: - description: "The number of cache nodes in the source cluster. \n - For clusters running Redis, this value must be 1. For clusters running - Memcached, this value must be between 1 and 40." + description: |- + The number of cache nodes in the source cluster. + + For clusters running Valkey or Redis OSS, this value must be 1. For clusters + running Memcached, this value must be between 1 and 40. format: int64 type: integer numNodeGroups: - description: The number of node groups (shards) in this snapshot. - When restoring from a snapshot, the number of node groups (shards) - in the snapshot and in the restored replication group must be the - same. + description: |- + The number of node groups (shards) in this snapshot. When restoring from + a snapshot, the number of node groups (shards) in the snapshot and in the + restored replication group must be the same. format: int64 type: integer port: @@ -298,12 +323,28 @@ spec: cluster is located. type: string preferredMaintenanceWindow: - description: "Specifies the weekly time range during which maintenance - on the cluster is performed. 
It is specified as a range in the format - ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance - window is a 60 minute period. \n Valid values for ddd are: \n * - sun \n * mon \n * tue \n * wed \n * thu \n * fri \n * sat \n Example: - sun:23:00-mon:01:30" + description: |- + Specifies the weekly time range during which maintenance on the cluster is + performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi + (24H Clock UTC). The minimum maintenance window is a 60 minute period. + + Valid values for ddd are: + + * sun + + * mon + + * tue + + * wed + + * thu + + * fri + + * sat + + Example: sun:23:00-mon:01:30 type: string preferredOutpostARN: description: The ARN (Amazon Resource Name) of the preferred outpost. @@ -312,34 +353,43 @@ spec: description: A description of the source replication group. type: string snapshotRetentionLimit: - description: "For an automatic snapshot, the number of days for which - ElastiCache retains the snapshot before deleting it. \n For manual - snapshots, this field reflects the SnapshotRetentionLimit for the - source cluster when the snapshot was created. This field is otherwise - ignored: Manual snapshots do not expire, and can only be deleted - using the DeleteSnapshot operation. \n Important If the value of - SnapshotRetentionLimit is set to zero (0), backups are turned off." + description: |- + For an automatic snapshot, the number of days for which ElastiCache retains + the snapshot before deleting it. + + For manual snapshots, this field reflects the SnapshotRetentionLimit for + the source cluster when the snapshot was created. This field is otherwise + ignored: Manual snapshots do not expire, and can only be deleted using the + DeleteSnapshot operation. + + Important If the value of SnapshotRetentionLimit is set to zero (0), backups + are turned off. format: int64 type: integer snapshotSource: - description: Indicates whether the snapshot is from an automatic backup - (automated) or was created manually (manual). + description: |- + Indicates whether the snapshot is from an automatic backup (automated) or + was created manually (manual). type: string snapshotStatus: - description: 'The status of the snapshot. Valid values: creating | - available | restoring | copying | deleting.' + description: |- + The status of the snapshot. Valid values: creating | available | restoring + | copying | deleting. type: string snapshotWindow: - description: The daily time range during which ElastiCache takes daily - snapshots of the source cluster. + description: |- + The daily time range during which ElastiCache takes daily snapshots of the + source cluster. type: string topicARN: - description: The Amazon Resource Name (ARN) for the topic used by - the source cluster for publishing notifications. + description: |- + The Amazon Resource Name (ARN) for the topic used by the source cluster for + publishing notifications. type: string vpcID: - description: The Amazon Virtual Private Cloud identifier (VPC ID) - of the cache subnet group for the source cluster. + description: |- + The Amazon Virtual Private Cloud identifier (VPC ID) of the cache subnet + group for the source cluster. 
type: string type: object type: object diff --git a/config/crd/bases/elasticache.services.k8s.aws_usergroups.yaml b/config/crd/bases/elasticache.services.k8s.aws_usergroups.yaml index e76b3d28..c3a4cc92 100644 --- a/config/crd/bases/elasticache.services.k8s.aws_usergroups.yaml +++ b/config/crd/bases/elasticache.services.k8s.aws_usergroups.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.2 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.2 name: usergroups.elasticache.services.k8s.aws spec: group: elasticache.services.k8s.aws @@ -21,33 +20,42 @@ spec: description: UserGroup is the Schema for the UserGroups API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: properties: engine: - description: The current supported value is Redis. + description: |- + The current supported value is Redis user. + + Regex Pattern: `^[a-zA-Z]*$` type: string tags: - description: A list of tags to be added to this resource. A tag is - a key-value pair. A tag key must be accompanied by a tag value, - although null is accepted. + description: |- + A list of tags to be added to this resource. A tag is a key-value pair. A + tag key must be accompanied by a tag value, although null is accepted. Available + for Valkey and Redis OSS only. items: - description: A tag that can be added to an ElastiCache cluster or - replication group. Tags are composed of a Key/Value pair. You - can use tags to categorize and track all your ElastiCache resources, - with the exception of global replication group. When you add or - remove tags on replication groups, those actions will be replicated - to all nodes in the replication group. A tag with a null Value + description: |- + A tag that can be added to an ElastiCache cluster or replication group. Tags + are composed of a Key/Value pair. You can use tags to categorize and track + all your ElastiCache resources, with the exception of global replication + group. When you add or remove tags on replication groups, those actions will + be replicated to all nodes in the replication group. A tag with a null Value is permitted. 
properties: key: @@ -72,24 +80,25 @@ spec: description: UserGroupStatus defines the observed state of UserGroup properties: ackResourceMetadata: - description: All CRs managed by ACK have a common `Status.ACKResourceMetadata` - member that is used to contain resource sync state, account ownership, + description: |- + All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + that is used to contain resource sync state, account ownership, constructed ARN for the resource properties: arn: - description: 'ARN is the Amazon Resource Name for the resource. - This is a globally-unique identifier and is set only by the - ACK service controller once the controller has orchestrated - the creation of the resource OR when it has verified that an - "adopted" resource (a resource where the ARN annotation was - set by the Kubernetes user on the CR) exists and matches the - supplied CR''s Spec field values. TODO(vijat@): Find a better - strategy for resources that do not have ARN in CreateOutputResponse - https://github.com/aws/aws-controllers-k8s/issues/270' + description: |- + ARN is the Amazon Resource Name for the resource. This is a + globally-unique identifier and is set only by the ACK service controller + once the controller has orchestrated the creation of the resource OR + when it has verified that an "adopted" resource (a resource where the + ARN annotation was set by the Kubernetes user on the CR) exists and + matches the supplied CR's Spec field values. + https://github.com/aws/aws-controllers-k8s/issues/270 type: string ownerAccountID: - description: OwnerAccountID is the AWS Account ID of the account - that owns the backend AWS service API resource. + description: |- + OwnerAccountID is the AWS Account ID of the account that owns the + backend AWS service API resource. type: string region: description: Region is the AWS region in which the resource exists @@ -100,14 +109,16 @@ spec: - region type: object conditions: - description: All CRS managed by ACK have a common `Status.Conditions` - member that contains a collection of `ackv1alpha1.Condition` objects - that describe the various terminal states of the CR and its backend - AWS service API resource + description: |- + All CRs managed by ACK have a common `Status.Conditions` member that + contains a collection of `ackv1alpha1.Condition` objects that describe + the various terminal states of the CR and its backend AWS service API + resource items: - description: Condition is the common struct used by all CRDs managed - by ACK service controllers to indicate terminal states of the - CR and its backend AWS service API resource + description: |- + Condition is the common struct used by all CRDs managed by ACK service + controllers to indicate terminal states of the CR and its backend AWS + service API resource properties: lastTransitionTime: description: Last time the condition transitioned from one status @@ -133,7 +144,8 @@ spec: type: object type: array minimumEngineVersion: - description: The minimum engine version required, which is Redis 6.0 + description: The minimum engine version required, which is Redis OSS + 6.0 type: string pendingChanges: description: A list of updates being applied to the user group. 
diff --git a/config/crd/bases/elasticache.services.k8s.aws_users.yaml b/config/crd/bases/elasticache.services.k8s.aws_users.yaml index a148c668..f07896c8 100644 --- a/config/crd/bases/elasticache.services.k8s.aws_users.yaml +++ b/config/crd/bases/elasticache.services.k8s.aws_users.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.2 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.2 name: users.elasticache.services.k8s.aws spec: group: elasticache.services.k8s.aws @@ -21,44 +20,57 @@ spec: description: User is the Schema for the Users API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: properties: accessString: - description: Access permissions string used for this user. + description: |- + Access permissions string used for this user. + + Regex Pattern: `\S` type: string engine: - description: The current supported value is Redis. + description: |- + The current supported value is Redis. + + Regex Pattern: `^[a-zA-Z]*$` type: string noPasswordRequired: description: Indicates a password is not required for this user. type: boolean passwords: - description: Passwords used for this user. You can create up to two - passwords for each user. + description: |- + Passwords used for this user. You can create up to two passwords for each + user. items: - description: SecretKeyReference combines a k8s corev1.SecretReference - with a specific key within the referred-to Secret + description: |- + SecretKeyReference combines a k8s corev1.SecretReference with a + specific key within the referred-to Secret properties: key: description: Key is the key within the secret type: string name: - description: Name is unique within a namespace to reference + description: name is unique within a namespace to reference a secret resource. type: string namespace: - description: Namespace defines the space within which the secret + description: namespace defines the space within which the secret name must be unique. type: string required: @@ -67,16 +79,16 @@ spec: x-kubernetes-map-type: atomic type: array tags: - description: A list of tags to be added to this resource. A tag is - a key-value pair. 
A tag key must be accompanied by a tag value, - although null is accepted. + description: |- + A list of tags to be added to this resource. A tag is a key-value pair. A + tag key must be accompanied by a tag value, although null is accepted. items: - description: A tag that can be added to an ElastiCache cluster or - replication group. Tags are composed of a Key/Value pair. You - can use tags to categorize and track all your ElastiCache resources, - with the exception of global replication group. When you add or - remove tags on replication groups, those actions will be replicated - to all nodes in the replication group. A tag with a null Value + description: |- + A tag that can be added to an ElastiCache cluster or replication group. Tags + are composed of a Key/Value pair. You can use tags to categorize and track + all your ElastiCache resources, with the exception of global replication + group. When you add or remove tags on replication groups, those actions will + be replicated to all nodes in the replication group. A tag with a null Value is permitted. properties: key: @@ -86,7 +98,10 @@ spec: type: object type: array userID: - description: The ID of the user. + description: |- + The ID of the user. + + Regex Pattern: `^[a-zA-Z][a-zA-Z0-9\-]*$` type: string userName: description: The username of the user. @@ -101,24 +116,25 @@ spec: description: UserStatus defines the observed state of User properties: ackResourceMetadata: - description: All CRs managed by ACK have a common `Status.ACKResourceMetadata` - member that is used to contain resource sync state, account ownership, + description: |- + All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + that is used to contain resource sync state, account ownership, constructed ARN for the resource properties: arn: - description: 'ARN is the Amazon Resource Name for the resource. - This is a globally-unique identifier and is set only by the - ACK service controller once the controller has orchestrated - the creation of the resource OR when it has verified that an - "adopted" resource (a resource where the ARN annotation was - set by the Kubernetes user on the CR) exists and matches the - supplied CR''s Spec field values. TODO(vijat@): Find a better - strategy for resources that do not have ARN in CreateOutputResponse - https://github.com/aws/aws-controllers-k8s/issues/270' + description: |- + ARN is the Amazon Resource Name for the resource. This is a + globally-unique identifier and is set only by the ACK service controller + once the controller has orchestrated the creation of the resource OR + when it has verified that an "adopted" resource (a resource where the + ARN annotation was set by the Kubernetes user on the CR) exists and + matches the supplied CR's Spec field values. + https://github.com/aws/aws-controllers-k8s/issues/270 type: string ownerAccountID: - description: OwnerAccountID is the AWS Account ID of the account - that owns the backend AWS service API resource. + description: |- + OwnerAccountID is the AWS Account ID of the account that owns the + backend AWS service API resource. 
type: string region: description: Region is the AWS region in which the resource exists @@ -138,14 +154,16 @@ spec: type: string type: object conditions: - description: All CRS managed by ACK have a common `Status.Conditions` - member that contains a collection of `ackv1alpha1.Condition` objects - that describe the various terminal states of the CR and its backend - AWS service API resource + description: |- + All CRs managed by ACK have a common `Status.Conditions` member that + contains a collection of `ackv1alpha1.Condition` objects that describe + the various terminal states of the CR and its backend AWS service API + resource items: - description: Condition is the common struct used by all CRDs managed - by ACK service controllers to indicate terminal states of the - CR and its backend AWS service API resource + description: |- + Condition is the common struct used by all CRDs managed by ACK service + controllers to indicate terminal states of the CR and its backend AWS + service API resource properties: lastTransitionTime: description: Last time the condition transitioned from one status @@ -177,7 +195,8 @@ spec: description: Access permissions string used for this user. type: string minimumEngineVersion: - description: The minimum engine version required, which is Redis 6.0 + description: The minimum engine version required, which is Redis OSS + 6.0 type: string status: description: Indicates the user status. Can be "active", "modifying" diff --git a/config/crd/common/bases/services.k8s.aws_adoptedresources.yaml b/config/crd/common/bases/services.k8s.aws_adoptedresources.yaml index 7dca541d..b7be3224 100644 --- a/config/crd/common/bases/services.k8s.aws_adoptedresources.yaml +++ b/config/crd/common/bases/services.k8s.aws_adoptedresources.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.2 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.2 name: adoptedresources.services.k8s.aws spec: group: services.k8s.aws @@ -21,14 +20,19 @@ spec: description: AdoptedResource is the schema for the AdoptedResource API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -42,126 +46,144 @@ spec: additionalKeys: additionalProperties: type: string - description: AdditionalKeys represents any additional arbitrary - identifiers used when describing the target resource. + description: |- + AdditionalKeys represents any additional arbitrary identifiers used when + describing the target resource. type: object arn: - description: ARN is the AWS Resource Name for the resource. It - is a globally unique identifier. + description: |- + ARN is the AWS Resource Name for the resource. It is a globally + unique identifier. type: string nameOrID: - description: NameOrId is a user-supplied string identifier for - the resource. It may or may not be globally unique, depending - on the type of resource. + description: |- + NameOrId is a user-supplied string identifier for the resource. It may + or may not be globally unique, depending on the type of resource. type: string type: object kubernetes: - description: ResourceWithMetadata provides the values necessary to - create a Kubernetes resource and override any of its metadata values. + description: |- + ResourceWithMetadata provides the values necessary to create a + Kubernetes resource and override any of its metadata values. properties: group: type: string kind: type: string metadata: - description: "ObjectMeta is metadata that all persisted resources - must have, which includes all objects users must create. It - is not possible to use `metav1.ObjectMeta` inside spec, as the - controller-gen automatically converts this to an arbitrary string-string - map. https://github.com/kubernetes-sigs/controller-tools/issues/385 - \n Active discussion about inclusion of this field in the spec - is happening in this PR: https://github.com/kubernetes-sigs/controller-tools/pull/395 - \n Until this is allowed, or if it never is, we will produce - a subset of the object meta that contains only the fields which - the user is allowed to modify in the metadata." + description: |- + ObjectMeta is metadata that all persisted resources must have, which includes all objects + users must create. + It is not possible to use `metav1.ObjectMeta` inside spec, as the controller-gen + automatically converts this to an arbitrary string-string map. + https://github.com/kubernetes-sigs/controller-tools/issues/385 + + Active discussion about inclusion of this field in the spec is happening in this PR: + https://github.com/kubernetes-sigs/controller-tools/pull/395 + + Until this is allowed, or if it never is, we will produce a subset of the object meta + that contains only the fields which the user is allowed to modify in the metadata. properties: annotations: additionalProperties: type: string - description: 'Annotations is an unstructured key value map - stored with a resource that may be set by external tools - to store and retrieve arbitrary metadata. They are not queryable - and should be preserved when modifying objects. More info: - http://kubernetes.io/docs/user-guide/annotations' + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. 
+ More info: http://kubernetes.io/docs/user-guide/annotations type: object generateName: - description: "GenerateName is an optional prefix, used by - the server, to generate a unique name ONLY IF the Name field - has not been provided. If this field is used, the name returned - to the client will be different than the name passed. This - value will also be combined with a unique suffix. The provided - value has the same validation rules as the Name field, and - may be truncated by the length of the suffix required to - make the value unique on the server. \n If this field is - specified and the generated name exists, the server will - NOT return a 409 - instead, it will either return 201 Created - or 500 with Reason ServerTimeout indicating a unique name - could not be found in the time allotted, and the client - should retry (optionally after the time indicated in the - Retry-After header). \n Applied only if Name is not specified. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + description: |- + GenerateName is an optional prefix, used by the server, to generate a unique + name ONLY IF the Name field has not been provided. + If this field is used, the name returned to the client will be different + than the name passed. This value will also be combined with a unique suffix. + The provided value has the same validation rules as the Name field, + and may be truncated by the length of the suffix required to make the value + unique on the server. + + If this field is specified and the generated name exists, the server will + NOT return a 409 - instead, it will either return 201 Created or 500 with Reason + ServerTimeout indicating a unique name could not be found in the time allotted, and the client + should retry (optionally after the time indicated in the Retry-After header). + + Applied only if Name is not specified. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency type: string labels: additionalProperties: type: string - description: 'Map of string keys and values that can be used - to organize and categorize (scope and select) objects. May - match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels' + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels type: object name: - description: 'Name must be unique within a namespace. Is required - when creating resources, although some resources may allow - a client to request the generation of an appropriate name - automatically. Name is primarily intended for creation idempotence - and configuration definition. Cannot be updated. More info: - http://kubernetes.io/docs/user-guide/identifiers#names' + description: |- + Name must be unique within a namespace. Is required when creating resources, although + some resources may allow a client to request the generation of an appropriate name + automatically. Name is primarily intended for creation idempotence and configuration + definition. + Cannot be updated. + More info: http://kubernetes.io/docs/user-guide/identifiers#names type: string namespace: - description: "Namespace defines the space within each name - must be unique. 
An empty namespace is equivalent to the - \"default\" namespace, but \"default\" is the canonical - representation. Not all objects are required to be scoped - to a namespace - the value of this field for those objects - will be empty. \n Must be a DNS_LABEL. Cannot be updated. - More info: http://kubernetes.io/docs/user-guide/namespaces" + description: |- + Namespace defines the space within each name must be unique. An empty namespace is + equivalent to the "default" namespace, but "default" is the canonical representation. + Not all objects are required to be scoped to a namespace - the value of this field for + those objects will be empty. + + Must be a DNS_LABEL. + Cannot be updated. + More info: http://kubernetes.io/docs/user-guide/namespaces type: string ownerReferences: - description: List of objects depended by this object. If ALL - objects in the list have been deleted, this object will - be garbage collected. If this object is managed by a controller, - then an entry in this list will point to this controller, - with the controller field set to true. There cannot be more - than one managing controller. + description: |- + List of objects depended by this object. If ALL objects in the list have + been deleted, this object will be garbage collected. If this object is managed by a controller, + then an entry in this list will point to this controller, with the controller field set to true. + There cannot be more than one managing controller. items: - description: OwnerReference contains enough information - to let you identify an owning object. An owning object - must be in the same namespace as the dependent, or be - cluster-scoped, so there is no namespace field. + description: |- + OwnerReference contains enough information to let you identify an owning + object. An owning object must be in the same namespace as the dependent, or + be cluster-scoped, so there is no namespace field. properties: apiVersion: description: API version of the referent. type: string blockOwnerDeletion: - description: If true, AND if the owner has the "foregroundDeletion" - finalizer, then the owner cannot be deleted from the - key-value store until this reference is removed. Defaults - to false. To set this field, a user needs "delete" - permission of the owner, otherwise 422 (Unprocessable - Entity) will be returned. + description: |- + If true, AND if the owner has the "foregroundDeletion" finalizer, then + the owner cannot be deleted from the key-value store until this + reference is removed. + See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion + for how the garbage collector interacts with this field and enforces the foreground deletion. + Defaults to false. + To set this field, a user needs "delete" permission of the owner, + otherwise 422 (Unprocessable Entity) will be returned. type: boolean controller: description: If true, this reference points to the managing controller. type: boolean kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + description: |- + Name of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names type: string uid: - description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids type: string required: - apiVersion @@ -185,13 +207,14 @@ spec: AdoptedResource. properties: conditions: - description: A collection of `ackv1alpha1.Condition` objects that - describe the various terminal states of the adopted resource CR - and its target custom resource + description: |- + A collection of `ackv1alpha1.Condition` objects that describe the various + terminal states of the adopted resource CR and its target custom resource items: - description: Condition is the common struct used by all CRDs managed - by ACK service controllers to indicate terminal states of the - CR and its backend AWS service API resource + description: |- + Condition is the common struct used by all CRDs managed by ACK service + controllers to indicate terminal states of the CR and its backend AWS + service API resource properties: lastTransitionTime: description: Last time the condition transitioned from one status diff --git a/config/crd/common/bases/services.k8s.aws_fieldexports.yaml b/config/crd/common/bases/services.k8s.aws_fieldexports.yaml index 4a7ab61b..49b4f383 100644 --- a/config/crd/common/bases/services.k8s.aws_fieldexports.yaml +++ b/config/crd/common/bases/services.k8s.aws_fieldexports.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.2 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.2 name: fieldexports.services.k8s.aws spec: group: services.k8s.aws @@ -21,14 +20,19 @@ spec: description: FieldExport is the schema for the FieldExport API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -36,15 +40,17 @@ spec: description: FieldExportSpec defines the desired state of the FieldExport. properties: from: - description: ResourceFieldSelector provides the values necessary to - identify an individual field on an individual K8s resource. 
+ description: |- + ResourceFieldSelector provides the values necessary to identify an individual + field on an individual K8s resource. properties: path: type: string resource: - description: NamespacedResource provides all the values necessary - to identify an ACK resource of a given type (within the same - namespace as the custom resource containing this type). + description: |- + NamespacedResource provides all the values necessary to identify an ACK + resource of a given type (within the same namespace as the custom resource + containing this type). properties: group: type: string @@ -62,16 +68,18 @@ spec: - resource type: object to: - description: FieldExportTarget provides the values necessary to identify - the output path for a field export. + description: |- + FieldExportTarget provides the values necessary to identify the + output path for a field export. properties: key: description: Key overrides the default value (`.`) for the FieldExport target type: string kind: - description: FieldExportOutputType represents all types that can - be produced by a field export operation + description: |- + FieldExportOutputType represents all types that can be produced by a field + export operation enum: - configmap - secret @@ -94,12 +102,14 @@ spec: description: FieldExportStatus defines the observed status of the FieldExport. properties: conditions: - description: A collection of `ackv1alpha1.Condition` objects that - describe the various recoverable states of the field CR + description: |- + A collection of `ackv1alpha1.Condition` objects that describe the various + recoverable states of the field CR items: - description: Condition is the common struct used by all CRDs managed - by ACK service controllers to indicate terminal states of the - CR and its backend AWS service API resource + description: |- + Condition is the common struct used by all CRDs managed by ACK service + controllers to indicate terminal states of the CR and its backend AWS + service API resource properties: lastTransitionTime: description: Last time the condition transitioned from one status diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 23ef8671..967b537c 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -1,11 +1,13 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization -bases: - - common resources: + - common + - bases/elasticache.services.k8s.aws_cacheclusters.yaml - bases/elasticache.services.k8s.aws_cacheparametergroups.yaml - bases/elasticache.services.k8s.aws_cachesubnetgroups.yaml - bases/elasticache.services.k8s.aws_replicationgroups.yaml + - bases/elasticache.services.k8s.aws_serverlesscaches.yaml + - bases/elasticache.services.k8s.aws_serverlesscachesnapshots.yaml - bases/elasticache.services.k8s.aws_snapshots.yaml - bases/elasticache.services.k8s.aws_users.yaml - bases/elasticache.services.k8s.aws_usergroups.yaml diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index b4521337..c89f8ed4 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -12,7 +12,7 @@ #commonLabels: # someName: someValue -bases: +resources: - ../crd - ../rbac - ../controller diff --git a/config/rbac/cluster-role-controller.yaml b/config/rbac/cluster-role-controller.yaml index 220b9502..8e3b8920 100644 --- a/config/rbac/cluster-role-controller.yaml +++ b/config/rbac/cluster-role-controller.yaml @@ -2,13 +2,13 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - 
creationTimestamp: null name: ack-elasticache-controller rules: - apiGroups: - "" resources: - configmaps + - secrets verbs: - get - list @@ -23,98 +23,27 @@ rules: - list - watch - apiGroups: - - "" + - ec2.services.k8s.aws resources: - - secrets + - securitygroups + - securitygroups/status + - subnets + - subnets/status verbs: - get - list - - patch - - watch - apiGroups: - elasticache.services.k8s.aws resources: + - cacheclusters - cacheparametergroups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - elasticache.services.k8s.aws - resources: - - cacheparametergroups/status - verbs: - - get - - patch - - update -- apiGroups: - - elasticache.services.k8s.aws - resources: - cachesubnetgroups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - elasticache.services.k8s.aws - resources: - - cachesubnetgroups/status - verbs: - - get - - patch - - update -- apiGroups: - - elasticache.services.k8s.aws - resources: - replicationgroups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - elasticache.services.k8s.aws - resources: - - replicationgroups/status - verbs: - - get - - patch - - update -- apiGroups: - - elasticache.services.k8s.aws - resources: + - serverlesscaches + - serverlesscachesnapshots - snapshots - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - elasticache.services.k8s.aws - resources: - - snapshots/status - verbs: - - get - - patch - - update -- apiGroups: - - elasticache.services.k8s.aws - resources: - usergroups + - users verbs: - create - delete @@ -126,35 +55,32 @@ rules: - apiGroups: - elasticache.services.k8s.aws resources: + - cacheclusters/status + - cacheparametergroups/status + - cachesubnetgroups/status + - replicationgroups/status + - serverlesscaches/status + - serverlesscachesnapshots/status + - snapshots/status - usergroups/status + - users/status verbs: - get - patch - update - apiGroups: - - elasticache.services.k8s.aws + - kms.services.k8s.aws resources: - - users + - keys + - keys/status verbs: - - create - - delete - get - list - - patch - - update - - watch -- apiGroups: - - elasticache.services.k8s.aws - resources: - - users/status - verbs: - - get - - patch - - update - apiGroups: - services.k8s.aws resources: - adoptedresources + - fieldexports verbs: - create - delete @@ -167,27 +93,16 @@ rules: - services.k8s.aws resources: - adoptedresources/status + - fieldexports/status verbs: - get - patch - update - apiGroups: - - services.k8s.aws + - sns.services.k8s.aws resources: - - fieldexports + - topics + - topics/status verbs: - - create - - delete - get - list - - patch - - update - - watch -- apiGroups: - - services.k8s.aws - resources: - - fieldexports/status - verbs: - - get - - patch - - update diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index eb7df60a..d9acdeeb 100644 --- a/config/rbac/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -4,4 +4,5 @@ resources: - role-reader.yaml - role-writer.yaml - service-account.yaml - +- leader-election-role.yaml +- leader-election-role-binding.yaml diff --git a/config/rbac/leader-election-role-binding.yaml b/config/rbac/leader-election-role-binding.yaml new file mode 100644 index 00000000..c48717c8 --- /dev/null +++ b/config/rbac/leader-election-role-binding.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + namespace: ack-system + name: 
elasticache-leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: elasticache-leader-election-role +subjects: +- kind: ServiceAccount + name: ack-elasticache-controller + namespace: ack-system diff --git a/config/rbac/leader-election-role.yaml b/config/rbac/leader-election-role.yaml new file mode 100644 index 00000000..b80e9772 --- /dev/null +++ b/config/rbac/leader-election-role.yaml @@ -0,0 +1,26 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: elasticache-leader-election-role + namespace: ack-system +rules: +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch diff --git a/config/rbac/role-reader.yaml b/config/rbac/role-reader.yaml index 397046b6..801dbccf 100644 --- a/config/rbac/role-reader.yaml +++ b/config/rbac/role-reader.yaml @@ -9,9 +9,12 @@ rules: - apiGroups: - elasticache.services.k8s.aws resources: + - cacheclusters - cacheparametergroups - cachesubnetgroups - replicationgroups + - serverlesscaches + - serverlesscachesnapshots - snapshots - users - usergroups diff --git a/config/rbac/role-writer.yaml b/config/rbac/role-writer.yaml index 2afde512..b45826c5 100644 --- a/config/rbac/role-writer.yaml +++ b/config/rbac/role-writer.yaml @@ -9,9 +9,12 @@ rules: - apiGroups: - elasticache.services.k8s.aws resources: + - cacheclusters - cacheparametergroups - cachesubnetgroups - replicationgroups + - serverlesscaches + - serverlesscachesnapshots - snapshots - users - usergroups @@ -26,9 +29,12 @@ rules: - apiGroups: - elasticache.services.k8s.aws resources: + - cacheclusters - cacheparametergroups - cachesubnetgroups - replicationgroups + - serverlesscaches + - serverlesscachesnapshots - snapshots - users - usergroups diff --git a/generator.yaml b/generator.yaml index 98f9babf..d8f2d78e 100644 --- a/generator.yaml +++ b/generator.yaml @@ -1,4 +1,96 @@ resources: + CacheCluster: + fields: + CacheSubnetGroupName: + references: + resource: CacheSubnetGroup + path: Spec.CacheSubnetGroupName + CacheParameterGroupName: + references: + resource: CacheParameterGroup + path: Spec.CacheParameterGroupName + is_immutable: true + ReplicationGroupID: + references: + resource: ReplicationGroup + path: Spec.ReplicationGroupID + is_immutable: true + SnapshotName: + references: + resource: Snapshot + path: Spec.SnapshotName + is_immutable: true + NotificationTopicARN: + references: + service_name: sns + resource: Topic + path: Status.ACKResourceMetadata.ARN + SecurityGroupIDs: + references: + resource: SecurityGroup + service_name: ec2 + path: Status.ID + AuthToken: + is_secret: true + PreferredAvailabilityZone: + late_initialize: {} + PreferredAvailabilityZones: + compare: + is_ignored: true + print: + add_age_column: true + add_synced_column: true + order_by: index + additional_columns: + - name: VERSION + json_path: .spec.engineVersion + type: string + index: 10 + - name: STATUS + json_path: .status.cacheClusterStatus + type: string + index: 20 + - name: ENDPOINT + json_path: .status.configurationEndpoint.address + type: string + index: 30 + priority: 1 + exceptions: + errors: + 404: + code: CacheClusterNotFound + terminal_codes: + - ReplicationGroupNotFoundFault + - InvalidReplicationGroupStateFault + - CacheClusterAlreadyExistsFault + - InsufficientCacheClusterCapacityFault + - CacheSecurityGroupNotFoundFault + - CacheSubnetGroupNotFoundFault + - 
ClusterQuotaForCustomerExceededFault + - NodeQuotaForClusterExceededFault + - NodeQuotaForCustomerExceededFault + - CacheParameterGroupNotFoundFault + - InvalidVPCNetworkStateFault + - TagQuotaPerResource + - InvalidParameterValue + - InvalidParameterCombination + hooks: + sdk_create_post_set_output: + template_path: hooks/cache_cluster/sdk_create_post_set_output.go.tpl + sdk_delete_pre_build_request: + template_path: hooks/cache_cluster/sdk_delete_pre_build_request.go.tpl + sdk_read_many_post_build_request: + template_path: hooks/cache_cluster/sdk_read_many_post_build_request.go.tpl + sdk_read_many_post_set_output: + template_path: hooks/cache_cluster/sdk_read_many_post_set_output.go.tpl + sdk_update_pre_build_request: + template_path: hooks/cache_cluster/sdk_update_pre_build_request.go.tpl + sdk_update_post_build_request: + template_path: hooks/cache_cluster/sdk_update_post_build_request.go.tpl + sdk_update_post_set_output: + template_path: hooks/cache_cluster/sdk_update_post_set_output.go.tpl + delta_post_compare: + code: "modifyDelta(delta, a, b)" CacheSubnetGroup: exceptions: errors: @@ -13,11 +105,19 @@ resources: - InvalidParameterValue - InvalidParameterCombination fields: + SubnetIDs: + references: + service_name: ec2 + resource: Subnet + path: Status.SubnetID Events: is_read_only: true from: operation: DescribeEvents path: Events + hooks: + sdk_read_many_post_set_output: + template_path: hooks/cache_subnet_group/sdk_read_many_post_set_output.go.tpl ReplicationGroup: exceptions: terminal_codes: @@ -47,6 +147,19 @@ resources: AutomaticFailoverEnabled: compare: is_ignored: true + CacheParameterGroupName: + references: + resource: CacheParameterGroup + path: Spec.CacheParameterGroupName + CacheSubnetGroupName: + references: + resource: CacheSubnetGroup + path: Spec.CacheSubnetGroupName + SecurityGroupIDs: + references: + resource: SecurityGroup + service_name: ec2 + path: Status.ID Events: is_read_only: true from: @@ -67,6 +180,8 @@ resources: PrimaryClusterId: # note: "PrimaryClusterID" will not function properly compare: is_ignored: true + NetworkType: + is_immutable: true hooks: sdk_read_many_post_set_output: template_path: hooks/replication_group/sdk_read_many_post_set_output.go.tpl @@ -74,6 +189,8 @@ resources: template_path: hooks/replication_group/sdk_delete_pre_build_request.go.tpl sdk_delete_post_request: template_path: hooks/replication_group/sdk_delete_post_request.go.tpl + sdk_update_pre_build_request: + template_path: hooks/replication_group/sdk_update_pre_build_request.go.tpl sdk_update_post_build_request: template_path: hooks/replication_group/sdk_update_post_build_request.go.tpl delta_post_compare: @@ -81,7 +198,7 @@ resources: sdk_file_end: template_path: hooks/replication_group/sdk_file_end.go.tpl sdk_file_end_set_output_post_populate: - code: "rm.customSetOutput(obj, ko) // custom set output from obj" + code: "rm.customSetOutput(ctx, *obj, ko) // custom set output from obj" renames: operations: CreateReplicationGroup: @@ -112,7 +229,6 @@ resources: terminal_codes: - CacheParameterGroupAlreadyExists - CacheParameterGroupQuotaExceeded - - InvalidCacheParameterGroupState - InvalidGlobalReplicationGroupState - InvalidParameterCombination - InvalidParameterValue @@ -145,7 +261,6 @@ resources: - InvalidParameterValue - InvalidParameterCombination - InvalidUserState - - DefaultUserAssociatedToUserGroup fields: LastRequestedAccessString: is_read_only: true @@ -184,7 +299,105 @@ resources: - TagQuotaPerResourceExceeded update_operation: custom_method_name: 
customUpdateUserGroup + ServerlessCache: + update_operation: + custom_method_name: customUpdateServerlessCache + fields: + SecurityGroupIDs: + references: + resource: SecurityGroup + service_name: ec2 + path: Status.ID + SubnetIDs: + references: + service_name: ec2 + resource: Subnet + path: Status.SubnetID + UserGroupIDs: + references: + resource: UserGroup + path: Spec.UserGroupID + synced: + when: + - path: Status.Status + in: + - available + - create_failed + exceptions: + terminal_codes: + - ServerlessCacheAlreadyExistsFault + - ServerlessCacheQuotaForCustomerExceededFault + - InvalidParameterValue + - InvalidParameterCombination + - InvalidVPCNetworkStateFault + - TagQuotaPerResourceExceeded + - InvalidKMSKeyFault + hooks: + sdk_read_many_post_set_output: + template_path: hooks/serverless_cache/sdk_read_many_post_set_output.go.tpl + print: + add_age_column: true + add_synced_column: true + order_by: index + additional_columns: + - name: STATUS + json_path: .status.status + type: string + index: 10 + - name: ENDPOINT + json_path: .status.endpoint.address + type: string + index: 20 + ServerlessCacheSnapshot: + update_operation: + custom_method_name: customUpdateServerlessCacheSnapshot + fields: + ServerlessCacheName: + is_immutable: true + references: + resource: ServerlessCache + path: Spec.ServerlessCacheName + ServerlessCacheSnapshotName: + is_immutable: true + kmsKeyId: + is_immutable: true + references: + service_name: kms + resource: Key + path: Status.ACKResourceMetadata.ARN + exceptions: + errors: + 404: + code: ServerlessCacheSnapshotNotFoundFault + terminal_codes: + - ServerlessCacheSnapshotAlreadyExistsFault + - InvalidParameterValueException + hooks: + sdk_create_post_set_output: + template_path: hooks/serverless_cache_snapshot/sdk_create_post_set_output.go.tpl + sdk_read_many_post_set_output: + template_path: hooks/serverless_cache_snapshot/sdk_read_many_post_set_output.go.tpl + print: + add_age_column: true + add_synced_column: true + order_by: index + additional_columns: + - name: STATUS + json_path: .status.status + type: string + index: 10 + synced: + when: + - path: Status.Status + in: + - available + - create_failed operations: + DescribeServerlessCaches: + operation_type: + - List + resource_name: + ServerlessCache DescribeCacheSubnetGroups: set_output_custom_method_name: CustomDescribeCacheSubnetGroupsSetOutput DescribeReplicationGroups: @@ -195,7 +408,7 @@ operations: custom_implementation: CustomModifyReplicationGroup set_output_custom_method_name: CustomModifyReplicationGroupSetOutput override_values: - ApplyImmediately: true + ApplyImmediately: aws.Bool(true) CreateSnapshot: custom_implementation: CustomCreateSnapshot set_output_custom_method_name: CustomCreateSnapshotSetOutput @@ -214,10 +427,15 @@ operations: set_output_custom_method_name: CustomCreateUserGroupSetOutput DescribeUserGroups: set_output_custom_method_name: CustomDescribeUserGroupsSetOutput + CreateCacheCluster: + set_output_custom_method_name: customCreateCacheClusterSetOutput + ModifyCacheCluster: + set_output_custom_method_name: customModifyCacheClusterSetOutput + override_values: + ApplyImmediately: aws.Bool(true) ignore: resource_names: - GlobalReplicationGroup - - CacheCluster - CacheSecurityGroup field_paths: - DescribeSnapshotsInput.CacheClusterId @@ -232,3 +450,27 @@ ignore: - CreateReplicationGroupInput.GlobalReplicationGroupId - CreateReplicationGroupInput.AutoMinorVersionUpgrade - CreateReplicationGroupInput.NumCacheClusters + - CacheCluster.LogDeliveryConfigurations + - 
PendingModifiedValues.LogDeliveryConfigurations + - CreateUserInput.AuthenticationMode + - ModifyUserInput.AuthenticationMode + - CreateCacheSubnetGroupOutput.CacheSubnetGroup.SupportedNetworkTypes + - CreateCacheSubnetGroupOutput.CacheSubnetGroup.Subnets.SupportedNetworkTypes + - ModifyCacheSubnetGroupOutput.CacheSubnetGroup.Subnets.SupportedNetworkTypes + - CreateUserGroupOutput.ServerlessCaches + - CreateReplicationGroupOutput.ReplicationGroup.PendingModifiedValues.TransitEncryptionEnabled + - ModifyReplicationGroupOutput.ReplicationGroup.PendingModifiedValues.TransitEncryptionEnabled + # - CreateReplicationGroupOutput.ReplicationGroup.TransitEncryptionEnabled + # - ModifyReplicationGroupInput.TransitEncryptionEnabled + # - CreateReplicationGroupInput.TransitEncryptionEnabled + - CreateReplicationGroupOutput.ReplicationGroup.PendingModifiedValues.TransitEncryptionMode + - CreateReplicationGroupOutput.ReplicationGroup.TransitEncryptionMode + - CreateReplicationGroupInput.TransitEncryptionMode + - ModifyReplicationGroupOutput.ReplicationGroup.PendingModifiedValues.TransitEncryptionMode + - CreateReplicationGroupOutput.ReplicationGroup.ClusterMode + - CreateReplicationGroupOutput.ReplicationGroup.PendingModifiedValues.ClusterMode + - CreateReplicationGroupInput.ClusterMode + - ModifyReplicationGroupOutput.ReplicationGroup.ClusterMode + - ModifyReplicationGroupOutput.ReplicationGroup.PendingModifiedValues.ClusterMode + - Subnet.SupportedNetworkTypes + - CreateReplicationGroupInput.ServerlessCacheSnapshotName \ No newline at end of file diff --git a/go.mod b/go.mod index 000d199b..83263cda 100644 --- a/go.mod +++ b/go.mod @@ -1,75 +1,93 @@ module github.com/aws-controllers-k8s/elasticache-controller -go 1.19 +go 1.24.0 + +toolchain go1.24.1 require ( - github.com/aws-controllers-k8s/runtime v0.24.0 - github.com/aws/aws-sdk-go v1.44.93 - github.com/ghodss/yaml v1.0.0 - github.com/go-logr/logr v1.2.0 - github.com/google/go-cmp v0.5.5 + github.com/aws-controllers-k8s/ec2-controller v1.0.7 + github.com/aws-controllers-k8s/kms-controller v1.0.2 + github.com/aws-controllers-k8s/runtime v0.52.0 + github.com/aws-controllers-k8s/sns-controller v1.0.11 + github.com/aws/aws-sdk-go v1.49.0 + github.com/aws/aws-sdk-go-v2 v1.36.1 + github.com/aws/aws-sdk-go-v2/service/elasticache v1.44.12 + github.com/aws/smithy-go v1.22.2 + github.com/go-logr/logr v1.4.2 github.com/pkg/errors v0.9.1 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.7.0 - go.uber.org/zap v1.19.1 - k8s.io/api v0.23.0 - k8s.io/apimachinery v0.23.0 - k8s.io/client-go v0.23.0 - sigs.k8s.io/controller-runtime v0.11.0 + k8s.io/api v0.32.1 + k8s.io/apimachinery v0.32.1 + k8s.io/client-go v0.32.1 + sigs.k8s.io/controller-runtime v0.20.4 ) require ( + github.com/aws/aws-sdk-go-v2/config v1.28.6 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.47 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 // indirect github.com/beorn7/perks v1.0.1 // 
indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect - github.com/cespare/xxhash/v2 v2.1.1 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/evanphx/json-patch v4.12.0+incompatible // indirect - github.com/fsnotify/fsnotify v1.5.1 // indirect - github.com/go-logr/zapr v1.2.0 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/evanphx/json-patch/v5 v5.9.11 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-logr/zapr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.23.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/google/gofuzz v1.1.0 // indirect - github.com/google/uuid v1.1.2 // indirect - github.com/googleapis/gnostic v0.5.5 // indirect - github.com/imdario/mergo v0.3.12 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/itchyny/gojq v0.12.6 // indirect github.com/itchyny/timefmt-go v0.1.3 // indirect github.com/jaypipes/envutil v1.0.0 // indirect - github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/mailru/easyjson v0.7.7 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.11.0 // indirect - github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/common v0.28.0 // indirect - github.com/prometheus/procfs v0.6.0 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/samber/lo v1.37.0 // indirect - github.com/stretchr/objx v0.2.0 // indirect - go.uber.org/atomic v1.7.0 // indirect - go.uber.org/multierr v1.6.0 // indirect - golang.org/x/exp v0.0.0-20220303212507-bbda1eaf7a17 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect - golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect - golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e // indirect - golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect - golang.org/x/text v0.3.7 // indirect - golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect - gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.27.1 // indirect + github.com/x448/float16 v0.8.4 // indirect 
+ go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect + golang.org/x/net v0.38.0 // indirect + golang.org/x/oauth2 v0.27.0 // indirect + golang.org/x/sync v0.12.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/term v0.30.0 // indirect + golang.org/x/text v0.23.0 // indirect + golang.org/x/time v0.7.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + google.golang.org/protobuf v1.35.1 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect - k8s.io/apiextensions-apiserver v0.23.0 // indirect - k8s.io/component-base v0.23.0 // indirect - k8s.io/klog/v2 v2.30.0 // indirect - k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect - k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b // indirect - sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.0 // indirect - sigs.k8s.io/yaml v1.3.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apiextensions-apiserver v0.32.1 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect + k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect + sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index c6956d36..1159aa35 100644 --- a/go.sum +++ b/go.sum @@ -1,968 +1,250 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= 
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= -github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units 
v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws-controllers-k8s/runtime v0.24.0 h1:F53HtC1YDHXcjtlRl2Q1xU0/7TRFvy9IlMFp0rTYeWE= -github.com/aws-controllers-k8s/runtime v0.24.0/go.mod h1:vBfkfwxAIULdvjHCfJNi6Pj3/QJJCrh+8uH3/ZfewVU= -github.com/aws/aws-sdk-go v1.44.93 h1:hAgd9fuaptBatSft27/5eBMdcA8+cIMqo96/tZ6rKl8= -github.com/aws/aws-sdk-go v1.44.93/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= -github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/aws-controllers-k8s/ec2-controller v1.0.7 h1:7MDu2bq8NFKbgzzgHYPFRT7bf+SmTchgWuq8ixKK0Jc= +github.com/aws-controllers-k8s/ec2-controller v1.0.7/go.mod h1:PvsQehgncHgcu9FiY13M45+GkVsKI98g7G83SrgH7vY= +github.com/aws-controllers-k8s/kms-controller v1.0.2 h1:v8nh/oaX/U6spCwBDaWyem7XXpzoP/MnkJyEjNOZN9s= +github.com/aws-controllers-k8s/kms-controller v1.0.2/go.mod h1:BeoijsyGjJ9G5VcDjpFdxBW0IxaeKXYX497XmUJiPSQ= +github.com/aws-controllers-k8s/runtime v0.52.0 h1:Q5UIAn6SSBr60t/DiU/zr6NLBlUuK2AG3yy2ma/9gDU= +github.com/aws-controllers-k8s/runtime v0.52.0/go.mod h1:OkUJN+Ds799JLYZsMJrO2vDJ4snxUeHK2MgrQHbU+Qc= +github.com/aws-controllers-k8s/sns-controller v1.0.11 h1:nnkywTHzO64y7RrrfoPNyYf1TOkkQHtlg+S0jEPKUZ8= +github.com/aws-controllers-k8s/sns-controller v1.0.11/go.mod h1:ODQIDZR3hHQqcyif4UXVFQfEzTaWU1jqFtVr83K2p9M= +github.com/aws/aws-sdk-go v1.49.0 h1:g9BkW1fo9GqKfwg2+zCD+TW/D36Ux+vtfJ8guF4AYmY= +github.com/aws/aws-sdk-go v1.49.0/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go-v2 v1.36.1 h1:iTDl5U6oAhkNPba0e1t1hrwAo02ZMqbrGq4k5JBWM5E= +github.com/aws/aws-sdk-go-v2 v1.36.1/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM= +github.com/aws/aws-sdk-go-v2/config v1.28.6 h1:D89IKtGrs/I3QXOLNTH93NJYtDhm8SYa9Q5CsPShmyo= +github.com/aws/aws-sdk-go-v2/config v1.28.6/go.mod h1:GDzxJ5wyyFSCoLkS+UhGB0dArhb9mI+Co4dHtoTxbko= +github.com/aws/aws-sdk-go-v2/credentials v1.17.47 h1:48bA+3/fCdi2yAwVt+3COvmatZ6jUDNkDTIsqDiMUdw= +github.com/aws/aws-sdk-go-v2/credentials v1.17.47/go.mod h1:+KdckOejLW3Ks3b0E3b5rHsr2f9yuORBum0WPnE5o5w= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 
h1:AmoU1pziydclFT/xRV+xXE/Vb8fttJCLRPv8oAkprc0= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21/go.mod h1:AjUdLYe4Tgs6kpH4Bv7uMZo7pottoyHMn4eTcIcneaY= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32 h1:BjUcr3X3K0wZPGFg2bxOWW3VPN8rkE3/61zhP+IHviA= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32/go.mod h1:80+OGC/bgzzFFTUmcuwD0lb4YutwQeKLFpmt6hoWapU= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32 h1:m1GeXHVMJsRsUAqG6HjZWx9dj7F5TR+cF1bjyfYyBd4= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32/go.mod h1:IitoQxGfaKdVLNg0hD8/DXmAqNy0H4K2H2Sf91ti8sI= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= +github.com/aws/aws-sdk-go-v2/service/elasticache v1.44.12 h1:jOcCDjNCWNdJmkXyKiIP/HGorjcdmeOmGLZmU4XiydM= +github.com/aws/aws-sdk-go-v2/service/elasticache v1.44.12/go.mod h1:AwS8/VfBl4lEHfbhvKcP2v8DyMx9olcVvz2Y0ygiWxA= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 h1:iXtILhvDxB6kPvEXgsDhGaZCSC6LQET5ZHSdJozeI0Y= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1/go.mod h1:9nu0fVANtYiAePIBh2/pFUSwtJ402hLnp854CNoDOeE= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 h1:50+XsN70RS7dwJ2CkVNXzj7U2L1HKP8nqTd3XWEXBN4= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6/go.mod h1:WqgLmwY7so32kG01zD8CPTJWVWM+TzJoOVHwTg4aPug= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 h1:rLnYAfXQ3YAccocshIH5mzNNwZBkBo+bP6EhIxak6Hw= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.7/go.mod h1:ZHtuQJ6t9A/+YDuxOLnbryAmITtr8UysSny3qcyvJTc= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 h1:JnhTZR3PiYDNKlXy50/pNeix9aGMo6lLpXwJ1mw8MD4= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6/go.mod h1:URronUEGfXZN1VpdktPSD1EkAL9mfrV+2F4sjH38qOY= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 h1:s4074ZO1Hk8qv65GqNXqDjmkf4HSQqJukaLuuW0TpDA= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.2/go.mod h1:mVggCnIWoM09jP71Wh+ea7+5gAp53q+49wDFs1SW5z8= +github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= +github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= -github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 
h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= -github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful 
v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= -github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= -github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= -github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr 
v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= -github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= +github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.23.0 
h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf 
v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/cel-go v0.9.0/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w= -github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= 
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= -github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= -github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid 
v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/itchyny/gojq v0.12.6 h1:VjaFn59Em2wTxDNGcrRkDK9ZHMNa8IksOgL13sLL4d0= github.com/itchyny/gojq v0.12.6/go.mod h1:ZHrkfu7A+RbZLy5J1/JKpS4poEqrzItSTGDItqsfP0A= github.com/itchyny/timefmt-go v0.1.3 h1:7M3LGVDsqcd0VZH2U+x393obrzZisp7C0uEe921iRkU= github.com/itchyny/timefmt-go v0.1.3/go.mod h1:0osSSCQSASBJMsIZnhAaF1C2fCBTJZXrnj37mG8/c+A= github.com/jaypipes/envutil v1.0.0 h1:u6Vwy9HwruFihoZrL0bxDLCa/YNadGVwKyPElNmZWow= github.com/jaypipes/envutil v1.0.0/go.mod h1:vgIRDly+xgBq0eeZRcflOHMMobMwgC6MkMbxo/Nw65M= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod 
h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/matttproud/golang_protobuf_extensions 
v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= +github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= +github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= -github.com/prometheus/client_model v0.2.0/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.28.0 h1:vGVfV9KrDTvWt5boZO0I19g2E3CsWfpPPKZM9dt3mEw= -github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/samber/lo v1.37.0 h1:XjVcB8g6tgUp8rsPsJ2CvhClfImrpL04YpQHXeHPhRw= github.com/samber/lo v1.37.0/go.mod h1:9vaz2O4o8oOnK23pd2TrXufcbdbJIa3b6cstBWKpopA= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0/go.mod 
h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= -github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= -github.com/stretchr/testify v1.7.0/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= -go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= -go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= -go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= -go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= -go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= -go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= -go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= 
-go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= -go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= -go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= -go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= -go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= -go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= -go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp 
v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20220303212507-bbda1eaf7a17 h1:3MTrJm4PyNL9NBqvYDSj3DHl46qQakyfqfWo4jgfaEM= -golang.org/x/exp v0.0.0-20220303212507-bbda1eaf7a17/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/mod 
v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net 
v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f 
h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= +golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= 
-golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= +golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= +golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= -gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod 
h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod 
h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod 
h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.23.0 h1:WrL1gb73VSC8obi8cuYETJGXEoFNEh3LU0Pt+Sokgro= -k8s.io/api v0.23.0/go.mod h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg= -k8s.io/apiextensions-apiserver v0.23.0 h1:uii8BYmHYiT2ZTAJxmvc3X8UhNYMxl2A0z0Xq3Pm+WY= -k8s.io/apiextensions-apiserver v0.23.0/go.mod h1:xIFAEEDlAZgpVBl/1VSjGDmLoXAWRG40+GsWhKhAxY4= -k8s.io/apimachinery v0.23.0 h1:mIfWRMjBuMdolAWJ3Fd+aPTMv3X9z+waiARMpvvb0HQ= -k8s.io/apimachinery v0.23.0/go.mod h1:fFCTTBKvKcwTPFzjlcxp91uPFZr+JA0FubU4fLzzFYc= -k8s.io/apiserver v0.23.0/go.mod h1:Cec35u/9zAepDPPFyT+UMrgqOCjgJ5qtfVJDxjZYmt4= -k8s.io/client-go v0.23.0 h1:vcsOqyPq7XV3QmQRCBH/t9BICJM9Q1M18qahjv+rebY= -k8s.io/client-go v0.23.0/go.mod h1:hrDnpnK1mSr65lHHcUuIZIXDgEbzc7/683c6hyG4jTA= -k8s.io/code-generator v0.23.0/go.mod h1:vQvOhDXhuzqiVfM/YHp+dmg10WDZCchJVObc9MvowsE= -k8s.io/component-base v0.23.0 h1:UAnyzjvVZ2ZR1lF35YwtNY6VMN94WtOnArcXBu34es8= -k8s.io/component-base v0.23.0/go.mod h1:DHH5uiFvLC1edCpvcTDV++NKULdYYU6pR9Tt3HIKMKI= -k8s.io/gengo 
v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw= -k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4= -k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= -k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b h1:wxEMGetGMur3J1xuGLQY7GEQYg9bZxKn3tKo5k/eYcs= -k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25/go.mod h1:Mlj9PNLmG9bZ6BHFwFKDo5afkpWyUISkb9Me0GnK66I= -sigs.k8s.io/controller-runtime v0.11.0 h1:DqO+c8mywcZLFJWILq4iktoECTyn30Bkj0CwgqMpZWQ= -sigs.k8s.io/controller-runtime v0.11.0/go.mod h1:KKwLiTooNGu+JmLZGn9Sl3Gjmfj66eMbCQznLP5zcqA= -sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s= -sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/structured-merge-diff/v4 v4.2.0 h1:kDvPBbnPk+qYmkHmSo8vKGp438IASWofnbbUKDE/bv0= -sigs.k8s.io/structured-merge-diff/v4 v4.2.0/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.32.1 h1:f562zw9cy+GvXzXf0CKlVQ7yHJVYzLfL6JAS4kOAaOc= +k8s.io/api v0.32.1/go.mod h1:/Yi/BqkuueW1BgpoePYBRdDYfjPF5sgTr5+YqDZra5k= +k8s.io/apiextensions-apiserver v0.32.1 h1:hjkALhRUeCariC8DiVmb5jj0VjIc1N0DREP32+6UXZw= +k8s.io/apiextensions-apiserver v0.32.1/go.mod h1:sxWIGuGiYov7Io1fAS2X06NjMIk5CbRHc2StSmbaQto= +k8s.io/apimachinery v0.32.1 h1:683ENpaCBjma4CYqsmZyhEzrGz6cjn1MY/X2jB2hkZs= +k8s.io/apimachinery v0.32.1/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU= +k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= +k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= 
+k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= +sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= +sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/helm/Chart.yaml b/helm/Chart.yaml index 90258cb1..7dbe0b2d 100644 --- a/helm/Chart.yaml +++ b/helm/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v1 name: elasticache-chart description: A Helm chart for the ACK service controller for Amazon ElastiCache (ElastiCache) -version: v0.0.22 -appVersion: v0.0.22 +version: 1.2.3 +appVersion: 1.2.3 home: https://github.com/aws-controllers-k8s/elasticache-controller icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png sources: diff --git a/helm/crds/elasticache.services.k8s.aws_cacheclusters.yaml b/helm/crds/elasticache.services.k8s.aws_cacheclusters.yaml new file mode 100644 index 00000000..68799015 --- /dev/null +++ b/helm/crds/elasticache.services.k8s.aws_cacheclusters.yaml @@ -0,0 +1,846 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.2 + name: cacheclusters.elasticache.services.k8s.aws +spec: + group: elasticache.services.k8s.aws + names: + kind: CacheCluster + listKind: CacheClusterList + plural: cacheclusters + singular: cachecluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.engineVersion + name: VERSION + type: string + - jsonPath: .status.cacheClusterStatus + name: STATUS + type: string + - jsonPath: .status.configurationEndpoint.address + name: ENDPOINT + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="ACK.ResourceSynced")].status + name: Synced + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: CacheCluster is the Schema for the CacheClusters API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + CacheClusterSpec defines the desired state of CacheCluster. + + Contains all of the attributes of a specific cluster. + properties: + authToken: + description: |- + Reserved parameter. The password used to access a password protected server. 
+ + Password constraints: + + - Must be only printable ASCII characters. + + - Must be at least 16 characters and no more than 128 characters in length. + properties: + key: + description: Key is the key within the secret + type: string + name: + description: name is unique within a namespace to reference a + secret resource. + type: string + namespace: + description: namespace defines the space within which the secret + name must be unique. + type: string + required: + - key + type: object + x-kubernetes-map-type: atomic + autoMinorVersionUpgrade: + description: |- + If you are running Valkey 7.2 and above or Redis OSS engine version 6.0 and + above, set this parameter to yes to opt-in to the next auto minor version + upgrade campaign. This parameter is disabled for previous versions. + type: boolean + azMode: + description: |- + Specifies whether the nodes in this Memcached cluster are created in a single + Availability Zone or created across multiple Availability Zones in the cluster's + region. + + This parameter is only supported for Memcached clusters. + + If the AZMode and PreferredAvailabilityZones are not specified, ElastiCache + assumes single-az mode. + type: string + cacheClusterID: + description: |- + The node group (shard) identifier. This parameter is stored as a lowercase + string. + + Constraints: + + - A name must contain from 1 to 50 alphanumeric characters or hyphens. + + - The first character must be a letter. + + - A name cannot end with a hyphen or contain two consecutive hyphens. + type: string + cacheNodeType: + description: |- + The compute and memory capacity of the nodes in the node group (shard). + + The following node types are supported by ElastiCache. Generally speaking, + the current generation types provide more memory and computational power + at lower cost when compared to their equivalent previous generation counterparts. + + - General purpose: Current generation: M7g node types: cache.m7g.large, + cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, + cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + M6g node types (available only for Redis OSS engine version 5.0.6 onward + and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, + cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, + cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, + cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: + cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge + T4g node types (available only for Redis OSS engine version 5.0.6 onward + and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, + cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium + T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous + generation: (not recommended. Existing clusters are still supported but + creation of new clusters is not supported for these types.) T1 node types: + cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + cache.m3.2xlarge + + - Compute optimized: Previous generation: (not recommended. 
Existing clusters + are still supported but creation of new clusters is not supported for + these types.) C1 node types: cache.c1.xlarge + + - Memory optimized: Current generation: R7g node types: cache.r7g.large, + cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, + cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + R6g node types (available only for Redis OSS engine version 5.0.6 onward + and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, + cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, + cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, + cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: + cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, + cache.r4.16xlarge Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for + these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + cache.r3.8xlarge + + Additional node type info + + - All current generation instance types are created in Amazon VPC by default. + + - Valkey or Redis OSS append-only files (AOF) are not supported for T1 + or T2 instances. + + - Valkey or Redis OSS Multi-AZ with automatic failover is not supported + on T1 instances. + + - The configuration variables appendonly and appendfsync are not supported + on Valkey, or on Redis OSS version 2.8.22 and later. + type: string + cacheParameterGroupName: + description: |- + The name of the parameter group to associate with this cluster. If this argument + is omitted, the default parameter group for the specified engine is used. + You cannot use any parameter group which has cluster-enabled='yes' when creating + a cluster. + type: string + x-kubernetes-validations: + - message: Value is immutable once set + rule: self == oldSelf + cacheParameterGroupRef: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + namespace: + type: string + type: object + type: object + cacheSecurityGroupNames: + description: |- + A list of security group names to associate with this cluster. + + Use this parameter only when you are creating a cluster outside of an Amazon + Virtual Private Cloud (Amazon VPC). + items: + type: string + type: array + cacheSubnetGroupName: + description: |- + The name of the subnet group to be used for the cluster. + + Use this parameter only when you are creating a cluster in an Amazon Virtual + Private Cloud (Amazon VPC). + + If you're going to launch your cluster in an Amazon VPC, you need to create + a subnet group before you start creating a cluster. For more information, + see Subnets and Subnet Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/SubnetGroups.html). 
+ type: string + cacheSubnetGroupRef: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + namespace: + type: string + type: object + type: object + engine: + description: |- + The name of the cache engine to be used for this cluster. + + Valid values for this parameter are: memcached | redis + type: string + engineVersion: + description: |- + The version number of the cache engine to be used for this cluster. To view + the supported cache engine versions, use the DescribeCacheEngineVersions + operation. + + Important: You can upgrade to a newer engine version (see Selecting a Cache + Engine and Version (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/SelectEngine.html#VersionManagement)), + but you cannot downgrade to an earlier engine version. If you want to use + an earlier engine version, you must delete the existing cluster or replication + group and create it anew with the earlier engine version. + type: string + ipDiscovery: + description: |- + The network type you choose when modifying a cluster, either ipv4 | ipv6. + IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine + version 6.2 and above or Memcached engine version 1.6.6 and above on all + instances built on the Nitro system (http://aws.amazon.com/ec2/nitro/). + type: string + logDeliveryConfigurations: + description: Specifies the destination, format and type of the logs. + items: + description: Specifies the destination, format and type of the logs. + properties: + destinationDetails: + description: |- + Configuration details of either a CloudWatch Logs destination or Kinesis + Data Firehose destination. + properties: + cloudWatchLogsDetails: + description: The configuration details of the CloudWatch + Logs destination. + properties: + logGroup: + type: string + type: object + kinesisFirehoseDetails: + description: The configuration details of the Kinesis Data + Firehose destination. + properties: + deliveryStream: + type: string + type: object + type: object + destinationType: + type: string + enabled: + type: boolean + logFormat: + type: string + logType: + type: string + type: object + type: array + networkType: + description: |- + Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads + using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached + engine version 1.6.6 and above on all instances built on the Nitro system + (http://aws.amazon.com/ec2/nitro/). + type: string + notificationTopicARN: + description: |- + The Amazon Resource Name (ARN) of the Amazon Simple Notification Service + (SNS) topic to which notifications are sent. + + The Amazon SNS topic owner must be the same as the cluster owner. 
+ type: string + notificationTopicRef: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + namespace: + type: string + type: object + type: object + numCacheNodes: + description: |- + The initial number of cache nodes that the cluster has. + + For clusters running Valkey or Redis OSS, this value must be 1. For clusters + running Memcached, this value must be between 1 and 40. + + If you need more than 40 nodes for your Memcached cluster, please fill out + the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/ + (http://aws.amazon.com/contact-us/elasticache-node-limit-request/). + format: int64 + type: integer + outpostMode: + description: |- + Specifies whether the nodes in the cluster are created in a single outpost + or across multiple outposts. + type: string + port: + description: The port number on which each of the cache nodes accepts + connections. + format: int64 + type: integer + preferredAvailabilityZone: + description: |- + The EC2 Availability Zone in which the cluster is created. + + All nodes belonging to this cluster are placed in the preferred Availability + Zone. If you want to create your nodes across multiple Availability Zones, + use PreferredAvailabilityZones. + + Default: System chosen Availability Zone. + type: string + preferredAvailabilityZones: + description: |- + A list of the Availability Zones in which cache nodes are created. The order + of the zones in the list is not important. + + This option is only supported on Memcached. + + If you are creating your cluster in an Amazon VPC (recommended) you can only + locate nodes in Availability Zones that are associated with the subnets in + the selected subnet group. + + The number of Availability Zones listed must equal the value of NumCacheNodes. + + If you want all the nodes in the same Availability Zone, use PreferredAvailabilityZone + instead, or repeat the Availability Zone multiple times in the list. + + Default: System chosen Availability Zones. + items: + type: string + type: array + preferredMaintenanceWindow: + description: |- + Specifies the weekly time range during which maintenance on the cluster is + performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi + (24H Clock UTC). The minimum maintenance window is a 60 minute period. + type: string + preferredOutpostARN: + description: The outpost ARN in which the cache cluster is created. + type: string + preferredOutpostARNs: + description: The outpost ARNs in which the cache cluster is created. + items: + type: string + type: array + replicationGroupID: + description: |- + The ID of the replication group to which this cluster should belong. If this + parameter is specified, the cluster is added to the specified replication + group as a read replica; otherwise, the cluster is a standalone primary that + is not part of any replication group. + + If the specified replication group is Multi-AZ enabled and the Availability + Zone is not specified, the cluster is created in Availability Zones that + provide the best spread of read replicas across Availability Zones. 
+ + This parameter is only valid if the Engine parameter is redis. + type: string + x-kubernetes-validations: + - message: Value is immutable once set + rule: self == oldSelf + replicationGroupRef: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + namespace: + type: string + type: object + type: object + securityGroupIDs: + description: |- + One or more VPC security groups associated with the cluster. + + Use this parameter only when you are creating a cluster in an Amazon Virtual + Private Cloud (Amazon VPC). + items: + type: string + type: array + securityGroupRefs: + items: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + namespace: + type: string + type: object + type: object + type: array + snapshotARNs: + description: |- + A single-element string list containing an Amazon Resource Name (ARN) that + uniquely identifies a Valkey or Redis OSS RDB snapshot file stored in Amazon + S3. The snapshot file is used to populate the node group (shard). The Amazon + S3 object name in the ARN cannot contain any commas. + + This parameter is only valid if the Engine parameter is redis. + + Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb + items: + type: string + type: array + snapshotName: + description: |- + The name of a Valkey or Redis OSS snapshot from which to restore data into + the new node group (shard). The snapshot status changes to restoring while + the new node group (shard) is being created. + + This parameter is only valid if the Engine parameter is redis. + type: string + x-kubernetes-validations: + - message: Value is immutable once set + rule: self == oldSelf + snapshotRef: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + namespace: + type: string + type: object + type: object + snapshotRetentionLimit: + description: |- + The number of days for which ElastiCache retains automatic snapshots before + deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot + taken today is retained for 5 days before being deleted. + + This parameter is only valid if the Engine parameter is redis. + + Default: 0 (i.e., automatic backups are disabled for this cache cluster). + format: int64 + type: integer + snapshotWindow: + description: |- + The daily time range (in UTC) during which ElastiCache begins taking a daily + snapshot of your node group (shard). 
+ + Example: 05:00-09:00 + + If you do not specify this parameter, ElastiCache automatically chooses an + appropriate time range. + + This parameter is only valid if the Engine parameter is redis. + type: string + tags: + description: A list of tags to be added to this resource. + items: + description: |- + A tag that can be added to an ElastiCache cluster or replication group. Tags + are composed of a Key/Value pair. You can use tags to categorize and track + all your ElastiCache resources, with the exception of global replication + group. When you add or remove tags on replication groups, those actions will + be replicated to all nodes in the replication group. A tag with a null Value + is permitted. + properties: + key: + type: string + value: + type: string + type: object + type: array + transitEncryptionEnabled: + description: A flag that enables in-transit encryption when set to + true. + type: boolean + required: + - cacheClusterID + type: object + status: + description: CacheClusterStatus defines the observed state of CacheCluster + properties: + ackResourceMetadata: + description: |- + All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + that is used to contain resource sync state, account ownership, + constructed ARN for the resource + properties: + arn: + description: |- + ARN is the Amazon Resource Name for the resource. This is a + globally-unique identifier and is set only by the ACK service controller + once the controller has orchestrated the creation of the resource OR + when it has verified that an "adopted" resource (a resource where the + ARN annotation was set by the Kubernetes user on the CR) exists and + matches the supplied CR's Spec field values. + https://github.com/aws/aws-controllers-k8s/issues/270 + type: string + ownerAccountID: + description: |- + OwnerAccountID is the AWS Account ID of the account that owns the + backend AWS service API resource. + type: string + region: + description: Region is the AWS region in which the resource exists + or will exist. + type: string + required: + - ownerAccountID + - region + type: object + atRestEncryptionEnabled: + description: |- + A flag that enables encryption at-rest when set to true. + + You cannot modify the value of AtRestEncryptionEnabled after the cluster + is created. To enable at-rest encryption on a cluster you must set AtRestEncryptionEnabled + to true when you create a cluster. + + Required: Only available when creating a replication group in an Amazon VPC + using Redis OSS version 3.2.6, 4.x or later. + + Default: false + type: boolean + authTokenEnabled: + description: |- + A flag that enables using an AuthToken (password) when issuing Valkey or + Redis OSS commands. + + Default: false + type: boolean + authTokenLastModifiedDate: + description: The date the auth token was last modified + format: date-time + type: string + cacheClusterCreateTime: + description: The date and time when the cluster was created. + format: date-time + type: string + cacheClusterStatus: + description: |- + The current state of this cluster, one of the following values: available, + creating, deleted, deleting, incompatible-network, modifying, rebooting cluster + nodes, restore-failed, or snapshotting. + type: string + cacheNodes: + description: A list of cache nodes that are members of the cluster. + items: + description: |- + Represents an individual cache node within a cluster. 
Each cache node runs + its own instance of the cluster's protocol-compliant caching software - either + Memcached, Valkey or Redis OSS. + + The following node types are supported by ElastiCache. Generally speaking, + the current generation types provide more memory and computational power + at lower cost when compared to their equivalent previous generation counterparts. + + - General purpose: Current generation: M7g node types: cache.m7g.large, + cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, + cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + M6g node types (available only for Redis OSS engine version 5.0.6 onward + and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, + cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, + cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, + cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: + cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge + T4g node types (available only for Redis OSS engine version 5.0.6 onward + and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, + cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium + T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous + generation: (not recommended. Existing clusters are still supported but + creation of new clusters is not supported for these types.) T1 node types: + cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + cache.m3.2xlarge + + - Compute optimized: Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for + these types.) C1 node types: cache.c1.xlarge + + - Memory optimized: Current generation: R7g node types: cache.r7g.large, + cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, + cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + R6g node types (available only for Redis OSS engine version 5.0.6 onward + and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, + cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, + cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, + cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: + cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, + cache.r4.16xlarge Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for + these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + cache.r3.8xlarge + + Additional node type info + + - All current generation instance types are created in Amazon VPC by default. + + - Valkey or Redis OSS append-only files (AOF) are not supported for T1 + or T2 instances. 
+ + - Valkey or Redis OSS Multi-AZ with automatic failover is not supported + on T1 instances. + + - The configuration variables appendonly and appendfsync are not supported + on Valkey, or on Redis OSS version 2.8.22 and later. + properties: + cacheNodeCreateTime: + format: date-time + type: string + cacheNodeID: + type: string + cacheNodeStatus: + type: string + customerAvailabilityZone: + type: string + customerOutpostARN: + type: string + endpoint: + description: |- + Represents the information required for client programs to connect to a cache + node. This value is read-only. + properties: + address: + type: string + port: + format: int64 + type: integer + type: object + parameterGroupStatus: + type: string + sourceCacheNodeID: + type: string + type: object + type: array + cacheParameterGroup: + description: Status of the cache parameter group. + properties: + cacheNodeIDsToReboot: + items: + type: string + type: array + cacheParameterGroupName: + type: string + parameterApplyStatus: + type: string + type: object + cacheSecurityGroups: + description: A list of cache security group elements, composed of + name and status sub-elements. + items: + description: Represents a cluster's status within a particular cache + security group. + properties: + cacheSecurityGroupName: + type: string + status: + type: string + type: object + type: array + clientDownloadLandingPage: + description: |- + The URL of the web page where you can download the latest ElastiCache client + library. + type: string + conditions: + description: |- + All CRs managed by ACK have a common `Status.Conditions` member that + contains a collection of `ackv1alpha1.Condition` objects that describe + the various terminal states of the CR and its backend AWS service API + resource + items: + description: |- + Condition is the common struct used by all CRDs managed by ACK service + controllers to indicate terminal states of the CR and its backend AWS + service API resource + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type is the type of the Condition + type: string + required: + - status + - type + type: object + type: array + configurationEndpoint: + description: |- + Represents a Memcached cluster endpoint which can be used by an application + to connect to any node in the cluster. The configuration endpoint will always + have .cfg in it. + + Example: mem-3.9dvc4r.cfg.usw2.cache.amazonaws.com:11211 + properties: + address: + type: string + port: + format: int64 + type: integer + type: object + notificationConfiguration: + description: |- + Describes a notification topic and its status. Notification topics are used + for publishing ElastiCache events to subscribers using Amazon Simple Notification + Service (SNS). + properties: + topicARN: + type: string + topicStatus: + type: string + type: object + pendingModifiedValues: + description: |- + A group of settings that are applied to the cluster in the future, or that + are currently being applied. 
+ properties: + authTokenStatus: + type: string + cacheNodeIDsToRemove: + items: + type: string + type: array + cacheNodeType: + type: string + engineVersion: + type: string + numCacheNodes: + format: int64 + type: integer + transitEncryptionEnabled: + type: boolean + transitEncryptionMode: + type: string + type: object + replicationGroupLogDeliveryEnabled: + description: |- + A boolean value indicating whether log delivery is enabled for the replication + group. + type: boolean + securityGroups: + description: A list of VPC Security Groups associated with the cluster. + items: + description: Represents a single cache security group and its status. + properties: + securityGroupID: + type: string + status: + type: string + type: object + type: array + transitEncryptionMode: + description: |- + A setting that allows you to migrate your clients to use in-transit encryption, + with no downtime. + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/helm/crds/elasticache.services.k8s.aws_cacheparametergroups.yaml b/helm/crds/elasticache.services.k8s.aws_cacheparametergroups.yaml index c1129618..07453f3b 100644 --- a/helm/crds/elasticache.services.k8s.aws_cacheparametergroups.yaml +++ b/helm/crds/elasticache.services.k8s.aws_cacheparametergroups.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.2 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.2 name: cacheparametergroups.elasticache.services.k8s.aws spec: group: elasticache.services.k8s.aws @@ -22,26 +21,35 @@ spec: API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: "CacheParameterGroupSpec defines the desired state of CacheParameterGroup. - \n Represents the output of a CreateCacheParameterGroup operation." + description: |- + CacheParameterGroupSpec defines the desired state of CacheParameterGroup. + + Represents the output of a CreateCacheParameterGroup operation. properties: cacheParameterGroupFamily: - description: "The name of the cache parameter group family that the - cache parameter group can be used with. 
\n Valid values are: memcached1.4 - | memcached1.5 | memcached1.6 | redis2.6 | redis2.8 | redis3.2 | - redis4.0 | redis5.0 | redis6.x" + description: |- + The name of the cache parameter group family that the cache parameter group + can be used with. + + Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | + redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x | redis7 type: string cacheParameterGroupName: description: A user-specified name for the cache parameter group. @@ -51,10 +59,10 @@ spec: group. type: string parameterNameValues: - description: An array of parameter names and values for the parameter - update. You must supply at least one parameter name and value; subsequent - arguments are optional. A maximum of 20 parameters may be modified - per request. + description: |- + An array of parameter names and values for the parameter update. You must + supply at least one parameter name and value; subsequent arguments are optional. + A maximum of 20 parameters may be modified per request. items: description: Describes a name-value pair that is used to update the value of a parameter. @@ -66,16 +74,16 @@ spec: type: object type: array tags: - description: A list of tags to be added to this resource. A tag is - a key-value pair. A tag key must be accompanied by a tag value, - although null is accepted. + description: |- + A list of tags to be added to this resource. A tag is a key-value pair. A + tag key must be accompanied by a tag value, although null is accepted. items: - description: A tag that can be added to an ElastiCache cluster or - replication group. Tags are composed of a Key/Value pair. You - can use tags to categorize and track all your ElastiCache resources, - with the exception of global replication group. When you add or - remove tags on replication groups, those actions will be replicated - to all nodes in the replication group. A tag with a null Value + description: |- + A tag that can be added to an ElastiCache cluster or replication group. Tags + are composed of a Key/Value pair. You can use tags to categorize and track + all your ElastiCache resources, with the exception of global replication + group. When you add or remove tags on replication groups, those actions will + be replicated to all nodes in the replication group. A tag with a null Value is permitted. properties: key: @@ -93,24 +101,25 @@ spec: description: CacheParameterGroupStatus defines the observed state of CacheParameterGroup properties: ackResourceMetadata: - description: All CRs managed by ACK have a common `Status.ACKResourceMetadata` - member that is used to contain resource sync state, account ownership, + description: |- + All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + that is used to contain resource sync state, account ownership, constructed ARN for the resource properties: arn: - description: 'ARN is the Amazon Resource Name for the resource. - This is a globally-unique identifier and is set only by the - ACK service controller once the controller has orchestrated - the creation of the resource OR when it has verified that an - "adopted" resource (a resource where the ARN annotation was - set by the Kubernetes user on the CR) exists and matches the - supplied CR''s Spec field values. TODO(vijat@): Find a better - strategy for resources that do not have ARN in CreateOutputResponse - https://github.com/aws/aws-controllers-k8s/issues/270' + description: |- + ARN is the Amazon Resource Name for the resource. 
This is a + globally-unique identifier and is set only by the ACK service controller + once the controller has orchestrated the creation of the resource OR + when it has verified that an "adopted" resource (a resource where the + ARN annotation was set by the Kubernetes user on the CR) exists and + matches the supplied CR's Spec field values. + https://github.com/aws/aws-controllers-k8s/issues/270 type: string ownerAccountID: - description: OwnerAccountID is the AWS Account ID of the account - that owns the backend AWS service API resource. + description: |- + OwnerAccountID is the AWS Account ID of the account that owns the + backend AWS service API resource. type: string region: description: Region is the AWS region in which the resource exists @@ -121,14 +130,16 @@ spec: - region type: object conditions: - description: All CRS managed by ACK have a common `Status.Conditions` - member that contains a collection of `ackv1alpha1.Condition` objects - that describe the various terminal states of the CR and its backend - AWS service API resource + description: |- + All CRs managed by ACK have a common `Status.Conditions` member that + contains a collection of `ackv1alpha1.Condition` objects that describe + the various terminal states of the CR and its backend AWS service API + resource items: - description: Condition is the common struct used by all CRDs managed - by ACK service controllers to indicate terminal states of the - CR and its backend AWS service API resource + description: |- + Condition is the common struct used by all CRDs managed by ACK service + controllers to indicate terminal states of the CR and its backend AWS + service API resource properties: lastTransitionTime: description: Last time the condition transitioned from one status @@ -154,12 +165,14 @@ spec: type: object type: array events: - description: A list of events. Each element in the list contains detailed - information about one event. + description: |- + A list of events. Each element in the list contains detailed information + about one event. items: - description: Represents a single occurrence of something interesting - within the system. Some examples of events are creating a cluster, - adding or removing a cache node, or rebooting a node. + description: |- + Represents a single occurrence of something interesting within the system. + Some examples of events are creating a cluster, adding or removing a cache + node, or rebooting a node. properties: date: format: date-time @@ -179,8 +192,9 @@ spec: parameters: description: A list of Parameter instances. items: - description: Describes an individual setting that controls some - aspect of ElastiCache behavior. + description: |- + Describes an individual setting that controls some aspect of ElastiCache + behavior. 
properties: allowedValues: type: string diff --git a/helm/crds/elasticache.services.k8s.aws_cachesubnetgroups.yaml b/helm/crds/elasticache.services.k8s.aws_cachesubnetgroups.yaml index ddf6f43f..56b43aeb 100644 --- a/helm/crds/elasticache.services.k8s.aws_cachesubnetgroups.yaml +++ b/helm/crds/elasticache.services.k8s.aws_cachesubnetgroups.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.2 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.2 name: cachesubnetgroups.elasticache.services.k8s.aws spec: group: elasticache.services.k8s.aws @@ -21,46 +20,78 @@ spec: description: CacheSubnetGroup is the Schema for the CacheSubnetGroups API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: "CacheSubnetGroupSpec defines the desired state of CacheSubnetGroup. - \n Represents the output of one of the following operations: \n - CreateCacheSubnetGroup - \n - ModifyCacheSubnetGroup" + description: |- + CacheSubnetGroupSpec defines the desired state of CacheSubnetGroup. + + Represents the output of one of the following operations: + + - CreateCacheSubnetGroup + + - ModifyCacheSubnetGroup properties: cacheSubnetGroupDescription: description: A description for the cache subnet group. type: string cacheSubnetGroupName: - description: "A name for the cache subnet group. This value is stored - as a lowercase string. \n Constraints: Must contain no more than - 255 alphanumeric characters or hyphens. \n Example: mysubnetgroup" + description: |- + A name for the cache subnet group. This value is stored as a lowercase string. + + Constraints: Must contain no more than 255 alphanumeric characters or hyphens. + + Example: mysubnetgroup type: string subnetIDs: description: A list of VPC subnet IDs for the cache subnet group. 
items: type: string type: array + subnetRefs: + items: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + namespace: + type: string + type: object + type: object + type: array tags: - description: A list of tags to be added to this resource. A tag is - a key-value pair. A tag key must be accompanied by a tag value, - although null is accepted. + description: |- + A list of tags to be added to this resource. A tag is a key-value pair. A + tag key must be accompanied by a tag value, although null is accepted. items: - description: A tag that can be added to an ElastiCache cluster or - replication group. Tags are composed of a Key/Value pair. You - can use tags to categorize and track all your ElastiCache resources, - with the exception of global replication group. When you add or - remove tags on replication groups, those actions will be replicated - to all nodes in the replication group. A tag with a null Value + description: |- + A tag that can be added to an ElastiCache cluster or replication group. Tags + are composed of a Key/Value pair. You can use tags to categorize and track + all your ElastiCache resources, with the exception of global replication + group. When you add or remove tags on replication groups, those actions will + be replicated to all nodes in the replication group. A tag with a null Value is permitted. properties: key: @@ -72,30 +103,30 @@ spec: required: - cacheSubnetGroupDescription - cacheSubnetGroupName - - subnetIDs type: object status: description: CacheSubnetGroupStatus defines the observed state of CacheSubnetGroup properties: ackResourceMetadata: - description: All CRs managed by ACK have a common `Status.ACKResourceMetadata` - member that is used to contain resource sync state, account ownership, + description: |- + All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + that is used to contain resource sync state, account ownership, constructed ARN for the resource properties: arn: - description: 'ARN is the Amazon Resource Name for the resource. - This is a globally-unique identifier and is set only by the - ACK service controller once the controller has orchestrated - the creation of the resource OR when it has verified that an - "adopted" resource (a resource where the ARN annotation was - set by the Kubernetes user on the CR) exists and matches the - supplied CR''s Spec field values. TODO(vijat@): Find a better - strategy for resources that do not have ARN in CreateOutputResponse - https://github.com/aws/aws-controllers-k8s/issues/270' + description: |- + ARN is the Amazon Resource Name for the resource. This is a + globally-unique identifier and is set only by the ACK service controller + once the controller has orchestrated the creation of the resource OR + when it has verified that an "adopted" resource (a resource where the + ARN annotation was set by the Kubernetes user on the CR) exists and + matches the supplied CR's Spec field values. + https://github.com/aws/aws-controllers-k8s/issues/270 type: string ownerAccountID: - description: OwnerAccountID is the AWS Account ID of the account - that owns the backend AWS service API resource. 
+ description: |- + OwnerAccountID is the AWS Account ID of the account that owns the + backend AWS service API resource. type: string region: description: Region is the AWS region in which the resource exists @@ -106,14 +137,16 @@ spec: - region type: object conditions: - description: All CRS managed by ACK have a common `Status.Conditions` - member that contains a collection of `ackv1alpha1.Condition` objects - that describe the various terminal states of the CR and its backend - AWS service API resource + description: |- + All CRs managed by ACK have a common `Status.Conditions` member that + contains a collection of `ackv1alpha1.Condition` objects that describe + the various terminal states of the CR and its backend AWS service API + resource items: - description: Condition is the common struct used by all CRDs managed - by ACK service controllers to indicate terminal states of the - CR and its backend AWS service API resource + description: |- + Condition is the common struct used by all CRDs managed by ACK service + controllers to indicate terminal states of the CR and its backend AWS + service API resource properties: lastTransitionTime: description: Last time the condition transitioned from one status @@ -139,12 +172,14 @@ spec: type: object type: array events: - description: A list of events. Each element in the list contains detailed - information about one event. + description: |- + A list of events. Each element in the list contains detailed information + about one event. items: - description: Represents a single occurrence of something interesting - within the system. Some examples of events are creating a cluster, - adding or removing a cache node, or rebooting a node. + description: |- + Represents a single occurrence of something interesting within the system. + Some examples of events are creating a cluster, adding or removing a cache + node, or rebooting a node. properties: date: format: date-time @@ -160,9 +195,10 @@ spec: subnets: description: A list of subnets associated with the cache subnet group. items: - description: Represents the subnet associated with a cluster. This - parameter refers to subnets defined in Amazon Virtual Private - Cloud (Amazon VPC) and used with ElastiCache. + description: |- + Represents the subnet associated with a cluster. This parameter refers to + subnets defined in Amazon Virtual Private Cloud (Amazon VPC) and used with + ElastiCache. properties: subnetAvailabilityZone: description: Describes an Availability Zone in which the cluster @@ -182,8 +218,9 @@ spec: type: object type: array vpcID: - description: The Amazon Virtual Private Cloud identifier (VPC ID) - of the cache subnet group. + description: |- + The Amazon Virtual Private Cloud identifier (VPC ID) of the cache subnet + group. 
type: string type: object type: object diff --git a/helm/crds/elasticache.services.k8s.aws_replicationgroups.yaml b/helm/crds/elasticache.services.k8s.aws_replicationgroups.yaml index 47055dfb..8c553cd0 100644 --- a/helm/crds/elasticache.services.k8s.aws_replicationgroups.yaml +++ b/helm/crds/elasticache.services.k8s.aws_replicationgroups.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.2 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.2 name: replicationgroups.elasticache.services.k8s.aws spec: group: elasticache.services.k8s.aws @@ -21,52 +20,68 @@ spec: description: ReplicationGroup is the Schema for the ReplicationGroups API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: "ReplicationGroupSpec defines the desired state of ReplicationGroup. - \n Contains all of the attributes of a specific Redis replication group." + description: |- + ReplicationGroupSpec defines the desired state of ReplicationGroup. + + Contains all of the attributes of a specific Valkey or Redis OSS replication + group. properties: atRestEncryptionEnabled: - description: "A flag that enables encryption at rest when set to true. - \n You cannot modify the value of AtRestEncryptionEnabled after - the replication group is created. To enable encryption at rest on - a replication group you must set AtRestEncryptionEnabled to true - when you create the replication group. \n Required: Only available - when creating a replication group in an Amazon VPC using redis version - 3.2.6, 4.x or later. \n Default: false" + description: |- + A flag that enables encryption at rest when set to true. + + You cannot modify the value of AtRestEncryptionEnabled after the replication + group is created. To enable encryption at rest on a replication group you + must set AtRestEncryptionEnabled to true when you create the replication + group. + + Required: Only available when creating a replication group in an Amazon VPC + using Redis OSS version 3.2.6, 4.x or later. + + Default: false type: boolean authToken: - description: "Reserved parameter. The password used to access a password - protected server. 
\n AuthToken can be specified only on replication - groups where TransitEncryptionEnabled is true. \n For HIPAA compliance, - you must specify TransitEncryptionEnabled as true, an AuthToken, - and a CacheSubnetGroup. \n Password constraints: \n - Must be only - printable ASCII characters. \n - Must be at least 16 characters - and no more than 128 characters in length. \n - The only permitted - printable special characters are !, &, #, $, ^, <, >, and -. Other - printable special characters cannot be used in the AUTH token. \n - For more information, see AUTH password (http://redis.io/commands/AUTH) - at http://redis.io/commands/AUTH." + description: |- + Reserved parameter. The password used to access a password protected server. + + AuthToken can be specified only on replication groups where TransitEncryptionEnabled + is true. + + For HIPAA compliance, you must specify TransitEncryptionEnabled as true, + an AuthToken, and a CacheSubnetGroup. + + Password constraints: + + - Must be only printable ASCII characters. + + - Must be at least 16 characters and no more than 128 characters in length. properties: key: description: Key is the key within the secret type: string name: - description: Name is unique within a namespace to reference a + description: name is unique within a namespace to reference a secret resource. type: string namespace: - description: Namespace defines the space within which the secret + description: namespace defines the space within which the secret name must be unique. type: string required: @@ -74,72 +89,109 @@ spec: type: object x-kubernetes-map-type: atomic automaticFailoverEnabled: - description: "Specifies whether a read-only replica is automatically - promoted to read/write primary if the existing primary fails. \n - AutomaticFailoverEnabled must be enabled for Redis (cluster mode - enabled) replication groups. \n Default: false" + description: |- + Specifies whether a read-only replica is automatically promoted to read/write + primary if the existing primary fails. + + AutomaticFailoverEnabled must be enabled for Valkey or Redis OSS (cluster + mode enabled) replication groups. + + Default: false type: boolean cacheNodeType: - description: "The compute and memory capacity of the nodes in the - node group (shard). \n The following node types are supported by - ElastiCache. Generally speaking, the current generation types provide - more memory and computational power at lower cost when compared - to their equivalent previous generation counterparts. 
\n - General - purpose: Current generation: M6g node types (available only for - Redis engine version 5.0.6 onward and for Memcached engine version - 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, - cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge - For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, - cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: - cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, - cache.m4.10xlarge T4g node types (available only for Redis engine - version 5.0.6 onward and Memcached engine version 1.5.16 onward): - cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: - cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, - cache.t2.small, cache.t2.medium Previous generation: (not recommended. - Existing clusters are still supported but creation of new clusters - is not supported for these types.) T1 node types: cache.t1.micro - M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, - cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, - cache.m3.xlarge, cache.m3.2xlarge \n - Compute optimized: Previous - generation: (not recommended. Existing clusters are still supported - but creation of new clusters is not supported for these types.) - C1 node types: cache.c1.xlarge \n - Memory optimized with data tiering: - Current generation: R6gd node types (available only for Redis engine - version 6.2 onward). cache.r6gd.xlarge, cache.r6gd.2xlarge, cache.r6gd.4xlarge, - cache.r6gd.8xlarge, cache.r6gd.12xlarge, cache.r6gd.16xlarge \n - - Memory optimized: Current generation: R6g node types (available - only for Redis engine version 5.0.6 onward and for Memcached engine - version 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, - cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge - For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, - cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: - cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, - cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. - Existing clusters are still supported but creation of new clusters - is not supported for these types.) M2 node types: cache.m2.xlarge, - cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, - cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge - \n Additional node type info \n - All current generation instance - types are created in Amazon VPC by default. \n - Redis append-only - files (AOF) are not supported for T1 or T2 instances. \n - Redis - Multi-AZ with automatic failover is not supported on T1 instances. - \n - Redis configuration variables appendonly and appendfsync are - not supported on Redis version 2.8.22 and later." + description: |- + The compute and memory capacity of the nodes in the node group (shard). + + The following node types are supported by ElastiCache. 
Generally speaking, + the current generation types provide more memory and computational power + at lower cost when compared to their equivalent previous generation counterparts. + + - General purpose: Current generation: M7g node types: cache.m7g.large, + cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, + cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + M6g node types (available only for Redis OSS engine version 5.0.6 onward + and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, + cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, + cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, + cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: + cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge + T4g node types (available only for Redis OSS engine version 5.0.6 onward + and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, + cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium + T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous + generation: (not recommended. Existing clusters are still supported but + creation of new clusters is not supported for these types.) T1 node types: + cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + cache.m3.2xlarge + + - Compute optimized: Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for + these types.) C1 node types: cache.c1.xlarge + + - Memory optimized: Current generation: R7g node types: cache.r7g.large, + cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, + cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + R6g node types (available only for Redis OSS engine version 5.0.6 onward + and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, + cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, + cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, + cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: + cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, + cache.r4.16xlarge Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for + these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + cache.r3.8xlarge + + Additional node type info + + - All current generation instance types are created in Amazon VPC by default. + + - Valkey or Redis OSS append-only files (AOF) are not supported for T1 + or T2 instances. + + - Valkey or Redis OSS Multi-AZ with automatic failover is not supported + on T1 instances. + + - The configuration variables appendonly and appendfsync are not supported + on Valkey, or on Redis OSS version 2.8.22 and later. 
type: string cacheParameterGroupName: - description: "The name of the parameter group to associate with this - replication group. If this argument is omitted, the default cache - parameter group for the specified engine is used. \n If you are - running Redis version 3.2.4 or later, only one node group (shard), - and want to use a default parameter group, we recommend that you - specify the parameter group by name. \n - To create a Redis (cluster - mode disabled) replication group, use CacheParameterGroupName=default.redis3.2. - \n - To create a Redis (cluster mode enabled) replication group, - use CacheParameterGroupName=default.redis3.2.cluster.on." + description: |- + The name of the parameter group to associate with this replication group. + If this argument is omitted, the default cache parameter group for the specified + engine is used. + + If you are running Valkey or Redis OSS version 3.2.4 or later, only one node + group (shard), and want to use a default parameter group, we recommend that + you specify the parameter group by name. + + - To create a Valkey or Redis OSS (cluster mode disabled) replication + group, use CacheParameterGroupName=default.redis3.2. + + - To create a Valkey or Redis OSS (cluster mode enabled) replication group, + use CacheParameterGroupName=default.redis3.2.cluster.on. type: string + cacheParameterGroupRef: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + namespace: + type: string + type: object + type: object cacheSecurityGroupNames: description: A list of cache security group names to associate with this replication group. @@ -147,35 +199,63 @@ spec: type: string type: array cacheSubnetGroupName: - description: "The name of the cache subnet group to be used for the - replication group. \n If you're going to launch your cluster in - an Amazon VPC, you need to create a subnet group before you start - creating a cluster. For more information, see Subnets and Subnet - Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SubnetGroups.html)." + description: |- + The name of the cache subnet group to be used for the replication group. + + If you're going to launch your cluster in an Amazon VPC, you need to create + a subnet group before you start creating a cluster. For more information, + see Subnets and Subnet Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/SubnetGroups.html). type: string + cacheSubnetGroupRef: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + namespace: + type: string + type: object + type: object dataTieringEnabled: - description: Enables data tiering. Data tiering is only supported - for replication groups using the r6gd node type. This parameter - must be set to true when using r6gd nodes. 
For more information, - see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/data-tiering.html). + description: |- + Enables data tiering. Data tiering is only supported for replication groups + using the r6gd node type. This parameter must be set to true when using r6gd + nodes. For more information, see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/data-tiering.html). type: boolean description: description: A user-created description for the replication group. type: string engine: - description: The name of the cache engine to be used for the clusters - in this replication group. Must be Redis. + description: |- + The name of the cache engine to be used for the clusters in this replication + group. The value must be set to Redis. type: string engineVersion: - description: "The version number of the cache engine to be used for - the clusters in this replication group. To view the supported cache - engine versions, use the DescribeCacheEngineVersions operation. - \n Important: You can upgrade to a newer engine version (see Selecting - a Cache Engine and Version (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement)) - in the ElastiCache User Guide, but you cannot downgrade to an earlier - engine version. If you want to use an earlier engine version, you - must delete the existing cluster or replication group and create - it anew with the earlier engine version." + description: |- + The version number of the cache engine to be used for the clusters in this + replication group. To view the supported cache engine versions, use the DescribeCacheEngineVersions + operation. + + Important: You can upgrade to a newer engine version (see Selecting a Cache + Engine and Version (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/SelectEngine.html#VersionManagement)) + in the ElastiCache User Guide, but you cannot downgrade to an earlier engine + version. If you want to use an earlier engine version, you must delete the + existing cluster or replication group and create it anew with the earlier + engine version. + type: string + ipDiscovery: + description: |- + The network type you choose when creating a replication group, either ipv4 + | ipv6. IPv6 is supported for workloads using Valkey 7.2 and above, Redis + OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above + on all instances built on the Nitro system (http://aws.amazon.com/ec2/nitro/). type: string kmsKeyID: description: The ID of the KMS key used to encrypt the disk in the @@ -187,8 +267,9 @@ spec: description: Specifies the destination, format and type of the logs. properties: destinationDetails: - description: Configuration details of either a CloudWatch Logs - destination or Kinesis Data Firehose destination. + description: |- + Configuration details of either a CloudWatch Logs destination or Kinesis + Data Firehose destination. properties: cloudWatchLogsDetails: description: The configuration details of the CloudWatch @@ -216,25 +297,37 @@ spec: type: object type: array multiAZEnabled: - description: 'A flag indicating if you have Multi-AZ enabled to enhance - fault tolerance. For more information, see Minimizing Downtime: - Multi-AZ (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/AutoFailover.html).' + description: |- + A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. 
+ For more information, see Minimizing Downtime: Multi-AZ (http://docs.aws.amazon.com/AmazonElastiCache/latest/dg/AutoFailover.html). type: boolean + networkType: + description: |- + Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads + using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached + engine version 1.6.6 and above on all instances built on the Nitro system + (http://aws.amazon.com/ec2/nitro/). + type: string + x-kubernetes-validations: + - message: Value is immutable once set + rule: self == oldSelf nodeGroupConfiguration: - description: "A list of node group (shard) configuration options. - Each node group (shard) configuration has the following members: - PrimaryAvailabilityZone, ReplicaAvailabilityZones, ReplicaCount, - and Slots. \n If you're creating a Redis (cluster mode disabled) - or a Redis (cluster mode enabled) replication group, you can use - this parameter to individually configure each node group (shard), - or you can omit this parameter. However, it is required when seeding - a Redis (cluster mode enabled) cluster from a S3 rdb file. You must - configure each node group (shard) using this parameter because you - must specify the slots for each node group." + description: |- + A list of node group (shard) configuration options. Each node group (shard) + configuration has the following members: PrimaryAvailabilityZone, ReplicaAvailabilityZones, + ReplicaCount, and Slots. + + If you're creating a Valkey or Redis OSS (cluster mode disabled) or a Valkey + or Redis OSS (cluster mode enabled) replication group, you can use this parameter + to individually configure each node group (shard), or you can omit this parameter. + However, it is required when seeding a Valkey or Redis OSS (cluster mode + enabled) cluster from a S3 rdb file. You must configure each node group (shard) + using this parameter because you must specify the slots for each node group. items: - description: 'Node group (shard) configuration options. Each node - group (shard) configuration has the following: Slots, PrimaryAvailabilityZone, - ReplicaAvailabilityZones, ReplicaCount.' + description: |- + Node group (shard) configuration options. Each node group (shard) configuration + has the following: Slots, PrimaryAvailabilityZone, ReplicaAvailabilityZones, + ReplicaCount. properties: nodeGroupID: type: string @@ -258,15 +351,20 @@ spec: type: object type: array notificationTopicARN: - description: "The Amazon Resource Name (ARN) of the Amazon Simple - Notification Service (SNS) topic to which notifications are sent. - \n The Amazon SNS topic owner must be the same as the cluster owner." + description: |- + The Amazon Resource Name (ARN) of the Amazon Simple Notification Service + (SNS) topic to which notifications are sent. + + The Amazon SNS topic owner must be the same as the cluster owner. type: string numNodeGroups: - description: "An optional parameter that specifies the number of node - groups (shards) for this Redis (cluster mode enabled) replication - group. For Redis (cluster mode disabled) either omit this parameter - or set it to 1. \n Default: 1" + description: |- + An optional parameter that specifies the number of node groups (shards) for + this Valkey or Redis OSS (cluster mode enabled) replication group. For Valkey + or Redis OSS (cluster mode disabled) either omit this parameter or set it + to 1. 
+ + Default: 1 format: int64 type: integer port: @@ -275,101 +373,156 @@ spec: format: int64 type: integer preferredCacheClusterAZs: - description: "A list of EC2 Availability Zones in which the replication - group's clusters are created. The order of the Availability Zones - in the list is the order in which clusters are allocated. The primary - cluster is created in the first AZ in the list. \n This parameter - is not used if there is more than one node group (shard). You should - use NodeGroupConfiguration instead. \n If you are creating your - replication group in an Amazon VPC (recommended), you can only locate - clusters in Availability Zones associated with the subnets in the - selected subnet group. \n The number of Availability Zones listed - must equal the value of NumCacheClusters. \n Default: system chosen - Availability Zones." + description: |- + A list of EC2 Availability Zones in which the replication group's clusters + are created. The order of the Availability Zones in the list is the order + in which clusters are allocated. The primary cluster is created in the first + AZ in the list. + + This parameter is not used if there is more than one node group (shard). + You should use NodeGroupConfiguration instead. + + If you are creating your replication group in an Amazon VPC (recommended), + you can only locate clusters in Availability Zones associated with the subnets + in the selected subnet group. + + The number of Availability Zones listed must equal the value of NumCacheClusters. + + Default: system chosen Availability Zones. items: type: string type: array preferredMaintenanceWindow: - description: "Specifies the weekly time range during which maintenance - on the cluster is performed. It is specified as a range in the format - ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance - window is a 60 minute period. Valid values for ddd are: \n Specifies - the weekly time range during which maintenance on the cluster is + description: |- + Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. - \n Valid values for ddd are: \n - sun \n - mon \n - tue \n - wed - \n - thu \n - fri \n - sat \n Example: sun:23:00-mon:01:30" + + Valid values for ddd are: + + - sun + + - mon + + - tue + + - wed + + - thu + + - fri + + - sat + + Example: sun:23:00-mon:01:30 type: string primaryClusterID: - description: "The identifier of the cluster that serves as the primary - for this replication group. This cluster must already exist and - have a status of available. \n This parameter is not required if - NumCacheClusters, NumNodeGroups, or ReplicasPerNodeGroup is specified." + description: |- + The identifier of the cluster that serves as the primary for this replication + group. This cluster must already exist and have a status of available. + + This parameter is not required if NumCacheClusters, NumNodeGroups, or ReplicasPerNodeGroup + is specified. type: string replicasPerNodeGroup: - description: An optional parameter that specifies the number of replica - nodes in each node group (shard). Valid values are 0 to 5. + description: |- + An optional parameter that specifies the number of replica nodes in each + node group (shard). Valid values are 0 to 5. format: int64 type: integer replicationGroupID: - description: "The replication group identifier. This parameter is - stored as a lowercase string. 
\n Constraints: \n - A name must contain - from 1 to 40 alphanumeric characters or hyphens. \n - The first - character must be a letter. \n - A name cannot end with a hyphen - or contain two consecutive hyphens." + description: |- + The replication group identifier. This parameter is stored as a lowercase + string. + + Constraints: + + - A name must contain from 1 to 40 alphanumeric characters or hyphens. + + - The first character must be a letter. + + - A name cannot end with a hyphen or contain two consecutive hyphens. type: string securityGroupIDs: - description: "One or more Amazon VPC security groups associated with - this replication group. \n Use this parameter only when you are - creating a replication group in an Amazon Virtual Private Cloud - (Amazon VPC)." + description: |- + One or more Amazon VPC security groups associated with this replication group. + + Use this parameter only when you are creating a replication group in an Amazon + Virtual Private Cloud (Amazon VPC). items: type: string type: array + securityGroupRefs: + items: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + namespace: + type: string + type: object + type: object + type: array snapshotARNs: - description: "A list of Amazon Resource Names (ARN) that uniquely - identify the Redis RDB snapshot files stored in Amazon S3. The snapshot - files are used to populate the new replication group. The Amazon - S3 object name in the ARN cannot contain any commas. The new replication - group will have the number of node groups (console: shards) specified - by the parameter NumNodeGroups or the number of node groups configured - by NodeGroupConfiguration regardless of the number of ARNs specified - here. \n Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb" + description: |- + A list of Amazon Resource Names (ARN) that uniquely identify the Valkey or + Redis OSS RDB snapshot files stored in Amazon S3. The snapshot files are + used to populate the new replication group. The Amazon S3 object name in + the ARN cannot contain any commas. The new replication group will have the + number of node groups (console: shards) specified by the parameter NumNodeGroups + or the number of node groups configured by NodeGroupConfiguration regardless + of the number of ARNs specified here. + + Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb items: type: string type: array snapshotName: - description: The name of a snapshot from which to restore data into - the new replication group. The snapshot status changes to restoring - while the new replication group is being created. + description: |- + The name of a snapshot from which to restore data into the new replication + group. The snapshot status changes to restoring while the new replication + group is being created. type: string snapshotRetentionLimit: - description: "The number of days for which ElastiCache retains automatic - snapshots before deleting them. For example, if you set SnapshotRetentionLimit - to 5, a snapshot that was taken today is retained for 5 days before - being deleted. \n Default: 0 (i.e., automatic backups are disabled - for this cluster)." 
+ description: |- + The number of days for which ElastiCache retains automatic snapshots before + deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot + that was taken today is retained for 5 days before being deleted. + + Default: 0 (i.e., automatic backups are disabled for this cluster). format: int64 type: integer snapshotWindow: - description: "The daily time range (in UTC) during which ElastiCache - begins taking a daily snapshot of your node group (shard). \n Example: - 05:00-09:00 \n If you do not specify this parameter, ElastiCache - automatically chooses an appropriate time range." + description: |- + The daily time range (in UTC) during which ElastiCache begins taking a daily + snapshot of your node group (shard). + + Example: 05:00-09:00 + + If you do not specify this parameter, ElastiCache automatically chooses an + appropriate time range. type: string tags: - description: 'A list of tags to be added to this resource. Tags are - comma-separated key,value pairs (e.g. Key=myKey, Value=myKeyValue. - You can include multiple tags as shown following: Key=myKey, Value=myKeyValue - Key=mySecondKey, Value=mySecondKeyValue. Tags on replication groups - will be replicated to all nodes.' + description: |- + A list of tags to be added to this resource. Tags are comma-separated key,value + pairs (e.g. Key=myKey, Value=myKeyValue. You can include multiple tags as + shown following: Key=myKey, Value=myKeyValue Key=mySecondKey, Value=mySecondKeyValue. + Tags on replication groups will be replicated to all nodes. items: - description: A tag that can be added to an ElastiCache cluster or - replication group. Tags are composed of a Key/Value pair. You - can use tags to categorize and track all your ElastiCache resources, - with the exception of global replication group. When you add or - remove tags on replication groups, those actions will be replicated - to all nodes in the replication group. A tag with a null Value + description: |- + A tag that can be added to an ElastiCache cluster or replication group. Tags + are composed of a Key/Value pair. You can use tags to categorize and track + all your ElastiCache resources, with the exception of global replication + group. When you add or remove tags on replication groups, those actions will + be replicated to all nodes in the replication group. A tag with a null Value is permitted. properties: key: @@ -379,18 +532,22 @@ spec: type: object type: array transitEncryptionEnabled: - description: "A flag that enables in-transit encryption when set to - true. \n You cannot modify the value of TransitEncryptionEnabled - after the cluster is created. To enable in-transit encryption on - a cluster you must set TransitEncryptionEnabled to true when you - create a cluster. \n This parameter is valid only if the Engine - parameter is redis, the EngineVersion parameter is 3.2.6, 4.x or - later, and the cluster is being created in an Amazon VPC. \n If - you enable in-transit encryption, you must also specify a value - for CacheSubnetGroup. \n Required: Only available when creating - a replication group in an Amazon VPC using redis version 3.2.6, - 4.x or later. \n Default: false \n For HIPAA compliance, you must - specify TransitEncryptionEnabled as true, an AuthToken, and a CacheSubnetGroup." + description: |- + A flag that enables in-transit encryption when set to true. 
+ + This parameter is valid only if the Engine parameter is redis, the EngineVersion + parameter is 3.2.6, 4.x or later, and the cluster is being created in an + Amazon VPC. + + If you enable in-transit encryption, you must also specify a value for CacheSubnetGroup. + + Required: Only available when creating a replication group in an Amazon VPC + using Redis OSS version 3.2.6, 4.x or later. + + Default: false + + For HIPAA compliance, you must specify TransitEncryptionEnabled as true, + an AuthToken, and a CacheSubnetGroup. type: boolean userGroupIDs: description: The user group to associate with the replication group. @@ -405,24 +562,25 @@ spec: description: ReplicationGroupStatus defines the observed state of ReplicationGroup properties: ackResourceMetadata: - description: All CRs managed by ACK have a common `Status.ACKResourceMetadata` - member that is used to contain resource sync state, account ownership, + description: |- + All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + that is used to contain resource sync state, account ownership, constructed ARN for the resource properties: arn: - description: 'ARN is the Amazon Resource Name for the resource. - This is a globally-unique identifier and is set only by the - ACK service controller once the controller has orchestrated - the creation of the resource OR when it has verified that an - "adopted" resource (a resource where the ARN annotation was - set by the Kubernetes user on the CR) exists and matches the - supplied CR''s Spec field values. TODO(vijat@): Find a better - strategy for resources that do not have ARN in CreateOutputResponse - https://github.com/aws/aws-controllers-k8s/issues/270' + description: |- + ARN is the Amazon Resource Name for the resource. This is a + globally-unique identifier and is set only by the ACK service controller + once the controller has orchestrated the creation of the resource OR + when it has verified that an "adopted" resource (a resource where the + ARN annotation was set by the Kubernetes user on the CR) exists and + matches the supplied CR's Spec field values. + https://github.com/aws/aws-controllers-k8s/issues/270 type: string ownerAccountID: - description: OwnerAccountID is the AWS Account ID of the account - that owns the backend AWS service API resource. + description: |- + OwnerAccountID is the AWS Account ID of the account that owns the + backend AWS service API resource. type: string region: description: Region is the AWS region in which the resource exists @@ -433,56 +591,66 @@ spec: - region type: object allowedScaleDownModifications: - description: A string list, each element of which specifies a cache - node type which you can use to scale your cluster or replication - group. When scaling down a Redis cluster or replication group using - ModifyCacheCluster or ModifyReplicationGroup, use a value from this - list for the CacheNodeType parameter. + description: |- + A string list, each element of which specifies a cache node type which you + can use to scale your cluster or replication group. When scaling down a Valkey + or Redis OSS cluster or replication group using ModifyCacheCluster or ModifyReplicationGroup, + use a value from this list for the CacheNodeType parameter. items: type: string type: array allowedScaleUpModifications: - description: "A string list, each element of which specifies a cache - node type which you can use to scale your cluster or replication - group. 
\n When scaling up a Redis cluster or replication group using - ModifyCacheCluster or ModifyReplicationGroup, use a value from this - list for the CacheNodeType parameter." + description: |- + A string list, each element of which specifies a cache node type which you + can use to scale your cluster or replication group. + + When scaling up a Valkey or Redis OSS cluster or replication group using + ModifyCacheCluster or ModifyReplicationGroup, use a value from this list + for the CacheNodeType parameter. items: type: string type: array authTokenEnabled: - description: "A flag that enables using an AuthToken (password) when - issuing Redis commands. \n Default: false" + description: |- + A flag that enables using an AuthToken (password) when issuing Valkey or + Redis OSS commands. + + Default: false type: boolean authTokenLastModifiedDate: description: The date the auth token was last modified format: date-time type: string autoMinorVersionUpgrade: - description: If you are running Redis engine version 6.0 or later, - set this parameter to yes if you want to opt-in to the next auto - minor version upgrade campaign. This parameter is disabled for previous - versions. + description: |- + If you are running Valkey 7.2 and above, or Redis OSS engine version 6.0 + and above, set this parameter to yes if you want to opt-in to the next auto + minor version upgrade campaign. This parameter is disabled for previous versions. type: boolean automaticFailover: - description: Indicates the status of automatic failover for this Redis - replication group. + description: |- + Indicates the status of automatic failover for this Valkey or Redis OSS replication + group. type: string clusterEnabled: - description: "A flag indicating whether or not this replication group - is cluster enabled; i.e., whether its data can be partitioned across - multiple shards (API/CLI: node groups). \n Valid values: true | - false" + description: |- + A flag indicating whether or not this replication group is cluster enabled; + i.e., whether its data can be partitioned across multiple shards (API/CLI: + node groups). + + Valid values: true | false type: boolean conditions: - description: All CRS managed by ACK have a common `Status.Conditions` - member that contains a collection of `ackv1alpha1.Condition` objects - that describe the various terminal states of the CR and its backend - AWS service API resource + description: |- + All CRs managed by ACK have a common `Status.Conditions` member that + contains a collection of `ackv1alpha1.Condition` objects that describe + the various terminal states of the CR and its backend AWS service API + resource items: - description: Condition is the common struct used by all CRDs managed - by ACK service controllers to indicate terminal states of the - CR and its backend AWS service API resource + description: |- + Condition is the common struct used by all CRDs managed by ACK service + controllers to indicate terminal states of the CR and its backend AWS + service API resource properties: lastTransitionTime: description: Last time the condition transitioned from one status @@ -508,8 +676,9 @@ spec: type: object type: array configurationEndpoint: - description: The configuration endpoint for this replication group. - Use the configuration endpoint to connect to this replication group. + description: |- + The configuration endpoint for this replication group. Use the configuration + endpoint to connect to this replication group. 
properties: address: type: string @@ -518,18 +687,20 @@ spec: type: integer type: object dataTiering: - description: Enables data tiering. Data tiering is only supported - for replication groups using the r6gd node type. This parameter - must be set to true when using r6gd nodes. For more information, - see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/data-tiering.html). + description: |- + Enables data tiering. Data tiering is only supported for replication groups + using the r6gd node type. This parameter must be set to true when using r6gd + nodes. For more information, see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/data-tiering.html). type: string events: - description: A list of events. Each element in the list contains detailed - information about one event. + description: |- + A list of events. Each element in the list contains detailed information + about one event. items: - description: Represents a single occurrence of something interesting - within the system. Some examples of events are creating a cluster, - adding or removing a cache node, or rebooting a node. + description: |- + Represents a single occurrence of something interesting within the system. + Some examples of events are creating a cluster, adding or removing a cache + node, or rebooting a node. properties: date: format: date-time @@ -543,8 +714,9 @@ spec: type: object type: array globalReplicationGroupInfo: - description: The name of the Global datastore and role of this replication - group in the Global datastore. + description: |- + The name of the Global datastore and role of this replication group in the + Global datastore. properties: globalReplicationGroupID: type: string @@ -557,8 +729,9 @@ spec: description: Returns the destination, format and type of the logs. properties: destinationDetails: - description: Configuration details of either a CloudWatch Logs - destination or Kinesis Data Firehose destination. + description: |- + Configuration details of either a CloudWatch Logs destination or Kinesis + Data Firehose destination. properties: cloudWatchLogsDetails: description: The configuration details of the CloudWatch @@ -599,19 +772,21 @@ spec: type: string type: array multiAZ: - description: 'A flag indicating if you have Multi-AZ enabled to enhance - fault tolerance. For more information, see Minimizing Downtime: - Multi-AZ (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/AutoFailover.html)' + description: |- + A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. + For more information, see Minimizing Downtime: Multi-AZ (http://docs.aws.amazon.com/AmazonElastiCache/latest/dg/AutoFailover.html) type: string nodeGroups: - description: A list of node groups in this replication group. For - Redis (cluster mode disabled) replication groups, this is a single-element - list. For Redis (cluster mode enabled) replication groups, the list + description: |- + A list of node groups in this replication group. For Valkey or Redis OSS + (cluster mode disabled) replication groups, this is a single-element list. + For Valkey or Redis OSS (cluster mode enabled) replication groups, the list contains an entry for each node group (shard). items: - description: Represents a collection of cache nodes in a replication - group. One node in the node group is the read/write primary node. - All the other nodes are read-only Replica nodes. + description: |- + Represents a collection of cache nodes in a replication group. 
One node in + the node group is the read/write primary node. All the other nodes are read-only + Replica nodes. properties: nodeGroupID: type: string @@ -631,8 +806,9 @@ spec: preferredOutpostARN: type: string readEndpoint: - description: Represents the information required for client - programs to connect to a cache node. + description: |- + Represents the information required for client programs to connect to a cache + node. This value is read-only. properties: address: type: string @@ -643,8 +819,9 @@ spec: type: object type: array primaryEndpoint: - description: Represents the information required for client - programs to connect to a cache node. + description: |- + Represents the information required for client programs to connect to a cache + node. This value is read-only. properties: address: type: string @@ -653,8 +830,9 @@ spec: type: integer type: object readerEndpoint: - description: Represents the information required for client - programs to connect to a cache node. + description: |- + Represents the information required for client programs to connect to a cache + node. This value is read-only. properties: address: type: string @@ -669,8 +847,9 @@ spec: type: object type: array pendingModifiedValues: - description: A group of settings to be applied to the replication - group, either immediately or during the next maintenance window. + description: |- + A group of settings to be applied to the replication group, either immediately + or during the next maintenance window. properties: authTokenStatus: type: string @@ -681,8 +860,9 @@ spec: description: The log delivery configurations being modified properties: destinationDetails: - description: Configuration details of either a CloudWatch - Logs destination or Kinesis Data Firehose destination. + description: |- + Configuration details of either a CloudWatch Logs destination or Kinesis + Data Firehose destination. properties: cloudWatchLogsDetails: description: The configuration details of the CloudWatch @@ -738,12 +918,14 @@ spec: format: date-time type: string snapshottingClusterID: - description: The cluster ID that is used as the daily snapshot source - for the replication group. + description: |- + The cluster ID that is used as the daily snapshot source for the replication + group. type: string status: - description: The current state of this replication group - creating, - available, modifying, deleting, create-failed, snapshotting. + description: |- + The current state of this replication group - creating, available, modifying, + deleting, create-failed, snapshotting. 
type: string type: object type: object diff --git a/helm/crds/elasticache.services.k8s.aws_serverlesscaches.yaml b/helm/crds/elasticache.services.k8s.aws_serverlesscaches.yaml new file mode 100644 index 00000000..0024598f --- /dev/null +++ b/helm/crds/elasticache.services.k8s.aws_serverlesscaches.yaml @@ -0,0 +1,325 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.2 + name: serverlesscaches.elasticache.services.k8s.aws +spec: + group: elasticache.services.k8s.aws + names: + kind: ServerlessCache + listKind: ServerlessCacheList + plural: serverlesscaches + singular: serverlesscache + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.status + name: STATUS + type: string + - jsonPath: .status.endpoint.address + name: ENDPOINT + type: string + - jsonPath: .status.conditions[?(@.type=="ACK.ResourceSynced")].status + name: Synced + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ServerlessCache is the Schema for the ServerlessCaches API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + ServerlessCacheSpec defines the desired state of ServerlessCache. + + The resource representing a serverless cache. + properties: + cacheUsageLimits: + description: |- + Sets the cache usage limits for storage and ElastiCache Processing Units + for the cache. + properties: + dataStorage: + description: The data storage limit. + properties: + maximum: + format: int64 + type: integer + minimum: + format: int64 + type: integer + unit: + type: string + type: object + eCPUPerSecond: + description: |- + The configuration for the number of ElastiCache Processing Units (ECPU) the + cache can consume per second. + properties: + maximum: + format: int64 + type: integer + minimum: + format: int64 + type: integer + type: object + type: object + dailySnapshotTime: + description: |- + The daily time that snapshots will be created from the new serverless cache. + By default this number is populated with 0, i.e. no snapshots will be created + on an automatic daily basis. Available for Valkey, Redis OSS and Serverless + Memcached only. + type: string + description: + description: |- + User-provided description for the serverless cache. The default is NULL, + i.e. if no description is provided then an empty string will be returned. + The maximum length is 255 characters. + type: string + engine: + description: The name of the cache engine to be used for creating + the serverless cache. + type: string + kmsKeyID: + description: |- + ARN of the customer managed key for encrypting the data at rest. If no KMS + key is provided, a default service key is used. 
+ type: string + majorEngineVersion: + description: |- + The version of the cache engine that will be used to create the serverless + cache. + type: string + securityGroupIDs: + description: |- + A list of the one or more VPC security groups to be associated with the serverless + cache. The security group will authorize traffic access for the VPC end-point + (private-link). If no other information is given this will be the VPC’s + Default Security Group that is associated with the cluster VPC end-point. + items: + type: string + type: array + securityGroupRefs: + items: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + namespace: + type: string + type: object + type: object + type: array + serverlessCacheName: + description: |- + User-provided identifier for the serverless cache. This parameter is stored + as a lowercase string. + type: string + snapshotARNsToRestore: + description: |- + The ARN(s) of the snapshot that the new serverless cache will be created + from. Available for Valkey, Redis OSS and Serverless Memcached only. + items: + type: string + type: array + snapshotRetentionLimit: + description: |- + The number of snapshots that will be retained for the serverless cache that + is being created. As new snapshots beyond this limit are added, the oldest + snapshots will be deleted on a rolling basis. Available for Valkey, Redis + OSS and Serverless Memcached only. + format: int64 + type: integer + subnetIDs: + description: |- + A list of the identifiers of the subnets where the VPC endpoint for the serverless + cache will be deployed. All the subnetIds must belong to the same VPC. + items: + type: string + type: array + subnetRefs: + items: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + namespace: + type: string + type: object + type: object + type: array + tags: + description: |- + The list of tags (key, value) pairs to be added to the serverless cache resource. + Default is NULL. + items: + description: |- + A tag that can be added to an ElastiCache cluster or replication group. Tags + are composed of a Key/Value pair. You can use tags to categorize and track + all your ElastiCache resources, with the exception of global replication + group. When you add or remove tags on replication groups, those actions will + be replicated to all nodes in the replication group. A tag with a null Value + is permitted. + properties: + key: + type: string + value: + type: string + type: object + type: array + userGroupID: + description: |- + The identifier of the UserGroup to be associated with the serverless cache. + Available for Valkey and Redis OSS only. Default is NULL. 
+ type: string + required: + - engine + - serverlessCacheName + type: object + status: + description: ServerlessCacheStatus defines the observed state of ServerlessCache + properties: + ackResourceMetadata: + description: |- + All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + that is used to contain resource sync state, account ownership, + constructed ARN for the resource + properties: + arn: + description: |- + ARN is the Amazon Resource Name for the resource. This is a + globally-unique identifier and is set only by the ACK service controller + once the controller has orchestrated the creation of the resource OR + when it has verified that an "adopted" resource (a resource where the + ARN annotation was set by the Kubernetes user on the CR) exists and + matches the supplied CR's Spec field values. + https://github.com/aws/aws-controllers-k8s/issues/270 + type: string + ownerAccountID: + description: |- + OwnerAccountID is the AWS Account ID of the account that owns the + backend AWS service API resource. + type: string + region: + description: Region is the AWS region in which the resource exists + or will exist. + type: string + required: + - ownerAccountID + - region + type: object + conditions: + description: |- + All CRs managed by ACK have a common `Status.Conditions` member that + contains a collection of `ackv1alpha1.Condition` objects that describe + the various terminal states of the CR and its backend AWS service API + resource + items: + description: |- + Condition is the common struct used by all CRDs managed by ACK service + controllers to indicate terminal states of the CR and its backend AWS + service API resource + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type is the type of the Condition + type: string + required: + - status + - type + type: object + type: array + createTime: + description: When the serverless cache was created. + format: date-time + type: string + endpoint: + description: |- + Represents the information required for client programs to connect to a cache + node. This value is read-only. + properties: + address: + type: string + port: + format: int64 + type: integer + type: object + fullEngineVersion: + description: |- + The name and version number of the engine the serverless cache is compatible + with. + type: string + readerEndpoint: + description: |- + Represents the information required for client programs to connect to a cache + node. This value is read-only. + properties: + address: + type: string + port: + format: int64 + type: integer + type: object + status: + description: |- + The current status of the serverless cache. The allowed values are CREATING, + AVAILABLE, DELETING, CREATE-FAILED and MODIFYING. 
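For orientation alongside the generated ServerlessCache schema above, a minimal example manifest might look like the sketch below. The field names come straight from the schema in this diff; the engine value, usage limits, security group, subnet, and tag values are illustrative placeholders rather than values taken from this change.

apiVersion: elasticache.services.k8s.aws/v1alpha1
kind: ServerlessCache
metadata:
  name: example-serverless-cache
spec:
  serverlessCacheName: example-serverless-cache   # required by the schema above
  engine: valkey                                  # assumed engine value; redis and memcached are also accepted by ElastiCache Serverless
  majorEngineVersion: "8"                         # assumed version string for the chosen engine
  description: "Example serverless cache managed by ACK"
  cacheUsageLimits:
    dataStorage:
      maximum: 10
      unit: GB                                    # placeholder unit
    eCPUPerSecond:
      maximum: 5000
  securityGroupIDs:
    - sg-0123456789abcdef0                        # placeholder security group
  subnetIDs:
    - subnet-0123456789abcdef0                    # placeholder subnets; all must belong to the same VPC
    - subnet-0fedcba9876543210
  tags:
    - key: environment
      value: dev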
+ type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/helm/crds/elasticache.services.k8s.aws_serverlesscachesnapshots.yaml b/helm/crds/elasticache.services.k8s.aws_serverlesscachesnapshots.yaml new file mode 100644 index 00000000..d75a44de --- /dev/null +++ b/helm/crds/elasticache.services.k8s.aws_serverlesscachesnapshots.yaml @@ -0,0 +1,251 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.2 + name: serverlesscachesnapshots.elasticache.services.k8s.aws +spec: + group: elasticache.services.k8s.aws + names: + kind: ServerlessCacheSnapshot + listKind: ServerlessCacheSnapshotList + plural: serverlesscachesnapshots + singular: serverlesscachesnapshot + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.status + name: STATUS + type: string + - jsonPath: .status.conditions[?(@.type=="ACK.ResourceSynced")].status + name: Synced + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: ServerlessCacheSnapshot is the Schema for the ServerlessCacheSnapshots + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + ServerlessCacheSnapshotSpec defines the desired state of ServerlessCacheSnapshot. + + The resource representing a serverless cache snapshot. Available for Valkey, + Redis OSS and Serverless Memcached only. + properties: + kmsKeyID: + description: |- + The ID of the KMS key used to encrypt the snapshot. Available for Valkey, + Redis OSS and Serverless Memcached only. Default: NULL + type: string + x-kubernetes-validations: + - message: Value is immutable once set + rule: self == oldSelf + kmsKeyRef: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + namespace: + type: string + type: object + type: object + serverlessCacheName: + description: |- + The name of an existing serverless cache. The snapshot is created from this + cache. Available for Valkey, Redis OSS and Serverless Memcached only. 
+ type: string + x-kubernetes-validations: + - message: Value is immutable once set + rule: self == oldSelf + serverlessCacheRef: + description: "AWSResourceReferenceWrapper provides a wrapper around + *AWSResourceReference\ntype to provide more user friendly syntax + for references using 'from' field\nEx:\nAPIIDRef:\n\n\tfrom:\n\t + \ name: my-api" + properties: + from: + description: |- + AWSResourceReference provides all the values necessary to reference another + k8s resource for finding the identifier(Id/ARN/Name) + properties: + name: + type: string + namespace: + type: string + type: object + type: object + serverlessCacheSnapshotName: + description: |- + The name for the snapshot being created. Must be unique for the customer + account. Available for Valkey, Redis OSS and Serverless Memcached only. Must + be between 1 and 255 characters. + type: string + x-kubernetes-validations: + - message: Value is immutable once set + rule: self == oldSelf + tags: + description: |- + A list of tags to be added to the snapshot resource. A tag is a key-value + pair. Available for Valkey, Redis OSS and Serverless Memcached only. + items: + description: |- + A tag that can be added to an ElastiCache cluster or replication group. Tags + are composed of a Key/Value pair. You can use tags to categorize and track + all your ElastiCache resources, with the exception of global replication + group. When you add or remove tags on replication groups, those actions will + be replicated to all nodes in the replication group. A tag with a null Value + is permitted. + properties: + key: + type: string + value: + type: string + type: object + type: array + required: + - serverlessCacheSnapshotName + type: object + status: + description: ServerlessCacheSnapshotStatus defines the observed state + of ServerlessCacheSnapshot + properties: + ackResourceMetadata: + description: |- + All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + that is used to contain resource sync state, account ownership, + constructed ARN for the resource + properties: + arn: + description: |- + ARN is the Amazon Resource Name for the resource. This is a + globally-unique identifier and is set only by the ACK service controller + once the controller has orchestrated the creation of the resource OR + when it has verified that an "adopted" resource (a resource where the + ARN annotation was set by the Kubernetes user on the CR) exists and + matches the supplied CR's Spec field values. + https://github.com/aws/aws-controllers-k8s/issues/270 + type: string + ownerAccountID: + description: |- + OwnerAccountID is the AWS Account ID of the account that owns the + backend AWS service API resource. + type: string + region: + description: Region is the AWS region in which the resource exists + or will exist. + type: string + required: + - ownerAccountID + - region + type: object + bytesUsedForCache: + description: |- + The total size of a serverless cache snapshot, in bytes. Available for Valkey, + Redis OSS and Serverless Memcached only. 
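As a rough sketch of how the new ServerlessCacheSnapshot resource above could be used, the manifest below references the serverless cache from the earlier example via serverlessCacheRef. The snapshot name and tag values are placeholders, not values from this change.

apiVersion: elasticache.services.k8s.aws/v1alpha1
kind: ServerlessCacheSnapshot
metadata:
  name: example-serverless-cache-snapshot
spec:
  serverlessCacheSnapshotName: example-serverless-cache-snapshot   # required; immutable once set per the validation rule above
  serverlessCacheRef:                                              # resolves the source cache by its Kubernetes object name
    from:
      name: example-serverless-cache
  tags:
    - key: environment
      value: dev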
+ type: string + conditions: + description: |- + All CRs managed by ACK have a common `Status.Conditions` member that + contains a collection of `ackv1alpha1.Condition` objects that describe + the various terminal states of the CR and its backend AWS service API + resource + items: + description: |- + Condition is the common struct used by all CRDs managed by ACK service + controllers to indicate terminal states of the CR and its backend AWS + service API resource + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type is the type of the Condition + type: string + required: + - status + - type + type: object + type: array + createTime: + description: |- + The date and time that the source serverless cache's metadata and cache data + set was obtained for the snapshot. Available for Valkey, Redis OSS and Serverless + Memcached only. + format: date-time + type: string + expiryTime: + description: |- + The time that the serverless cache snapshot will expire. Available for Valkey, + Redis OSS and Serverless Memcached only. + format: date-time + type: string + serverlessCacheConfiguration: + description: |- + The configuration of the serverless cache, at the time the snapshot was taken. + Available for Valkey, Redis OSS and Serverless Memcached only. + properties: + engine: + type: string + majorEngineVersion: + type: string + serverlessCacheName: + type: string + type: object + snapshotType: + description: |- + The type of snapshot of serverless cache. Available for Valkey, Redis OSS + and Serverless Memcached only. + type: string + status: + description: |- + The current status of the serverless cache. Available for Valkey, Redis OSS + and Serverless Memcached only. + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/helm/crds/elasticache.services.k8s.aws_snapshots.yaml b/helm/crds/elasticache.services.k8s.aws_snapshots.yaml index 1f17abbe..fb2c9b28 100644 --- a/helm/crds/elasticache.services.k8s.aws_snapshots.yaml +++ b/helm/crds/elasticache.services.k8s.aws_snapshots.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.2 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.2 name: snapshots.elasticache.services.k8s.aws spec: group: elasticache.services.k8s.aws @@ -21,32 +20,41 @@ spec: description: Snapshot is the Schema for the Snapshots API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: "SnapshotSpec defines the desired state of Snapshot. \n Represents - a copy of an entire Redis cluster as of the time when the snapshot was - taken." + description: |- + SnapshotSpec defines the desired state of Snapshot. + + Represents a copy of an entire Valkey or Redis OSS cluster as of the time + when the snapshot was taken. properties: cacheClusterID: - description: The identifier of an existing cluster. The snapshot is - created from this cluster. + description: |- + The identifier of an existing cluster. The snapshot is created from this + cluster. type: string kmsKeyID: description: The ID of the KMS key used to encrypt the snapshot. type: string replicationGroupID: - description: The identifier of an existing replication group. The - snapshot is created from this replication group. + description: |- + The identifier of an existing replication group. The snapshot is created + from this replication group. type: string snapshotName: description: A name for the snapshot being created. @@ -56,16 +64,16 @@ spec: copy. type: string tags: - description: A list of tags to be added to this resource. A tag is - a key-value pair. A tag key must be accompanied by a tag value, - although null is accepted. + description: |- + A list of tags to be added to this resource. A tag is a key-value pair. A + tag key must be accompanied by a tag value, although null is accepted. items: - description: A tag that can be added to an ElastiCache cluster or - replication group. Tags are composed of a Key/Value pair. You - can use tags to categorize and track all your ElastiCache resources, - with the exception of global replication group. When you add or - remove tags on replication groups, those actions will be replicated - to all nodes in the replication group. A tag with a null Value + description: |- + A tag that can be added to an ElastiCache cluster or replication group. Tags + are composed of a Key/Value pair. You can use tags to categorize and track + all your ElastiCache resources, with the exception of global replication + group. When you add or remove tags on replication groups, those actions will + be replicated to all nodes in the replication group. A tag with a null Value is permitted. 
properties: key: @@ -81,24 +89,25 @@ spec: description: SnapshotStatus defines the observed state of Snapshot properties: ackResourceMetadata: - description: All CRs managed by ACK have a common `Status.ACKResourceMetadata` - member that is used to contain resource sync state, account ownership, + description: |- + All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + that is used to contain resource sync state, account ownership, constructed ARN for the resource properties: arn: - description: 'ARN is the Amazon Resource Name for the resource. - This is a globally-unique identifier and is set only by the - ACK service controller once the controller has orchestrated - the creation of the resource OR when it has verified that an - "adopted" resource (a resource where the ARN annotation was - set by the Kubernetes user on the CR) exists and matches the - supplied CR''s Spec field values. TODO(vijat@): Find a better - strategy for resources that do not have ARN in CreateOutputResponse - https://github.com/aws/aws-controllers-k8s/issues/270' + description: |- + ARN is the Amazon Resource Name for the resource. This is a + globally-unique identifier and is set only by the ACK service controller + once the controller has orchestrated the creation of the resource OR + when it has verified that an "adopted" resource (a resource where the + ARN annotation was set by the Kubernetes user on the CR) exists and + matches the supplied CR's Spec field values. + https://github.com/aws/aws-controllers-k8s/issues/270 type: string ownerAccountID: - description: OwnerAccountID is the AWS Account ID of the account - that owns the backend AWS service API resource. + description: |- + OwnerAccountID is the AWS Account ID of the account that owns the + backend AWS service API resource. type: string region: description: Region is the AWS region in which the resource exists @@ -109,69 +118,80 @@ spec: - region type: object autoMinorVersionUpgrade: - description: If you are running Redis engine version 6.0 or later, - set this parameter to yes if you want to opt-in to the next auto - minor version upgrade campaign. This parameter is disabled for previous - versions. + description: |- + If you are running Valkey 7.2 and above or Redis OSS engine version 6.0 and + above, set this parameter to yes if you want to opt-in to the next auto minor + version upgrade campaign. This parameter is disabled for previous versions. type: boolean automaticFailover: - description: Indicates the status of automatic failover for the source - Redis replication group. + description: |- + Indicates the status of automatic failover for the source Valkey or Redis + OSS replication group. type: string cacheClusterCreateTime: description: The date and time when the source cluster was created. format: date-time type: string cacheNodeType: - description: "The name of the compute and memory capacity node type - for the source cluster. \n The following node types are supported - by ElastiCache. Generally speaking, the current generation types - provide more memory and computational power at lower cost when compared - to their equivalent previous generation counterparts. \n * General - purpose: Current generation: M6g node types (available only for - Redis engine version 5.0.6 onward and for Memcached engine version - 1.5.16 onward). 
cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, - cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge - For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, - cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: - cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, - cache.m4.10xlarge T4g node types (available only for Redis engine - version 5.0.6 onward and Memcached engine version 1.5.16 onward): - cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: - cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, - cache.t2.small, cache.t2.medium Previous generation: (not recommended. - Existing clusters are still supported but creation of new clusters - is not supported for these types.) T1 node types: cache.t1.micro - M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, - cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, - cache.m3.xlarge, cache.m3.2xlarge \n * Compute optimized: Previous - generation: (not recommended. Existing clusters are still supported - but creation of new clusters is not supported for these types.) - C1 node types: cache.c1.xlarge \n * Memory optimized with data tiering: - Current generation: R6gd node types (available only for Redis engine - version 6.2 onward). cache.r6gd.xlarge, cache.r6gd.2xlarge, cache.r6gd.4xlarge, - cache.r6gd.8xlarge, cache.r6gd.12xlarge, cache.r6gd.16xlarge \n - * Memory optimized: Current generation: R6g node types (available - only for Redis engine version 5.0.6 onward and for Memcached engine - version 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, - cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge - For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - For region availability, see Supported Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) - R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, - cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: - cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, - cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. - Existing clusters are still supported but creation of new clusters - is not supported for these types.) M2 node types: cache.m2.xlarge, - cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, - cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge - \n Additional node type info \n * All current generation instance - types are created in Amazon VPC by default. \n * Redis append-only - files (AOF) are not supported for T1 or T2 instances. \n * Redis - Multi-AZ with automatic failover is not supported on T1 instances. - \n * Redis configuration variables appendonly and appendfsync are - not supported on Redis version 2.8.22 and later." + description: |- + The name of the compute and memory capacity node type for the source cluster. + + The following node types are supported by ElastiCache. 
Generally speaking, + the current generation types provide more memory and computational power + at lower cost when compared to their equivalent previous generation counterparts. + + * General purpose: Current generation: M7g node types: cache.m7g.large, + cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, + cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + M6g node types (available only for Redis OSS engine version 5.0.6 onward + and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, + cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, + cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, + cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: + cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge + T4g node types (available only for Redis OSS engine version 5.0.6 onward + and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, + cache.t4g.medium T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium + T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous + generation: (not recommended. Existing clusters are still supported but + creation of new clusters is not supported for these types.) T1 node types: + cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, + cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, + cache.m3.2xlarge + + * Compute optimized: Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for + these types.) C1 node types: cache.c1.xlarge + + * Memory optimized: Current generation: R7g node types: cache.r7g.large, + cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, + cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see Supported + Node Types (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion) + R6g node types (available only for Redis OSS engine version 5.0.6 onward + and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, + cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, + cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, + cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: + cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, + cache.r4.16xlarge Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for + these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + cache.r3.8xlarge + + Additional node type info + + * All current generation instance types are created in Amazon VPC by default. + + * Valkey or Redis OSS append-only files (AOF) are not supported for T1 + or T2 instances. + + * Valkey or Redis OSS Multi-AZ with automatic failover is not supported + on T1 instances. + + * The configuration variables appendonly and appendfsync are not supported + on Valkey, or on Redis OSS version 2.8.22 and later. 
type: string cacheParameterGroupName: description: The cache parameter group that is associated with the @@ -182,14 +202,16 @@ spec: source cluster. type: string conditions: - description: All CRS managed by ACK have a common `Status.Conditions` - member that contains a collection of `ackv1alpha1.Condition` objects - that describe the various terminal states of the CR and its backend - AWS service API resource + description: |- + All CRs managed by ACK have a common `Status.Conditions` member that + contains a collection of `ackv1alpha1.Condition` objects that describe + the various terminal states of the CR and its backend AWS service API + resource items: - description: Condition is the common struct used by all CRDs managed - by ACK service controllers to indicate terminal states of the - CR and its backend AWS service API resource + description: |- + Condition is the common struct used by all CRDs managed by ACK service + controllers to indicate terminal states of the CR and its backend AWS + service API resource properties: lastTransitionTime: description: Last time the condition transitioned from one status @@ -215,10 +237,10 @@ spec: type: object type: array dataTiering: - description: Enables data tiering. Data tiering is only supported - for replication groups using the r6gd node type. This parameter - must be set to true when using r6gd nodes. For more information, - see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/data-tiering.html). + description: |- + Enables data tiering. Data tiering is only supported for replication groups + using the r6gd node type. This parameter must be set to true when using r6gd + nodes. For more information, see Data tiering (https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/data-tiering.html). type: string engine: description: The name of the cache engine (memcached or redis) used @@ -244,9 +266,10 @@ spec: cacheSize: type: string nodeGroupConfiguration: - description: 'Node group (shard) configuration options. Each - node group (shard) configuration has the following: Slots, - PrimaryAvailabilityZone, ReplicaAvailabilityZones, ReplicaCount.' + description: |- + Node group (shard) configuration options. Each node group (shard) configuration + has the following: Slots, PrimaryAvailabilityZone, ReplicaAvailabilityZones, + ReplicaCount. properties: nodeGroupID: type: string @@ -276,16 +299,18 @@ spec: type: object type: array numCacheNodes: - description: "The number of cache nodes in the source cluster. \n - For clusters running Redis, this value must be 1. For clusters running - Memcached, this value must be between 1 and 40." + description: |- + The number of cache nodes in the source cluster. + + For clusters running Valkey or Redis OSS, this value must be 1. For clusters + running Memcached, this value must be between 1 and 40. format: int64 type: integer numNodeGroups: - description: The number of node groups (shards) in this snapshot. - When restoring from a snapshot, the number of node groups (shards) - in the snapshot and in the restored replication group must be the - same. + description: |- + The number of node groups (shards) in this snapshot. When restoring from + a snapshot, the number of node groups (shards) in the snapshot and in the + restored replication group must be the same. format: int64 type: integer port: @@ -298,12 +323,28 @@ spec: cluster is located. type: string preferredMaintenanceWindow: - description: "Specifies the weekly time range during which maintenance - on the cluster is performed. 
It is specified as a range in the format - ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance - window is a 60 minute period. \n Valid values for ddd are: \n * - sun \n * mon \n * tue \n * wed \n * thu \n * fri \n * sat \n Example: - sun:23:00-mon:01:30" + description: |- + Specifies the weekly time range during which maintenance on the cluster is + performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi + (24H Clock UTC). The minimum maintenance window is a 60 minute period. + + Valid values for ddd are: + + * sun + + * mon + + * tue + + * wed + + * thu + + * fri + + * sat + + Example: sun:23:00-mon:01:30 type: string preferredOutpostARN: description: The ARN (Amazon Resource Name) of the preferred outpost. @@ -312,34 +353,43 @@ spec: description: A description of the source replication group. type: string snapshotRetentionLimit: - description: "For an automatic snapshot, the number of days for which - ElastiCache retains the snapshot before deleting it. \n For manual - snapshots, this field reflects the SnapshotRetentionLimit for the - source cluster when the snapshot was created. This field is otherwise - ignored: Manual snapshots do not expire, and can only be deleted - using the DeleteSnapshot operation. \n Important If the value of - SnapshotRetentionLimit is set to zero (0), backups are turned off." + description: |- + For an automatic snapshot, the number of days for which ElastiCache retains + the snapshot before deleting it. + + For manual snapshots, this field reflects the SnapshotRetentionLimit for + the source cluster when the snapshot was created. This field is otherwise + ignored: Manual snapshots do not expire, and can only be deleted using the + DeleteSnapshot operation. + + Important If the value of SnapshotRetentionLimit is set to zero (0), backups + are turned off. format: int64 type: integer snapshotSource: - description: Indicates whether the snapshot is from an automatic backup - (automated) or was created manually (manual). + description: |- + Indicates whether the snapshot is from an automatic backup (automated) or + was created manually (manual). type: string snapshotStatus: - description: 'The status of the snapshot. Valid values: creating | - available | restoring | copying | deleting.' + description: |- + The status of the snapshot. Valid values: creating | available | restoring + | copying | deleting. type: string snapshotWindow: - description: The daily time range during which ElastiCache takes daily - snapshots of the source cluster. + description: |- + The daily time range during which ElastiCache takes daily snapshots of the + source cluster. type: string topicARN: - description: The Amazon Resource Name (ARN) for the topic used by - the source cluster for publishing notifications. + description: |- + The Amazon Resource Name (ARN) for the topic used by the source cluster for + publishing notifications. type: string vpcID: - description: The Amazon Virtual Private Cloud identifier (VPC ID) - of the cache subnet group for the source cluster. + description: |- + The Amazon Virtual Private Cloud identifier (VPC ID) of the cache subnet + group for the source cluster. 
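The Snapshot CRD touched above is not new in this change (only its descriptions are rewrapped by the newer generator), but for completeness a minimal manifest under the same spec schema might look like this sketch; the snapshot name and cluster identifier are placeholders.

apiVersion: elasticache.services.k8s.aws/v1alpha1
kind: Snapshot
metadata:
  name: example-snapshot
spec:
  snapshotName: example-snapshot
  cacheClusterID: example-cache-cluster   # placeholder; alternatively set replicationGroupID for a replication group snapshot
  tags:
    - key: environment
      value: dev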
type: string type: object type: object diff --git a/helm/crds/elasticache.services.k8s.aws_usergroups.yaml b/helm/crds/elasticache.services.k8s.aws_usergroups.yaml index e76b3d28..c3a4cc92 100644 --- a/helm/crds/elasticache.services.k8s.aws_usergroups.yaml +++ b/helm/crds/elasticache.services.k8s.aws_usergroups.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.2 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.2 name: usergroups.elasticache.services.k8s.aws spec: group: elasticache.services.k8s.aws @@ -21,33 +20,42 @@ spec: description: UserGroup is the Schema for the UserGroups API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: properties: engine: - description: The current supported value is Redis. + description: |- + The current supported value is Redis user. + + Regex Pattern: `^[a-zA-Z]*$` type: string tags: - description: A list of tags to be added to this resource. A tag is - a key-value pair. A tag key must be accompanied by a tag value, - although null is accepted. + description: |- + A list of tags to be added to this resource. A tag is a key-value pair. A + tag key must be accompanied by a tag value, although null is accepted. Available + for Valkey and Redis OSS only. items: - description: A tag that can be added to an ElastiCache cluster or - replication group. Tags are composed of a Key/Value pair. You - can use tags to categorize and track all your ElastiCache resources, - with the exception of global replication group. When you add or - remove tags on replication groups, those actions will be replicated - to all nodes in the replication group. A tag with a null Value + description: |- + A tag that can be added to an ElastiCache cluster or replication group. Tags + are composed of a Key/Value pair. You can use tags to categorize and track + all your ElastiCache resources, with the exception of global replication + group. When you add or remove tags on replication groups, those actions will + be replicated to all nodes in the replication group. A tag with a null Value is permitted. 
properties: key: @@ -72,24 +80,25 @@ spec: description: UserGroupStatus defines the observed state of UserGroup properties: ackResourceMetadata: - description: All CRs managed by ACK have a common `Status.ACKResourceMetadata` - member that is used to contain resource sync state, account ownership, + description: |- + All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + that is used to contain resource sync state, account ownership, constructed ARN for the resource properties: arn: - description: 'ARN is the Amazon Resource Name for the resource. - This is a globally-unique identifier and is set only by the - ACK service controller once the controller has orchestrated - the creation of the resource OR when it has verified that an - "adopted" resource (a resource where the ARN annotation was - set by the Kubernetes user on the CR) exists and matches the - supplied CR''s Spec field values. TODO(vijat@): Find a better - strategy for resources that do not have ARN in CreateOutputResponse - https://github.com/aws/aws-controllers-k8s/issues/270' + description: |- + ARN is the Amazon Resource Name for the resource. This is a + globally-unique identifier and is set only by the ACK service controller + once the controller has orchestrated the creation of the resource OR + when it has verified that an "adopted" resource (a resource where the + ARN annotation was set by the Kubernetes user on the CR) exists and + matches the supplied CR's Spec field values. + https://github.com/aws/aws-controllers-k8s/issues/270 type: string ownerAccountID: - description: OwnerAccountID is the AWS Account ID of the account - that owns the backend AWS service API resource. + description: |- + OwnerAccountID is the AWS Account ID of the account that owns the + backend AWS service API resource. type: string region: description: Region is the AWS region in which the resource exists @@ -100,14 +109,16 @@ spec: - region type: object conditions: - description: All CRS managed by ACK have a common `Status.Conditions` - member that contains a collection of `ackv1alpha1.Condition` objects - that describe the various terminal states of the CR and its backend - AWS service API resource + description: |- + All CRs managed by ACK have a common `Status.Conditions` member that + contains a collection of `ackv1alpha1.Condition` objects that describe + the various terminal states of the CR and its backend AWS service API + resource items: - description: Condition is the common struct used by all CRDs managed - by ACK service controllers to indicate terminal states of the - CR and its backend AWS service API resource + description: |- + Condition is the common struct used by all CRDs managed by ACK service + controllers to indicate terminal states of the CR and its backend AWS + service API resource properties: lastTransitionTime: description: Last time the condition transitioned from one status @@ -133,7 +144,8 @@ spec: type: object type: array minimumEngineVersion: - description: The minimum engine version required, which is Redis 6.0 + description: The minimum engine version required, which is Redis OSS + 6.0 type: string pendingChanges: description: A list of updates being applied to the user group. 
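Between the UserGroup and User hunks, a sketch of a UserGroup manifest for reference. Only the engine and tags fields are visible in this hunk, so the userGroupID and userIDs fields shown here are assumed to carry over unchanged from the existing CRD spec; all values are placeholders.

apiVersion: elasticache.services.k8s.aws/v1alpha1
kind: UserGroup
metadata:
  name: example-user-group
spec:
  engine: redis                       # per the schema note above, Redis is the supported engine value
  userGroupID: example-user-group     # assumed field, not shown in this hunk
  userIDs:                            # assumed field, not shown in this hunk
    - default
    - app-user
  tags:
    - key: environment
      value: dev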
diff --git a/helm/crds/elasticache.services.k8s.aws_users.yaml b/helm/crds/elasticache.services.k8s.aws_users.yaml index a148c668..f07896c8 100644 --- a/helm/crds/elasticache.services.k8s.aws_users.yaml +++ b/helm/crds/elasticache.services.k8s.aws_users.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.2 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.2 name: users.elasticache.services.k8s.aws spec: group: elasticache.services.k8s.aws @@ -21,44 +20,57 @@ spec: description: User is the Schema for the Users API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: properties: accessString: - description: Access permissions string used for this user. + description: |- + Access permissions string used for this user. + + Regex Pattern: `\S` type: string engine: - description: The current supported value is Redis. + description: |- + The current supported value is Redis. + + Regex Pattern: `^[a-zA-Z]*$` type: string noPasswordRequired: description: Indicates a password is not required for this user. type: boolean passwords: - description: Passwords used for this user. You can create up to two - passwords for each user. + description: |- + Passwords used for this user. You can create up to two passwords for each + user. items: - description: SecretKeyReference combines a k8s corev1.SecretReference - with a specific key within the referred-to Secret + description: |- + SecretKeyReference combines a k8s corev1.SecretReference with a + specific key within the referred-to Secret properties: key: description: Key is the key within the secret type: string name: - description: Name is unique within a namespace to reference + description: name is unique within a namespace to reference a secret resource. type: string namespace: - description: Namespace defines the space within which the secret + description: namespace defines the space within which the secret name must be unique. type: string required: @@ -67,16 +79,16 @@ spec: x-kubernetes-map-type: atomic type: array tags: - description: A list of tags to be added to this resource. A tag is - a key-value pair. 
A tag key must be accompanied by a tag value, - although null is accepted. + description: |- + A list of tags to be added to this resource. A tag is a key-value pair. A + tag key must be accompanied by a tag value, although null is accepted. items: - description: A tag that can be added to an ElastiCache cluster or - replication group. Tags are composed of a Key/Value pair. You - can use tags to categorize and track all your ElastiCache resources, - with the exception of global replication group. When you add or - remove tags on replication groups, those actions will be replicated - to all nodes in the replication group. A tag with a null Value + description: |- + A tag that can be added to an ElastiCache cluster or replication group. Tags + are composed of a Key/Value pair. You can use tags to categorize and track + all your ElastiCache resources, with the exception of global replication + group. When you add or remove tags on replication groups, those actions will + be replicated to all nodes in the replication group. A tag with a null Value is permitted. properties: key: @@ -86,7 +98,10 @@ spec: type: object type: array userID: - description: The ID of the user. + description: |- + The ID of the user. + + Regex Pattern: `^[a-zA-Z][a-zA-Z0-9\-]*$` type: string userName: description: The username of the user. @@ -101,24 +116,25 @@ spec: description: UserStatus defines the observed state of User properties: ackResourceMetadata: - description: All CRs managed by ACK have a common `Status.ACKResourceMetadata` - member that is used to contain resource sync state, account ownership, + description: |- + All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + that is used to contain resource sync state, account ownership, constructed ARN for the resource properties: arn: - description: 'ARN is the Amazon Resource Name for the resource. - This is a globally-unique identifier and is set only by the - ACK service controller once the controller has orchestrated - the creation of the resource OR when it has verified that an - "adopted" resource (a resource where the ARN annotation was - set by the Kubernetes user on the CR) exists and matches the - supplied CR''s Spec field values. TODO(vijat@): Find a better - strategy for resources that do not have ARN in CreateOutputResponse - https://github.com/aws/aws-controllers-k8s/issues/270' + description: |- + ARN is the Amazon Resource Name for the resource. This is a + globally-unique identifier and is set only by the ACK service controller + once the controller has orchestrated the creation of the resource OR + when it has verified that an "adopted" resource (a resource where the + ARN annotation was set by the Kubernetes user on the CR) exists and + matches the supplied CR's Spec field values. + https://github.com/aws/aws-controllers-k8s/issues/270 type: string ownerAccountID: - description: OwnerAccountID is the AWS Account ID of the account - that owns the backend AWS service API resource. + description: |- + OwnerAccountID is the AWS Account ID of the account that owns the + backend AWS service API resource. 
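To round out the User CRD changes above, a sketch of a User manifest using the passwords SecretKeyReference shape from this diff. The user ID, access string, and Secret coordinates are placeholders; the password is read from an existing Kubernetes Secret rather than stored in the spec.

apiVersion: elasticache.services.k8s.aws/v1alpha1
kind: User
metadata:
  name: app-user
spec:
  userID: app-user                            # must match the ^[a-zA-Z][a-zA-Z0-9\-]*$ pattern noted above
  userName: app-user
  engine: redis
  accessString: "on ~app:* +@read +@write"    # placeholder ACL string
  passwords:
    - name: app-user-password                 # name/key/namespace of an existing Secret (placeholders)
      key: password
      namespace: default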
type: string region: description: Region is the AWS region in which the resource exists @@ -138,14 +154,16 @@ spec: type: string type: object conditions: - description: All CRS managed by ACK have a common `Status.Conditions` - member that contains a collection of `ackv1alpha1.Condition` objects - that describe the various terminal states of the CR and its backend - AWS service API resource + description: |- + All CRs managed by ACK have a common `Status.Conditions` member that + contains a collection of `ackv1alpha1.Condition` objects that describe + the various terminal states of the CR and its backend AWS service API + resource items: - description: Condition is the common struct used by all CRDs managed - by ACK service controllers to indicate terminal states of the - CR and its backend AWS service API resource + description: |- + Condition is the common struct used by all CRDs managed by ACK service + controllers to indicate terminal states of the CR and its backend AWS + service API resource properties: lastTransitionTime: description: Last time the condition transitioned from one status @@ -177,7 +195,8 @@ spec: description: Access permissions string used for this user. type: string minimumEngineVersion: - description: The minimum engine version required, which is Redis 6.0 + description: The minimum engine version required, which is Redis OSS + 6.0 type: string status: description: Indicates the user status. Can be "active", "modifying" diff --git a/helm/crds/services.k8s.aws_adoptedresources.yaml b/helm/crds/services.k8s.aws_adoptedresources.yaml index 7dca541d..b7be3224 100644 --- a/helm/crds/services.k8s.aws_adoptedresources.yaml +++ b/helm/crds/services.k8s.aws_adoptedresources.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.2 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.2 name: adoptedresources.services.k8s.aws spec: group: services.k8s.aws @@ -21,14 +20,19 @@ spec: description: AdoptedResource is the schema for the AdoptedResource API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -42,126 +46,144 @@ spec: additionalKeys: additionalProperties: type: string - description: AdditionalKeys represents any additional arbitrary - identifiers used when describing the target resource. + description: |- + AdditionalKeys represents any additional arbitrary identifiers used when + describing the target resource. type: object arn: - description: ARN is the AWS Resource Name for the resource. It - is a globally unique identifier. + description: |- + ARN is the AWS Resource Name for the resource. It is a globally + unique identifier. type: string nameOrID: - description: NameOrId is a user-supplied string identifier for - the resource. It may or may not be globally unique, depending - on the type of resource. + description: |- + NameOrId is a user-supplied string identifier for the resource. It may + or may not be globally unique, depending on the type of resource. type: string type: object kubernetes: - description: ResourceWithMetadata provides the values necessary to - create a Kubernetes resource and override any of its metadata values. + description: |- + ResourceWithMetadata provides the values necessary to create a + Kubernetes resource and override any of its metadata values. properties: group: type: string kind: type: string metadata: - description: "ObjectMeta is metadata that all persisted resources - must have, which includes all objects users must create. It - is not possible to use `metav1.ObjectMeta` inside spec, as the - controller-gen automatically converts this to an arbitrary string-string - map. https://github.com/kubernetes-sigs/controller-tools/issues/385 - \n Active discussion about inclusion of this field in the spec - is happening in this PR: https://github.com/kubernetes-sigs/controller-tools/pull/395 - \n Until this is allowed, or if it never is, we will produce - a subset of the object meta that contains only the fields which - the user is allowed to modify in the metadata." + description: |- + ObjectMeta is metadata that all persisted resources must have, which includes all objects + users must create. + It is not possible to use `metav1.ObjectMeta` inside spec, as the controller-gen + automatically converts this to an arbitrary string-string map. + https://github.com/kubernetes-sigs/controller-tools/issues/385 + + Active discussion about inclusion of this field in the spec is happening in this PR: + https://github.com/kubernetes-sigs/controller-tools/pull/395 + + Until this is allowed, or if it never is, we will produce a subset of the object meta + that contains only the fields which the user is allowed to modify in the metadata. properties: annotations: additionalProperties: type: string - description: 'Annotations is an unstructured key value map - stored with a resource that may be set by external tools - to store and retrieve arbitrary metadata. They are not queryable - and should be preserved when modifying objects. More info: - http://kubernetes.io/docs/user-guide/annotations' + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. 
+ More info: http://kubernetes.io/docs/user-guide/annotations type: object generateName: - description: "GenerateName is an optional prefix, used by - the server, to generate a unique name ONLY IF the Name field - has not been provided. If this field is used, the name returned - to the client will be different than the name passed. This - value will also be combined with a unique suffix. The provided - value has the same validation rules as the Name field, and - may be truncated by the length of the suffix required to - make the value unique on the server. \n If this field is - specified and the generated name exists, the server will - NOT return a 409 - instead, it will either return 201 Created - or 500 with Reason ServerTimeout indicating a unique name - could not be found in the time allotted, and the client - should retry (optionally after the time indicated in the - Retry-After header). \n Applied only if Name is not specified. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + description: |- + GenerateName is an optional prefix, used by the server, to generate a unique + name ONLY IF the Name field has not been provided. + If this field is used, the name returned to the client will be different + than the name passed. This value will also be combined with a unique suffix. + The provided value has the same validation rules as the Name field, + and may be truncated by the length of the suffix required to make the value + unique on the server. + + If this field is specified and the generated name exists, the server will + NOT return a 409 - instead, it will either return 201 Created or 500 with Reason + ServerTimeout indicating a unique name could not be found in the time allotted, and the client + should retry (optionally after the time indicated in the Retry-After header). + + Applied only if Name is not specified. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency type: string labels: additionalProperties: type: string - description: 'Map of string keys and values that can be used - to organize and categorize (scope and select) objects. May - match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels' + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels type: object name: - description: 'Name must be unique within a namespace. Is required - when creating resources, although some resources may allow - a client to request the generation of an appropriate name - automatically. Name is primarily intended for creation idempotence - and configuration definition. Cannot be updated. More info: - http://kubernetes.io/docs/user-guide/identifiers#names' + description: |- + Name must be unique within a namespace. Is required when creating resources, although + some resources may allow a client to request the generation of an appropriate name + automatically. Name is primarily intended for creation idempotence and configuration + definition. + Cannot be updated. + More info: http://kubernetes.io/docs/user-guide/identifiers#names type: string namespace: - description: "Namespace defines the space within each name - must be unique. 
An empty namespace is equivalent to the - \"default\" namespace, but \"default\" is the canonical - representation. Not all objects are required to be scoped - to a namespace - the value of this field for those objects - will be empty. \n Must be a DNS_LABEL. Cannot be updated. - More info: http://kubernetes.io/docs/user-guide/namespaces" + description: |- + Namespace defines the space within each name must be unique. An empty namespace is + equivalent to the "default" namespace, but "default" is the canonical representation. + Not all objects are required to be scoped to a namespace - the value of this field for + those objects will be empty. + + Must be a DNS_LABEL. + Cannot be updated. + More info: http://kubernetes.io/docs/user-guide/namespaces type: string ownerReferences: - description: List of objects depended by this object. If ALL - objects in the list have been deleted, this object will - be garbage collected. If this object is managed by a controller, - then an entry in this list will point to this controller, - with the controller field set to true. There cannot be more - than one managing controller. + description: |- + List of objects depended by this object. If ALL objects in the list have + been deleted, this object will be garbage collected. If this object is managed by a controller, + then an entry in this list will point to this controller, with the controller field set to true. + There cannot be more than one managing controller. items: - description: OwnerReference contains enough information - to let you identify an owning object. An owning object - must be in the same namespace as the dependent, or be - cluster-scoped, so there is no namespace field. + description: |- + OwnerReference contains enough information to let you identify an owning + object. An owning object must be in the same namespace as the dependent, or + be cluster-scoped, so there is no namespace field. properties: apiVersion: description: API version of the referent. type: string blockOwnerDeletion: - description: If true, AND if the owner has the "foregroundDeletion" - finalizer, then the owner cannot be deleted from the - key-value store until this reference is removed. Defaults - to false. To set this field, a user needs "delete" - permission of the owner, otherwise 422 (Unprocessable - Entity) will be returned. + description: |- + If true, AND if the owner has the "foregroundDeletion" finalizer, then + the owner cannot be deleted from the key-value store until this + reference is removed. + See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion + for how the garbage collector interacts with this field and enforces the foreground deletion. + Defaults to false. + To set this field, a user needs "delete" permission of the owner, + otherwise 422 (Unprocessable Entity) will be returned. type: boolean controller: description: If true, this reference points to the managing controller. type: boolean kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + description: |- + Name of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names type: string uid: - description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids type: string required: - apiVersion @@ -185,13 +207,14 @@ spec: AdoptedResource. properties: conditions: - description: A collection of `ackv1alpha1.Condition` objects that - describe the various terminal states of the adopted resource CR - and its target custom resource + description: |- + A collection of `ackv1alpha1.Condition` objects that describe the various + terminal states of the adopted resource CR and its target custom resource items: - description: Condition is the common struct used by all CRDs managed - by ACK service controllers to indicate terminal states of the - CR and its backend AWS service API resource + description: |- + Condition is the common struct used by all CRDs managed by ACK service + controllers to indicate terminal states of the CR and its backend AWS + service API resource properties: lastTransitionTime: description: Last time the condition transitioned from one status diff --git a/helm/crds/services.k8s.aws_fieldexports.yaml b/helm/crds/services.k8s.aws_fieldexports.yaml index 4a7ab61b..49b4f383 100644 --- a/helm/crds/services.k8s.aws_fieldexports.yaml +++ b/helm/crds/services.k8s.aws_fieldexports.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.2 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.2 name: fieldexports.services.k8s.aws spec: group: services.k8s.aws @@ -21,14 +20,19 @@ spec: description: FieldExport is the schema for the FieldExport API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -36,15 +40,17 @@ spec: description: FieldExportSpec defines the desired state of the FieldExport. properties: from: - description: ResourceFieldSelector provides the values necessary to - identify an individual field on an individual K8s resource. 
+ description: |- + ResourceFieldSelector provides the values necessary to identify an individual + field on an individual K8s resource. properties: path: type: string resource: - description: NamespacedResource provides all the values necessary - to identify an ACK resource of a given type (within the same - namespace as the custom resource containing this type). + description: |- + NamespacedResource provides all the values necessary to identify an ACK + resource of a given type (within the same namespace as the custom resource + containing this type). properties: group: type: string @@ -62,16 +68,18 @@ spec: - resource type: object to: - description: FieldExportTarget provides the values necessary to identify - the output path for a field export. + description: |- + FieldExportTarget provides the values necessary to identify the + output path for a field export. properties: key: description: Key overrides the default value (`.`) for the FieldExport target type: string kind: - description: FieldExportOutputType represents all types that can - be produced by a field export operation + description: |- + FieldExportOutputType represents all types that can be produced by a field + export operation enum: - configmap - secret @@ -94,12 +102,14 @@ spec: description: FieldExportStatus defines the observed status of the FieldExport. properties: conditions: - description: A collection of `ackv1alpha1.Condition` objects that - describe the various recoverable states of the field CR + description: |- + A collection of `ackv1alpha1.Condition` objects that describe the various + recoverable states of the field CR items: - description: Condition is the common struct used by all CRDs managed - by ACK service controllers to indicate terminal states of the - CR and its backend AWS service API resource + description: |- + Condition is the common struct used by all CRDs managed by ACK service + controllers to indicate terminal states of the CR and its backend AWS + service API resource properties: lastTransitionTime: description: Last time the condition transitioned from one status diff --git a/helm/templates/NOTES.txt b/helm/templates/NOTES.txt index 803c88a2..e423c3fb 100644 --- a/helm/templates/NOTES.txt +++ b/helm/templates/NOTES.txt @@ -1,5 +1,5 @@ {{ .Chart.Name }} has been installed. -This chart deploys "public.ecr.aws/aws-controllers-k8s/elasticache-controller:v0.0.22". +This chart deploys "public.ecr.aws/aws-controllers-k8s/elasticache-controller:1.2.3". Check its status by running: kubectl --namespace {{ .Release.Namespace }} get pods -l "app.kubernetes.io/instance={{ .Release.Name }}" diff --git a/helm/templates/_helpers.tpl b/helm/templates/_helpers.tpl index 973704c1..f08a46d4 100644 --- a/helm/templates/_helpers.tpl +++ b/helm/templates/_helpers.tpl @@ -1,5 +1,5 @@ {{/* The name of the application this chart installs */}} -{{- define "app.name" -}} +{{- define "ack-elasticache-controller.app.name" -}} {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} {{- end -}} @@ -8,7 +8,7 @@ Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). If release name contains chart name it will be used as a full name. 
*/}} -{{- define "app.fullname" -}} +{{- define "ack-elasticache-controller.app.fullname" -}} {{- if .Values.fullnameOverride -}} {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} {{- else -}} @@ -22,27 +22,144 @@ If release name contains chart name it will be used as a full name. {{- end -}} {{/* The name and version as used by the chart label */}} -{{- define "chart.name-version" -}} +{{- define "ack-elasticache-controller.chart.name-version" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} {{- end -}} {{/* The name of the service account to use */}} -{{- define "service-account.name" -}} +{{- define "ack-elasticache-controller.service-account.name" -}} {{ default "default" .Values.serviceAccount.name }} {{- end -}} -{{- define "watch-namespace" -}} +{{- define "ack-elasticache-controller.watch-namespace" -}} {{- if eq .Values.installScope "namespace" -}} -{{- .Release.Namespace -}} +{{ .Values.watchNamespace | default .Release.Namespace }} {{- end -}} {{- end -}} {{/* The mount path for the shared credentials file */}} -{{- define "aws.credentials.secret_mount_path" -}} +{{- define "ack-elasticache-controller.aws.credentials.secret_mount_path" -}} {{- "/var/run/secrets/aws" -}} {{- end -}} {{/* The path the shared credentials file is mounted */}} -{{- define "aws.credentials.path" -}} -{{- printf "%s/%s" (include "aws.credentials.secret_mount_path" .) .Values.aws.credentials.secretKey -}} +{{- define "ack-elasticache-controller.aws.credentials.path" -}} +{{ $secret_mount_path := include "ack-elasticache-controller.aws.credentials.secret_mount_path" . }} +{{- printf "%s/%s" $secret_mount_path .Values.aws.credentials.secretKey -}} +{{- end -}} + +{{/* The rules a of ClusterRole or Role */}} +{{- define "ack-elasticache-controller.rbac-rules" -}} +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + verbs: + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - ec2.services.k8s.aws + resources: + - securitygroups + - securitygroups/status + - subnets + - subnets/status + verbs: + - get + - list +- apiGroups: + - elasticache.services.k8s.aws + resources: + - cacheclusters + - cacheparametergroups + - cachesubnetgroups + - replicationgroups + - serverlesscaches + - serverlesscachesnapshots + - snapshots + - usergroups + - users + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - elasticache.services.k8s.aws + resources: + - cacheclusters/status + - cacheparametergroups/status + - cachesubnetgroups/status + - replicationgroups/status + - serverlesscaches/status + - serverlesscachesnapshots/status + - snapshots/status + - usergroups/status + - users/status + verbs: + - get + - patch + - update +- apiGroups: + - kms.services.k8s.aws + resources: + - keys + - keys/status + verbs: + - get + - list +- apiGroups: + - services.k8s.aws + resources: + - adoptedresources + - fieldexports + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - services.k8s.aws + resources: + - adoptedresources/status + - fieldexports/status + verbs: + - get + - patch + - update +- apiGroups: + - sns.services.k8s.aws + resources: + - topics + - topics/status + verbs: + - get + - list +{{- end }} + +{{/* Convert k/v map to string like: "key1=value1,key2=value2,..." 
*/}} +{{- define "ack-elasticache-controller.feature-gates" -}} +{{- $list := list -}} +{{- range $k, $v := .Values.featureGates -}} +{{- $list = append $list (printf "%s=%s" $k ( $v | toString)) -}} +{{- end -}} +{{ join "," $list }} {{- end -}} diff --git a/helm/templates/caches-role-binding.yaml b/helm/templates/caches-role-binding.yaml new file mode 100644 index 00000000..3205ee2e --- /dev/null +++ b/helm/templates/caches-role-binding.yaml @@ -0,0 +1,40 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "ack-elasticache-controller.app.fullname" . }}-namespaces-cache + labels: + app.kubernetes.io/name: {{ include "ack-elasticache-controller.app.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} + k8s-app: {{ include "ack-elasticache-controller.app.name" . }} + helm.sh/chart: {{ include "ack-elasticache-controller.chart.name-version" . }} +roleRef: + kind: ClusterRole + apiGroup: rbac.authorization.k8s.io + name: {{ include "ack-elasticache-controller.app.fullname" . }}-namespaces-cache +subjects: +- kind: ServiceAccount + name: {{ include "ack-elasticache-controller.service-account.name" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "ack-elasticache-controller.app.fullname" . }}-configmaps-cache + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ include "ack-elasticache-controller.app.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} + k8s-app: {{ include "ack-elasticache-controller.app.name" . }} + helm.sh/chart: {{ include "ack-elasticache-controller.chart.name-version" . }} +roleRef: + kind: Role + apiGroup: rbac.authorization.k8s.io + name: {{ include "ack-elasticache-controller.app.fullname" . }}-configmaps-cache +subjects: +- kind: ServiceAccount + name: {{ include "ack-elasticache-controller.service-account.name" . }} + namespace: {{ .Release.Namespace }} diff --git a/helm/templates/caches-role.yaml b/helm/templates/caches-role.yaml new file mode 100644 index 00000000..276817b1 --- /dev/null +++ b/helm/templates/caches-role.yaml @@ -0,0 +1,42 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "ack-elasticache-controller.app.fullname" . }}-namespaces-cache + labels: + app.kubernetes.io/name: {{ include "ack-elasticache-controller.app.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} + k8s-app: {{ include "ack-elasticache-controller.app.name" . }} + helm.sh/chart: {{ include "ack-elasticache-controller.chart.name-version" . }} +rules: +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "ack-elasticache-controller.app.fullname" . }}-configmaps-cache + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ include "ack-elasticache-controller.app.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} + k8s-app: {{ include "ack-elasticache-controller.app.name" . 
}} + helm.sh/chart: {{ include "ack-elasticache-controller.chart.name-version" . }} +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch \ No newline at end of file diff --git a/helm/templates/cluster-role-binding.yaml b/helm/templates/cluster-role-binding.yaml index 6b8a3178..61357245 100644 --- a/helm/templates/cluster-role-binding.yaml +++ b/helm/templates/cluster-role-binding.yaml @@ -1,21 +1,52 @@ -apiVersion: rbac.authorization.k8s.io/v1 {{ if eq .Values.installScope "cluster" }} +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: {{ include "app.fullname" . }} + name: {{ include "ack-elasticache-controller.app.fullname" . }}-rolebinding + labels: + app.kubernetes.io/name: {{ include "ack-elasticache-controller.app.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} + k8s-app: {{ include "ack-elasticache-controller.app.name" . }} + helm.sh/chart: {{ include "ack-elasticache-controller.chart.name-version" . }} roleRef: kind: ClusterRole -{{ else }} + apiGroup: rbac.authorization.k8s.io + name: {{ include "ack-elasticache-controller.app.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ include "ack-elasticache-controller.service-account.name" . }} + namespace: {{ .Release.Namespace }} +{{ else if eq .Values.installScope "namespace" }} +{{ $wn := include "ack-elasticache-controller.watch-namespace" . }} +{{ $namespaces := split "," $wn }} +{{ $fullname := include "ack-elasticache-controller.app.fullname" . }} +{{ $releaseNamespace := .Release.Namespace }} +{{ $serviceAccountName := include "ack-elasticache-controller.service-account.name" . }} +{{ $chartVersion := include "ack-elasticache-controller.chart.name-version" . }} +{{ $appVersion := .Chart.AppVersion | quote }} +{{ range $namespaces }} +--- +apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: - name: {{ include "app.fullname" . }} - namespace: {{ .Release.Namespace }} + name: {{ $fullname }}-{{ . }} + namespace: {{ . }} + labels: + app.kubernetes.io/name: {{ $fullname }} + app.kubernetes.io/instance: {{ $.Release.Name }} + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: {{ $appVersion }} + k8s-app: {{ $fullname }} + helm.sh/chart: {{ $chartVersion }} roleRef: kind: Role -{{ end }} apiGroup: rbac.authorization.k8s.io - name: ack-elasticache-controller + name: {{ $fullname }}-{{ . }} subjects: - kind: ServiceAccount - name: {{ include "service-account.name" . }} - namespace: {{ .Release.Namespace }} + name: {{ $serviceAccountName }} + namespace: {{ $releaseNamespace }} +{{ end }} +{{ end }} \ No newline at end of file diff --git a/helm/templates/cluster-role-controller.yaml b/helm/templates/cluster-role-controller.yaml index 4b59a55e..489b841e 100644 --- a/helm/templates/cluster-role-controller.yaml +++ b/helm/templates/cluster-role-controller.yaml @@ -1,208 +1,44 @@ -apiVersion: rbac.authorization.k8s.io/v1 +{{ $labels := .Values.role.labels }} +{{ $appVersion := .Chart.AppVersion | quote }} +{{ $rbacRules := include "ack-elasticache-controller.rbac-rules" . }} +{{ $fullname := include "ack-elasticache-controller.app.fullname" . }} +{{ $chartVersion := include "ack-elasticache-controller.chart.name-version" . 
}} {{ if eq .Values.installScope "cluster" }} +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - creationTimestamp: null - name: ack-elasticache-controller + name: {{ include "ack-elasticache-controller.app.fullname" . }} labels: - {{- range $key, $value := .Values.role.labels }} + app.kubernetes.io/name: {{ include "ack-elasticache-controller.app.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} + k8s-app: {{ include "ack-elasticache-controller.app.name" . }} + helm.sh/chart: {{ include "ack-elasticache-controller.chart.name-version" . }} + {{- range $key, $value := $labels }} {{ $key }}: {{ $value | quote }} {{- end }} -{{ else }} +{{$rbacRules }} +{{ else if eq .Values.installScope "namespace" }} +{{ $wn := include "ack-elasticache-controller.watch-namespace" . }} +{{ $namespaces := split "," $wn }} +{{ range $namespaces }} +--- +apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - creationTimestamp: null - name: ack-elasticache-controller + name: {{ $fullname }}-{{ . }} + namespace: {{ . }} labels: - {{- range $key, $value := .Values.role.labels }} + app.kubernetes.io/name: {{ $fullname }} + app.kubernetes.io/instance: {{ $.Release.Name }} + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: {{ $appVersion }} + k8s-app: {{ $fullname }} + helm.sh/chart: {{ $chartVersion }} + {{- range $key, $value := $labels }} {{ $key }}: {{ $value | quote }} {{- end }} - namespace: {{ .Release.Namespace }} +{{ $rbacRules }} {{ end }} -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - secrets - verbs: - - get - - list - - patch - - watch -- apiGroups: - - elasticache.services.k8s.aws - resources: - - cacheparametergroups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - elasticache.services.k8s.aws - resources: - - cacheparametergroups/status - verbs: - - get - - patch - - update -- apiGroups: - - elasticache.services.k8s.aws - resources: - - cachesubnetgroups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - elasticache.services.k8s.aws - resources: - - cachesubnetgroups/status - verbs: - - get - - patch - - update -- apiGroups: - - elasticache.services.k8s.aws - resources: - - replicationgroups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - elasticache.services.k8s.aws - resources: - - replicationgroups/status - verbs: - - get - - patch - - update -- apiGroups: - - elasticache.services.k8s.aws - resources: - - snapshots - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - elasticache.services.k8s.aws - resources: - - snapshots/status - verbs: - - get - - patch - - update -- apiGroups: - - elasticache.services.k8s.aws - resources: - - usergroups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - elasticache.services.k8s.aws - resources: - - usergroups/status - verbs: - - get - - patch - - update -- apiGroups: - - elasticache.services.k8s.aws - resources: - - users - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - elasticache.services.k8s.aws - resources: - - users/status - verbs: - - get - - patch - - 
update -- apiGroups: - - services.k8s.aws - resources: - - adoptedresources - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - services.k8s.aws - resources: - - adoptedresources/status - verbs: - - get - - patch - - update -- apiGroups: - - services.k8s.aws - resources: - - fieldexports - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - services.k8s.aws - resources: - - fieldexports/status - verbs: - - get - - patch - - update +{{ end }} \ No newline at end of file diff --git a/helm/templates/deployment.yaml b/helm/templates/deployment.yaml index b3c0edef..5291bef3 100644 --- a/helm/templates/deployment.yaml +++ b/helm/templates/deployment.yaml @@ -1,37 +1,42 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: {{ include "app.fullname" . }} + name: {{ include "ack-elasticache-controller.app.fullname" . }} namespace: {{ .Release.Namespace }} labels: - app.kubernetes.io/name: {{ include "app.name" . }} + app.kubernetes.io/name: {{ include "ack-elasticache-controller.app.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: Helm app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} - k8s-app: {{ include "app.name" . }} - helm.sh/chart: {{ include "chart.name-version" . }} + k8s-app: {{ include "ack-elasticache-controller.app.name" . }} + helm.sh/chart: {{ include "ack-elasticache-controller.chart.name-version" . }} +{{- range $key, $value := .Values.deployment.labels }} + {{ $key }}: {{ $value | quote }} +{{- end }} spec: - replicas: 1 + replicas: {{ .Values.deployment.replicas }} selector: matchLabels: - app.kubernetes.io/name: {{ include "app.name" . }} + app.kubernetes.io/name: {{ include "ack-elasticache-controller.app.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} template: metadata: +{{- if .Values.deployment.annotations }} annotations: {{- range $key, $value := .Values.deployment.annotations }} {{ $key }}: {{ $value | quote }} {{- end }} +{{- end }} labels: - app.kubernetes.io/name: {{ include "app.name" . }} + app.kubernetes.io/name: {{ include "ack-elasticache-controller.app.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: Helm - k8s-app: {{ include "app.name" . }} + k8s-app: {{ include "ack-elasticache-controller.app.name" . }} {{- range $key, $value := .Values.deployment.labels }} {{ $key }}: {{ $value | quote }} {{- end }} spec: - serviceAccountName: {{ include "service-account.name" . }} + serviceAccountName: {{ include "ack-elasticache-controller.service-account.name" . 
}} {{- if .Values.image.pullSecrets }} imagePullSecrets: {{- range .Values.image.pullSecrets }} @@ -46,17 +51,27 @@ spec: - "$(AWS_REGION)" - --aws-endpoint-url - "$(AWS_ENDPOINT_URL)" +{{- if .Values.log.enable_development_logging }} - --enable-development-logging - - "$(ACK_ENABLE_DEVELOPMENT_LOGGING)" +{{- end }} - --log-level - "$(ACK_LOG_LEVEL)" - --resource-tags - "$(ACK_RESOURCE_TAGS)" - --watch-namespace - "$(ACK_WATCH_NAMESPACE)" + - --watch-selectors + - "$(ACK_WATCH_SELECTORS)" + - --reconcile-resources + - "$(RECONCILE_RESOURCES)" - --deletion-policy - "$(DELETION_POLICY)" -{{- if gt .Values.reconcile.defaultResyncPeriod 0.0 }} +{{- if .Values.leaderElection.enabled }} + - --enable-leader-election + - --leader-election-namespace + - "$(LEADER_ELECTION_NAMESPACE)" +{{- end }} +{{- if gt (int .Values.reconcile.defaultResyncPeriod) 0 }} - --reconcile-default-resync-seconds - "$(RECONCILE_DEFAULT_RESYNC_SECONDS)" {{- end }} @@ -64,6 +79,19 @@ spec: - --reconcile-resource-resync-seconds - "$(RECONCILE_RESOURCE_RESYNC_SECONDS_{{ $key | upper }})" {{- end }} +{{- if gt (int .Values.reconcile.defaultMaxConcurrentSyncs) 0 }} + - --reconcile-default-max-concurrent-syncs + - "$(RECONCILE_DEFAULT_MAX_CONCURRENT_SYNCS)" +{{- end }} +{{- range $key, $value := .Values.reconcile.resourceMaxConcurrentSyncs }} + - --reconcile-resource-max-concurrent-syncs + - "$(RECONCILE_RESOURCE_MAX_CONCURRENT_SYNCS_{{ $key | upper }})" +{{- end }} +{{- if .Values.featureGates}} + - --feature-gates + - "$(FEATURE_GATES)" +{{- end }} + - --enable-carm={{ .Values.enableCARM }} image: {{ .Values.image.repository }}:{{ .Values.image.tag }} imagePullPolicy: {{ .Values.image.pullPolicy }} name: controller @@ -82,40 +110,82 @@ spec: - name: AWS_ENDPOINT_URL value: {{ .Values.aws.endpoint_url | quote }} - name: ACK_WATCH_NAMESPACE - value: {{ include "watch-namespace" . }} + value: {{ include "ack-elasticache-controller.watch-namespace" . }} + - name: ACK_WATCH_SELECTORS + value: {{ .Values.watchSelectors }} + - name: RECONCILE_RESOURCES + value: {{ join "," .Values.reconcile.resources | quote }} - name: DELETION_POLICY value: {{ .Values.deletionPolicy }} - - name: ACK_ENABLE_DEVELOPMENT_LOGGING - value: {{ .Values.log.enable_development_logging | quote }} + - name: LEADER_ELECTION_NAMESPACE + value: {{ .Values.leaderElection.namespace | quote }} - name: ACK_LOG_LEVEL value: {{ .Values.log.level | quote }} - name: ACK_RESOURCE_TAGS value: {{ join "," .Values.resourceTags | quote }} -{{- if gt .Values.reconcile.defaultResyncPeriod 0.0 }} +{{- if gt (int .Values.reconcile.defaultResyncPeriod) 0 }} - name: RECONCILE_DEFAULT_RESYNC_SECONDS value: {{ .Values.reconcile.defaultResyncPeriod | quote }} {{- end }} {{- range $key, $value := .Values.reconcile.resourceResyncPeriods }} - name: RECONCILE_RESOURCE_RESYNC_SECONDS_{{ $key | upper }} value: {{ $key }}={{ $value }} +{{- end }} +{{- if gt (int .Values.reconcile.defaultMaxConcurrentSyncs) 0 }} + - name: RECONCILE_DEFAULT_MAX_CONCURRENT_SYNCS + value: {{ .Values.reconcile.defaultMaxConcurrentSyncs | quote }} +{{- end }} +{{- range $key, $value := .Values.reconcile.resourceMaxConcurrentSyncs }} + - name: RECONCILE_RESOURCE_MAX_CONCURRENT_SYNCS_{{ $key | upper }} + value: {{ $key }}={{ $value }} +{{- end }} +{{- if .Values.featureGates}} + - name: FEATURE_GATES + value: {{ include "ack-elasticache-controller.feature-gates" . }} {{- end }} {{- if .Values.aws.credentials.secretName }} - name: AWS_SHARED_CREDENTIALS_FILE - value: {{ include "aws.credentials.path" . 
}} + value: {{ include "ack-elasticache-controller.aws.credentials.path" . }} - name: AWS_PROFILE value: {{ .Values.aws.credentials.profile }} + {{- end }} + {{- if .Values.deployment.extraEnvVars -}} + {{ toYaml .Values.deployment.extraEnvVars | nindent 8 }} + {{- end }} + {{- if or .Values.aws.credentials.secretName .Values.deployment.extraVolumeMounts }} volumeMounts: + {{- if .Values.aws.credentials.secretName }} - name: {{ .Values.aws.credentials.secretName }} - mountPath: {{ include "aws.credentials.secret_mount_path" . }} + mountPath: {{ include "ack-elasticache-controller.aws.credentials.secret_mount_path" . }} readOnly: true {{- end }} + {{- if .Values.deployment.extraVolumeMounts -}} + {{ toYaml .Values.deployment.extraVolumeMounts | nindent 10 }} + {{- end }} + {{- end }} securityContext: allowPrivilegeEscalation: false privileged: false + readOnlyRootFilesystem: true runAsNonRoot: true capabilities: drop: - ALL + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + securityContext: + seccompProfile: + type: RuntimeDefault terminationGracePeriodSeconds: 10 nodeSelector: {{ toYaml .Values.deployment.nodeSelector | nindent 8 }} {{ if .Values.deployment.tolerations -}} @@ -128,11 +198,20 @@ spec: priorityClassName: {{ .Values.deployment.priorityClassName }} {{ end -}} hostIPC: false - hostNetwork: false hostPID: false - {{ if .Values.aws.credentials.secretName -}} + hostNetwork: {{ .Values.deployment.hostNetwork }} + dnsPolicy: {{ .Values.deployment.dnsPolicy }} + {{- if or .Values.aws.credentials.secretName .Values.deployment.extraVolumes }} volumes: + {{- if .Values.aws.credentials.secretName }} - name: {{ .Values.aws.credentials.secretName }} secret: secretName: {{ .Values.aws.credentials.secretName }} - {{ end -}} + {{- end }} + {{- if .Values.deployment.extraVolumes }} + {{- toYaml .Values.deployment.extraVolumes | nindent 8 }} + {{- end }} + {{- end }} + {{- with .Values.deployment.strategy }} + strategy: {{- toYaml . | nindent 4 }} + {{- end }} diff --git a/helm/templates/leader-election-role-binding.yaml b/helm/templates/leader-election-role-binding.yaml new file mode 100644 index 00000000..a2b6e292 --- /dev/null +++ b/helm/templates/leader-election-role-binding.yaml @@ -0,0 +1,25 @@ +{{ if .Values.leaderElection.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "ack-elasticache-controller.app.fullname" . }}-leaderelection +{{ if .Values.leaderElection.namespace }} + namespace: {{ .Values.leaderElection.namespace }} +{{ else }} + namespace: {{ .Release.Namespace }} +{{ end }} + labels: + app.kubernetes.io/name: {{ include "ack-elasticache-controller.app.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} + k8s-app: {{ include "ack-elasticache-controller.app.name" . }} + helm.sh/chart: {{ include "ack-elasticache-controller.chart.name-version" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "ack-elasticache-controller.app.fullname" . }}-leaderelection +subjects: +- kind: ServiceAccount + name: {{ include "ack-elasticache-controller.service-account.name" . 
}} + namespace: {{ .Release.Namespace }}{{- end }} diff --git a/helm/templates/leader-election-role.yaml b/helm/templates/leader-election-role.yaml new file mode 100644 index 00000000..5724c113 --- /dev/null +++ b/helm/templates/leader-election-role.yaml @@ -0,0 +1,37 @@ +{{ if .Values.leaderElection.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "ack-elasticache-controller.app.fullname" . }}-leaderelection +{{ if .Values.leaderElection.namespace }} + namespace: {{ .Values.leaderElection.namespace }} +{{ else }} + namespace: {{ .Release.Namespace }} +{{ end }} + labels: + app.kubernetes.io/name: {{ include "ack-elasticache-controller.app.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} + k8s-app: {{ include "ack-elasticache-controller.app.name" . }} + helm.sh/chart: {{ include "ack-elasticache-controller.chart.name-version" . }} +rules: +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch{{- end }} diff --git a/helm/templates/metrics-service.yaml b/helm/templates/metrics-service.yaml index 638858a3..da484622 100644 --- a/helm/templates/metrics-service.yaml +++ b/helm/templates/metrics-service.yaml @@ -5,18 +5,18 @@ metadata: name: {{ .Chart.Name | trimSuffix "-chart" | trunc 44 }}-controller-metrics namespace: {{ .Release.Namespace }} labels: - app.kubernetes.io/name: {{ include "app.name" . }} + app.kubernetes.io/name: {{ include "ack-elasticache-controller.app.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: Helm app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} - k8s-app: {{ include "app.name" . }} - helm.sh/chart: {{ include "chart.name-version" . }} + k8s-app: {{ include "ack-elasticache-controller.app.name" . }} + helm.sh/chart: {{ include "ack-elasticache-controller.chart.name-version" . }} spec: selector: - app.kubernetes.io/name: {{ include "app.name" . }} + app.kubernetes.io/name: {{ include "ack-elasticache-controller.app.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: Helm - k8s-app: {{ include "app.name" . }} + k8s-app: {{ include "ack-elasticache-controller.app.name" . }} {{- range $key, $value := .Values.deployment.labels }} {{ $key }}: {{ $value | quote }} {{- end }} diff --git a/helm/templates/role-reader.yaml b/helm/templates/role-reader.yaml index aa520863..acc08279 100644 --- a/helm/templates/role-reader.yaml +++ b/helm/templates/role-reader.yaml @@ -3,15 +3,25 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: creationTimestamp: null - name: ack-elasticache-reader + name: {{ include "ack-elasticache-controller.app.fullname" . }}-reader namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ include "ack-elasticache-controller.app.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} + k8s-app: {{ include "ack-elasticache-controller.app.name" . }} + helm.sh/chart: {{ include "ack-elasticache-controller.chart.name-version" . 
}} rules: - apiGroups: - elasticache.services.k8s.aws resources: + - cacheclusters - cacheparametergroups - cachesubnetgroups - replicationgroups + - serverlesscaches + - serverlesscachesnapshots - snapshots - users - usergroups diff --git a/helm/templates/role-writer.yaml b/helm/templates/role-writer.yaml index 8ec6e3e0..5e8f19b7 100644 --- a/helm/templates/role-writer.yaml +++ b/helm/templates/role-writer.yaml @@ -3,24 +3,28 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: creationTimestamp: null - name: ack-elasticache-writer + name: {{ include "ack-elasticache-controller.app.fullname" . }}-writer namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ include "ack-elasticache-controller.app.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} + k8s-app: {{ include "ack-elasticache-controller.app.name" . }} + helm.sh/chart: {{ include "ack-elasticache-controller.chart.name-version" . }} rules: - apiGroups: - elasticache.services.k8s.aws resources: + - cacheclusters - cacheparametergroups - - cachesubnetgroups - - replicationgroups - + - serverlesscaches + - serverlesscachesnapshots - snapshots - - users - - usergroups - verbs: - create - delete @@ -32,9 +36,12 @@ rules: - apiGroups: - elasticache.services.k8s.aws resources: + - cacheclusters - cacheparametergroups - cachesubnetgroups - replicationgroups + - serverlesscaches + - serverlesscachesnapshots - snapshots - users - usergroups diff --git a/helm/templates/service-account.yaml b/helm/templates/service-account.yaml index 73306395..370b709a 100644 --- a/helm/templates/service-account.yaml +++ b/helm/templates/service-account.yaml @@ -3,13 +3,13 @@ apiVersion: v1 kind: ServiceAccount metadata: labels: - app.kubernetes.io/name: {{ include "app.name" . }} + app.kubernetes.io/name: {{ include "ack-elasticache-controller.app.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: Helm app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} - k8s-app: {{ include "app.name" . }} - helm.sh/chart: {{ include "chart.name-version" . }} - name: {{ include "service-account.name" . }} + k8s-app: {{ include "ack-elasticache-controller.app.name" . }} + helm.sh/chart: {{ include "ack-elasticache-controller.chart.name-version" . }} + name: {{ include "ack-elasticache-controller.service-account.name" . }} namespace: {{ .Release.Namespace }} annotations: {{- range $key, $value := .Values.serviceAccount.annotations }} diff --git a/helm/values.schema.json b/helm/values.schema.json index d5a8d35e..c3f56a0c 100644 --- a/helm/values.schema.json +++ b/helm/values.schema.json @@ -47,6 +47,9 @@ "minimum": 1, "maximum": 65535 }, + "replicas": { + "type": "integer" + }, "nodeSelector": { "type": "object" }, @@ -58,6 +61,15 @@ }, "priorityClassName": { "type": "string" + }, + "extraVolumeMounts": { + "type": "array" + }, + "extraVolumes": { + "type": "array" + }, + "extraEnvVars": { + "type": "array" } }, "required": [ @@ -196,6 +208,12 @@ "type": "string", "enum": ["cluster", "namespace"] }, + "watchNamespace": { + "type": "string" + }, + "watchSelectors": { + "type": "string" + }, "resourceTags": { "type": "array", "items": { @@ -208,17 +226,48 @@ "enum": ["delete", "retain"] }, "reconcile": { - "description": "Reconcile resync settings. Parameters to tune the controller's drift remediation period.", + "description": "Reconcile settings. 
This is used to configure the controller's reconciliation behavior. e.g. resyncPeriod and maxConcurrentSyncs", "properties": { "defaultResyncPeriod": { "type": "number" }, "resourceResyncPeriods": { "type": "object" + }, + "defaultMaxConcurrentSyncs": { + "type": "number" + }, + "resourceMaxConcurrentSyncs": { + "type": "object" + }, + "resources": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of resource kinds to reconcile. If empty, all resources will be reconciled.", + "default": [] } }, "type": "object" }, + "leaderElection": { + "description": "Parameter to configure the controller's leader election system.", + "properties": { + "enabled": { + "type": "boolean" + }, + "namespace": { + "type": "string" + } + }, + "type": "object" + }, + "enableCARM": { + "description": "Parameter to enable or disable cross account resource management.", + "type": "boolean", + "default": true + }, "serviceAccount": { "description": "ServiceAccount settings", "properties": { @@ -235,6 +284,13 @@ "type": "object" } }, + "featureGates": { + "description": "Feature gates settings", + "type": "object", + "additionalProperties": { + "type": "boolean" + } + }, "required": [ "image", "deployment", diff --git a/helm/values.yaml b/helm/values.yaml index d413aa51..96aaf152 100644 --- a/helm/values.yaml +++ b/helm/values.yaml @@ -4,7 +4,7 @@ image: repository: public.ecr.aws/aws-controllers-k8s/elasticache-controller - tag: v0.0.22 + tag: 1.2.3 pullPolicy: IfNotPresent pullSecrets: [] @@ -15,6 +15,10 @@ deployment: annotations: {} labels: {} containerPort: 8080 + # Number of Deployment replicas + # This determines how many instances of the controller will be running. It's recommended + # to enable leader election if you increase the number of replicas beyond 1 + replicas: 1 # Which nodeSelector to set? # See: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector nodeSelector: @@ -28,11 +32,43 @@ deployment: # Which priorityClassName to set? # See: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#pod-priority priorityClassName: "" + # If true, the controller Pod runs in the host's network namespace instead of its own. + # Defaults to false. + hostNetwork: false + # Set DNS policy for the pod. + # Defaults to "ClusterFirst". + # Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + # To have DNS options set along with hostNetwork, you have to specify DNS policy + # explicitly to 'ClusterFirstWithHostNet'. + dnsPolicy: ClusterFirst + # Set rollout strategy for deployment. + # See: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy + strategy: {} + extraVolumes: [] + extraVolumeMounts: [] + + # Additional server container environment variables + # + # You specify this manually like you would a raw deployment manifest. + # This means you can bind in environment variables from secrets. + # + # e.g. static environment variable: + # - name: DEMO_GREETING + # value: "Hello from the environment" + # + # e.g.
secret environment variable: + # - name: USERNAME + # valueFrom: + # secretKeyRef: + # name: mysecret + # key: username + extraEnvVars: [] + # If "installScope: cluster" then these labels will be applied to ClusterRole role: - labels: {} - + labels: {} + metrics: service: # Set to true to automatically create a Kubernetes Service resource for the @@ -60,7 +96,7 @@ aws: # Secret stringData key that contains the credentials secretKey: "credentials" # Profile used for AWS credentials - profile: "default" + profile: "default" # log level for the controller log: @@ -72,6 +108,15 @@ log: # cluster wide. installScope: cluster +# Set the value of the "namespace" to be watched by the controller +# This value is only used when the `installScope` is set to "namespace". If left empty, the default value is the release namespace for the chart. +# You can set multiple namespaces by providing a comma separated list of namespaces. e.g "namespace1,namespace2" +watchNamespace: "" + +# Set the value of labelsSelectors to be used by the controller to filter the resources to watch. +# You can set multiple labelsSelectors by providing a comma separated list of a=b arguments. e.g "label1=value1,label2=value2" +watchSelectors: "" + resourceTags: # Configures the ACK service controller to always set key/value pairs tags on # resources that it manages. @@ -86,10 +131,30 @@ deletionPolicy: delete # controller reconciliation configurations reconcile: # The default duration, in seconds, to wait before resyncing desired state of custom resources. - defaultResyncPeriod: 0 + defaultResyncPeriod: 36000 # 10 Hours # An object representing the reconcile resync configuration for each specific resource. resourceResyncPeriods: {} + # The default number of concurrent syncs that a reconciler can perform. + defaultMaxConcurrentSyncs: 1 + # An object representing the reconcile max concurrent syncs configuration for each specific + # resource. + resourceMaxConcurrentSyncs: {} + + # Set the value of resources to specify which resource kinds to reconcile. + # If empty, all resources will be reconciled. + # If specified, only the listed resource kinds will be reconciled. + resources: + - CacheCluster + - CacheParameterGroup + - CacheSubnetGroup + - ReplicationGroup + - ServerlessCache + - ServerlessCacheSnapshot + - Snapshot + - User + - UserGroup + serviceAccount: # Specifies whether a service account should be created create: true @@ -97,3 +162,31 @@ serviceAccount: name: ack-elasticache-controller annotations: {} # eks.amazonaws.com/role-arn: arn:aws:iam::AWS_ACCOUNT_ID:role/IAM_ROLE_NAME + +# Configuration of the leader election. Required for running multiple instances of the +# controller within the same cluster. +# See https://kubernetes.io/docs/concepts/architecture/leases/#leader-election +leaderElection: + # Enable Controller Leader Election. Set this to true to enable leader election + # for this controller. + enabled: false + # Leader election can be scoped to a specific namespace. By default, the controller + # will attempt to use the namespace of the service account mounted to the Controller + # pod. + namespace: "" + +# Enable Cross Account Resource Management (default = true). Set this to false to disable cross account resource management. +enableCARM: true + +# Configuration for feature gates. These are optional controller features that +# can be individually enabled ("true") or disabled ("false") by adding key/value +# pairs below. +featureGates: + # Enables the Service level granularity for CARM. 
See https://github.com/aws-controllers-k8s/community/issues/2031 + ServiceLevelCARM: false + # Enables the Team level granularity for CARM. See https://github.com/aws-controllers-k8s/community/issues/2031 + TeamLevelCARM: false + # Enable ReadOnlyResources feature/annotation. + ReadOnlyResources: true + # Enable ResourceAdoption feature/annotation. + ResourceAdoption: true \ No newline at end of file diff --git a/metadata.yaml b/metadata.yaml index a6609be8..70ec689a 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -2,7 +2,7 @@ service: full_name: "Amazon ElastiCache" short_name: "ElastiCache" link: "https://aws.amazon.com/elasticache/" - documentation: "https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/index.html" + documentation: "https://docs.aws.amazon.com/AmazonElastiCache/latest/dg/WhatIs.html" api_versions: - api_version: v1alpha1 status: available diff --git a/mocks/aws-sdk-go/elasticache/ElastiCacheAPI.go b/mocks/aws-sdk-go/elasticache/ElastiCacheAPI.go deleted file mode 100644 index 112b29f5..00000000 --- a/mocks/aws-sdk-go/elasticache/ElastiCacheAPI.go +++ /dev/null @@ -1,5823 +0,0 @@ -// Code generated by mockery v2.2.2. DO NOT EDIT. - -package mocks - -import ( - context "context" - - elasticache "github.com/aws/aws-sdk-go/service/elasticache" - - mock "github.com/stretchr/testify/mock" - - request "github.com/aws/aws-sdk-go/aws/request" -) - -// ElastiCacheAPI is an autogenerated mock type for the ElastiCacheAPI type -type ElastiCacheAPI struct { - mock.Mock -} - -// AddTagsToResource provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) AddTagsToResource(_a0 *elasticache.AddTagsToResourceInput) (*elasticache.TagListMessage, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.TagListMessage - if rf, ok := ret.Get(0).(func(*elasticache.AddTagsToResourceInput) *elasticache.TagListMessage); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.TagListMessage) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.AddTagsToResourceInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AddTagsToResourceRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) AddTagsToResourceRequest(_a0 *elasticache.AddTagsToResourceInput) (*request.Request, *elasticache.TagListMessage) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.AddTagsToResourceInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.TagListMessage - if rf, ok := ret.Get(1).(func(*elasticache.AddTagsToResourceInput) *elasticache.TagListMessage); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.TagListMessage) - } - } - - return r0, r1 -} - -// AddTagsToResourceWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) AddTagsToResourceWithContext(_a0 context.Context, _a1 *elasticache.AddTagsToResourceInput, _a2 ...request.Option) (*elasticache.TagListMessage, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
- - var r0 *elasticache.TagListMessage - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.AddTagsToResourceInput, ...request.Option) *elasticache.TagListMessage); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.TagListMessage) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.AddTagsToResourceInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AuthorizeCacheSecurityGroupIngress provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) AuthorizeCacheSecurityGroupIngress(_a0 *elasticache.AuthorizeCacheSecurityGroupIngressInput) (*elasticache.AuthorizeCacheSecurityGroupIngressOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.AuthorizeCacheSecurityGroupIngressOutput - if rf, ok := ret.Get(0).(func(*elasticache.AuthorizeCacheSecurityGroupIngressInput) *elasticache.AuthorizeCacheSecurityGroupIngressOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.AuthorizeCacheSecurityGroupIngressOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.AuthorizeCacheSecurityGroupIngressInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AuthorizeCacheSecurityGroupIngressRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) AuthorizeCacheSecurityGroupIngressRequest(_a0 *elasticache.AuthorizeCacheSecurityGroupIngressInput) (*request.Request, *elasticache.AuthorizeCacheSecurityGroupIngressOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.AuthorizeCacheSecurityGroupIngressInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.AuthorizeCacheSecurityGroupIngressOutput - if rf, ok := ret.Get(1).(func(*elasticache.AuthorizeCacheSecurityGroupIngressInput) *elasticache.AuthorizeCacheSecurityGroupIngressOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.AuthorizeCacheSecurityGroupIngressOutput) - } - } - - return r0, r1 -} - -// AuthorizeCacheSecurityGroupIngressWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) AuthorizeCacheSecurityGroupIngressWithContext(_a0 context.Context, _a1 *elasticache.AuthorizeCacheSecurityGroupIngressInput, _a2 ...request.Option) (*elasticache.AuthorizeCacheSecurityGroupIngressOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.AuthorizeCacheSecurityGroupIngressOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.AuthorizeCacheSecurityGroupIngressInput, ...request.Option) *elasticache.AuthorizeCacheSecurityGroupIngressOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.AuthorizeCacheSecurityGroupIngressOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.AuthorizeCacheSecurityGroupIngressInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BatchApplyUpdateAction provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) BatchApplyUpdateAction(_a0 *elasticache.BatchApplyUpdateActionInput) (*elasticache.BatchApplyUpdateActionOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.BatchApplyUpdateActionOutput - if rf, ok := ret.Get(0).(func(*elasticache.BatchApplyUpdateActionInput) *elasticache.BatchApplyUpdateActionOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.BatchApplyUpdateActionOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.BatchApplyUpdateActionInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BatchApplyUpdateActionRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) BatchApplyUpdateActionRequest(_a0 *elasticache.BatchApplyUpdateActionInput) (*request.Request, *elasticache.BatchApplyUpdateActionOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.BatchApplyUpdateActionInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.BatchApplyUpdateActionOutput - if rf, ok := ret.Get(1).(func(*elasticache.BatchApplyUpdateActionInput) *elasticache.BatchApplyUpdateActionOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.BatchApplyUpdateActionOutput) - } - } - - return r0, r1 -} - -// BatchApplyUpdateActionWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) BatchApplyUpdateActionWithContext(_a0 context.Context, _a1 *elasticache.BatchApplyUpdateActionInput, _a2 ...request.Option) (*elasticache.BatchApplyUpdateActionOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.BatchApplyUpdateActionOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.BatchApplyUpdateActionInput, ...request.Option) *elasticache.BatchApplyUpdateActionOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.BatchApplyUpdateActionOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.BatchApplyUpdateActionInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BatchStopUpdateAction provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) BatchStopUpdateAction(_a0 *elasticache.BatchStopUpdateActionInput) (*elasticache.BatchStopUpdateActionOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.BatchStopUpdateActionOutput - if rf, ok := ret.Get(0).(func(*elasticache.BatchStopUpdateActionInput) *elasticache.BatchStopUpdateActionOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.BatchStopUpdateActionOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.BatchStopUpdateActionInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BatchStopUpdateActionRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) BatchStopUpdateActionRequest(_a0 *elasticache.BatchStopUpdateActionInput) (*request.Request, *elasticache.BatchStopUpdateActionOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.BatchStopUpdateActionInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.BatchStopUpdateActionOutput - if rf, ok := ret.Get(1).(func(*elasticache.BatchStopUpdateActionInput) *elasticache.BatchStopUpdateActionOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.BatchStopUpdateActionOutput) - } - } - - return r0, r1 -} - -// BatchStopUpdateActionWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) BatchStopUpdateActionWithContext(_a0 context.Context, _a1 *elasticache.BatchStopUpdateActionInput, _a2 ...request.Option) (*elasticache.BatchStopUpdateActionOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.BatchStopUpdateActionOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.BatchStopUpdateActionInput, ...request.Option) *elasticache.BatchStopUpdateActionOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.BatchStopUpdateActionOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.BatchStopUpdateActionInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CompleteMigration provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CompleteMigration(_a0 *elasticache.CompleteMigrationInput) (*elasticache.CompleteMigrationOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.CompleteMigrationOutput - if rf, ok := ret.Get(0).(func(*elasticache.CompleteMigrationInput) *elasticache.CompleteMigrationOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CompleteMigrationOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.CompleteMigrationInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CompleteMigrationRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CompleteMigrationRequest(_a0 *elasticache.CompleteMigrationInput) (*request.Request, *elasticache.CompleteMigrationOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.CompleteMigrationInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.CompleteMigrationOutput - if rf, ok := ret.Get(1).(func(*elasticache.CompleteMigrationInput) *elasticache.CompleteMigrationOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.CompleteMigrationOutput) - } - } - - return r0, r1 -} - -// CompleteMigrationWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) CompleteMigrationWithContext(_a0 context.Context, _a1 *elasticache.CompleteMigrationInput, _a2 ...request.Option) (*elasticache.CompleteMigrationOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.CompleteMigrationOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CompleteMigrationInput, ...request.Option) *elasticache.CompleteMigrationOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CompleteMigrationOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.CompleteMigrationInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CopySnapshot provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CopySnapshot(_a0 *elasticache.CopySnapshotInput) (*elasticache.CopySnapshotOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.CopySnapshotOutput - if rf, ok := ret.Get(0).(func(*elasticache.CopySnapshotInput) *elasticache.CopySnapshotOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CopySnapshotOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.CopySnapshotInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CopySnapshotRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CopySnapshotRequest(_a0 *elasticache.CopySnapshotInput) (*request.Request, *elasticache.CopySnapshotOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.CopySnapshotInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.CopySnapshotOutput - if rf, ok := ret.Get(1).(func(*elasticache.CopySnapshotInput) *elasticache.CopySnapshotOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.CopySnapshotOutput) - } - } - - return r0, r1 -} - -// CopySnapshotWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) CopySnapshotWithContext(_a0 context.Context, _a1 *elasticache.CopySnapshotInput, _a2 ...request.Option) (*elasticache.CopySnapshotOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.CopySnapshotOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CopySnapshotInput, ...request.Option) *elasticache.CopySnapshotOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CopySnapshotOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.CopySnapshotInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateCacheCluster provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateCacheCluster(_a0 *elasticache.CreateCacheClusterInput) (*elasticache.CreateCacheClusterOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.CreateCacheClusterOutput - if rf, ok := ret.Get(0).(func(*elasticache.CreateCacheClusterInput) *elasticache.CreateCacheClusterOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateCacheClusterOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.CreateCacheClusterInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateCacheClusterRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateCacheClusterRequest(_a0 *elasticache.CreateCacheClusterInput) (*request.Request, *elasticache.CreateCacheClusterOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.CreateCacheClusterInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.CreateCacheClusterOutput - if rf, ok := ret.Get(1).(func(*elasticache.CreateCacheClusterInput) *elasticache.CreateCacheClusterOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.CreateCacheClusterOutput) - } - } - - return r0, r1 -} - -// CreateCacheClusterWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) CreateCacheClusterWithContext(_a0 context.Context, _a1 *elasticache.CreateCacheClusterInput, _a2 ...request.Option) (*elasticache.CreateCacheClusterOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.CreateCacheClusterOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CreateCacheClusterInput, ...request.Option) *elasticache.CreateCacheClusterOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateCacheClusterOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.CreateCacheClusterInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateCacheParameterGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateCacheParameterGroup(_a0 *elasticache.CreateCacheParameterGroupInput) (*elasticache.CreateCacheParameterGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.CreateCacheParameterGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.CreateCacheParameterGroupInput) *elasticache.CreateCacheParameterGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateCacheParameterGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.CreateCacheParameterGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateCacheParameterGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateCacheParameterGroupRequest(_a0 *elasticache.CreateCacheParameterGroupInput) (*request.Request, *elasticache.CreateCacheParameterGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.CreateCacheParameterGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.CreateCacheParameterGroupOutput - if rf, ok := ret.Get(1).(func(*elasticache.CreateCacheParameterGroupInput) *elasticache.CreateCacheParameterGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.CreateCacheParameterGroupOutput) - } - } - - return r0, r1 -} - -// CreateCacheParameterGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) CreateCacheParameterGroupWithContext(_a0 context.Context, _a1 *elasticache.CreateCacheParameterGroupInput, _a2 ...request.Option) (*elasticache.CreateCacheParameterGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.CreateCacheParameterGroupOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CreateCacheParameterGroupInput, ...request.Option) *elasticache.CreateCacheParameterGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateCacheParameterGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.CreateCacheParameterGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateCacheSecurityGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateCacheSecurityGroup(_a0 *elasticache.CreateCacheSecurityGroupInput) (*elasticache.CreateCacheSecurityGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.CreateCacheSecurityGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.CreateCacheSecurityGroupInput) *elasticache.CreateCacheSecurityGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateCacheSecurityGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.CreateCacheSecurityGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateCacheSecurityGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateCacheSecurityGroupRequest(_a0 *elasticache.CreateCacheSecurityGroupInput) (*request.Request, *elasticache.CreateCacheSecurityGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.CreateCacheSecurityGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.CreateCacheSecurityGroupOutput - if rf, ok := ret.Get(1).(func(*elasticache.CreateCacheSecurityGroupInput) *elasticache.CreateCacheSecurityGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.CreateCacheSecurityGroupOutput) - } - } - - return r0, r1 -} - -// CreateCacheSecurityGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) CreateCacheSecurityGroupWithContext(_a0 context.Context, _a1 *elasticache.CreateCacheSecurityGroupInput, _a2 ...request.Option) (*elasticache.CreateCacheSecurityGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.CreateCacheSecurityGroupOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CreateCacheSecurityGroupInput, ...request.Option) *elasticache.CreateCacheSecurityGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateCacheSecurityGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.CreateCacheSecurityGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateCacheSubnetGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateCacheSubnetGroup(_a0 *elasticache.CreateCacheSubnetGroupInput) (*elasticache.CreateCacheSubnetGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.CreateCacheSubnetGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.CreateCacheSubnetGroupInput) *elasticache.CreateCacheSubnetGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateCacheSubnetGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.CreateCacheSubnetGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateCacheSubnetGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateCacheSubnetGroupRequest(_a0 *elasticache.CreateCacheSubnetGroupInput) (*request.Request, *elasticache.CreateCacheSubnetGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.CreateCacheSubnetGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.CreateCacheSubnetGroupOutput - if rf, ok := ret.Get(1).(func(*elasticache.CreateCacheSubnetGroupInput) *elasticache.CreateCacheSubnetGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.CreateCacheSubnetGroupOutput) - } - } - - return r0, r1 -} - -// CreateCacheSubnetGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) CreateCacheSubnetGroupWithContext(_a0 context.Context, _a1 *elasticache.CreateCacheSubnetGroupInput, _a2 ...request.Option) (*elasticache.CreateCacheSubnetGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.CreateCacheSubnetGroupOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CreateCacheSubnetGroupInput, ...request.Option) *elasticache.CreateCacheSubnetGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateCacheSubnetGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.CreateCacheSubnetGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateGlobalReplicationGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateGlobalReplicationGroup(_a0 *elasticache.CreateGlobalReplicationGroupInput) (*elasticache.CreateGlobalReplicationGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.CreateGlobalReplicationGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.CreateGlobalReplicationGroupInput) *elasticache.CreateGlobalReplicationGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateGlobalReplicationGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.CreateGlobalReplicationGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateGlobalReplicationGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateGlobalReplicationGroupRequest(_a0 *elasticache.CreateGlobalReplicationGroupInput) (*request.Request, *elasticache.CreateGlobalReplicationGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.CreateGlobalReplicationGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.CreateGlobalReplicationGroupOutput - if rf, ok := ret.Get(1).(func(*elasticache.CreateGlobalReplicationGroupInput) *elasticache.CreateGlobalReplicationGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.CreateGlobalReplicationGroupOutput) - } - } - - return r0, r1 -} - -// CreateGlobalReplicationGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) CreateGlobalReplicationGroupWithContext(_a0 context.Context, _a1 *elasticache.CreateGlobalReplicationGroupInput, _a2 ...request.Option) (*elasticache.CreateGlobalReplicationGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.CreateGlobalReplicationGroupOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CreateGlobalReplicationGroupInput, ...request.Option) *elasticache.CreateGlobalReplicationGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateGlobalReplicationGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.CreateGlobalReplicationGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateReplicationGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateReplicationGroup(_a0 *elasticache.CreateReplicationGroupInput) (*elasticache.CreateReplicationGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.CreateReplicationGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.CreateReplicationGroupInput) *elasticache.CreateReplicationGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateReplicationGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.CreateReplicationGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateReplicationGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateReplicationGroupRequest(_a0 *elasticache.CreateReplicationGroupInput) (*request.Request, *elasticache.CreateReplicationGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.CreateReplicationGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.CreateReplicationGroupOutput - if rf, ok := ret.Get(1).(func(*elasticache.CreateReplicationGroupInput) *elasticache.CreateReplicationGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.CreateReplicationGroupOutput) - } - } - - return r0, r1 -} - -// CreateReplicationGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) CreateReplicationGroupWithContext(_a0 context.Context, _a1 *elasticache.CreateReplicationGroupInput, _a2 ...request.Option) (*elasticache.CreateReplicationGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.CreateReplicationGroupOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CreateReplicationGroupInput, ...request.Option) *elasticache.CreateReplicationGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateReplicationGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.CreateReplicationGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateSnapshot provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateSnapshot(_a0 *elasticache.CreateSnapshotInput) (*elasticache.CreateSnapshotOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.CreateSnapshotOutput - if rf, ok := ret.Get(0).(func(*elasticache.CreateSnapshotInput) *elasticache.CreateSnapshotOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateSnapshotOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.CreateSnapshotInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateSnapshotRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateSnapshotRequest(_a0 *elasticache.CreateSnapshotInput) (*request.Request, *elasticache.CreateSnapshotOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.CreateSnapshotInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.CreateSnapshotOutput - if rf, ok := ret.Get(1).(func(*elasticache.CreateSnapshotInput) *elasticache.CreateSnapshotOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.CreateSnapshotOutput) - } - } - - return r0, r1 -} - -// CreateSnapshotWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) CreateSnapshotWithContext(_a0 context.Context, _a1 *elasticache.CreateSnapshotInput, _a2 ...request.Option) (*elasticache.CreateSnapshotOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.CreateSnapshotOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CreateSnapshotInput, ...request.Option) *elasticache.CreateSnapshotOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateSnapshotOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.CreateSnapshotInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateUser provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateUser(_a0 *elasticache.CreateUserInput) (*elasticache.CreateUserOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.CreateUserOutput - if rf, ok := ret.Get(0).(func(*elasticache.CreateUserInput) *elasticache.CreateUserOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateUserOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.CreateUserInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateUserGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateUserGroup(_a0 *elasticache.CreateUserGroupInput) (*elasticache.CreateUserGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.CreateUserGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.CreateUserGroupInput) *elasticache.CreateUserGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateUserGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.CreateUserGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateUserGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateUserGroupRequest(_a0 *elasticache.CreateUserGroupInput) (*request.Request, *elasticache.CreateUserGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.CreateUserGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.CreateUserGroupOutput - if rf, ok := ret.Get(1).(func(*elasticache.CreateUserGroupInput) *elasticache.CreateUserGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.CreateUserGroupOutput) - } - } - - return r0, r1 -} - -// CreateUserGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) CreateUserGroupWithContext(_a0 context.Context, _a1 *elasticache.CreateUserGroupInput, _a2 ...request.Option) (*elasticache.CreateUserGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.CreateUserGroupOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CreateUserGroupInput, ...request.Option) *elasticache.CreateUserGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateUserGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.CreateUserGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateUserRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) CreateUserRequest(_a0 *elasticache.CreateUserInput) (*request.Request, *elasticache.CreateUserOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.CreateUserInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.CreateUserOutput - if rf, ok := ret.Get(1).(func(*elasticache.CreateUserInput) *elasticache.CreateUserOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.CreateUserOutput) - } - } - - return r0, r1 -} - -// CreateUserWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) CreateUserWithContext(_a0 context.Context, _a1 *elasticache.CreateUserInput, _a2 ...request.Option) (*elasticache.CreateUserOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.CreateUserOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.CreateUserInput, ...request.Option) *elasticache.CreateUserOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CreateUserOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.CreateUserInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DecreaseNodeGroupsInGlobalReplicationGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DecreaseNodeGroupsInGlobalReplicationGroup(_a0 *elasticache.DecreaseNodeGroupsInGlobalReplicationGroupInput) (*elasticache.DecreaseNodeGroupsInGlobalReplicationGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DecreaseNodeGroupsInGlobalReplicationGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.DecreaseNodeGroupsInGlobalReplicationGroupInput) *elasticache.DecreaseNodeGroupsInGlobalReplicationGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DecreaseNodeGroupsInGlobalReplicationGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.DecreaseNodeGroupsInGlobalReplicationGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DecreaseNodeGroupsInGlobalReplicationGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DecreaseNodeGroupsInGlobalReplicationGroupRequest(_a0 *elasticache.DecreaseNodeGroupsInGlobalReplicationGroupInput) (*request.Request, *elasticache.DecreaseNodeGroupsInGlobalReplicationGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.DecreaseNodeGroupsInGlobalReplicationGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.DecreaseNodeGroupsInGlobalReplicationGroupOutput - if rf, ok := ret.Get(1).(func(*elasticache.DecreaseNodeGroupsInGlobalReplicationGroupInput) *elasticache.DecreaseNodeGroupsInGlobalReplicationGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = 
ret.Get(1).(*elasticache.DecreaseNodeGroupsInGlobalReplicationGroupOutput) - } - } - - return r0, r1 -} - -// DecreaseNodeGroupsInGlobalReplicationGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DecreaseNodeGroupsInGlobalReplicationGroupWithContext(_a0 context.Context, _a1 *elasticache.DecreaseNodeGroupsInGlobalReplicationGroupInput, _a2 ...request.Option) (*elasticache.DecreaseNodeGroupsInGlobalReplicationGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DecreaseNodeGroupsInGlobalReplicationGroupOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DecreaseNodeGroupsInGlobalReplicationGroupInput, ...request.Option) *elasticache.DecreaseNodeGroupsInGlobalReplicationGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DecreaseNodeGroupsInGlobalReplicationGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DecreaseNodeGroupsInGlobalReplicationGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DecreaseReplicaCount provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DecreaseReplicaCount(_a0 *elasticache.DecreaseReplicaCountInput) (*elasticache.DecreaseReplicaCountOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DecreaseReplicaCountOutput - if rf, ok := ret.Get(0).(func(*elasticache.DecreaseReplicaCountInput) *elasticache.DecreaseReplicaCountOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DecreaseReplicaCountOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.DecreaseReplicaCountInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DecreaseReplicaCountRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DecreaseReplicaCountRequest(_a0 *elasticache.DecreaseReplicaCountInput) (*request.Request, *elasticache.DecreaseReplicaCountOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.DecreaseReplicaCountInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.DecreaseReplicaCountOutput - if rf, ok := ret.Get(1).(func(*elasticache.DecreaseReplicaCountInput) *elasticache.DecreaseReplicaCountOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DecreaseReplicaCountOutput) - } - } - - return r0, r1 -} - -// DecreaseReplicaCountWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DecreaseReplicaCountWithContext(_a0 context.Context, _a1 *elasticache.DecreaseReplicaCountInput, _a2 ...request.Option) (*elasticache.DecreaseReplicaCountOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
- - var r0 *elasticache.DecreaseReplicaCountOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DecreaseReplicaCountInput, ...request.Option) *elasticache.DecreaseReplicaCountOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DecreaseReplicaCountOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DecreaseReplicaCountInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteCacheCluster provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteCacheCluster(_a0 *elasticache.DeleteCacheClusterInput) (*elasticache.DeleteCacheClusterOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DeleteCacheClusterOutput - if rf, ok := ret.Get(0).(func(*elasticache.DeleteCacheClusterInput) *elasticache.DeleteCacheClusterOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteCacheClusterOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.DeleteCacheClusterInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteCacheClusterRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteCacheClusterRequest(_a0 *elasticache.DeleteCacheClusterInput) (*request.Request, *elasticache.DeleteCacheClusterOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.DeleteCacheClusterInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.DeleteCacheClusterOutput - if rf, ok := ret.Get(1).(func(*elasticache.DeleteCacheClusterInput) *elasticache.DeleteCacheClusterOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DeleteCacheClusterOutput) - } - } - - return r0, r1 -} - -// DeleteCacheClusterWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DeleteCacheClusterWithContext(_a0 context.Context, _a1 *elasticache.DeleteCacheClusterInput, _a2 ...request.Option) (*elasticache.DeleteCacheClusterOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DeleteCacheClusterOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DeleteCacheClusterInput, ...request.Option) *elasticache.DeleteCacheClusterOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteCacheClusterOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DeleteCacheClusterInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteCacheParameterGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteCacheParameterGroup(_a0 *elasticache.DeleteCacheParameterGroupInput) (*elasticache.DeleteCacheParameterGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DeleteCacheParameterGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.DeleteCacheParameterGroupInput) *elasticache.DeleteCacheParameterGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteCacheParameterGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.DeleteCacheParameterGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteCacheParameterGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteCacheParameterGroupRequest(_a0 *elasticache.DeleteCacheParameterGroupInput) (*request.Request, *elasticache.DeleteCacheParameterGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.DeleteCacheParameterGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.DeleteCacheParameterGroupOutput - if rf, ok := ret.Get(1).(func(*elasticache.DeleteCacheParameterGroupInput) *elasticache.DeleteCacheParameterGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DeleteCacheParameterGroupOutput) - } - } - - return r0, r1 -} - -// DeleteCacheParameterGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DeleteCacheParameterGroupWithContext(_a0 context.Context, _a1 *elasticache.DeleteCacheParameterGroupInput, _a2 ...request.Option) (*elasticache.DeleteCacheParameterGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DeleteCacheParameterGroupOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DeleteCacheParameterGroupInput, ...request.Option) *elasticache.DeleteCacheParameterGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteCacheParameterGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DeleteCacheParameterGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteCacheSecurityGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteCacheSecurityGroup(_a0 *elasticache.DeleteCacheSecurityGroupInput) (*elasticache.DeleteCacheSecurityGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DeleteCacheSecurityGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.DeleteCacheSecurityGroupInput) *elasticache.DeleteCacheSecurityGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteCacheSecurityGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.DeleteCacheSecurityGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteCacheSecurityGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteCacheSecurityGroupRequest(_a0 *elasticache.DeleteCacheSecurityGroupInput) (*request.Request, *elasticache.DeleteCacheSecurityGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.DeleteCacheSecurityGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.DeleteCacheSecurityGroupOutput - if rf, ok := ret.Get(1).(func(*elasticache.DeleteCacheSecurityGroupInput) *elasticache.DeleteCacheSecurityGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DeleteCacheSecurityGroupOutput) - } - } - - return r0, r1 -} - -// DeleteCacheSecurityGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DeleteCacheSecurityGroupWithContext(_a0 context.Context, _a1 *elasticache.DeleteCacheSecurityGroupInput, _a2 ...request.Option) (*elasticache.DeleteCacheSecurityGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DeleteCacheSecurityGroupOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DeleteCacheSecurityGroupInput, ...request.Option) *elasticache.DeleteCacheSecurityGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteCacheSecurityGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DeleteCacheSecurityGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteCacheSubnetGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteCacheSubnetGroup(_a0 *elasticache.DeleteCacheSubnetGroupInput) (*elasticache.DeleteCacheSubnetGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DeleteCacheSubnetGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.DeleteCacheSubnetGroupInput) *elasticache.DeleteCacheSubnetGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteCacheSubnetGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.DeleteCacheSubnetGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteCacheSubnetGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteCacheSubnetGroupRequest(_a0 *elasticache.DeleteCacheSubnetGroupInput) (*request.Request, *elasticache.DeleteCacheSubnetGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.DeleteCacheSubnetGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.DeleteCacheSubnetGroupOutput - if rf, ok := ret.Get(1).(func(*elasticache.DeleteCacheSubnetGroupInput) *elasticache.DeleteCacheSubnetGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DeleteCacheSubnetGroupOutput) - } - } - - return r0, r1 -} - -// DeleteCacheSubnetGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DeleteCacheSubnetGroupWithContext(_a0 context.Context, _a1 *elasticache.DeleteCacheSubnetGroupInput, _a2 ...request.Option) (*elasticache.DeleteCacheSubnetGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DeleteCacheSubnetGroupOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DeleteCacheSubnetGroupInput, ...request.Option) *elasticache.DeleteCacheSubnetGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteCacheSubnetGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DeleteCacheSubnetGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteGlobalReplicationGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteGlobalReplicationGroup(_a0 *elasticache.DeleteGlobalReplicationGroupInput) (*elasticache.DeleteGlobalReplicationGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DeleteGlobalReplicationGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.DeleteGlobalReplicationGroupInput) *elasticache.DeleteGlobalReplicationGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteGlobalReplicationGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.DeleteGlobalReplicationGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteGlobalReplicationGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteGlobalReplicationGroupRequest(_a0 *elasticache.DeleteGlobalReplicationGroupInput) (*request.Request, *elasticache.DeleteGlobalReplicationGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.DeleteGlobalReplicationGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.DeleteGlobalReplicationGroupOutput - if rf, ok := ret.Get(1).(func(*elasticache.DeleteGlobalReplicationGroupInput) *elasticache.DeleteGlobalReplicationGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DeleteGlobalReplicationGroupOutput) - } - } - - return r0, r1 -} - -// DeleteGlobalReplicationGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DeleteGlobalReplicationGroupWithContext(_a0 context.Context, _a1 *elasticache.DeleteGlobalReplicationGroupInput, _a2 ...request.Option) (*elasticache.DeleteGlobalReplicationGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DeleteGlobalReplicationGroupOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DeleteGlobalReplicationGroupInput, ...request.Option) *elasticache.DeleteGlobalReplicationGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteGlobalReplicationGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DeleteGlobalReplicationGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteReplicationGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteReplicationGroup(_a0 *elasticache.DeleteReplicationGroupInput) (*elasticache.DeleteReplicationGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DeleteReplicationGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.DeleteReplicationGroupInput) *elasticache.DeleteReplicationGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteReplicationGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.DeleteReplicationGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteReplicationGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteReplicationGroupRequest(_a0 *elasticache.DeleteReplicationGroupInput) (*request.Request, *elasticache.DeleteReplicationGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.DeleteReplicationGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.DeleteReplicationGroupOutput - if rf, ok := ret.Get(1).(func(*elasticache.DeleteReplicationGroupInput) *elasticache.DeleteReplicationGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DeleteReplicationGroupOutput) - } - } - - return r0, r1 -} - -// DeleteReplicationGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DeleteReplicationGroupWithContext(_a0 context.Context, _a1 *elasticache.DeleteReplicationGroupInput, _a2 ...request.Option) (*elasticache.DeleteReplicationGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DeleteReplicationGroupOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DeleteReplicationGroupInput, ...request.Option) *elasticache.DeleteReplicationGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteReplicationGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DeleteReplicationGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteSnapshot provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteSnapshot(_a0 *elasticache.DeleteSnapshotInput) (*elasticache.DeleteSnapshotOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DeleteSnapshotOutput - if rf, ok := ret.Get(0).(func(*elasticache.DeleteSnapshotInput) *elasticache.DeleteSnapshotOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteSnapshotOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.DeleteSnapshotInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteSnapshotRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteSnapshotRequest(_a0 *elasticache.DeleteSnapshotInput) (*request.Request, *elasticache.DeleteSnapshotOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.DeleteSnapshotInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.DeleteSnapshotOutput - if rf, ok := ret.Get(1).(func(*elasticache.DeleteSnapshotInput) *elasticache.DeleteSnapshotOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DeleteSnapshotOutput) - } - } - - return r0, r1 -} - -// DeleteSnapshotWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DeleteSnapshotWithContext(_a0 context.Context, _a1 *elasticache.DeleteSnapshotInput, _a2 ...request.Option) (*elasticache.DeleteSnapshotOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DeleteSnapshotOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DeleteSnapshotInput, ...request.Option) *elasticache.DeleteSnapshotOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteSnapshotOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DeleteSnapshotInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteUser provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteUser(_a0 *elasticache.DeleteUserInput) (*elasticache.DeleteUserOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DeleteUserOutput - if rf, ok := ret.Get(0).(func(*elasticache.DeleteUserInput) *elasticache.DeleteUserOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteUserOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.DeleteUserInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteUserGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteUserGroup(_a0 *elasticache.DeleteUserGroupInput) (*elasticache.DeleteUserGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DeleteUserGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.DeleteUserGroupInput) *elasticache.DeleteUserGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteUserGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.DeleteUserGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteUserGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteUserGroupRequest(_a0 *elasticache.DeleteUserGroupInput) (*request.Request, *elasticache.DeleteUserGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.DeleteUserGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.DeleteUserGroupOutput - if rf, ok := ret.Get(1).(func(*elasticache.DeleteUserGroupInput) *elasticache.DeleteUserGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DeleteUserGroupOutput) - } - } - - return r0, r1 -} - -// DeleteUserGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DeleteUserGroupWithContext(_a0 context.Context, _a1 *elasticache.DeleteUserGroupInput, _a2 ...request.Option) (*elasticache.DeleteUserGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DeleteUserGroupOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DeleteUserGroupInput, ...request.Option) *elasticache.DeleteUserGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteUserGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DeleteUserGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteUserRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DeleteUserRequest(_a0 *elasticache.DeleteUserInput) (*request.Request, *elasticache.DeleteUserOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.DeleteUserInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.DeleteUserOutput - if rf, ok := ret.Get(1).(func(*elasticache.DeleteUserInput) *elasticache.DeleteUserOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DeleteUserOutput) - } - } - - return r0, r1 -} - -// DeleteUserWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DeleteUserWithContext(_a0 context.Context, _a1 *elasticache.DeleteUserInput, _a2 ...request.Option) (*elasticache.DeleteUserOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DeleteUserOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DeleteUserInput, ...request.Option) *elasticache.DeleteUserOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DeleteUserOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DeleteUserInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeCacheClusters provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeCacheClusters(_a0 *elasticache.DescribeCacheClustersInput) (*elasticache.DescribeCacheClustersOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeCacheClustersOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheClustersInput) *elasticache.DescribeCacheClustersOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeCacheClustersOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.DescribeCacheClustersInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeCacheClustersPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeCacheClustersPages(_a0 *elasticache.DescribeCacheClustersInput, _a1 func(*elasticache.DescribeCacheClustersOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheClustersInput, func(*elasticache.DescribeCacheClustersOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeCacheClustersPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeCacheClustersPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeCacheClustersInput, _a2 func(*elasticache.DescribeCacheClustersOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
- - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheClustersInput, func(*elasticache.DescribeCacheClustersOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeCacheClustersRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeCacheClustersRequest(_a0 *elasticache.DescribeCacheClustersInput) (*request.Request, *elasticache.DescribeCacheClustersOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheClustersInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.DescribeCacheClustersOutput - if rf, ok := ret.Get(1).(func(*elasticache.DescribeCacheClustersInput) *elasticache.DescribeCacheClustersOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeCacheClustersOutput) - } - } - - return r0, r1 -} - -// DescribeCacheClustersWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeCacheClustersWithContext(_a0 context.Context, _a1 *elasticache.DescribeCacheClustersInput, _a2 ...request.Option) (*elasticache.DescribeCacheClustersOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DescribeCacheClustersOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheClustersInput, ...request.Option) *elasticache.DescribeCacheClustersOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeCacheClustersOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeCacheClustersInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeCacheEngineVersions provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeCacheEngineVersions(_a0 *elasticache.DescribeCacheEngineVersionsInput) (*elasticache.DescribeCacheEngineVersionsOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeCacheEngineVersionsOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheEngineVersionsInput) *elasticache.DescribeCacheEngineVersionsOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeCacheEngineVersionsOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.DescribeCacheEngineVersionsInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeCacheEngineVersionsPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeCacheEngineVersionsPages(_a0 *elasticache.DescribeCacheEngineVersionsInput, _a1 func(*elasticache.DescribeCacheEngineVersionsOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheEngineVersionsInput, func(*elasticache.DescribeCacheEngineVersionsOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeCacheEngineVersionsPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeCacheEngineVersionsPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeCacheEngineVersionsInput, _a2 func(*elasticache.DescribeCacheEngineVersionsOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheEngineVersionsInput, func(*elasticache.DescribeCacheEngineVersionsOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) 
- } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeCacheEngineVersionsRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeCacheEngineVersionsRequest(_a0 *elasticache.DescribeCacheEngineVersionsInput) (*request.Request, *elasticache.DescribeCacheEngineVersionsOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheEngineVersionsInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.DescribeCacheEngineVersionsOutput - if rf, ok := ret.Get(1).(func(*elasticache.DescribeCacheEngineVersionsInput) *elasticache.DescribeCacheEngineVersionsOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeCacheEngineVersionsOutput) - } - } - - return r0, r1 -} - -// DescribeCacheEngineVersionsWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeCacheEngineVersionsWithContext(_a0 context.Context, _a1 *elasticache.DescribeCacheEngineVersionsInput, _a2 ...request.Option) (*elasticache.DescribeCacheEngineVersionsOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DescribeCacheEngineVersionsOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheEngineVersionsInput, ...request.Option) *elasticache.DescribeCacheEngineVersionsOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeCacheEngineVersionsOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeCacheEngineVersionsInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeCacheParameterGroups provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeCacheParameterGroups(_a0 *elasticache.DescribeCacheParameterGroupsInput) (*elasticache.DescribeCacheParameterGroupsOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeCacheParameterGroupsOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheParameterGroupsInput) *elasticache.DescribeCacheParameterGroupsOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeCacheParameterGroupsOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.DescribeCacheParameterGroupsInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeCacheParameterGroupsPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeCacheParameterGroupsPages(_a0 *elasticache.DescribeCacheParameterGroupsInput, _a1 func(*elasticache.DescribeCacheParameterGroupsOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheParameterGroupsInput, func(*elasticache.DescribeCacheParameterGroupsOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeCacheParameterGroupsPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeCacheParameterGroupsPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeCacheParameterGroupsInput, _a2 func(*elasticache.DescribeCacheParameterGroupsOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheParameterGroupsInput, func(*elasticache.DescribeCacheParameterGroupsOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) 
- } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeCacheParameterGroupsRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeCacheParameterGroupsRequest(_a0 *elasticache.DescribeCacheParameterGroupsInput) (*request.Request, *elasticache.DescribeCacheParameterGroupsOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheParameterGroupsInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.DescribeCacheParameterGroupsOutput - if rf, ok := ret.Get(1).(func(*elasticache.DescribeCacheParameterGroupsInput) *elasticache.DescribeCacheParameterGroupsOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeCacheParameterGroupsOutput) - } - } - - return r0, r1 -} - -// DescribeCacheParameterGroupsWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeCacheParameterGroupsWithContext(_a0 context.Context, _a1 *elasticache.DescribeCacheParameterGroupsInput, _a2 ...request.Option) (*elasticache.DescribeCacheParameterGroupsOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DescribeCacheParameterGroupsOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheParameterGroupsInput, ...request.Option) *elasticache.DescribeCacheParameterGroupsOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeCacheParameterGroupsOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeCacheParameterGroupsInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeCacheParameters provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeCacheParameters(_a0 *elasticache.DescribeCacheParametersInput) (*elasticache.DescribeCacheParametersOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeCacheParametersOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheParametersInput) *elasticache.DescribeCacheParametersOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeCacheParametersOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.DescribeCacheParametersInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeCacheParametersPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeCacheParametersPages(_a0 *elasticache.DescribeCacheParametersInput, _a1 func(*elasticache.DescribeCacheParametersOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheParametersInput, func(*elasticache.DescribeCacheParametersOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeCacheParametersPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeCacheParametersPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeCacheParametersInput, _a2 func(*elasticache.DescribeCacheParametersOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheParametersInput, func(*elasticache.DescribeCacheParametersOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeCacheParametersRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeCacheParametersRequest(_a0 *elasticache.DescribeCacheParametersInput) (*request.Request, *elasticache.DescribeCacheParametersOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheParametersInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.DescribeCacheParametersOutput - if rf, ok := ret.Get(1).(func(*elasticache.DescribeCacheParametersInput) *elasticache.DescribeCacheParametersOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeCacheParametersOutput) - } - } - - return r0, r1 -} - -// DescribeCacheParametersWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeCacheParametersWithContext(_a0 context.Context, _a1 *elasticache.DescribeCacheParametersInput, _a2 ...request.Option) (*elasticache.DescribeCacheParametersOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
- - var r0 *elasticache.DescribeCacheParametersOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheParametersInput, ...request.Option) *elasticache.DescribeCacheParametersOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeCacheParametersOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeCacheParametersInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeCacheSecurityGroups provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeCacheSecurityGroups(_a0 *elasticache.DescribeCacheSecurityGroupsInput) (*elasticache.DescribeCacheSecurityGroupsOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeCacheSecurityGroupsOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheSecurityGroupsInput) *elasticache.DescribeCacheSecurityGroupsOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeCacheSecurityGroupsOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.DescribeCacheSecurityGroupsInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeCacheSecurityGroupsPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeCacheSecurityGroupsPages(_a0 *elasticache.DescribeCacheSecurityGroupsInput, _a1 func(*elasticache.DescribeCacheSecurityGroupsOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheSecurityGroupsInput, func(*elasticache.DescribeCacheSecurityGroupsOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeCacheSecurityGroupsPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeCacheSecurityGroupsPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeCacheSecurityGroupsInput, _a2 func(*elasticache.DescribeCacheSecurityGroupsOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheSecurityGroupsInput, func(*elasticache.DescribeCacheSecurityGroupsOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) 
- } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeCacheSecurityGroupsRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeCacheSecurityGroupsRequest(_a0 *elasticache.DescribeCacheSecurityGroupsInput) (*request.Request, *elasticache.DescribeCacheSecurityGroupsOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheSecurityGroupsInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.DescribeCacheSecurityGroupsOutput - if rf, ok := ret.Get(1).(func(*elasticache.DescribeCacheSecurityGroupsInput) *elasticache.DescribeCacheSecurityGroupsOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeCacheSecurityGroupsOutput) - } - } - - return r0, r1 -} - -// DescribeCacheSecurityGroupsWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeCacheSecurityGroupsWithContext(_a0 context.Context, _a1 *elasticache.DescribeCacheSecurityGroupsInput, _a2 ...request.Option) (*elasticache.DescribeCacheSecurityGroupsOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DescribeCacheSecurityGroupsOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheSecurityGroupsInput, ...request.Option) *elasticache.DescribeCacheSecurityGroupsOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeCacheSecurityGroupsOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeCacheSecurityGroupsInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeCacheSubnetGroups provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeCacheSubnetGroups(_a0 *elasticache.DescribeCacheSubnetGroupsInput) (*elasticache.DescribeCacheSubnetGroupsOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeCacheSubnetGroupsOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheSubnetGroupsInput) *elasticache.DescribeCacheSubnetGroupsOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeCacheSubnetGroupsOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.DescribeCacheSubnetGroupsInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeCacheSubnetGroupsPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeCacheSubnetGroupsPages(_a0 *elasticache.DescribeCacheSubnetGroupsInput, _a1 func(*elasticache.DescribeCacheSubnetGroupsOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheSubnetGroupsInput, func(*elasticache.DescribeCacheSubnetGroupsOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeCacheSubnetGroupsPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeCacheSubnetGroupsPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeCacheSubnetGroupsInput, _a2 func(*elasticache.DescribeCacheSubnetGroupsOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheSubnetGroupsInput, func(*elasticache.DescribeCacheSubnetGroupsOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeCacheSubnetGroupsRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeCacheSubnetGroupsRequest(_a0 *elasticache.DescribeCacheSubnetGroupsInput) (*request.Request, *elasticache.DescribeCacheSubnetGroupsOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheSubnetGroupsInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.DescribeCacheSubnetGroupsOutput - if rf, ok := ret.Get(1).(func(*elasticache.DescribeCacheSubnetGroupsInput) *elasticache.DescribeCacheSubnetGroupsOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeCacheSubnetGroupsOutput) - } - } - - return r0, r1 -} - -// DescribeCacheSubnetGroupsWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeCacheSubnetGroupsWithContext(_a0 context.Context, _a1 *elasticache.DescribeCacheSubnetGroupsInput, _a2 ...request.Option) (*elasticache.DescribeCacheSubnetGroupsOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
- - var r0 *elasticache.DescribeCacheSubnetGroupsOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheSubnetGroupsInput, ...request.Option) *elasticache.DescribeCacheSubnetGroupsOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeCacheSubnetGroupsOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeCacheSubnetGroupsInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeEngineDefaultParameters provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeEngineDefaultParameters(_a0 *elasticache.DescribeEngineDefaultParametersInput) (*elasticache.DescribeEngineDefaultParametersOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeEngineDefaultParametersOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeEngineDefaultParametersInput) *elasticache.DescribeEngineDefaultParametersOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeEngineDefaultParametersOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.DescribeEngineDefaultParametersInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeEngineDefaultParametersPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeEngineDefaultParametersPages(_a0 *elasticache.DescribeEngineDefaultParametersInput, _a1 func(*elasticache.DescribeEngineDefaultParametersOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeEngineDefaultParametersInput, func(*elasticache.DescribeEngineDefaultParametersOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeEngineDefaultParametersPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeEngineDefaultParametersPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeEngineDefaultParametersInput, _a2 func(*elasticache.DescribeEngineDefaultParametersOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeEngineDefaultParametersInput, func(*elasticache.DescribeEngineDefaultParametersOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) 
- } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeEngineDefaultParametersRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeEngineDefaultParametersRequest(_a0 *elasticache.DescribeEngineDefaultParametersInput) (*request.Request, *elasticache.DescribeEngineDefaultParametersOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.DescribeEngineDefaultParametersInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.DescribeEngineDefaultParametersOutput - if rf, ok := ret.Get(1).(func(*elasticache.DescribeEngineDefaultParametersInput) *elasticache.DescribeEngineDefaultParametersOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeEngineDefaultParametersOutput) - } - } - - return r0, r1 -} - -// DescribeEngineDefaultParametersWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeEngineDefaultParametersWithContext(_a0 context.Context, _a1 *elasticache.DescribeEngineDefaultParametersInput, _a2 ...request.Option) (*elasticache.DescribeEngineDefaultParametersOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DescribeEngineDefaultParametersOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeEngineDefaultParametersInput, ...request.Option) *elasticache.DescribeEngineDefaultParametersOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeEngineDefaultParametersOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeEngineDefaultParametersInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeEvents provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeEvents(_a0 *elasticache.DescribeEventsInput) (*elasticache.DescribeEventsOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeEventsOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeEventsInput) *elasticache.DescribeEventsOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeEventsOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.DescribeEventsInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeEventsPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeEventsPages(_a0 *elasticache.DescribeEventsInput, _a1 func(*elasticache.DescribeEventsOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeEventsInput, func(*elasticache.DescribeEventsOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeEventsPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeEventsPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeEventsInput, _a2 func(*elasticache.DescribeEventsOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeEventsInput, func(*elasticache.DescribeEventsOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeEventsRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeEventsRequest(_a0 *elasticache.DescribeEventsInput) (*request.Request, *elasticache.DescribeEventsOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.DescribeEventsInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.DescribeEventsOutput - if rf, ok := ret.Get(1).(func(*elasticache.DescribeEventsInput) *elasticache.DescribeEventsOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeEventsOutput) - } - } - - return r0, r1 -} - -// DescribeEventsWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeEventsWithContext(_a0 context.Context, _a1 *elasticache.DescribeEventsInput, _a2 ...request.Option) (*elasticache.DescribeEventsOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DescribeEventsOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeEventsInput, ...request.Option) *elasticache.DescribeEventsOutput); ok { - r0 = rf(_a0, _a1, _a2...) 
- } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeEventsOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeEventsInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeGlobalReplicationGroups provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeGlobalReplicationGroups(_a0 *elasticache.DescribeGlobalReplicationGroupsInput) (*elasticache.DescribeGlobalReplicationGroupsOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeGlobalReplicationGroupsOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeGlobalReplicationGroupsInput) *elasticache.DescribeGlobalReplicationGroupsOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeGlobalReplicationGroupsOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.DescribeGlobalReplicationGroupsInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeGlobalReplicationGroupsPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeGlobalReplicationGroupsPages(_a0 *elasticache.DescribeGlobalReplicationGroupsInput, _a1 func(*elasticache.DescribeGlobalReplicationGroupsOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeGlobalReplicationGroupsInput, func(*elasticache.DescribeGlobalReplicationGroupsOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeGlobalReplicationGroupsPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeGlobalReplicationGroupsPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeGlobalReplicationGroupsInput, _a2 func(*elasticache.DescribeGlobalReplicationGroupsOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeGlobalReplicationGroupsInput, func(*elasticache.DescribeGlobalReplicationGroupsOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) 
- } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeGlobalReplicationGroupsRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeGlobalReplicationGroupsRequest(_a0 *elasticache.DescribeGlobalReplicationGroupsInput) (*request.Request, *elasticache.DescribeGlobalReplicationGroupsOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.DescribeGlobalReplicationGroupsInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.DescribeGlobalReplicationGroupsOutput - if rf, ok := ret.Get(1).(func(*elasticache.DescribeGlobalReplicationGroupsInput) *elasticache.DescribeGlobalReplicationGroupsOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeGlobalReplicationGroupsOutput) - } - } - - return r0, r1 -} - -// DescribeGlobalReplicationGroupsWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeGlobalReplicationGroupsWithContext(_a0 context.Context, _a1 *elasticache.DescribeGlobalReplicationGroupsInput, _a2 ...request.Option) (*elasticache.DescribeGlobalReplicationGroupsOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DescribeGlobalReplicationGroupsOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeGlobalReplicationGroupsInput, ...request.Option) *elasticache.DescribeGlobalReplicationGroupsOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeGlobalReplicationGroupsOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeGlobalReplicationGroupsInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeReplicationGroups provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeReplicationGroups(_a0 *elasticache.DescribeReplicationGroupsInput) (*elasticache.DescribeReplicationGroupsOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeReplicationGroupsOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeReplicationGroupsInput) *elasticache.DescribeReplicationGroupsOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeReplicationGroupsOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.DescribeReplicationGroupsInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeReplicationGroupsPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeReplicationGroupsPages(_a0 *elasticache.DescribeReplicationGroupsInput, _a1 func(*elasticache.DescribeReplicationGroupsOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeReplicationGroupsInput, func(*elasticache.DescribeReplicationGroupsOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeReplicationGroupsPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeReplicationGroupsPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeReplicationGroupsInput, _a2 func(*elasticache.DescribeReplicationGroupsOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeReplicationGroupsInput, func(*elasticache.DescribeReplicationGroupsOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeReplicationGroupsRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeReplicationGroupsRequest(_a0 *elasticache.DescribeReplicationGroupsInput) (*request.Request, *elasticache.DescribeReplicationGroupsOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.DescribeReplicationGroupsInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.DescribeReplicationGroupsOutput - if rf, ok := ret.Get(1).(func(*elasticache.DescribeReplicationGroupsInput) *elasticache.DescribeReplicationGroupsOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeReplicationGroupsOutput) - } - } - - return r0, r1 -} - -// DescribeReplicationGroupsWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeReplicationGroupsWithContext(_a0 context.Context, _a1 *elasticache.DescribeReplicationGroupsInput, _a2 ...request.Option) (*elasticache.DescribeReplicationGroupsOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
- - var r0 *elasticache.DescribeReplicationGroupsOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeReplicationGroupsInput, ...request.Option) *elasticache.DescribeReplicationGroupsOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeReplicationGroupsOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeReplicationGroupsInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeReservedCacheNodes provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeReservedCacheNodes(_a0 *elasticache.DescribeReservedCacheNodesInput) (*elasticache.DescribeReservedCacheNodesOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeReservedCacheNodesOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeReservedCacheNodesInput) *elasticache.DescribeReservedCacheNodesOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeReservedCacheNodesOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.DescribeReservedCacheNodesInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeReservedCacheNodesOfferings provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeReservedCacheNodesOfferings(_a0 *elasticache.DescribeReservedCacheNodesOfferingsInput) (*elasticache.DescribeReservedCacheNodesOfferingsOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeReservedCacheNodesOfferingsOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeReservedCacheNodesOfferingsInput) *elasticache.DescribeReservedCacheNodesOfferingsOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeReservedCacheNodesOfferingsOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.DescribeReservedCacheNodesOfferingsInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeReservedCacheNodesOfferingsPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeReservedCacheNodesOfferingsPages(_a0 *elasticache.DescribeReservedCacheNodesOfferingsInput, _a1 func(*elasticache.DescribeReservedCacheNodesOfferingsOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeReservedCacheNodesOfferingsInput, func(*elasticache.DescribeReservedCacheNodesOfferingsOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeReservedCacheNodesOfferingsPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeReservedCacheNodesOfferingsPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeReservedCacheNodesOfferingsInput, _a2 func(*elasticache.DescribeReservedCacheNodesOfferingsOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
- - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeReservedCacheNodesOfferingsInput, func(*elasticache.DescribeReservedCacheNodesOfferingsOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeReservedCacheNodesOfferingsRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeReservedCacheNodesOfferingsRequest(_a0 *elasticache.DescribeReservedCacheNodesOfferingsInput) (*request.Request, *elasticache.DescribeReservedCacheNodesOfferingsOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.DescribeReservedCacheNodesOfferingsInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.DescribeReservedCacheNodesOfferingsOutput - if rf, ok := ret.Get(1).(func(*elasticache.DescribeReservedCacheNodesOfferingsInput) *elasticache.DescribeReservedCacheNodesOfferingsOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeReservedCacheNodesOfferingsOutput) - } - } - - return r0, r1 -} - -// DescribeReservedCacheNodesOfferingsWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeReservedCacheNodesOfferingsWithContext(_a0 context.Context, _a1 *elasticache.DescribeReservedCacheNodesOfferingsInput, _a2 ...request.Option) (*elasticache.DescribeReservedCacheNodesOfferingsOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DescribeReservedCacheNodesOfferingsOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeReservedCacheNodesOfferingsInput, ...request.Option) *elasticache.DescribeReservedCacheNodesOfferingsOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeReservedCacheNodesOfferingsOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeReservedCacheNodesOfferingsInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeReservedCacheNodesPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeReservedCacheNodesPages(_a0 *elasticache.DescribeReservedCacheNodesInput, _a1 func(*elasticache.DescribeReservedCacheNodesOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeReservedCacheNodesInput, func(*elasticache.DescribeReservedCacheNodesOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeReservedCacheNodesPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeReservedCacheNodesPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeReservedCacheNodesInput, _a2 func(*elasticache.DescribeReservedCacheNodesOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
- - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeReservedCacheNodesInput, func(*elasticache.DescribeReservedCacheNodesOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeReservedCacheNodesRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeReservedCacheNodesRequest(_a0 *elasticache.DescribeReservedCacheNodesInput) (*request.Request, *elasticache.DescribeReservedCacheNodesOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.DescribeReservedCacheNodesInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.DescribeReservedCacheNodesOutput - if rf, ok := ret.Get(1).(func(*elasticache.DescribeReservedCacheNodesInput) *elasticache.DescribeReservedCacheNodesOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeReservedCacheNodesOutput) - } - } - - return r0, r1 -} - -// DescribeReservedCacheNodesWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeReservedCacheNodesWithContext(_a0 context.Context, _a1 *elasticache.DescribeReservedCacheNodesInput, _a2 ...request.Option) (*elasticache.DescribeReservedCacheNodesOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DescribeReservedCacheNodesOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeReservedCacheNodesInput, ...request.Option) *elasticache.DescribeReservedCacheNodesOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeReservedCacheNodesOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeReservedCacheNodesInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeServiceUpdates provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeServiceUpdates(_a0 *elasticache.DescribeServiceUpdatesInput) (*elasticache.DescribeServiceUpdatesOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeServiceUpdatesOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeServiceUpdatesInput) *elasticache.DescribeServiceUpdatesOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeServiceUpdatesOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.DescribeServiceUpdatesInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeServiceUpdatesPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeServiceUpdatesPages(_a0 *elasticache.DescribeServiceUpdatesInput, _a1 func(*elasticache.DescribeServiceUpdatesOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeServiceUpdatesInput, func(*elasticache.DescribeServiceUpdatesOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeServiceUpdatesPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeServiceUpdatesPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeServiceUpdatesInput, _a2 func(*elasticache.DescribeServiceUpdatesOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeServiceUpdatesInput, func(*elasticache.DescribeServiceUpdatesOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeServiceUpdatesRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeServiceUpdatesRequest(_a0 *elasticache.DescribeServiceUpdatesInput) (*request.Request, *elasticache.DescribeServiceUpdatesOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.DescribeServiceUpdatesInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.DescribeServiceUpdatesOutput - if rf, ok := ret.Get(1).(func(*elasticache.DescribeServiceUpdatesInput) *elasticache.DescribeServiceUpdatesOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeServiceUpdatesOutput) - } - } - - return r0, r1 -} - -// DescribeServiceUpdatesWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeServiceUpdatesWithContext(_a0 context.Context, _a1 *elasticache.DescribeServiceUpdatesInput, _a2 ...request.Option) (*elasticache.DescribeServiceUpdatesOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
- - var r0 *elasticache.DescribeServiceUpdatesOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeServiceUpdatesInput, ...request.Option) *elasticache.DescribeServiceUpdatesOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeServiceUpdatesOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeServiceUpdatesInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeSnapshots provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeSnapshots(_a0 *elasticache.DescribeSnapshotsInput) (*elasticache.DescribeSnapshotsOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeSnapshotsOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeSnapshotsInput) *elasticache.DescribeSnapshotsOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeSnapshotsOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.DescribeSnapshotsInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeSnapshotsPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeSnapshotsPages(_a0 *elasticache.DescribeSnapshotsInput, _a1 func(*elasticache.DescribeSnapshotsOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeSnapshotsInput, func(*elasticache.DescribeSnapshotsOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeSnapshotsPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeSnapshotsPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeSnapshotsInput, _a2 func(*elasticache.DescribeSnapshotsOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeSnapshotsInput, func(*elasticache.DescribeSnapshotsOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) 
- } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeSnapshotsRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeSnapshotsRequest(_a0 *elasticache.DescribeSnapshotsInput) (*request.Request, *elasticache.DescribeSnapshotsOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.DescribeSnapshotsInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.DescribeSnapshotsOutput - if rf, ok := ret.Get(1).(func(*elasticache.DescribeSnapshotsInput) *elasticache.DescribeSnapshotsOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeSnapshotsOutput) - } - } - - return r0, r1 -} - -// DescribeSnapshotsWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeSnapshotsWithContext(_a0 context.Context, _a1 *elasticache.DescribeSnapshotsInput, _a2 ...request.Option) (*elasticache.DescribeSnapshotsOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DescribeSnapshotsOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeSnapshotsInput, ...request.Option) *elasticache.DescribeSnapshotsOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeSnapshotsOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeSnapshotsInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeUpdateActions provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeUpdateActions(_a0 *elasticache.DescribeUpdateActionsInput) (*elasticache.DescribeUpdateActionsOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeUpdateActionsOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeUpdateActionsInput) *elasticache.DescribeUpdateActionsOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeUpdateActionsOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.DescribeUpdateActionsInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeUpdateActionsPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeUpdateActionsPages(_a0 *elasticache.DescribeUpdateActionsInput, _a1 func(*elasticache.DescribeUpdateActionsOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeUpdateActionsInput, func(*elasticache.DescribeUpdateActionsOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeUpdateActionsPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeUpdateActionsPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeUpdateActionsInput, _a2 func(*elasticache.DescribeUpdateActionsOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeUpdateActionsInput, func(*elasticache.DescribeUpdateActionsOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeUpdateActionsRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeUpdateActionsRequest(_a0 *elasticache.DescribeUpdateActionsInput) (*request.Request, *elasticache.DescribeUpdateActionsOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.DescribeUpdateActionsInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.DescribeUpdateActionsOutput - if rf, ok := ret.Get(1).(func(*elasticache.DescribeUpdateActionsInput) *elasticache.DescribeUpdateActionsOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeUpdateActionsOutput) - } - } - - return r0, r1 -} - -// DescribeUpdateActionsWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeUpdateActionsWithContext(_a0 context.Context, _a1 *elasticache.DescribeUpdateActionsInput, _a2 ...request.Option) (*elasticache.DescribeUpdateActionsOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
- - var r0 *elasticache.DescribeUpdateActionsOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeUpdateActionsInput, ...request.Option) *elasticache.DescribeUpdateActionsOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeUpdateActionsOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeUpdateActionsInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeUserGroups provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeUserGroups(_a0 *elasticache.DescribeUserGroupsInput) (*elasticache.DescribeUserGroupsOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeUserGroupsOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeUserGroupsInput) *elasticache.DescribeUserGroupsOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeUserGroupsOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.DescribeUserGroupsInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeUserGroupsPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeUserGroupsPages(_a0 *elasticache.DescribeUserGroupsInput, _a1 func(*elasticache.DescribeUserGroupsOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeUserGroupsInput, func(*elasticache.DescribeUserGroupsOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeUserGroupsPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeUserGroupsPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeUserGroupsInput, _a2 func(*elasticache.DescribeUserGroupsOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeUserGroupsInput, func(*elasticache.DescribeUserGroupsOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) 
- } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeUserGroupsRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeUserGroupsRequest(_a0 *elasticache.DescribeUserGroupsInput) (*request.Request, *elasticache.DescribeUserGroupsOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.DescribeUserGroupsInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.DescribeUserGroupsOutput - if rf, ok := ret.Get(1).(func(*elasticache.DescribeUserGroupsInput) *elasticache.DescribeUserGroupsOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeUserGroupsOutput) - } - } - - return r0, r1 -} - -// DescribeUserGroupsWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeUserGroupsWithContext(_a0 context.Context, _a1 *elasticache.DescribeUserGroupsInput, _a2 ...request.Option) (*elasticache.DescribeUserGroupsOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DescribeUserGroupsOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeUserGroupsInput, ...request.Option) *elasticache.DescribeUserGroupsOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeUserGroupsOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeUserGroupsInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeUsers provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeUsers(_a0 *elasticache.DescribeUsersInput) (*elasticache.DescribeUsersOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DescribeUsersOutput - if rf, ok := ret.Get(0).(func(*elasticache.DescribeUsersInput) *elasticache.DescribeUsersOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeUsersOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.DescribeUsersInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DescribeUsersPages provides a mock function with given fields: _a0, _a1 -func (_m *ElastiCacheAPI) DescribeUsersPages(_a0 *elasticache.DescribeUsersInput, _a1 func(*elasticache.DescribeUsersOutput, bool) bool) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeUsersInput, func(*elasticache.DescribeUsersOutput, bool) bool) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeUsersPagesWithContext provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *ElastiCacheAPI) DescribeUsersPagesWithContext(_a0 context.Context, _a1 *elasticache.DescribeUsersInput, _a2 func(*elasticache.DescribeUsersOutput, bool) bool, _a3 ...request.Option) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
- - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeUsersInput, func(*elasticache.DescribeUsersOutput, bool) bool, ...request.Option) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DescribeUsersRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DescribeUsersRequest(_a0 *elasticache.DescribeUsersInput) (*request.Request, *elasticache.DescribeUsersOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.DescribeUsersInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.DescribeUsersOutput - if rf, ok := ret.Get(1).(func(*elasticache.DescribeUsersInput) *elasticache.DescribeUsersOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DescribeUsersOutput) - } - } - - return r0, r1 -} - -// DescribeUsersWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DescribeUsersWithContext(_a0 context.Context, _a1 *elasticache.DescribeUsersInput, _a2 ...request.Option) (*elasticache.DescribeUsersOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DescribeUsersOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeUsersInput, ...request.Option) *elasticache.DescribeUsersOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DescribeUsersOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DescribeUsersInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DisassociateGlobalReplicationGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DisassociateGlobalReplicationGroup(_a0 *elasticache.DisassociateGlobalReplicationGroupInput) (*elasticache.DisassociateGlobalReplicationGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.DisassociateGlobalReplicationGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.DisassociateGlobalReplicationGroupInput) *elasticache.DisassociateGlobalReplicationGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DisassociateGlobalReplicationGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.DisassociateGlobalReplicationGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DisassociateGlobalReplicationGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) DisassociateGlobalReplicationGroupRequest(_a0 *elasticache.DisassociateGlobalReplicationGroupInput) (*request.Request, *elasticache.DisassociateGlobalReplicationGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.DisassociateGlobalReplicationGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.DisassociateGlobalReplicationGroupOutput - if rf, ok := ret.Get(1).(func(*elasticache.DisassociateGlobalReplicationGroupInput) *elasticache.DisassociateGlobalReplicationGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.DisassociateGlobalReplicationGroupOutput) - } - } - - return r0, r1 -} - -// DisassociateGlobalReplicationGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) DisassociateGlobalReplicationGroupWithContext(_a0 context.Context, _a1 *elasticache.DisassociateGlobalReplicationGroupInput, _a2 ...request.Option) (*elasticache.DisassociateGlobalReplicationGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.DisassociateGlobalReplicationGroupOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DisassociateGlobalReplicationGroupInput, ...request.Option) *elasticache.DisassociateGlobalReplicationGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.DisassociateGlobalReplicationGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.DisassociateGlobalReplicationGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FailoverGlobalReplicationGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) FailoverGlobalReplicationGroup(_a0 *elasticache.FailoverGlobalReplicationGroupInput) (*elasticache.FailoverGlobalReplicationGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.FailoverGlobalReplicationGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.FailoverGlobalReplicationGroupInput) *elasticache.FailoverGlobalReplicationGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.FailoverGlobalReplicationGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.FailoverGlobalReplicationGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FailoverGlobalReplicationGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) FailoverGlobalReplicationGroupRequest(_a0 *elasticache.FailoverGlobalReplicationGroupInput) (*request.Request, *elasticache.FailoverGlobalReplicationGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.FailoverGlobalReplicationGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.FailoverGlobalReplicationGroupOutput - if rf, ok := ret.Get(1).(func(*elasticache.FailoverGlobalReplicationGroupInput) *elasticache.FailoverGlobalReplicationGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.FailoverGlobalReplicationGroupOutput) - } - } - - return r0, r1 -} - -// FailoverGlobalReplicationGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) FailoverGlobalReplicationGroupWithContext(_a0 context.Context, _a1 *elasticache.FailoverGlobalReplicationGroupInput, _a2 ...request.Option) (*elasticache.FailoverGlobalReplicationGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.FailoverGlobalReplicationGroupOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.FailoverGlobalReplicationGroupInput, ...request.Option) *elasticache.FailoverGlobalReplicationGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.FailoverGlobalReplicationGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.FailoverGlobalReplicationGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// IncreaseNodeGroupsInGlobalReplicationGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) IncreaseNodeGroupsInGlobalReplicationGroup(_a0 *elasticache.IncreaseNodeGroupsInGlobalReplicationGroupInput) (*elasticache.IncreaseNodeGroupsInGlobalReplicationGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.IncreaseNodeGroupsInGlobalReplicationGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.IncreaseNodeGroupsInGlobalReplicationGroupInput) *elasticache.IncreaseNodeGroupsInGlobalReplicationGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.IncreaseNodeGroupsInGlobalReplicationGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.IncreaseNodeGroupsInGlobalReplicationGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// IncreaseNodeGroupsInGlobalReplicationGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) IncreaseNodeGroupsInGlobalReplicationGroupRequest(_a0 *elasticache.IncreaseNodeGroupsInGlobalReplicationGroupInput) (*request.Request, *elasticache.IncreaseNodeGroupsInGlobalReplicationGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.IncreaseNodeGroupsInGlobalReplicationGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.IncreaseNodeGroupsInGlobalReplicationGroupOutput - if rf, ok := ret.Get(1).(func(*elasticache.IncreaseNodeGroupsInGlobalReplicationGroupInput) *elasticache.IncreaseNodeGroupsInGlobalReplicationGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.IncreaseNodeGroupsInGlobalReplicationGroupOutput) - } - } - - return r0, r1 -} - -// IncreaseNodeGroupsInGlobalReplicationGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) IncreaseNodeGroupsInGlobalReplicationGroupWithContext(_a0 context.Context, _a1 *elasticache.IncreaseNodeGroupsInGlobalReplicationGroupInput, _a2 ...request.Option) (*elasticache.IncreaseNodeGroupsInGlobalReplicationGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.IncreaseNodeGroupsInGlobalReplicationGroupOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.IncreaseNodeGroupsInGlobalReplicationGroupInput, ...request.Option) *elasticache.IncreaseNodeGroupsInGlobalReplicationGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.IncreaseNodeGroupsInGlobalReplicationGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.IncreaseNodeGroupsInGlobalReplicationGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// IncreaseReplicaCount provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) IncreaseReplicaCount(_a0 *elasticache.IncreaseReplicaCountInput) (*elasticache.IncreaseReplicaCountOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.IncreaseReplicaCountOutput - if rf, ok := ret.Get(0).(func(*elasticache.IncreaseReplicaCountInput) *elasticache.IncreaseReplicaCountOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.IncreaseReplicaCountOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.IncreaseReplicaCountInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// IncreaseReplicaCountRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) IncreaseReplicaCountRequest(_a0 *elasticache.IncreaseReplicaCountInput) (*request.Request, *elasticache.IncreaseReplicaCountOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.IncreaseReplicaCountInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.IncreaseReplicaCountOutput - if rf, ok := ret.Get(1).(func(*elasticache.IncreaseReplicaCountInput) *elasticache.IncreaseReplicaCountOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.IncreaseReplicaCountOutput) - } - } - - return r0, r1 -} - -// IncreaseReplicaCountWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) IncreaseReplicaCountWithContext(_a0 context.Context, _a1 *elasticache.IncreaseReplicaCountInput, _a2 ...request.Option) (*elasticache.IncreaseReplicaCountOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.IncreaseReplicaCountOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.IncreaseReplicaCountInput, ...request.Option) *elasticache.IncreaseReplicaCountOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.IncreaseReplicaCountOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.IncreaseReplicaCountInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ListAllowedNodeTypeModifications provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ListAllowedNodeTypeModifications(_a0 *elasticache.ListAllowedNodeTypeModificationsInput) (*elasticache.ListAllowedNodeTypeModificationsOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.ListAllowedNodeTypeModificationsOutput - if rf, ok := ret.Get(0).(func(*elasticache.ListAllowedNodeTypeModificationsInput) *elasticache.ListAllowedNodeTypeModificationsOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ListAllowedNodeTypeModificationsOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.ListAllowedNodeTypeModificationsInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ListAllowedNodeTypeModificationsRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ListAllowedNodeTypeModificationsRequest(_a0 *elasticache.ListAllowedNodeTypeModificationsInput) (*request.Request, *elasticache.ListAllowedNodeTypeModificationsOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.ListAllowedNodeTypeModificationsInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.ListAllowedNodeTypeModificationsOutput - if rf, ok := ret.Get(1).(func(*elasticache.ListAllowedNodeTypeModificationsInput) *elasticache.ListAllowedNodeTypeModificationsOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.ListAllowedNodeTypeModificationsOutput) - } - } - - return r0, r1 -} - -// ListAllowedNodeTypeModificationsWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) ListAllowedNodeTypeModificationsWithContext(_a0 context.Context, _a1 *elasticache.ListAllowedNodeTypeModificationsInput, _a2 ...request.Option) (*elasticache.ListAllowedNodeTypeModificationsOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.ListAllowedNodeTypeModificationsOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ListAllowedNodeTypeModificationsInput, ...request.Option) *elasticache.ListAllowedNodeTypeModificationsOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ListAllowedNodeTypeModificationsOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.ListAllowedNodeTypeModificationsInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ListTagsForResource provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ListTagsForResource(_a0 *elasticache.ListTagsForResourceInput) (*elasticache.TagListMessage, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.TagListMessage - if rf, ok := ret.Get(0).(func(*elasticache.ListTagsForResourceInput) *elasticache.TagListMessage); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.TagListMessage) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.ListTagsForResourceInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ListTagsForResourceRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ListTagsForResourceRequest(_a0 *elasticache.ListTagsForResourceInput) (*request.Request, *elasticache.TagListMessage) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.ListTagsForResourceInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.TagListMessage - if rf, ok := ret.Get(1).(func(*elasticache.ListTagsForResourceInput) *elasticache.TagListMessage); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.TagListMessage) - } - } - - return r0, r1 -} - -// ListTagsForResourceWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) ListTagsForResourceWithContext(_a0 context.Context, _a1 *elasticache.ListTagsForResourceInput, _a2 ...request.Option) (*elasticache.TagListMessage, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.TagListMessage - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ListTagsForResourceInput, ...request.Option) *elasticache.TagListMessage); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.TagListMessage) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.ListTagsForResourceInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyCacheCluster provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyCacheCluster(_a0 *elasticache.ModifyCacheClusterInput) (*elasticache.ModifyCacheClusterOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.ModifyCacheClusterOutput - if rf, ok := ret.Get(0).(func(*elasticache.ModifyCacheClusterInput) *elasticache.ModifyCacheClusterOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ModifyCacheClusterOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.ModifyCacheClusterInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyCacheClusterRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyCacheClusterRequest(_a0 *elasticache.ModifyCacheClusterInput) (*request.Request, *elasticache.ModifyCacheClusterOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.ModifyCacheClusterInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.ModifyCacheClusterOutput - if rf, ok := ret.Get(1).(func(*elasticache.ModifyCacheClusterInput) *elasticache.ModifyCacheClusterOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.ModifyCacheClusterOutput) - } - } - - return r0, r1 -} - -// ModifyCacheClusterWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) ModifyCacheClusterWithContext(_a0 context.Context, _a1 *elasticache.ModifyCacheClusterInput, _a2 ...request.Option) (*elasticache.ModifyCacheClusterOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.ModifyCacheClusterOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ModifyCacheClusterInput, ...request.Option) *elasticache.ModifyCacheClusterOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ModifyCacheClusterOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.ModifyCacheClusterInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyCacheParameterGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyCacheParameterGroup(_a0 *elasticache.ModifyCacheParameterGroupInput) (*elasticache.CacheParameterGroupNameMessage, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.CacheParameterGroupNameMessage - if rf, ok := ret.Get(0).(func(*elasticache.ModifyCacheParameterGroupInput) *elasticache.CacheParameterGroupNameMessage); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CacheParameterGroupNameMessage) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.ModifyCacheParameterGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyCacheParameterGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyCacheParameterGroupRequest(_a0 *elasticache.ModifyCacheParameterGroupInput) (*request.Request, *elasticache.CacheParameterGroupNameMessage) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.ModifyCacheParameterGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.CacheParameterGroupNameMessage - if rf, ok := ret.Get(1).(func(*elasticache.ModifyCacheParameterGroupInput) *elasticache.CacheParameterGroupNameMessage); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.CacheParameterGroupNameMessage) - } - } - - return r0, r1 -} - -// ModifyCacheParameterGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) ModifyCacheParameterGroupWithContext(_a0 context.Context, _a1 *elasticache.ModifyCacheParameterGroupInput, _a2 ...request.Option) (*elasticache.CacheParameterGroupNameMessage, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.CacheParameterGroupNameMessage - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ModifyCacheParameterGroupInput, ...request.Option) *elasticache.CacheParameterGroupNameMessage); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CacheParameterGroupNameMessage) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.ModifyCacheParameterGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyCacheSubnetGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyCacheSubnetGroup(_a0 *elasticache.ModifyCacheSubnetGroupInput) (*elasticache.ModifyCacheSubnetGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.ModifyCacheSubnetGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.ModifyCacheSubnetGroupInput) *elasticache.ModifyCacheSubnetGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ModifyCacheSubnetGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.ModifyCacheSubnetGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyCacheSubnetGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyCacheSubnetGroupRequest(_a0 *elasticache.ModifyCacheSubnetGroupInput) (*request.Request, *elasticache.ModifyCacheSubnetGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.ModifyCacheSubnetGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.ModifyCacheSubnetGroupOutput - if rf, ok := ret.Get(1).(func(*elasticache.ModifyCacheSubnetGroupInput) *elasticache.ModifyCacheSubnetGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.ModifyCacheSubnetGroupOutput) - } - } - - return r0, r1 -} - -// ModifyCacheSubnetGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) ModifyCacheSubnetGroupWithContext(_a0 context.Context, _a1 *elasticache.ModifyCacheSubnetGroupInput, _a2 ...request.Option) (*elasticache.ModifyCacheSubnetGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.ModifyCacheSubnetGroupOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ModifyCacheSubnetGroupInput, ...request.Option) *elasticache.ModifyCacheSubnetGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ModifyCacheSubnetGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.ModifyCacheSubnetGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyGlobalReplicationGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyGlobalReplicationGroup(_a0 *elasticache.ModifyGlobalReplicationGroupInput) (*elasticache.ModifyGlobalReplicationGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.ModifyGlobalReplicationGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.ModifyGlobalReplicationGroupInput) *elasticache.ModifyGlobalReplicationGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ModifyGlobalReplicationGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.ModifyGlobalReplicationGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyGlobalReplicationGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyGlobalReplicationGroupRequest(_a0 *elasticache.ModifyGlobalReplicationGroupInput) (*request.Request, *elasticache.ModifyGlobalReplicationGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.ModifyGlobalReplicationGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.ModifyGlobalReplicationGroupOutput - if rf, ok := ret.Get(1).(func(*elasticache.ModifyGlobalReplicationGroupInput) *elasticache.ModifyGlobalReplicationGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.ModifyGlobalReplicationGroupOutput) - } - } - - return r0, r1 -} - -// ModifyGlobalReplicationGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) ModifyGlobalReplicationGroupWithContext(_a0 context.Context, _a1 *elasticache.ModifyGlobalReplicationGroupInput, _a2 ...request.Option) (*elasticache.ModifyGlobalReplicationGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.ModifyGlobalReplicationGroupOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ModifyGlobalReplicationGroupInput, ...request.Option) *elasticache.ModifyGlobalReplicationGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ModifyGlobalReplicationGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.ModifyGlobalReplicationGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyReplicationGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyReplicationGroup(_a0 *elasticache.ModifyReplicationGroupInput) (*elasticache.ModifyReplicationGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.ModifyReplicationGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.ModifyReplicationGroupInput) *elasticache.ModifyReplicationGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ModifyReplicationGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.ModifyReplicationGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyReplicationGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyReplicationGroupRequest(_a0 *elasticache.ModifyReplicationGroupInput) (*request.Request, *elasticache.ModifyReplicationGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.ModifyReplicationGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.ModifyReplicationGroupOutput - if rf, ok := ret.Get(1).(func(*elasticache.ModifyReplicationGroupInput) *elasticache.ModifyReplicationGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.ModifyReplicationGroupOutput) - } - } - - return r0, r1 -} - -// ModifyReplicationGroupShardConfiguration provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyReplicationGroupShardConfiguration(_a0 *elasticache.ModifyReplicationGroupShardConfigurationInput) (*elasticache.ModifyReplicationGroupShardConfigurationOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.ModifyReplicationGroupShardConfigurationOutput - if rf, ok := ret.Get(0).(func(*elasticache.ModifyReplicationGroupShardConfigurationInput) *elasticache.ModifyReplicationGroupShardConfigurationOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ModifyReplicationGroupShardConfigurationOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.ModifyReplicationGroupShardConfigurationInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyReplicationGroupShardConfigurationRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyReplicationGroupShardConfigurationRequest(_a0 *elasticache.ModifyReplicationGroupShardConfigurationInput) (*request.Request, *elasticache.ModifyReplicationGroupShardConfigurationOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.ModifyReplicationGroupShardConfigurationInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.ModifyReplicationGroupShardConfigurationOutput - if rf, ok := ret.Get(1).(func(*elasticache.ModifyReplicationGroupShardConfigurationInput) *elasticache.ModifyReplicationGroupShardConfigurationOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.ModifyReplicationGroupShardConfigurationOutput) - } - } - - return r0, r1 -} - -// ModifyReplicationGroupShardConfigurationWithContext provides a mock function with given 
fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) ModifyReplicationGroupShardConfigurationWithContext(_a0 context.Context, _a1 *elasticache.ModifyReplicationGroupShardConfigurationInput, _a2 ...request.Option) (*elasticache.ModifyReplicationGroupShardConfigurationOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.ModifyReplicationGroupShardConfigurationOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ModifyReplicationGroupShardConfigurationInput, ...request.Option) *elasticache.ModifyReplicationGroupShardConfigurationOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ModifyReplicationGroupShardConfigurationOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.ModifyReplicationGroupShardConfigurationInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyReplicationGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) ModifyReplicationGroupWithContext(_a0 context.Context, _a1 *elasticache.ModifyReplicationGroupInput, _a2 ...request.Option) (*elasticache.ModifyReplicationGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.ModifyReplicationGroupOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ModifyReplicationGroupInput, ...request.Option) *elasticache.ModifyReplicationGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ModifyReplicationGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.ModifyReplicationGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyUser provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyUser(_a0 *elasticache.ModifyUserInput) (*elasticache.ModifyUserOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.ModifyUserOutput - if rf, ok := ret.Get(0).(func(*elasticache.ModifyUserInput) *elasticache.ModifyUserOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ModifyUserOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.ModifyUserInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyUserGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyUserGroup(_a0 *elasticache.ModifyUserGroupInput) (*elasticache.ModifyUserGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.ModifyUserGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.ModifyUserGroupInput) *elasticache.ModifyUserGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ModifyUserGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.ModifyUserGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyUserGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyUserGroupRequest(_a0 *elasticache.ModifyUserGroupInput) (*request.Request, *elasticache.ModifyUserGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.ModifyUserGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.ModifyUserGroupOutput - if rf, ok := ret.Get(1).(func(*elasticache.ModifyUserGroupInput) *elasticache.ModifyUserGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.ModifyUserGroupOutput) - } - } - - return r0, r1 -} - -// ModifyUserGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) ModifyUserGroupWithContext(_a0 context.Context, _a1 *elasticache.ModifyUserGroupInput, _a2 ...request.Option) (*elasticache.ModifyUserGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.ModifyUserGroupOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ModifyUserGroupInput, ...request.Option) *elasticache.ModifyUserGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ModifyUserGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.ModifyUserGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ModifyUserRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ModifyUserRequest(_a0 *elasticache.ModifyUserInput) (*request.Request, *elasticache.ModifyUserOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.ModifyUserInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.ModifyUserOutput - if rf, ok := ret.Get(1).(func(*elasticache.ModifyUserInput) *elasticache.ModifyUserOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.ModifyUserOutput) - } - } - - return r0, r1 -} - -// ModifyUserWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) ModifyUserWithContext(_a0 context.Context, _a1 *elasticache.ModifyUserInput, _a2 ...request.Option) (*elasticache.ModifyUserOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.ModifyUserOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ModifyUserInput, ...request.Option) *elasticache.ModifyUserOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.ModifyUserOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.ModifyUserInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// PurchaseReservedCacheNodesOffering provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) PurchaseReservedCacheNodesOffering(_a0 *elasticache.PurchaseReservedCacheNodesOfferingInput) (*elasticache.PurchaseReservedCacheNodesOfferingOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.PurchaseReservedCacheNodesOfferingOutput - if rf, ok := ret.Get(0).(func(*elasticache.PurchaseReservedCacheNodesOfferingInput) *elasticache.PurchaseReservedCacheNodesOfferingOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.PurchaseReservedCacheNodesOfferingOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.PurchaseReservedCacheNodesOfferingInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// PurchaseReservedCacheNodesOfferingRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) PurchaseReservedCacheNodesOfferingRequest(_a0 *elasticache.PurchaseReservedCacheNodesOfferingInput) (*request.Request, *elasticache.PurchaseReservedCacheNodesOfferingOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.PurchaseReservedCacheNodesOfferingInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.PurchaseReservedCacheNodesOfferingOutput - if rf, ok := ret.Get(1).(func(*elasticache.PurchaseReservedCacheNodesOfferingInput) *elasticache.PurchaseReservedCacheNodesOfferingOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.PurchaseReservedCacheNodesOfferingOutput) - } - } - - return r0, r1 -} - -// PurchaseReservedCacheNodesOfferingWithContext provides a mock function 
with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) PurchaseReservedCacheNodesOfferingWithContext(_a0 context.Context, _a1 *elasticache.PurchaseReservedCacheNodesOfferingInput, _a2 ...request.Option) (*elasticache.PurchaseReservedCacheNodesOfferingOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.PurchaseReservedCacheNodesOfferingOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.PurchaseReservedCacheNodesOfferingInput, ...request.Option) *elasticache.PurchaseReservedCacheNodesOfferingOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.PurchaseReservedCacheNodesOfferingOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.PurchaseReservedCacheNodesOfferingInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// RebalanceSlotsInGlobalReplicationGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) RebalanceSlotsInGlobalReplicationGroup(_a0 *elasticache.RebalanceSlotsInGlobalReplicationGroupInput) (*elasticache.RebalanceSlotsInGlobalReplicationGroupOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.RebalanceSlotsInGlobalReplicationGroupOutput - if rf, ok := ret.Get(0).(func(*elasticache.RebalanceSlotsInGlobalReplicationGroupInput) *elasticache.RebalanceSlotsInGlobalReplicationGroupOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.RebalanceSlotsInGlobalReplicationGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.RebalanceSlotsInGlobalReplicationGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// RebalanceSlotsInGlobalReplicationGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) RebalanceSlotsInGlobalReplicationGroupRequest(_a0 *elasticache.RebalanceSlotsInGlobalReplicationGroupInput) (*request.Request, *elasticache.RebalanceSlotsInGlobalReplicationGroupOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.RebalanceSlotsInGlobalReplicationGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.RebalanceSlotsInGlobalReplicationGroupOutput - if rf, ok := ret.Get(1).(func(*elasticache.RebalanceSlotsInGlobalReplicationGroupInput) *elasticache.RebalanceSlotsInGlobalReplicationGroupOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.RebalanceSlotsInGlobalReplicationGroupOutput) - } - } - - return r0, r1 -} - -// RebalanceSlotsInGlobalReplicationGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) RebalanceSlotsInGlobalReplicationGroupWithContext(_a0 context.Context, _a1 *elasticache.RebalanceSlotsInGlobalReplicationGroupInput, _a2 ...request.Option) (*elasticache.RebalanceSlotsInGlobalReplicationGroupOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
- - var r0 *elasticache.RebalanceSlotsInGlobalReplicationGroupOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.RebalanceSlotsInGlobalReplicationGroupInput, ...request.Option) *elasticache.RebalanceSlotsInGlobalReplicationGroupOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.RebalanceSlotsInGlobalReplicationGroupOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.RebalanceSlotsInGlobalReplicationGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// RebootCacheCluster provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) RebootCacheCluster(_a0 *elasticache.RebootCacheClusterInput) (*elasticache.RebootCacheClusterOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.RebootCacheClusterOutput - if rf, ok := ret.Get(0).(func(*elasticache.RebootCacheClusterInput) *elasticache.RebootCacheClusterOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.RebootCacheClusterOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.RebootCacheClusterInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// RebootCacheClusterRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) RebootCacheClusterRequest(_a0 *elasticache.RebootCacheClusterInput) (*request.Request, *elasticache.RebootCacheClusterOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.RebootCacheClusterInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.RebootCacheClusterOutput - if rf, ok := ret.Get(1).(func(*elasticache.RebootCacheClusterInput) *elasticache.RebootCacheClusterOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.RebootCacheClusterOutput) - } - } - - return r0, r1 -} - -// RebootCacheClusterWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) RebootCacheClusterWithContext(_a0 context.Context, _a1 *elasticache.RebootCacheClusterInput, _a2 ...request.Option) (*elasticache.RebootCacheClusterOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.RebootCacheClusterOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.RebootCacheClusterInput, ...request.Option) *elasticache.RebootCacheClusterOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.RebootCacheClusterOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.RebootCacheClusterInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// RemoveTagsFromResource provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) RemoveTagsFromResource(_a0 *elasticache.RemoveTagsFromResourceInput) (*elasticache.TagListMessage, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.TagListMessage - if rf, ok := ret.Get(0).(func(*elasticache.RemoveTagsFromResourceInput) *elasticache.TagListMessage); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.TagListMessage) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.RemoveTagsFromResourceInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// RemoveTagsFromResourceRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) RemoveTagsFromResourceRequest(_a0 *elasticache.RemoveTagsFromResourceInput) (*request.Request, *elasticache.TagListMessage) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.RemoveTagsFromResourceInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.TagListMessage - if rf, ok := ret.Get(1).(func(*elasticache.RemoveTagsFromResourceInput) *elasticache.TagListMessage); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.TagListMessage) - } - } - - return r0, r1 -} - -// RemoveTagsFromResourceWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) RemoveTagsFromResourceWithContext(_a0 context.Context, _a1 *elasticache.RemoveTagsFromResourceInput, _a2 ...request.Option) (*elasticache.TagListMessage, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.TagListMessage - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.RemoveTagsFromResourceInput, ...request.Option) *elasticache.TagListMessage); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.TagListMessage) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.RemoveTagsFromResourceInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ResetCacheParameterGroup provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ResetCacheParameterGroup(_a0 *elasticache.ResetCacheParameterGroupInput) (*elasticache.CacheParameterGroupNameMessage, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.CacheParameterGroupNameMessage - if rf, ok := ret.Get(0).(func(*elasticache.ResetCacheParameterGroupInput) *elasticache.CacheParameterGroupNameMessage); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CacheParameterGroupNameMessage) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.ResetCacheParameterGroupInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ResetCacheParameterGroupRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) ResetCacheParameterGroupRequest(_a0 *elasticache.ResetCacheParameterGroupInput) (*request.Request, *elasticache.CacheParameterGroupNameMessage) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.ResetCacheParameterGroupInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.CacheParameterGroupNameMessage - if rf, ok := ret.Get(1).(func(*elasticache.ResetCacheParameterGroupInput) *elasticache.CacheParameterGroupNameMessage); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.CacheParameterGroupNameMessage) - } - } - - return r0, r1 -} - -// ResetCacheParameterGroupWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) ResetCacheParameterGroupWithContext(_a0 context.Context, _a1 *elasticache.ResetCacheParameterGroupInput, _a2 ...request.Option) (*elasticache.CacheParameterGroupNameMessage, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.CacheParameterGroupNameMessage - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.ResetCacheParameterGroupInput, ...request.Option) *elasticache.CacheParameterGroupNameMessage); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.CacheParameterGroupNameMessage) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.ResetCacheParameterGroupInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// RevokeCacheSecurityGroupIngress provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) RevokeCacheSecurityGroupIngress(_a0 *elasticache.RevokeCacheSecurityGroupIngressInput) (*elasticache.RevokeCacheSecurityGroupIngressOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.RevokeCacheSecurityGroupIngressOutput - if rf, ok := ret.Get(0).(func(*elasticache.RevokeCacheSecurityGroupIngressInput) *elasticache.RevokeCacheSecurityGroupIngressOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.RevokeCacheSecurityGroupIngressOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.RevokeCacheSecurityGroupIngressInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// RevokeCacheSecurityGroupIngressRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) RevokeCacheSecurityGroupIngressRequest(_a0 *elasticache.RevokeCacheSecurityGroupIngressInput) (*request.Request, *elasticache.RevokeCacheSecurityGroupIngressOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.RevokeCacheSecurityGroupIngressInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.RevokeCacheSecurityGroupIngressOutput - if rf, ok := ret.Get(1).(func(*elasticache.RevokeCacheSecurityGroupIngressInput) *elasticache.RevokeCacheSecurityGroupIngressOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.RevokeCacheSecurityGroupIngressOutput) - } - } - - return r0, r1 -} - -// RevokeCacheSecurityGroupIngressWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) RevokeCacheSecurityGroupIngressWithContext(_a0 context.Context, _a1 *elasticache.RevokeCacheSecurityGroupIngressInput, _a2 ...request.Option) (*elasticache.RevokeCacheSecurityGroupIngressOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.RevokeCacheSecurityGroupIngressOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.RevokeCacheSecurityGroupIngressInput, ...request.Option) *elasticache.RevokeCacheSecurityGroupIngressOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.RevokeCacheSecurityGroupIngressOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.RevokeCacheSecurityGroupIngressInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// StartMigration provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) StartMigration(_a0 *elasticache.StartMigrationInput) (*elasticache.StartMigrationOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.StartMigrationOutput - if rf, ok := ret.Get(0).(func(*elasticache.StartMigrationInput) *elasticache.StartMigrationOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.StartMigrationOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.StartMigrationInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// StartMigrationRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) StartMigrationRequest(_a0 *elasticache.StartMigrationInput) (*request.Request, *elasticache.StartMigrationOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.StartMigrationInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.StartMigrationOutput - if rf, ok := ret.Get(1).(func(*elasticache.StartMigrationInput) *elasticache.StartMigrationOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.StartMigrationOutput) - } - } - - return r0, r1 -} - -// StartMigrationWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) StartMigrationWithContext(_a0 context.Context, _a1 *elasticache.StartMigrationInput, _a2 ...request.Option) (*elasticache.StartMigrationOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.StartMigrationOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.StartMigrationInput, ...request.Option) *elasticache.StartMigrationOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.StartMigrationOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.StartMigrationInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// TestFailover provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) TestFailover(_a0 *elasticache.TestFailoverInput) (*elasticache.TestFailoverOutput, error) { - ret := _m.Called(_a0) - - var r0 *elasticache.TestFailoverOutput - if rf, ok := ret.Get(0).(func(*elasticache.TestFailoverInput) *elasticache.TestFailoverOutput); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.TestFailoverOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(*elasticache.TestFailoverInput) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// TestFailoverRequest provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) TestFailoverRequest(_a0 *elasticache.TestFailoverInput) (*request.Request, *elasticache.TestFailoverOutput) { - ret := _m.Called(_a0) - - var r0 *request.Request - if rf, ok := ret.Get(0).(func(*elasticache.TestFailoverInput) *request.Request); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*request.Request) - } - } - - var r1 *elasticache.TestFailoverOutput - if rf, ok := ret.Get(1).(func(*elasticache.TestFailoverInput) *elasticache.TestFailoverOutput); ok { - r1 = rf(_a0) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*elasticache.TestFailoverOutput) - } - } - - return r0, r1 -} - -// TestFailoverWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) TestFailoverWithContext(_a0 context.Context, _a1 *elasticache.TestFailoverInput, _a2 ...request.Option) (*elasticache.TestFailoverOutput, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *elasticache.TestFailoverOutput - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.TestFailoverInput, ...request.Option) *elasticache.TestFailoverOutput); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*elasticache.TestFailoverOutput) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *elasticache.TestFailoverInput, ...request.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// WaitUntilCacheClusterAvailable provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) WaitUntilCacheClusterAvailable(_a0 *elasticache.DescribeCacheClustersInput) error { - ret := _m.Called(_a0) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheClustersInput) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// WaitUntilCacheClusterAvailableWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) WaitUntilCacheClusterAvailableWithContext(_a0 context.Context, _a1 *elasticache.DescribeCacheClustersInput, _a2 ...request.WaiterOption) error { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheClustersInput, ...request.WaiterOption) error); ok { - r0 = rf(_a0, _a1, _a2...) 
- } else { - r0 = ret.Error(0) - } - - return r0 -} - -// WaitUntilCacheClusterDeleted provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) WaitUntilCacheClusterDeleted(_a0 *elasticache.DescribeCacheClustersInput) error { - ret := _m.Called(_a0) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeCacheClustersInput) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// WaitUntilCacheClusterDeletedWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) WaitUntilCacheClusterDeletedWithContext(_a0 context.Context, _a1 *elasticache.DescribeCacheClustersInput, _a2 ...request.WaiterOption) error { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeCacheClustersInput, ...request.WaiterOption) error); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// WaitUntilReplicationGroupAvailable provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) WaitUntilReplicationGroupAvailable(_a0 *elasticache.DescribeReplicationGroupsInput) error { - ret := _m.Called(_a0) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeReplicationGroupsInput) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// WaitUntilReplicationGroupAvailableWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) WaitUntilReplicationGroupAvailableWithContext(_a0 context.Context, _a1 *elasticache.DescribeReplicationGroupsInput, _a2 ...request.WaiterOption) error { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeReplicationGroupsInput, ...request.WaiterOption) error); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// WaitUntilReplicationGroupDeleted provides a mock function with given fields: _a0 -func (_m *ElastiCacheAPI) WaitUntilReplicationGroupDeleted(_a0 *elasticache.DescribeReplicationGroupsInput) error { - ret := _m.Called(_a0) - - var r0 error - if rf, ok := ret.Get(0).(func(*elasticache.DescribeReplicationGroupsInput) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// WaitUntilReplicationGroupDeletedWithContext provides a mock function with given fields: _a0, _a1, _a2 -func (_m *ElastiCacheAPI) WaitUntilReplicationGroupDeletedWithContext(_a0 context.Context, _a1 *elasticache.DescribeReplicationGroupsInput, _a2 ...request.WaiterOption) error { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *elasticache.DescribeReplicationGroupsInput, ...request.WaiterOption) error); ok { - r0 = rf(_a0, _a1, _a2...) 
- } else { - r0 = ret.Error(0) - } - - return r0 -} diff --git a/pkg/resource/cache_cluster/delta.go b/pkg/resource/cache_cluster/delta.go new file mode 100644 index 00000000..f7a32a5a --- /dev/null +++ b/pkg/resource/cache_cluster/delta.go @@ -0,0 +1,268 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package cache_cluster + +import ( + "bytes" + "reflect" + + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" +) + +// Hack to avoid import errors during build... +var ( + _ = &bytes.Buffer{} + _ = &reflect.Method{} + _ = &acktags.Tags{} +) + +// newResourceDelta returns a new `ackcompare.Delta` used to compare two +// resources +func newResourceDelta( + a *resource, + b *resource, +) *ackcompare.Delta { + delta := ackcompare.NewDelta() + if (a == nil && b != nil) || + (a != nil && b == nil) { + delta.Add("", a, b) + return delta + } + + if ackcompare.HasNilDifference(a.ko.Spec.AZMode, b.ko.Spec.AZMode) { + delta.Add("Spec.AZMode", a.ko.Spec.AZMode, b.ko.Spec.AZMode) + } else if a.ko.Spec.AZMode != nil && b.ko.Spec.AZMode != nil { + if *a.ko.Spec.AZMode != *b.ko.Spec.AZMode { + delta.Add("Spec.AZMode", a.ko.Spec.AZMode, b.ko.Spec.AZMode) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.AuthToken, b.ko.Spec.AuthToken) { + delta.Add("Spec.AuthToken", a.ko.Spec.AuthToken, b.ko.Spec.AuthToken) + } else if a.ko.Spec.AuthToken != nil && b.ko.Spec.AuthToken != nil { + if *a.ko.Spec.AuthToken != *b.ko.Spec.AuthToken { + delta.Add("Spec.AuthToken", a.ko.Spec.AuthToken, b.ko.Spec.AuthToken) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.AutoMinorVersionUpgrade, b.ko.Spec.AutoMinorVersionUpgrade) { + delta.Add("Spec.AutoMinorVersionUpgrade", a.ko.Spec.AutoMinorVersionUpgrade, b.ko.Spec.AutoMinorVersionUpgrade) + } else if a.ko.Spec.AutoMinorVersionUpgrade != nil && b.ko.Spec.AutoMinorVersionUpgrade != nil { + if *a.ko.Spec.AutoMinorVersionUpgrade != *b.ko.Spec.AutoMinorVersionUpgrade { + delta.Add("Spec.AutoMinorVersionUpgrade", a.ko.Spec.AutoMinorVersionUpgrade, b.ko.Spec.AutoMinorVersionUpgrade) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.CacheClusterID, b.ko.Spec.CacheClusterID) { + delta.Add("Spec.CacheClusterID", a.ko.Spec.CacheClusterID, b.ko.Spec.CacheClusterID) + } else if a.ko.Spec.CacheClusterID != nil && b.ko.Spec.CacheClusterID != nil { + if *a.ko.Spec.CacheClusterID != *b.ko.Spec.CacheClusterID { + delta.Add("Spec.CacheClusterID", a.ko.Spec.CacheClusterID, b.ko.Spec.CacheClusterID) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.CacheNodeType, b.ko.Spec.CacheNodeType) { + delta.Add("Spec.CacheNodeType", a.ko.Spec.CacheNodeType, b.ko.Spec.CacheNodeType) + } else if a.ko.Spec.CacheNodeType != nil && b.ko.Spec.CacheNodeType != nil { + if *a.ko.Spec.CacheNodeType != *b.ko.Spec.CacheNodeType { + delta.Add("Spec.CacheNodeType", a.ko.Spec.CacheNodeType, b.ko.Spec.CacheNodeType) + } + } + if 
ackcompare.HasNilDifference(a.ko.Spec.CacheParameterGroupName, b.ko.Spec.CacheParameterGroupName) { + delta.Add("Spec.CacheParameterGroupName", a.ko.Spec.CacheParameterGroupName, b.ko.Spec.CacheParameterGroupName) + } else if a.ko.Spec.CacheParameterGroupName != nil && b.ko.Spec.CacheParameterGroupName != nil { + if *a.ko.Spec.CacheParameterGroupName != *b.ko.Spec.CacheParameterGroupName { + delta.Add("Spec.CacheParameterGroupName", a.ko.Spec.CacheParameterGroupName, b.ko.Spec.CacheParameterGroupName) + } + } + if !reflect.DeepEqual(a.ko.Spec.CacheParameterGroupRef, b.ko.Spec.CacheParameterGroupRef) { + delta.Add("Spec.CacheParameterGroupRef", a.ko.Spec.CacheParameterGroupRef, b.ko.Spec.CacheParameterGroupRef) + } + if len(a.ko.Spec.CacheSecurityGroupNames) != len(b.ko.Spec.CacheSecurityGroupNames) { + delta.Add("Spec.CacheSecurityGroupNames", a.ko.Spec.CacheSecurityGroupNames, b.ko.Spec.CacheSecurityGroupNames) + } else if len(a.ko.Spec.CacheSecurityGroupNames) > 0 { + if !ackcompare.SliceStringPEqual(a.ko.Spec.CacheSecurityGroupNames, b.ko.Spec.CacheSecurityGroupNames) { + delta.Add("Spec.CacheSecurityGroupNames", a.ko.Spec.CacheSecurityGroupNames, b.ko.Spec.CacheSecurityGroupNames) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.CacheSubnetGroupName, b.ko.Spec.CacheSubnetGroupName) { + delta.Add("Spec.CacheSubnetGroupName", a.ko.Spec.CacheSubnetGroupName, b.ko.Spec.CacheSubnetGroupName) + } else if a.ko.Spec.CacheSubnetGroupName != nil && b.ko.Spec.CacheSubnetGroupName != nil { + if *a.ko.Spec.CacheSubnetGroupName != *b.ko.Spec.CacheSubnetGroupName { + delta.Add("Spec.CacheSubnetGroupName", a.ko.Spec.CacheSubnetGroupName, b.ko.Spec.CacheSubnetGroupName) + } + } + if !reflect.DeepEqual(a.ko.Spec.CacheSubnetGroupRef, b.ko.Spec.CacheSubnetGroupRef) { + delta.Add("Spec.CacheSubnetGroupRef", a.ko.Spec.CacheSubnetGroupRef, b.ko.Spec.CacheSubnetGroupRef) + } + if ackcompare.HasNilDifference(a.ko.Spec.Engine, b.ko.Spec.Engine) { + delta.Add("Spec.Engine", a.ko.Spec.Engine, b.ko.Spec.Engine) + } else if a.ko.Spec.Engine != nil && b.ko.Spec.Engine != nil { + if *a.ko.Spec.Engine != *b.ko.Spec.Engine { + delta.Add("Spec.Engine", a.ko.Spec.Engine, b.ko.Spec.Engine) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.EngineVersion, b.ko.Spec.EngineVersion) { + delta.Add("Spec.EngineVersion", a.ko.Spec.EngineVersion, b.ko.Spec.EngineVersion) + } else if a.ko.Spec.EngineVersion != nil && b.ko.Spec.EngineVersion != nil { + if *a.ko.Spec.EngineVersion != *b.ko.Spec.EngineVersion { + delta.Add("Spec.EngineVersion", a.ko.Spec.EngineVersion, b.ko.Spec.EngineVersion) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.IPDiscovery, b.ko.Spec.IPDiscovery) { + delta.Add("Spec.IPDiscovery", a.ko.Spec.IPDiscovery, b.ko.Spec.IPDiscovery) + } else if a.ko.Spec.IPDiscovery != nil && b.ko.Spec.IPDiscovery != nil { + if *a.ko.Spec.IPDiscovery != *b.ko.Spec.IPDiscovery { + delta.Add("Spec.IPDiscovery", a.ko.Spec.IPDiscovery, b.ko.Spec.IPDiscovery) + } + } + if len(a.ko.Spec.LogDeliveryConfigurations) != len(b.ko.Spec.LogDeliveryConfigurations) { + delta.Add("Spec.LogDeliveryConfigurations", a.ko.Spec.LogDeliveryConfigurations, b.ko.Spec.LogDeliveryConfigurations) + } else if len(a.ko.Spec.LogDeliveryConfigurations) > 0 { + if !reflect.DeepEqual(a.ko.Spec.LogDeliveryConfigurations, b.ko.Spec.LogDeliveryConfigurations) { + delta.Add("Spec.LogDeliveryConfigurations", a.ko.Spec.LogDeliveryConfigurations, b.ko.Spec.LogDeliveryConfigurations) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.NetworkType, 
b.ko.Spec.NetworkType) { + delta.Add("Spec.NetworkType", a.ko.Spec.NetworkType, b.ko.Spec.NetworkType) + } else if a.ko.Spec.NetworkType != nil && b.ko.Spec.NetworkType != nil { + if *a.ko.Spec.NetworkType != *b.ko.Spec.NetworkType { + delta.Add("Spec.NetworkType", a.ko.Spec.NetworkType, b.ko.Spec.NetworkType) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.NotificationTopicARN, b.ko.Spec.NotificationTopicARN) { + delta.Add("Spec.NotificationTopicARN", a.ko.Spec.NotificationTopicARN, b.ko.Spec.NotificationTopicARN) + } else if a.ko.Spec.NotificationTopicARN != nil && b.ko.Spec.NotificationTopicARN != nil { + if *a.ko.Spec.NotificationTopicARN != *b.ko.Spec.NotificationTopicARN { + delta.Add("Spec.NotificationTopicARN", a.ko.Spec.NotificationTopicARN, b.ko.Spec.NotificationTopicARN) + } + } + if !reflect.DeepEqual(a.ko.Spec.NotificationTopicRef, b.ko.Spec.NotificationTopicRef) { + delta.Add("Spec.NotificationTopicRef", a.ko.Spec.NotificationTopicRef, b.ko.Spec.NotificationTopicRef) + } + if ackcompare.HasNilDifference(a.ko.Spec.NumCacheNodes, b.ko.Spec.NumCacheNodes) { + delta.Add("Spec.NumCacheNodes", a.ko.Spec.NumCacheNodes, b.ko.Spec.NumCacheNodes) + } else if a.ko.Spec.NumCacheNodes != nil && b.ko.Spec.NumCacheNodes != nil { + if *a.ko.Spec.NumCacheNodes != *b.ko.Spec.NumCacheNodes { + delta.Add("Spec.NumCacheNodes", a.ko.Spec.NumCacheNodes, b.ko.Spec.NumCacheNodes) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.OutpostMode, b.ko.Spec.OutpostMode) { + delta.Add("Spec.OutpostMode", a.ko.Spec.OutpostMode, b.ko.Spec.OutpostMode) + } else if a.ko.Spec.OutpostMode != nil && b.ko.Spec.OutpostMode != nil { + if *a.ko.Spec.OutpostMode != *b.ko.Spec.OutpostMode { + delta.Add("Spec.OutpostMode", a.ko.Spec.OutpostMode, b.ko.Spec.OutpostMode) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.Port, b.ko.Spec.Port) { + delta.Add("Spec.Port", a.ko.Spec.Port, b.ko.Spec.Port) + } else if a.ko.Spec.Port != nil && b.ko.Spec.Port != nil { + if *a.ko.Spec.Port != *b.ko.Spec.Port { + delta.Add("Spec.Port", a.ko.Spec.Port, b.ko.Spec.Port) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.PreferredAvailabilityZone, b.ko.Spec.PreferredAvailabilityZone) { + delta.Add("Spec.PreferredAvailabilityZone", a.ko.Spec.PreferredAvailabilityZone, b.ko.Spec.PreferredAvailabilityZone) + } else if a.ko.Spec.PreferredAvailabilityZone != nil && b.ko.Spec.PreferredAvailabilityZone != nil { + if *a.ko.Spec.PreferredAvailabilityZone != *b.ko.Spec.PreferredAvailabilityZone { + delta.Add("Spec.PreferredAvailabilityZone", a.ko.Spec.PreferredAvailabilityZone, b.ko.Spec.PreferredAvailabilityZone) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.PreferredMaintenanceWindow, b.ko.Spec.PreferredMaintenanceWindow) { + delta.Add("Spec.PreferredMaintenanceWindow", a.ko.Spec.PreferredMaintenanceWindow, b.ko.Spec.PreferredMaintenanceWindow) + } else if a.ko.Spec.PreferredMaintenanceWindow != nil && b.ko.Spec.PreferredMaintenanceWindow != nil { + if *a.ko.Spec.PreferredMaintenanceWindow != *b.ko.Spec.PreferredMaintenanceWindow { + delta.Add("Spec.PreferredMaintenanceWindow", a.ko.Spec.PreferredMaintenanceWindow, b.ko.Spec.PreferredMaintenanceWindow) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.PreferredOutpostARN, b.ko.Spec.PreferredOutpostARN) { + delta.Add("Spec.PreferredOutpostARN", a.ko.Spec.PreferredOutpostARN, b.ko.Spec.PreferredOutpostARN) + } else if a.ko.Spec.PreferredOutpostARN != nil && b.ko.Spec.PreferredOutpostARN != nil { + if *a.ko.Spec.PreferredOutpostARN != *b.ko.Spec.PreferredOutpostARN { + 
delta.Add("Spec.PreferredOutpostARN", a.ko.Spec.PreferredOutpostARN, b.ko.Spec.PreferredOutpostARN) + } + } + if len(a.ko.Spec.PreferredOutpostARNs) != len(b.ko.Spec.PreferredOutpostARNs) { + delta.Add("Spec.PreferredOutpostARNs", a.ko.Spec.PreferredOutpostARNs, b.ko.Spec.PreferredOutpostARNs) + } else if len(a.ko.Spec.PreferredOutpostARNs) > 0 { + if !ackcompare.SliceStringPEqual(a.ko.Spec.PreferredOutpostARNs, b.ko.Spec.PreferredOutpostARNs) { + delta.Add("Spec.PreferredOutpostARNs", a.ko.Spec.PreferredOutpostARNs, b.ko.Spec.PreferredOutpostARNs) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.ReplicationGroupID, b.ko.Spec.ReplicationGroupID) { + delta.Add("Spec.ReplicationGroupID", a.ko.Spec.ReplicationGroupID, b.ko.Spec.ReplicationGroupID) + } else if a.ko.Spec.ReplicationGroupID != nil && b.ko.Spec.ReplicationGroupID != nil { + if *a.ko.Spec.ReplicationGroupID != *b.ko.Spec.ReplicationGroupID { + delta.Add("Spec.ReplicationGroupID", a.ko.Spec.ReplicationGroupID, b.ko.Spec.ReplicationGroupID) + } + } + if !reflect.DeepEqual(a.ko.Spec.ReplicationGroupRef, b.ko.Spec.ReplicationGroupRef) { + delta.Add("Spec.ReplicationGroupRef", a.ko.Spec.ReplicationGroupRef, b.ko.Spec.ReplicationGroupRef) + } + if len(a.ko.Spec.SecurityGroupIDs) != len(b.ko.Spec.SecurityGroupIDs) { + delta.Add("Spec.SecurityGroupIDs", a.ko.Spec.SecurityGroupIDs, b.ko.Spec.SecurityGroupIDs) + } else if len(a.ko.Spec.SecurityGroupIDs) > 0 { + if !ackcompare.SliceStringPEqual(a.ko.Spec.SecurityGroupIDs, b.ko.Spec.SecurityGroupIDs) { + delta.Add("Spec.SecurityGroupIDs", a.ko.Spec.SecurityGroupIDs, b.ko.Spec.SecurityGroupIDs) + } + } + if !reflect.DeepEqual(a.ko.Spec.SecurityGroupRefs, b.ko.Spec.SecurityGroupRefs) { + delta.Add("Spec.SecurityGroupRefs", a.ko.Spec.SecurityGroupRefs, b.ko.Spec.SecurityGroupRefs) + } + if len(a.ko.Spec.SnapshotARNs) != len(b.ko.Spec.SnapshotARNs) { + delta.Add("Spec.SnapshotARNs", a.ko.Spec.SnapshotARNs, b.ko.Spec.SnapshotARNs) + } else if len(a.ko.Spec.SnapshotARNs) > 0 { + if !ackcompare.SliceStringPEqual(a.ko.Spec.SnapshotARNs, b.ko.Spec.SnapshotARNs) { + delta.Add("Spec.SnapshotARNs", a.ko.Spec.SnapshotARNs, b.ko.Spec.SnapshotARNs) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.SnapshotName, b.ko.Spec.SnapshotName) { + delta.Add("Spec.SnapshotName", a.ko.Spec.SnapshotName, b.ko.Spec.SnapshotName) + } else if a.ko.Spec.SnapshotName != nil && b.ko.Spec.SnapshotName != nil { + if *a.ko.Spec.SnapshotName != *b.ko.Spec.SnapshotName { + delta.Add("Spec.SnapshotName", a.ko.Spec.SnapshotName, b.ko.Spec.SnapshotName) + } + } + if !reflect.DeepEqual(a.ko.Spec.SnapshotRef, b.ko.Spec.SnapshotRef) { + delta.Add("Spec.SnapshotRef", a.ko.Spec.SnapshotRef, b.ko.Spec.SnapshotRef) + } + if ackcompare.HasNilDifference(a.ko.Spec.SnapshotRetentionLimit, b.ko.Spec.SnapshotRetentionLimit) { + delta.Add("Spec.SnapshotRetentionLimit", a.ko.Spec.SnapshotRetentionLimit, b.ko.Spec.SnapshotRetentionLimit) + } else if a.ko.Spec.SnapshotRetentionLimit != nil && b.ko.Spec.SnapshotRetentionLimit != nil { + if *a.ko.Spec.SnapshotRetentionLimit != *b.ko.Spec.SnapshotRetentionLimit { + delta.Add("Spec.SnapshotRetentionLimit", a.ko.Spec.SnapshotRetentionLimit, b.ko.Spec.SnapshotRetentionLimit) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.SnapshotWindow, b.ko.Spec.SnapshotWindow) { + delta.Add("Spec.SnapshotWindow", a.ko.Spec.SnapshotWindow, b.ko.Spec.SnapshotWindow) + } else if a.ko.Spec.SnapshotWindow != nil && b.ko.Spec.SnapshotWindow != nil { + if *a.ko.Spec.SnapshotWindow != *b.ko.Spec.SnapshotWindow { + 
delta.Add("Spec.SnapshotWindow", a.ko.Spec.SnapshotWindow, b.ko.Spec.SnapshotWindow) + } + } + desiredACKTags, _ := convertToOrderedACKTags(a.ko.Spec.Tags) + latestACKTags, _ := convertToOrderedACKTags(b.ko.Spec.Tags) + if !ackcompare.MapStringStringEqual(desiredACKTags, latestACKTags) { + delta.Add("Spec.Tags", a.ko.Spec.Tags, b.ko.Spec.Tags) + } + if ackcompare.HasNilDifference(a.ko.Spec.TransitEncryptionEnabled, b.ko.Spec.TransitEncryptionEnabled) { + delta.Add("Spec.TransitEncryptionEnabled", a.ko.Spec.TransitEncryptionEnabled, b.ko.Spec.TransitEncryptionEnabled) + } else if a.ko.Spec.TransitEncryptionEnabled != nil && b.ko.Spec.TransitEncryptionEnabled != nil { + if *a.ko.Spec.TransitEncryptionEnabled != *b.ko.Spec.TransitEncryptionEnabled { + delta.Add("Spec.TransitEncryptionEnabled", a.ko.Spec.TransitEncryptionEnabled, b.ko.Spec.TransitEncryptionEnabled) + } + } + + modifyDelta(delta, a, b) + return delta +} diff --git a/pkg/resource/cache_cluster/delta_util.go b/pkg/resource/cache_cluster/delta_util.go new file mode 100644 index 00000000..12a84867 --- /dev/null +++ b/pkg/resource/cache_cluster/delta_util.go @@ -0,0 +1,67 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package cache_cluster + +import ( + "encoding/json" + "reflect" + + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + + "github.com/aws-controllers-k8s/elasticache-controller/pkg/common" + "github.com/aws-controllers-k8s/elasticache-controller/pkg/util" +) + +// modifyDelta removes non-meaningful differences from the delta and adds additional differences if necessary. +func modifyDelta( + delta *ackcompare.Delta, + desired *resource, + latest *resource, +) { + if delta.DifferentAt("Spec.EngineVersion") && desired.ko.Spec.EngineVersion != nil && latest.ko.Spec.EngineVersion != nil && + util.EngineVersionsMatch(*desired.ko.Spec.EngineVersion, *latest.ko.Spec.EngineVersion) { + common.RemoveFromDelta(delta, "Spec.EngineVersion") + // TODO: handle the case of a nil difference (especially when desired EV is nil) + } + + // if server has given PreferredMaintenanceWindow a default value, no action needs to be taken. + if delta.DifferentAt("Spec.PreferredMaintenanceWindow") && desired.ko.Spec.PreferredMaintenanceWindow == nil && + latest.ko.Spec.PreferredMaintenanceWindow != nil { + common.RemoveFromDelta(delta, "Spec.PreferredMaintenanceWindow") + } + + if delta.DifferentAt("Spec.PreferredAvailabilityZone") && desired.ko.Spec.PreferredAvailabilityZone == nil && + latest.ko.Spec.PreferredAvailabilityZone != nil { + common.RemoveFromDelta(delta, "Spec.PreferredAvailabilityZone") + } + + updatePAZsDelta(desired, delta) +} + +// updatePAZsDelta retrieves the last requested configurations saved in annotations and compares them +// to the current desired configurations. If a diff is found, it adds it to delta. 
+func updatePAZsDelta(desired *resource, delta *ackcompare.Delta) { + var lastRequestedPAZs []*string + unmarshalAnnotation(desired, AnnotationLastRequestedPAZs, &lastRequestedPAZs) + if !reflect.DeepEqual(desired.ko.Spec.PreferredAvailabilityZones, lastRequestedPAZs) { + delta.Add("Spec.PreferredAvailabilityZones", desired.ko.Spec.PreferredAvailabilityZones, + lastRequestedPAZs) + } +} + +func unmarshalAnnotation(desired *resource, annotation string, val interface{}) { + if data, ok := desired.ko.ObjectMeta.GetAnnotations()[annotation]; ok { + _ = json.Unmarshal([]byte(data), val) + } +} diff --git a/pkg/resource/cache_cluster/descriptor.go b/pkg/resource/cache_cluster/descriptor.go new file mode 100644 index 00000000..6b048972 --- /dev/null +++ b/pkg/resource/cache_cluster/descriptor.go @@ -0,0 +1,155 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package cache_cluster + +import ( + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + rtclient "sigs.k8s.io/controller-runtime/pkg/client" + k8sctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" +) + +const ( + FinalizerString = "finalizers.elasticache.services.k8s.aws/CacheCluster" +) + +var ( + GroupVersionResource = svcapitypes.GroupVersion.WithResource("cacheclusters") + GroupKind = metav1.GroupKind{ + Group: "elasticache.services.k8s.aws", + Kind: "CacheCluster", + } +) + +// resourceDescriptor implements the +// `aws-service-operator-k8s/pkg/types.AWSResourceDescriptor` interface +type resourceDescriptor struct { +} + +// GroupVersionKind returns a Kubernetes schema.GroupVersionKind struct that +// describes the API Group, Version and Kind of CRs described by the descriptor +func (d *resourceDescriptor) GroupVersionKind() schema.GroupVersionKind { + return svcapitypes.GroupVersion.WithKind(GroupKind.Kind) +} + +// EmptyRuntimeObject returns an empty object prototype that may be used in +// apimachinery and k8s client operations +func (d *resourceDescriptor) EmptyRuntimeObject() rtclient.Object { + return &svcapitypes.CacheCluster{} +} + +// ResourceFromRuntimeObject returns an AWSResource that has been initialized +// with the supplied runtime.Object +func (d *resourceDescriptor) ResourceFromRuntimeObject( + obj rtclient.Object, +) acktypes.AWSResource { + return &resource{ + ko: obj.(*svcapitypes.CacheCluster), + } +} + +// Delta returns an `ackcompare.Delta` object containing the difference between +// one `AWSResource` and another. 
+func (d *resourceDescriptor) Delta(a, b acktypes.AWSResource) *ackcompare.Delta { + return newResourceDelta(a.(*resource), b.(*resource)) +} + +// IsManaged returns true if the supplied AWSResource is under the management +// of an ACK service controller. What this means in practice is that the +// underlying custom resource (CR) in the AWSResource has had a +// resource-specific finalizer associated with it. +func (d *resourceDescriptor) IsManaged( + res acktypes.AWSResource, +) bool { + obj := res.RuntimeObject() + if obj == nil { + // Should not happen. If it does, there is a bug in the code + panic("nil RuntimeMetaObject in AWSResource") + } + // Remove use of custom code once + // https://github.com/kubernetes-sigs/controller-runtime/issues/994 is + // fixed. This should be able to be: + // + // return k8sctrlutil.ContainsFinalizer(obj, FinalizerString) + return containsFinalizer(obj, FinalizerString) +} + +// Remove once https://github.com/kubernetes-sigs/controller-runtime/issues/994 +// is fixed. +func containsFinalizer(obj rtclient.Object, finalizer string) bool { + f := obj.GetFinalizers() + for _, e := range f { + if e == finalizer { + return true + } + } + return false +} + +// MarkManaged places the supplied resource under the management of ACK. What +// this typically means is that the resource manager will decorate the +// underlying custom resource (CR) with a finalizer that indicates ACK is +// managing the resource and the underlying CR may not be deleted until ACK is +// finished cleaning up any backend AWS service resources associated with the +// CR. +func (d *resourceDescriptor) MarkManaged( + res acktypes.AWSResource, +) { + obj := res.RuntimeObject() + if obj == nil { + // Should not happen. If it does, there is a bug in the code + panic("nil RuntimeMetaObject in AWSResource") + } + k8sctrlutil.AddFinalizer(obj, FinalizerString) +} + +// MarkUnmanaged removes the supplied resource from management by ACK. What +// this typically means is that the resource manager will remove a finalizer +// underlying custom resource (CR) that indicates ACK is managing the resource. +// This will allow the Kubernetes API server to delete the underlying CR. +func (d *resourceDescriptor) MarkUnmanaged( + res acktypes.AWSResource, +) { + obj := res.RuntimeObject() + if obj == nil { + // Should not happen. If it does, there is a bug in the code + panic("nil RuntimeMetaObject in AWSResource") + } + k8sctrlutil.RemoveFinalizer(obj, FinalizerString) +} + +// MarkAdopted places descriptors on the custom resource that indicate the +// resource was not created from within ACK. +func (d *resourceDescriptor) MarkAdopted( + res acktypes.AWSResource, +) { + obj := res.RuntimeObject() + if obj == nil { + // Should not happen. If it does, there is a bug in the code + panic("nil RuntimeObject in AWSResource") + } + curr := obj.GetAnnotations() + if curr == nil { + curr = make(map[string]string) + } + curr[ackv1alpha1.AnnotationAdopted] = "true" + obj.SetAnnotations(curr) +} diff --git a/pkg/resource/cache_cluster/hooks.go b/pkg/resource/cache_cluster/hooks.go new file mode 100644 index 00000000..77082cdc --- /dev/null +++ b/pkg/resource/cache_cluster/hooks.go @@ -0,0 +1,212 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package cache_cluster + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "slices" + + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" + "github.com/aws-controllers-k8s/elasticache-controller/pkg/util" +) + +const ( + statusCreating = "creating" + statusAvailable = "available" + statusModifying = "modifying" + statusDeleting = "deleting" +) + +const ( + // AnnotationLastRequestedPAZs is an annotation whose value is a JSON representation of []*string, + // passed in as input to either the create or modify API called most recently. + AnnotationLastRequestedPAZs = svcapitypes.AnnotationPrefix + "last-requested-preferred-availability-zones" +) + +var ( + condMsgCurrentlyDeleting = "CacheCluster is currently being deleted" + condMsgNoDeleteWhileModifying = "Cannot delete CacheCluster while it is being modified" +) + +var ( + requeueWaitWhileDeleting = ackrequeue.NeededAfter( + fmt.Errorf("CacheCluster is in %q state, it cannot be deleted", statusDeleting), + ackrequeue.DefaultRequeueAfterDuration, + ) + requeueWaitWhileModifying = ackrequeue.NeededAfter( + fmt.Errorf("CacheCluster is in %q state, it cannot be modified", statusModifying), + ackrequeue.DefaultRequeueAfterDuration, + ) +) + +func hasStatus(r *resource, status string) bool { + return r.ko.Status.CacheClusterStatus != nil && *r.ko.Status.CacheClusterStatus == status +} + +func isCreating(r *resource) bool { + return hasStatus(r, statusCreating) +} + +func isAvailable(r *resource) bool { + return hasStatus(r, statusAvailable) +} + +func isDeleting(r *resource) bool { + return hasStatus(r, statusDeleting) +} + +func isModifying(r *resource) bool { + return hasStatus(r, statusModifying) +} + +// getTags retrieves the resource's associated tags. +func (rm *resourceManager) getTags( + ctx context.Context, + resourceARN string, +) ([]*svcapitypes.Tag, error) { + return util.GetTags(ctx, rm.sdkapi, rm.metrics, resourceARN) +} + +// syncTags keeps the resource's tags in sync. 
+func (rm *resourceManager) syncTags(
+	ctx context.Context,
+	desired *resource,
+	latest *resource,
+) (err error) {
+	return util.SyncTags(ctx, desired.ko.Spec.Tags, latest.ko.Spec.Tags, latest.ko.Status.ACKResourceMetadata, convertToOrderedACKTags, rm.sdkapi, rm.metrics)
+}
+
+func (rm *resourceManager) updateCacheClusterPayload(input *svcsdk.ModifyCacheClusterInput, desired, latest *resource, delta *ackcompare.Delta) error {
+	desiredSpec := desired.ko.Spec
+	var nodesDelta int64
+	if delta.DifferentAt("Spec.NumCacheNodes") && desired.ko.Spec.NumCacheNodes != nil {
+		numNodes := *latest.ko.Spec.NumCacheNodes
+		if pendingModifications := latest.ko.Status.PendingModifiedValues; pendingModifications != nil &&
+			pendingModifications.NumCacheNodes != nil && *pendingModifications.NumCacheNodes > numNodes {
+			numNodes = *pendingModifications.NumCacheNodes
+		}
+		nodesDelta = numNodes - *desired.ko.Spec.NumCacheNodes
+		if nodesDelta > 0 {
+			for i := numNodes; i > numNodes-nodesDelta; i-- {
+				nodeID := fmt.Sprintf("%04d", i)
+				input.CacheNodeIdsToRemove = append(input.CacheNodeIdsToRemove, nodeID)
+			}
+		}
+	}
+
+	if idx := slices.IndexFunc(delta.Differences, func(diff *ackcompare.Difference) bool {
+		return diff.Path.Contains("Spec.PreferredAvailabilityZones")
+	}); idx != -1 && desired.ko.Spec.PreferredAvailabilityZones != nil {
+		if nodesDelta >= 0 {
+			return errors.New("spec.preferredAvailabilityZones can only be changed when new nodes are being added via spec.numCacheNodes")
+		}
+
+		oldAZsLen := 0
+		oldValues, ok := delta.Differences[idx].B.([]*string)
+		if ok {
+			oldAZsLen = len(oldValues)
+		}
+		if len(desiredSpec.PreferredAvailabilityZones) <= oldAZsLen {
+			return errors.New("newly specified AZs in spec.preferredAvailabilityZones must match the number of cache nodes being added")
+		}
+		// Only the newly appended AZs (those beyond the previously requested ones) are sent to the API.
+		preferredAvailability := make([]string, 0, len(desiredSpec.PreferredAvailabilityZones[oldAZsLen:]))
+		for _, az := range desiredSpec.PreferredAvailabilityZones[oldAZsLen:] {
+			if az != nil {
+				preferredAvailability = append(preferredAvailability, *az)
+			}
+		}
+		input.NewAvailabilityZones = preferredAvailability
+	}
+	return nil
+}
+
+func (rm *resourceManager) customCreateCacheClusterSetOutput(
+	_ context.Context,
+	r *resource,
+	_ *svcsdk.CreateCacheClusterOutput,
+	ko *svcapitypes.CacheCluster,
+) (*svcapitypes.CacheCluster, error) {
+	rm.setAnnotationsFields(r, ko)
+	return ko, nil
+}
+
+func (rm *resourceManager) customModifyCacheClusterSetOutput(
+	_ context.Context,
+	r *resource,
+	_ *svcsdk.ModifyCacheClusterOutput,
+	ko *svcapitypes.CacheCluster,
+) (*svcapitypes.CacheCluster, error) {
+	rm.setAnnotationsFields(r, ko)
+	return ko, nil
+}
+
+// setAnnotationsFields copies the desired object's annotations, populates any
+// relevant fields, and sets the latest object's annotations to this newly populated map.
+// Fields that are handled by custom modify implementation are not set here.
+// This should only be called upon a successful create or modify call.
+func (rm *resourceManager) setAnnotationsFields(
+	r *resource,
+	ko *svcapitypes.CacheCluster,
+) {
+	annotations := getAnnotationsFields(r, ko)
+	annotations[AnnotationLastRequestedPAZs] = marshalAsAnnotation(r.ko.Spec.PreferredAvailabilityZones)
+	ko.ObjectMeta.Annotations = annotations
+}
+
+// getAnnotationsFields returns the annotations map that would be used to set the fields.
+func getAnnotationsFields( + r *resource, + ko *svcapitypes.CacheCluster, +) map[string]string { + if ko.ObjectMeta.Annotations != nil { + return ko.ObjectMeta.Annotations + } + desiredAnnotations := r.ko.ObjectMeta.GetAnnotations() + annotations := make(map[string]string) + for k, v := range desiredAnnotations { + annotations[k] = v + } + ko.ObjectMeta.Annotations = annotations + return annotations +} + +func marshalAsAnnotation(val interface{}) string { + data, err := json.Marshal(val) + if err != nil { + return "null" + } + return string(data) +} + +func Int32OrNil(i *int64) *int32 { + if i != nil { + return aws.Int32(int32(*i)) + } + return aws.Int32(0) +} + +func Int64OrNil(i *int32) *int64 { + if i != nil { + return aws.Int64(int64(*i)) + } + return aws.Int64(0) +} diff --git a/pkg/resource/cache_cluster/identifiers.go b/pkg/resource/cache_cluster/identifiers.go new file mode 100644 index 00000000..4735c72f --- /dev/null +++ b/pkg/resource/cache_cluster/identifiers.go @@ -0,0 +1,55 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package cache_cluster + +import ( + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" +) + +// resourceIdentifiers implements the +// `aws-service-operator-k8s/pkg/types.AWSResourceIdentifiers` interface +type resourceIdentifiers struct { + meta *ackv1alpha1.ResourceMetadata +} + +// ARN returns the AWS Resource Name for the backend AWS resource. If nil, +// this means the resource has not yet been created in the backend AWS +// service. +func (ri *resourceIdentifiers) ARN() *ackv1alpha1.AWSResourceName { + if ri.meta != nil { + return ri.meta.ARN + } + return nil +} + +// OwnerAccountID returns the AWS account identifier in which the +// backend AWS resource resides, or nil if this information is not known +// for the resource +func (ri *resourceIdentifiers) OwnerAccountID() *ackv1alpha1.AWSAccountID { + if ri.meta != nil { + return ri.meta.OwnerAccountID + } + return nil +} + +// Region returns the AWS region in which the resource exists, or +// nil if this information is not known. +func (ri *resourceIdentifiers) Region() *ackv1alpha1.AWSRegion { + if ri.meta != nil { + return ri.meta.Region + } + return nil +} diff --git a/pkg/resource/cache_cluster/manager.go b/pkg/resource/cache_cluster/manager.go new file mode 100644 index 00000000..b384bf9f --- /dev/null +++ b/pkg/resource/cache_cluster/manager.go @@ -0,0 +1,413 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. 
See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package cache_cluster + +import ( + "context" + "fmt" + "time" + + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + ackcondition "github.com/aws-controllers-k8s/runtime/pkg/condition" + ackcfg "github.com/aws-controllers-k8s/runtime/pkg/config" + ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" + ackmetrics "github.com/aws-controllers-k8s/runtime/pkg/metrics" + ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" + ackrt "github.com/aws-controllers-k8s/runtime/pkg/runtime" + ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" + acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" + acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" + ackutil "github.com/aws-controllers-k8s/runtime/pkg/util" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" +) + +var ( + _ = ackutil.InStrings + _ = acktags.NewTags() + _ = ackrt.MissingImageTagValue + _ = svcapitypes.CacheCluster{} +) + +// +kubebuilder:rbac:groups=elasticache.services.k8s.aws,resources=cacheclusters,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=elasticache.services.k8s.aws,resources=cacheclusters/status,verbs=get;update;patch + +var lateInitializeFieldNames = []string{"PreferredAvailabilityZone"} + +// resourceManager is responsible for providing a consistent way to perform +// CRUD operations in a backend AWS service API for Book custom resources. +type resourceManager struct { + // cfg is a copy of the ackcfg.Config object passed on start of the service + // controller + cfg ackcfg.Config + // clientcfg is a copy of the client configuration passed on start of the + // service controller + clientcfg aws.Config + // log refers to the logr.Logger object handling logging for the service + // controller + log logr.Logger + // metrics contains a collection of Prometheus metric objects that the + // service controller and its reconcilers track + metrics *ackmetrics.Metrics + // rr is the Reconciler which can be used for various utility + // functions such as querying for Secret values given a SecretReference + rr acktypes.Reconciler + // awsAccountID is the AWS account identifier that contains the resources + // managed by this resource manager + awsAccountID ackv1alpha1.AWSAccountID + // The AWS Region that this resource manager targets + awsRegion ackv1alpha1.AWSRegion + // sdk is a pointer to the AWS service API client exposed by the + // aws-sdk-go-v2/services/{alias} package. + sdkapi *svcsdk.Client +} + +// concreteResource returns a pointer to a resource from the supplied +// generic AWSResource interface +func (rm *resourceManager) concreteResource( + res acktypes.AWSResource, +) *resource { + // cast the generic interface into a pointer type specific to the concrete + // implementing resource type managed by this resource manager + return res.(*resource) +} + +// ReadOne returns the currently-observed state of the supplied AWSResource in +// the backend AWS service API. 
+func (rm *resourceManager) ReadOne( + ctx context.Context, + res acktypes.AWSResource, +) (acktypes.AWSResource, error) { + r := rm.concreteResource(res) + if r.ko == nil { + // Should never happen... if it does, it's buggy code. + panic("resource manager's ReadOne() method received resource with nil CR object") + } + observed, err := rm.sdkFind(ctx, r) + mirrorAWSTags(r, observed) + if err != nil { + if observed != nil { + return rm.onError(observed, err) + } + return rm.onError(r, err) + } + return rm.onSuccess(observed) +} + +// Create attempts to create the supplied AWSResource in the backend AWS +// service API, returning an AWSResource representing the newly-created +// resource +func (rm *resourceManager) Create( + ctx context.Context, + res acktypes.AWSResource, +) (acktypes.AWSResource, error) { + r := rm.concreteResource(res) + if r.ko == nil { + // Should never happen... if it does, it's buggy code. + panic("resource manager's Create() method received resource with nil CR object") + } + created, err := rm.sdkCreate(ctx, r) + if err != nil { + if created != nil { + return rm.onError(created, err) + } + return rm.onError(r, err) + } + return rm.onSuccess(created) +} + +// Update attempts to mutate the supplied desired AWSResource in the backend AWS +// service API, returning an AWSResource representing the newly-mutated +// resource. +// Note for specialized logic implementers can check to see how the latest +// observed resource differs from the supplied desired state. The +// higher-level reonciler determines whether or not the desired differs +// from the latest observed and decides whether to call the resource +// manager's Update method +func (rm *resourceManager) Update( + ctx context.Context, + resDesired acktypes.AWSResource, + resLatest acktypes.AWSResource, + delta *ackcompare.Delta, +) (acktypes.AWSResource, error) { + desired := rm.concreteResource(resDesired) + latest := rm.concreteResource(resLatest) + if desired.ko == nil || latest.ko == nil { + // Should never happen... if it does, it's buggy code. + panic("resource manager's Update() method received resource with nil CR object") + } + updated, err := rm.sdkUpdate(ctx, desired, latest, delta) + if err != nil { + if updated != nil { + return rm.onError(updated, err) + } + return rm.onError(latest, err) + } + return rm.onSuccess(updated) +} + +// Delete attempts to destroy the supplied AWSResource in the backend AWS +// service API, returning an AWSResource representing the +// resource being deleted (if delete is asynchronous and takes time) +func (rm *resourceManager) Delete( + ctx context.Context, + res acktypes.AWSResource, +) (acktypes.AWSResource, error) { + r := rm.concreteResource(res) + if r.ko == nil { + // Should never happen... if it does, it's buggy code. + panic("resource manager's Update() method received resource with nil CR object") + } + observed, err := rm.sdkDelete(ctx, r) + if err != nil { + if observed != nil { + return rm.onError(observed, err) + } + return rm.onError(r, err) + } + + return rm.onSuccess(observed) +} + +// ARNFromName returns an AWS Resource Name from a given string name. 
This +// is useful for constructing ARNs for APIs that require ARNs in their +// GetAttributes operations but all we have (for new CRs at least) is a +// name for the resource +func (rm *resourceManager) ARNFromName(name string) string { + return fmt.Sprintf( + "arn:aws:elasticache:%s:%s:%s", + rm.awsRegion, + rm.awsAccountID, + name, + ) +} + +// LateInitialize returns an acktypes.AWSResource after setting the late initialized +// fields from the readOne call. This method will initialize the optional fields +// which were not provided by the k8s user but were defaulted by the AWS service. +// If there are no such fields to be initialized, the returned object is similar to +// object passed in the parameter. +func (rm *resourceManager) LateInitialize( + ctx context.Context, + latest acktypes.AWSResource, +) (acktypes.AWSResource, error) { + rlog := ackrtlog.FromContext(ctx) + // If there are no fields to late initialize, do nothing + if len(lateInitializeFieldNames) == 0 { + rlog.Debug("no late initialization required.") + return latest, nil + } + latestCopy := latest.DeepCopy() + lateInitConditionReason := "" + lateInitConditionMessage := "" + observed, err := rm.ReadOne(ctx, latestCopy) + if err != nil { + lateInitConditionMessage = "Unable to complete Read operation required for late initialization" + lateInitConditionReason = "Late Initialization Failure" + ackcondition.SetLateInitialized(latestCopy, corev1.ConditionFalse, &lateInitConditionMessage, &lateInitConditionReason) + ackcondition.SetSynced(latestCopy, corev1.ConditionFalse, nil, nil) + return latestCopy, err + } + lateInitializedRes := rm.lateInitializeFromReadOneOutput(observed, latestCopy) + incompleteInitialization := rm.incompleteLateInitialization(lateInitializedRes) + if incompleteInitialization { + // Add the condition with LateInitialized=False + lateInitConditionMessage = "Late initialization did not complete, requeuing with delay of 5 seconds" + lateInitConditionReason = "Delayed Late Initialization" + ackcondition.SetLateInitialized(lateInitializedRes, corev1.ConditionFalse, &lateInitConditionMessage, &lateInitConditionReason) + ackcondition.SetSynced(lateInitializedRes, corev1.ConditionFalse, nil, nil) + return lateInitializedRes, ackrequeue.NeededAfter(nil, time.Duration(5)*time.Second) + } + // Set LateInitialized condition to True + lateInitConditionMessage = "Late initialization successful" + lateInitConditionReason = "Late initialization successful" + ackcondition.SetLateInitialized(lateInitializedRes, corev1.ConditionTrue, &lateInitConditionMessage, &lateInitConditionReason) + return lateInitializedRes, nil +} + +// incompleteLateInitialization return true if there are fields which were supposed to be +// late initialized but are not. 
If all the fields are late initialized, false is returned +func (rm *resourceManager) incompleteLateInitialization( + res acktypes.AWSResource, +) bool { + ko := rm.concreteResource(res).ko.DeepCopy() + if ko.Spec.PreferredAvailabilityZone == nil { + return true + } + return false +} + +// lateInitializeFromReadOneOutput late initializes the 'latest' resource from the 'observed' +// resource and returns 'latest' resource +func (rm *resourceManager) lateInitializeFromReadOneOutput( + observed acktypes.AWSResource, + latest acktypes.AWSResource, +) acktypes.AWSResource { + observedKo := rm.concreteResource(observed).ko.DeepCopy() + latestKo := rm.concreteResource(latest).ko.DeepCopy() + if observedKo.Spec.PreferredAvailabilityZone != nil && latestKo.Spec.PreferredAvailabilityZone == nil { + latestKo.Spec.PreferredAvailabilityZone = observedKo.Spec.PreferredAvailabilityZone + } + return &resource{latestKo} +} + +// IsSynced returns true if the resource is synced. +func (rm *resourceManager) IsSynced(ctx context.Context, res acktypes.AWSResource) (bool, error) { + r := rm.concreteResource(res) + if r.ko == nil { + // Should never happen... if it does, it's buggy code. + panic("resource manager's IsSynced() method received resource with nil CR object") + } + + return true, nil +} + +// EnsureTags ensures that tags are present inside the AWSResource. +// If the AWSResource does not have any existing resource tags, the 'tags' +// field is initialized and the controller tags are added. +// If the AWSResource has existing resource tags, then controller tags are +// added to the existing resource tags without overriding them. +// If the AWSResource does not support tags, only then the controller tags +// will not be added to the AWSResource. +func (rm *resourceManager) EnsureTags( + ctx context.Context, + res acktypes.AWSResource, + md acktypes.ServiceControllerMetadata, +) error { + r := rm.concreteResource(res) + if r.ko == nil { + // Should never happen... if it does, it's buggy code. + panic("resource manager's EnsureTags method received resource with nil CR object") + } + defaultTags := ackrt.GetDefaultTags(&rm.cfg, r.ko, md) + var existingTags []*svcapitypes.Tag + existingTags = r.ko.Spec.Tags + resourceTags, keyOrder := convertToOrderedACKTags(existingTags) + tags := acktags.Merge(resourceTags, defaultTags) + r.ko.Spec.Tags = fromACKTags(tags, keyOrder) + return nil +} + +// FilterAWSTags ignores tags that have keys that start with "aws:" +// is needed to ensure the controller does not attempt to remove +// tags set by AWS. This function needs to be called after each Read +// operation. +// Eg. resources created with cloudformation have tags that cannot be +// removed by an ACK controller +func (rm *resourceManager) FilterSystemTags(res acktypes.AWSResource) { + r := rm.concreteResource(res) + if r == nil || r.ko == nil { + return + } + var existingTags []*svcapitypes.Tag + existingTags = r.ko.Spec.Tags + resourceTags, tagKeyOrder := convertToOrderedACKTags(existingTags) + ignoreSystemTags(resourceTags) + r.ko.Spec.Tags = fromACKTags(resourceTags, tagKeyOrder) +} + +// mirrorAWSTags ensures that AWS tags are included in the desired resource +// if they are present in the latest resource. This will ensure that the +// aws tags are not present in a diff. The logic of the controller will +// ensure these tags aren't patched to the resource in the cluster, and +// will only be present to make sure we don't try to remove these tags. 
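+//
+// For example, a cluster created through CloudFormation carries keys such as
+// "aws:cloudformation:stack-name"; mirroring them into the desired tag set
+// keeps them out of the computed delta instead of the controller repeatedly
+// trying, and failing, to remove them.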
+// +// Although there are a lot of similarities between this function and +// EnsureTags, they are very much different. +// While EnsureTags tries to make sure the resource contains the controller +// tags, mirrowAWSTags tries to make sure tags injected by AWS are mirrored +// from the latest resoruce to the desired resource. +func mirrorAWSTags(a *resource, b *resource) { + if a == nil || a.ko == nil || b == nil || b.ko == nil { + return + } + var existingLatestTags []*svcapitypes.Tag + var existingDesiredTags []*svcapitypes.Tag + existingDesiredTags = a.ko.Spec.Tags + existingLatestTags = b.ko.Spec.Tags + desiredTags, desiredTagKeyOrder := convertToOrderedACKTags(existingDesiredTags) + latestTags, _ := convertToOrderedACKTags(existingLatestTags) + syncAWSTags(desiredTags, latestTags) + a.ko.Spec.Tags = fromACKTags(desiredTags, desiredTagKeyOrder) +} + +// newResourceManager returns a new struct implementing +// acktypes.AWSResourceManager +// This is for AWS-SDK-GO-V2 - Created newResourceManager With AWS sdk-Go-ClientV2 +func newResourceManager( + cfg ackcfg.Config, + clientcfg aws.Config, + log logr.Logger, + metrics *ackmetrics.Metrics, + rr acktypes.Reconciler, + id ackv1alpha1.AWSAccountID, + region ackv1alpha1.AWSRegion, +) (*resourceManager, error) { + return &resourceManager{ + cfg: cfg, + clientcfg: clientcfg, + log: log, + metrics: metrics, + rr: rr, + awsAccountID: id, + awsRegion: region, + sdkapi: svcsdk.NewFromConfig(clientcfg), + }, nil +} + +// onError updates resource conditions and returns updated resource +// it returns nil if no condition is updated. +func (rm *resourceManager) onError( + r *resource, + err error, +) (acktypes.AWSResource, error) { + if r == nil { + return nil, err + } + r1, updated := rm.updateConditions(r, false, err) + if !updated { + return r, err + } + for _, condition := range r1.Conditions() { + if condition.Type == ackv1alpha1.ConditionTypeTerminal && + condition.Status == corev1.ConditionTrue { + // resource is in Terminal condition + // return Terminal error + return r1, ackerr.Terminal + } + } + return r1, err +} + +// onSuccess updates resource conditions and returns updated resource +// it returns the supplied resource if no condition is updated. +func (rm *resourceManager) onSuccess( + r *resource, +) (acktypes.AWSResource, error) { + if r == nil { + return nil, nil + } + r1, updated := rm.updateConditions(r, true, nil) + if !updated { + return r, nil + } + return r1, nil +} diff --git a/pkg/resource/cache_cluster/manager_factory.go b/pkg/resource/cache_cluster/manager_factory.go new file mode 100644 index 00000000..3fa2b8dc --- /dev/null +++ b/pkg/resource/cache_cluster/manager_factory.go @@ -0,0 +1,100 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. 
+ +package cache_cluster + +import ( + "fmt" + "sync" + + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcfg "github.com/aws-controllers-k8s/runtime/pkg/config" + ackmetrics "github.com/aws-controllers-k8s/runtime/pkg/metrics" + acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/go-logr/logr" + + svcresource "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource" +) + +// resourceManagerFactory produces resourceManager objects. It implements the +// `types.AWSResourceManagerFactory` interface. +type resourceManagerFactory struct { + sync.RWMutex + // rmCache contains resource managers for a particular AWS account ID + rmCache map[string]*resourceManager +} + +// ResourcePrototype returns an AWSResource that resource managers produced by +// this factory will handle +func (f *resourceManagerFactory) ResourceDescriptor() acktypes.AWSResourceDescriptor { + return &resourceDescriptor{} +} + +// ManagerFor returns a resource manager object that can manage resources for a +// supplied AWS account +func (f *resourceManagerFactory) ManagerFor( + cfg ackcfg.Config, + clientcfg aws.Config, + log logr.Logger, + metrics *ackmetrics.Metrics, + rr acktypes.Reconciler, + id ackv1alpha1.AWSAccountID, + region ackv1alpha1.AWSRegion, + roleARN ackv1alpha1.AWSResourceName, +) (acktypes.AWSResourceManager, error) { + // We use the account ID, region, and role ARN to uniquely identify a + // resource manager. This helps us to avoid creating multiple resource + // managers for the same account/region/roleARN combination. + rmId := fmt.Sprintf("%s/%s/%s", id, region, roleARN) + f.RLock() + rm, found := f.rmCache[rmId] + f.RUnlock() + + if found { + return rm, nil + } + + f.Lock() + defer f.Unlock() + + rm, err := newResourceManager(cfg, clientcfg, log, metrics, rr, id, region) + if err != nil { + return nil, err + } + f.rmCache[rmId] = rm + return rm, nil +} + +// IsAdoptable returns true if the resource is able to be adopted +func (f *resourceManagerFactory) IsAdoptable() bool { + return true +} + +// RequeueOnSuccessSeconds returns true if the resource should be requeued after specified seconds +// Default is false which means resource will not be requeued after success. +func (f *resourceManagerFactory) RequeueOnSuccessSeconds() int { + return 0 +} + +func newResourceManagerFactory() *resourceManagerFactory { + return &resourceManagerFactory{ + rmCache: map[string]*resourceManager{}, + } +} + +func init() { + svcresource.RegisterManagerFactory(newResourceManagerFactory()) +} diff --git a/pkg/resource/cache_cluster/references.go b/pkg/resource/cache_cluster/references.go new file mode 100644 index 00000000..e2cbe57f --- /dev/null +++ b/pkg/resource/cache_cluster/references.go @@ -0,0 +1,661 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. 
+ +package cache_cluster + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + ec2apitypes "github.com/aws-controllers-k8s/ec2-controller/apis/v1alpha1" + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" + acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" + snsapitypes "github.com/aws-controllers-k8s/sns-controller/apis/v1alpha1" + + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" +) + +// +kubebuilder:rbac:groups=sns.services.k8s.aws,resources=topics,verbs=get;list +// +kubebuilder:rbac:groups=sns.services.k8s.aws,resources=topics/status,verbs=get;list + +// +kubebuilder:rbac:groups=ec2.services.k8s.aws,resources=securitygroups,verbs=get;list +// +kubebuilder:rbac:groups=ec2.services.k8s.aws,resources=securitygroups/status,verbs=get;list + +// ClearResolvedReferences removes any reference values that were made +// concrete in the spec. It returns a copy of the input AWSResource which +// contains the original *Ref values, but none of their respective concrete +// values. +func (rm *resourceManager) ClearResolvedReferences(res acktypes.AWSResource) acktypes.AWSResource { + ko := rm.concreteResource(res).ko.DeepCopy() + + if ko.Spec.CacheParameterGroupRef != nil { + ko.Spec.CacheParameterGroupName = nil + } + + if ko.Spec.CacheSubnetGroupRef != nil { + ko.Spec.CacheSubnetGroupName = nil + } + + if ko.Spec.NotificationTopicRef != nil { + ko.Spec.NotificationTopicARN = nil + } + + if ko.Spec.ReplicationGroupRef != nil { + ko.Spec.ReplicationGroupID = nil + } + + if len(ko.Spec.SecurityGroupRefs) > 0 { + ko.Spec.SecurityGroupIDs = nil + } + + if ko.Spec.SnapshotRef != nil { + ko.Spec.SnapshotName = nil + } + + return &resource{ko} +} + +// ResolveReferences finds if there are any Reference field(s) present +// inside AWSResource passed in the parameter and attempts to resolve those +// reference field(s) into their respective target field(s). It returns a +// copy of the input AWSResource with resolved reference(s), a boolean which +// is set to true if the resource contains any references (regardless of if +// they are resolved successfully) and an error if the passed AWSResource's +// reference field(s) could not be resolved. 
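+//
+// As an illustration (the resource names below are made up; the field names
+// follow this CRD's spec), a CacheCluster can reference other ACK resources
+// instead of hard-coding their identifiers:
+//
+//	spec:
+//	  cacheSubnetGroupRef:
+//	    from:
+//	      name: my-subnet-group
+//	  securityGroupRefs:
+//	  - from:
+//	      name: my-security-group
+//
+// Once the referenced resources are synced, ResolveReferences fills in
+// Spec.CacheSubnetGroupName and Spec.SecurityGroupIDs from them before the
+// cluster is created or updated.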
+func (rm *resourceManager) ResolveReferences( + ctx context.Context, + apiReader client.Reader, + res acktypes.AWSResource, +) (acktypes.AWSResource, bool, error) { + ko := rm.concreteResource(res).ko + + resourceHasReferences := false + err := validateReferenceFields(ko) + if fieldHasReferences, err := rm.resolveReferenceForCacheParameterGroupName(ctx, apiReader, ko); err != nil { + return &resource{ko}, (resourceHasReferences || fieldHasReferences), err + } else { + resourceHasReferences = resourceHasReferences || fieldHasReferences + } + + if fieldHasReferences, err := rm.resolveReferenceForCacheSubnetGroupName(ctx, apiReader, ko); err != nil { + return &resource{ko}, (resourceHasReferences || fieldHasReferences), err + } else { + resourceHasReferences = resourceHasReferences || fieldHasReferences + } + + if fieldHasReferences, err := rm.resolveReferenceForNotificationTopicARN(ctx, apiReader, ko); err != nil { + return &resource{ko}, (resourceHasReferences || fieldHasReferences), err + } else { + resourceHasReferences = resourceHasReferences || fieldHasReferences + } + + if fieldHasReferences, err := rm.resolveReferenceForReplicationGroupID(ctx, apiReader, ko); err != nil { + return &resource{ko}, (resourceHasReferences || fieldHasReferences), err + } else { + resourceHasReferences = resourceHasReferences || fieldHasReferences + } + + if fieldHasReferences, err := rm.resolveReferenceForSecurityGroupIDs(ctx, apiReader, ko); err != nil { + return &resource{ko}, (resourceHasReferences || fieldHasReferences), err + } else { + resourceHasReferences = resourceHasReferences || fieldHasReferences + } + + if fieldHasReferences, err := rm.resolveReferenceForSnapshotName(ctx, apiReader, ko); err != nil { + return &resource{ko}, (resourceHasReferences || fieldHasReferences), err + } else { + resourceHasReferences = resourceHasReferences || fieldHasReferences + } + + return &resource{ko}, resourceHasReferences, err +} + +// validateReferenceFields validates the reference field and corresponding +// identifier field. +func validateReferenceFields(ko *svcapitypes.CacheCluster) error { + + if ko.Spec.CacheParameterGroupRef != nil && ko.Spec.CacheParameterGroupName != nil { + return ackerr.ResourceReferenceAndIDNotSupportedFor("CacheParameterGroupName", "CacheParameterGroupRef") + } + + if ko.Spec.CacheSubnetGroupRef != nil && ko.Spec.CacheSubnetGroupName != nil { + return ackerr.ResourceReferenceAndIDNotSupportedFor("CacheSubnetGroupName", "CacheSubnetGroupRef") + } + + if ko.Spec.NotificationTopicRef != nil && ko.Spec.NotificationTopicARN != nil { + return ackerr.ResourceReferenceAndIDNotSupportedFor("NotificationTopicARN", "NotificationTopicRef") + } + + if ko.Spec.ReplicationGroupRef != nil && ko.Spec.ReplicationGroupID != nil { + return ackerr.ResourceReferenceAndIDNotSupportedFor("ReplicationGroupID", "ReplicationGroupRef") + } + + if len(ko.Spec.SecurityGroupRefs) > 0 && len(ko.Spec.SecurityGroupIDs) > 0 { + return ackerr.ResourceReferenceAndIDNotSupportedFor("SecurityGroupIDs", "SecurityGroupRefs") + } + + if ko.Spec.SnapshotRef != nil && ko.Spec.SnapshotName != nil { + return ackerr.ResourceReferenceAndIDNotSupportedFor("SnapshotName", "SnapshotRef") + } + return nil +} + +// resolveReferenceForCacheParameterGroupName reads the resource referenced +// from CacheParameterGroupRef field and sets the CacheParameterGroupName +// from referenced resource. 
Returns a boolean indicating whether a reference +// contains references, or an error +func (rm *resourceManager) resolveReferenceForCacheParameterGroupName( + ctx context.Context, + apiReader client.Reader, + ko *svcapitypes.CacheCluster, +) (hasReferences bool, err error) { + if ko.Spec.CacheParameterGroupRef != nil && ko.Spec.CacheParameterGroupRef.From != nil { + hasReferences = true + arr := ko.Spec.CacheParameterGroupRef.From + if arr.Name == nil || *arr.Name == "" { + return hasReferences, fmt.Errorf("provided resource reference is nil or empty: CacheParameterGroupRef") + } + namespace := ko.ObjectMeta.GetNamespace() + if arr.Namespace != nil && *arr.Namespace != "" { + namespace = *arr.Namespace + } + obj := &svcapitypes.CacheParameterGroup{} + if err := getReferencedResourceState_CacheParameterGroup(ctx, apiReader, obj, *arr.Name, namespace); err != nil { + return hasReferences, err + } + ko.Spec.CacheParameterGroupName = (*string)(obj.Spec.CacheParameterGroupName) + } + + return hasReferences, nil +} + +// getReferencedResourceState_CacheParameterGroup looks up whether a referenced resource +// exists and is in a ACK.ResourceSynced=True state. If the referenced resource does exist and is +// in a Synced state, returns nil, otherwise returns `ackerr.ResourceReferenceTerminalFor` or +// `ResourceReferenceNotSyncedFor` depending on if the resource is in a Terminal state. +func getReferencedResourceState_CacheParameterGroup( + ctx context.Context, + apiReader client.Reader, + obj *svcapitypes.CacheParameterGroup, + name string, // the Kubernetes name of the referenced resource + namespace string, // the Kubernetes namespace of the referenced resource +) error { + namespacedName := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + err := apiReader.Get(ctx, namespacedName, obj) + if err != nil { + return err + } + var refResourceTerminal bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeTerminal && + cond.Status == corev1.ConditionTrue { + return ackerr.ResourceReferenceTerminalFor( + "CacheParameterGroup", + namespace, name) + } + } + if refResourceTerminal { + return ackerr.ResourceReferenceTerminalFor( + "CacheParameterGroup", + namespace, name) + } + var refResourceSynced bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && + cond.Status == corev1.ConditionTrue { + refResourceSynced = true + } + } + if !refResourceSynced { + return ackerr.ResourceReferenceNotSyncedFor( + "CacheParameterGroup", + namespace, name) + } + if obj.Spec.CacheParameterGroupName == nil { + return ackerr.ResourceReferenceMissingTargetFieldFor( + "CacheParameterGroup", + namespace, name, + "Spec.CacheParameterGroupName") + } + return nil +} + +// resolveReferenceForCacheSubnetGroupName reads the resource referenced +// from CacheSubnetGroupRef field and sets the CacheSubnetGroupName +// from referenced resource. 
Returns a boolean indicating whether a reference +// contains references, or an error +func (rm *resourceManager) resolveReferenceForCacheSubnetGroupName( + ctx context.Context, + apiReader client.Reader, + ko *svcapitypes.CacheCluster, +) (hasReferences bool, err error) { + if ko.Spec.CacheSubnetGroupRef != nil && ko.Spec.CacheSubnetGroupRef.From != nil { + hasReferences = true + arr := ko.Spec.CacheSubnetGroupRef.From + if arr.Name == nil || *arr.Name == "" { + return hasReferences, fmt.Errorf("provided resource reference is nil or empty: CacheSubnetGroupRef") + } + namespace := ko.ObjectMeta.GetNamespace() + if arr.Namespace != nil && *arr.Namespace != "" { + namespace = *arr.Namespace + } + obj := &svcapitypes.CacheSubnetGroup{} + if err := getReferencedResourceState_CacheSubnetGroup(ctx, apiReader, obj, *arr.Name, namespace); err != nil { + return hasReferences, err + } + ko.Spec.CacheSubnetGroupName = (*string)(obj.Spec.CacheSubnetGroupName) + } + + return hasReferences, nil +} + +// getReferencedResourceState_CacheSubnetGroup looks up whether a referenced resource +// exists and is in a ACK.ResourceSynced=True state. If the referenced resource does exist and is +// in a Synced state, returns nil, otherwise returns `ackerr.ResourceReferenceTerminalFor` or +// `ResourceReferenceNotSyncedFor` depending on if the resource is in a Terminal state. +func getReferencedResourceState_CacheSubnetGroup( + ctx context.Context, + apiReader client.Reader, + obj *svcapitypes.CacheSubnetGroup, + name string, // the Kubernetes name of the referenced resource + namespace string, // the Kubernetes namespace of the referenced resource +) error { + namespacedName := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + err := apiReader.Get(ctx, namespacedName, obj) + if err != nil { + return err + } + var refResourceTerminal bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeTerminal && + cond.Status == corev1.ConditionTrue { + return ackerr.ResourceReferenceTerminalFor( + "CacheSubnetGroup", + namespace, name) + } + } + if refResourceTerminal { + return ackerr.ResourceReferenceTerminalFor( + "CacheSubnetGroup", + namespace, name) + } + var refResourceSynced bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && + cond.Status == corev1.ConditionTrue { + refResourceSynced = true + } + } + if !refResourceSynced { + return ackerr.ResourceReferenceNotSyncedFor( + "CacheSubnetGroup", + namespace, name) + } + if obj.Spec.CacheSubnetGroupName == nil { + return ackerr.ResourceReferenceMissingTargetFieldFor( + "CacheSubnetGroup", + namespace, name, + "Spec.CacheSubnetGroupName") + } + return nil +} + +// resolveReferenceForNotificationTopicARN reads the resource referenced +// from NotificationTopicRef field and sets the NotificationTopicARN +// from referenced resource. 
Returns a boolean indicating whether a reference +// contains references, or an error +func (rm *resourceManager) resolveReferenceForNotificationTopicARN( + ctx context.Context, + apiReader client.Reader, + ko *svcapitypes.CacheCluster, +) (hasReferences bool, err error) { + if ko.Spec.NotificationTopicRef != nil && ko.Spec.NotificationTopicRef.From != nil { + hasReferences = true + arr := ko.Spec.NotificationTopicRef.From + if arr.Name == nil || *arr.Name == "" { + return hasReferences, fmt.Errorf("provided resource reference is nil or empty: NotificationTopicRef") + } + namespace := ko.ObjectMeta.GetNamespace() + if arr.Namespace != nil && *arr.Namespace != "" { + namespace = *arr.Namespace + } + obj := &snsapitypes.Topic{} + if err := getReferencedResourceState_Topic(ctx, apiReader, obj, *arr.Name, namespace); err != nil { + return hasReferences, err + } + ko.Spec.NotificationTopicARN = (*string)(obj.Status.ACKResourceMetadata.ARN) + } + + return hasReferences, nil +} + +// getReferencedResourceState_Topic looks up whether a referenced resource +// exists and is in a ACK.ResourceSynced=True state. If the referenced resource does exist and is +// in a Synced state, returns nil, otherwise returns `ackerr.ResourceReferenceTerminalFor` or +// `ResourceReferenceNotSyncedFor` depending on if the resource is in a Terminal state. +func getReferencedResourceState_Topic( + ctx context.Context, + apiReader client.Reader, + obj *snsapitypes.Topic, + name string, // the Kubernetes name of the referenced resource + namespace string, // the Kubernetes namespace of the referenced resource +) error { + namespacedName := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + err := apiReader.Get(ctx, namespacedName, obj) + if err != nil { + return err + } + var refResourceTerminal bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeTerminal && + cond.Status == corev1.ConditionTrue { + return ackerr.ResourceReferenceTerminalFor( + "Topic", + namespace, name) + } + } + if refResourceTerminal { + return ackerr.ResourceReferenceTerminalFor( + "Topic", + namespace, name) + } + var refResourceSynced bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && + cond.Status == corev1.ConditionTrue { + refResourceSynced = true + } + } + if !refResourceSynced { + return ackerr.ResourceReferenceNotSyncedFor( + "Topic", + namespace, name) + } + if obj.Status.ACKResourceMetadata == nil || obj.Status.ACKResourceMetadata.ARN == nil { + return ackerr.ResourceReferenceMissingTargetFieldFor( + "Topic", + namespace, name, + "Status.ACKResourceMetadata.ARN") + } + return nil +} + +// resolveReferenceForReplicationGroupID reads the resource referenced +// from ReplicationGroupRef field and sets the ReplicationGroupID +// from referenced resource. 
Returns a boolean indicating whether a reference +// contains references, or an error +func (rm *resourceManager) resolveReferenceForReplicationGroupID( + ctx context.Context, + apiReader client.Reader, + ko *svcapitypes.CacheCluster, +) (hasReferences bool, err error) { + if ko.Spec.ReplicationGroupRef != nil && ko.Spec.ReplicationGroupRef.From != nil { + hasReferences = true + arr := ko.Spec.ReplicationGroupRef.From + if arr.Name == nil || *arr.Name == "" { + return hasReferences, fmt.Errorf("provided resource reference is nil or empty: ReplicationGroupRef") + } + namespace := ko.ObjectMeta.GetNamespace() + if arr.Namespace != nil && *arr.Namespace != "" { + namespace = *arr.Namespace + } + obj := &svcapitypes.ReplicationGroup{} + if err := getReferencedResourceState_ReplicationGroup(ctx, apiReader, obj, *arr.Name, namespace); err != nil { + return hasReferences, err + } + ko.Spec.ReplicationGroupID = (*string)(obj.Spec.ReplicationGroupID) + } + + return hasReferences, nil +} + +// getReferencedResourceState_ReplicationGroup looks up whether a referenced resource +// exists and is in a ACK.ResourceSynced=True state. If the referenced resource does exist and is +// in a Synced state, returns nil, otherwise returns `ackerr.ResourceReferenceTerminalFor` or +// `ResourceReferenceNotSyncedFor` depending on if the resource is in a Terminal state. +func getReferencedResourceState_ReplicationGroup( + ctx context.Context, + apiReader client.Reader, + obj *svcapitypes.ReplicationGroup, + name string, // the Kubernetes name of the referenced resource + namespace string, // the Kubernetes namespace of the referenced resource +) error { + namespacedName := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + err := apiReader.Get(ctx, namespacedName, obj) + if err != nil { + return err + } + var refResourceTerminal bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeTerminal && + cond.Status == corev1.ConditionTrue { + return ackerr.ResourceReferenceTerminalFor( + "ReplicationGroup", + namespace, name) + } + } + if refResourceTerminal { + return ackerr.ResourceReferenceTerminalFor( + "ReplicationGroup", + namespace, name) + } + var refResourceSynced bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && + cond.Status == corev1.ConditionTrue { + refResourceSynced = true + } + } + if !refResourceSynced { + return ackerr.ResourceReferenceNotSyncedFor( + "ReplicationGroup", + namespace, name) + } + if obj.Spec.ReplicationGroupID == nil { + return ackerr.ResourceReferenceMissingTargetFieldFor( + "ReplicationGroup", + namespace, name, + "Spec.ReplicationGroupID") + } + return nil +} + +// resolveReferenceForSecurityGroupIDs reads the resource referenced +// from SecurityGroupRefs field and sets the SecurityGroupIDs +// from referenced resource. 
Returns a boolean indicating whether a reference +// contains references, or an error +func (rm *resourceManager) resolveReferenceForSecurityGroupIDs( + ctx context.Context, + apiReader client.Reader, + ko *svcapitypes.CacheCluster, +) (hasReferences bool, err error) { + for _, f0iter := range ko.Spec.SecurityGroupRefs { + if f0iter != nil && f0iter.From != nil { + hasReferences = true + arr := f0iter.From + if arr.Name == nil || *arr.Name == "" { + return hasReferences, fmt.Errorf("provided resource reference is nil or empty: SecurityGroupRefs") + } + namespace := ko.ObjectMeta.GetNamespace() + if arr.Namespace != nil && *arr.Namespace != "" { + namespace = *arr.Namespace + } + obj := &ec2apitypes.SecurityGroup{} + if err := getReferencedResourceState_SecurityGroup(ctx, apiReader, obj, *arr.Name, namespace); err != nil { + return hasReferences, err + } + if ko.Spec.SecurityGroupIDs == nil { + ko.Spec.SecurityGroupIDs = make([]*string, 0, 1) + } + ko.Spec.SecurityGroupIDs = append(ko.Spec.SecurityGroupIDs, (*string)(obj.Status.ID)) + } + } + + return hasReferences, nil +} + +// getReferencedResourceState_SecurityGroup looks up whether a referenced resource +// exists and is in a ACK.ResourceSynced=True state. If the referenced resource does exist and is +// in a Synced state, returns nil, otherwise returns `ackerr.ResourceReferenceTerminalFor` or +// `ResourceReferenceNotSyncedFor` depending on if the resource is in a Terminal state. +func getReferencedResourceState_SecurityGroup( + ctx context.Context, + apiReader client.Reader, + obj *ec2apitypes.SecurityGroup, + name string, // the Kubernetes name of the referenced resource + namespace string, // the Kubernetes namespace of the referenced resource +) error { + namespacedName := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + err := apiReader.Get(ctx, namespacedName, obj) + if err != nil { + return err + } + var refResourceTerminal bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeTerminal && + cond.Status == corev1.ConditionTrue { + return ackerr.ResourceReferenceTerminalFor( + "SecurityGroup", + namespace, name) + } + } + if refResourceTerminal { + return ackerr.ResourceReferenceTerminalFor( + "SecurityGroup", + namespace, name) + } + var refResourceSynced bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && + cond.Status == corev1.ConditionTrue { + refResourceSynced = true + } + } + if !refResourceSynced { + return ackerr.ResourceReferenceNotSyncedFor( + "SecurityGroup", + namespace, name) + } + if obj.Status.ID == nil { + return ackerr.ResourceReferenceMissingTargetFieldFor( + "SecurityGroup", + namespace, name, + "Status.ID") + } + return nil +} + +// resolveReferenceForSnapshotName reads the resource referenced +// from SnapshotRef field and sets the SnapshotName +// from referenced resource. 
Returns a boolean indicating whether a reference +// contains references, or an error +func (rm *resourceManager) resolveReferenceForSnapshotName( + ctx context.Context, + apiReader client.Reader, + ko *svcapitypes.CacheCluster, +) (hasReferences bool, err error) { + if ko.Spec.SnapshotRef != nil && ko.Spec.SnapshotRef.From != nil { + hasReferences = true + arr := ko.Spec.SnapshotRef.From + if arr.Name == nil || *arr.Name == "" { + return hasReferences, fmt.Errorf("provided resource reference is nil or empty: SnapshotRef") + } + namespace := ko.ObjectMeta.GetNamespace() + if arr.Namespace != nil && *arr.Namespace != "" { + namespace = *arr.Namespace + } + obj := &svcapitypes.Snapshot{} + if err := getReferencedResourceState_Snapshot(ctx, apiReader, obj, *arr.Name, namespace); err != nil { + return hasReferences, err + } + ko.Spec.SnapshotName = (*string)(obj.Spec.SnapshotName) + } + + return hasReferences, nil +} + +// getReferencedResourceState_Snapshot looks up whether a referenced resource +// exists and is in a ACK.ResourceSynced=True state. If the referenced resource does exist and is +// in a Synced state, returns nil, otherwise returns `ackerr.ResourceReferenceTerminalFor` or +// `ResourceReferenceNotSyncedFor` depending on if the resource is in a Terminal state. +func getReferencedResourceState_Snapshot( + ctx context.Context, + apiReader client.Reader, + obj *svcapitypes.Snapshot, + name string, // the Kubernetes name of the referenced resource + namespace string, // the Kubernetes namespace of the referenced resource +) error { + namespacedName := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + err := apiReader.Get(ctx, namespacedName, obj) + if err != nil { + return err + } + var refResourceTerminal bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeTerminal && + cond.Status == corev1.ConditionTrue { + return ackerr.ResourceReferenceTerminalFor( + "Snapshot", + namespace, name) + } + } + if refResourceTerminal { + return ackerr.ResourceReferenceTerminalFor( + "Snapshot", + namespace, name) + } + var refResourceSynced bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && + cond.Status == corev1.ConditionTrue { + refResourceSynced = true + } + } + if !refResourceSynced { + return ackerr.ResourceReferenceNotSyncedFor( + "Snapshot", + namespace, name) + } + if obj.Spec.SnapshotName == nil { + return ackerr.ResourceReferenceMissingTargetFieldFor( + "Snapshot", + namespace, name, + "Spec.SnapshotName") + } + return nil +} diff --git a/pkg/resource/cache_cluster/resource.go b/pkg/resource/cache_cluster/resource.go new file mode 100644 index 00000000..afb679b6 --- /dev/null +++ b/pkg/resource/cache_cluster/resource.go @@ -0,0 +1,113 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. 
+ +package cache_cluster + +import ( + "fmt" + + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackerrors "github.com/aws-controllers-k8s/runtime/pkg/errors" + acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + rtclient "sigs.k8s.io/controller-runtime/pkg/client" + + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" +) + +// Hack to avoid import errors during build... +var ( + _ = &ackerrors.MissingNameIdentifier +) + +// resource implements the `aws-controller-k8s/runtime/pkg/types.AWSResource` +// interface +type resource struct { + // The Kubernetes-native CR representing the resource + ko *svcapitypes.CacheCluster +} + +// Identifiers returns an AWSResourceIdentifiers object containing various +// identifying information, including the AWS account ID that owns the +// resource, the resource's AWS Resource Name (ARN) +func (r *resource) Identifiers() acktypes.AWSResourceIdentifiers { + return &resourceIdentifiers{r.ko.Status.ACKResourceMetadata} +} + +// IsBeingDeleted returns true if the Kubernetes resource has a non-zero +// deletion timestamp +func (r *resource) IsBeingDeleted() bool { + return !r.ko.DeletionTimestamp.IsZero() +} + +// RuntimeObject returns the Kubernetes apimachinery/runtime representation of +// the AWSResource +func (r *resource) RuntimeObject() rtclient.Object { + return r.ko +} + +// MetaObject returns the Kubernetes apimachinery/apis/meta/v1.Object +// representation of the AWSResource +func (r *resource) MetaObject() metav1.Object { + return r.ko.GetObjectMeta() +} + +// Conditions returns the ACK Conditions collection for the AWSResource +func (r *resource) Conditions() []*ackv1alpha1.Condition { + return r.ko.Status.Conditions +} + +// ReplaceConditions sets the Conditions status field for the resource +func (r *resource) ReplaceConditions(conditions []*ackv1alpha1.Condition) { + r.ko.Status.Conditions = conditions +} + +// SetObjectMeta sets the ObjectMeta field for the resource +func (r *resource) SetObjectMeta(meta metav1.ObjectMeta) { + r.ko.ObjectMeta = meta +} + +// SetStatus will set the Status field for the resource +func (r *resource) SetStatus(desired acktypes.AWSResource) { + r.ko.Status = desired.(*resource).ko.Status +} + +// SetIdentifiers sets the Spec or Status field that is referenced as the unique +// resource identifier +func (r *resource) SetIdentifiers(identifier *ackv1alpha1.AWSIdentifiers) error { + if identifier.NameOrID == "" { + return ackerrors.MissingNameIdentifier + } + r.ko.Spec.CacheClusterID = &identifier.NameOrID + + return nil +} + +// PopulateResourceFromAnnotation populates the fields passed from adoption annotation +func (r *resource) PopulateResourceFromAnnotation(fields map[string]string) error { + f0, ok := fields["cacheClusterID"] + if !ok { + return ackerrors.NewTerminalError(fmt.Errorf("required field missing: cacheClusterID")) + } + r.ko.Spec.CacheClusterID = &f0 + + return nil +} + +// DeepCopy will return a copy of the resource +func (r *resource) DeepCopy() acktypes.AWSResource { + koCopy := r.ko.DeepCopy() + return &resource{koCopy} +} diff --git a/pkg/resource/cache_cluster/sdk.go b/pkg/resource/cache_cluster/sdk.go new file mode 100644 index 00000000..8e4e7ee8 --- /dev/null +++ b/pkg/resource/cache_cluster/sdk.go @@ -0,0 +1,1561 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package cache_cluster + +import ( + "context" + "errors" + "fmt" + "math" + "reflect" + "strings" + + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + ackcondition "github.com/aws-controllers-k8s/runtime/pkg/condition" + ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" + ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" + ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + svcsdktypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" + smithy "github.com/aws/smithy-go" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" +) + +// Hack to avoid import errors during build... +var ( + _ = &metav1.Time{} + _ = strings.ToLower("") + _ = &svcsdk.Client{} + _ = &svcapitypes.CacheCluster{} + _ = ackv1alpha1.AWSAccountID("") + _ = &ackerr.NotFound + _ = &ackcondition.NotManagedMessage + _ = &reflect.Value{} + _ = fmt.Sprintf("") + _ = &ackrequeue.NoRequeue{} + _ = &aws.Config{} +) + +// sdkFind returns SDK-specific information about a supplied resource +func (rm *resourceManager) sdkFind( + ctx context.Context, + r *resource, +) (latest *resource, err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.sdkFind") + defer func() { + exit(err) + }() + // If any required fields in the input shape are missing, AWS resource is + // not created yet. Return NotFound here to indicate to callers that the + // resource isn't yet created. 
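+	// For CacheCluster the only identifier DescribeCacheClusters needs is
+	// Spec.CacheClusterID (see requiredFieldsMissingFromReadManyInput and
+	// newListRequestPayload below).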
+ if rm.requiredFieldsMissingFromReadManyInput(r) { + return nil, ackerr.NotFound + } + + input, err := rm.newListRequestPayload(r) + if err != nil { + return nil, err + } + // Include cache node info to get endpoint details for clusters + input.ShowCacheNodeInfo = aws.Bool(true) + var resp *svcsdk.DescribeCacheClustersOutput + resp, err = rm.sdkapi.DescribeCacheClusters(ctx, input) + rm.metrics.RecordAPICall("READ_MANY", "DescribeCacheClusters", err) + if err != nil { + var awsErr smithy.APIError + if errors.As(err, &awsErr) && awsErr.ErrorCode() == "CacheClusterNotFound" { + return nil, ackerr.NotFound + } + return nil, err + } + + // Merge in the information we read from the API call above to the copy of + // the original Kubernetes object we passed to the function + ko := r.ko.DeepCopy() + + found := false + for _, elem := range resp.CacheClusters { + if elem.ARN != nil { + if ko.Status.ACKResourceMetadata == nil { + ko.Status.ACKResourceMetadata = &ackv1alpha1.ResourceMetadata{} + } + tmpARN := ackv1alpha1.AWSResourceName(*elem.ARN) + ko.Status.ACKResourceMetadata.ARN = &tmpARN + } + if elem.AtRestEncryptionEnabled != nil { + ko.Status.AtRestEncryptionEnabled = elem.AtRestEncryptionEnabled + } else { + ko.Status.AtRestEncryptionEnabled = nil + } + if elem.AuthTokenEnabled != nil { + ko.Status.AuthTokenEnabled = elem.AuthTokenEnabled + } else { + ko.Status.AuthTokenEnabled = nil + } + if elem.AuthTokenLastModifiedDate != nil { + ko.Status.AuthTokenLastModifiedDate = &metav1.Time{*elem.AuthTokenLastModifiedDate} + } else { + ko.Status.AuthTokenLastModifiedDate = nil + } + if elem.AutoMinorVersionUpgrade != nil { + ko.Spec.AutoMinorVersionUpgrade = elem.AutoMinorVersionUpgrade + } else { + ko.Spec.AutoMinorVersionUpgrade = nil + } + if elem.CacheClusterCreateTime != nil { + ko.Status.CacheClusterCreateTime = &metav1.Time{*elem.CacheClusterCreateTime} + } else { + ko.Status.CacheClusterCreateTime = nil + } + if elem.CacheClusterId != nil { + ko.Spec.CacheClusterID = elem.CacheClusterId + } else { + ko.Spec.CacheClusterID = nil + } + if elem.CacheClusterStatus != nil { + ko.Status.CacheClusterStatus = elem.CacheClusterStatus + } else { + ko.Status.CacheClusterStatus = nil + } + if elem.CacheNodeType != nil { + ko.Spec.CacheNodeType = elem.CacheNodeType + } else { + ko.Spec.CacheNodeType = nil + } + if elem.CacheNodes != nil { + f9 := []*svcapitypes.CacheNode{} + for _, f9iter := range elem.CacheNodes { + f9elem := &svcapitypes.CacheNode{} + if f9iter.CacheNodeCreateTime != nil { + f9elem.CacheNodeCreateTime = &metav1.Time{*f9iter.CacheNodeCreateTime} + } + if f9iter.CacheNodeId != nil { + f9elem.CacheNodeID = f9iter.CacheNodeId + } + if f9iter.CacheNodeStatus != nil { + f9elem.CacheNodeStatus = f9iter.CacheNodeStatus + } + if f9iter.CustomerAvailabilityZone != nil { + f9elem.CustomerAvailabilityZone = f9iter.CustomerAvailabilityZone + } + if f9iter.CustomerOutpostArn != nil { + f9elem.CustomerOutpostARN = f9iter.CustomerOutpostArn + } + if f9iter.Endpoint != nil { + f9elemf5 := &svcapitypes.Endpoint{} + if f9iter.Endpoint.Address != nil { + f9elemf5.Address = f9iter.Endpoint.Address + } + if f9iter.Endpoint.Port != nil { + portCopy := int64(*f9iter.Endpoint.Port) + f9elemf5.Port = &portCopy + } + f9elem.Endpoint = f9elemf5 + } + if f9iter.ParameterGroupStatus != nil { + f9elem.ParameterGroupStatus = f9iter.ParameterGroupStatus + } + if f9iter.SourceCacheNodeId != nil { + f9elem.SourceCacheNodeID = f9iter.SourceCacheNodeId + } + f9 = append(f9, f9elem) + } + ko.Status.CacheNodes = f9 + } 
else { + ko.Status.CacheNodes = nil + } + if elem.CacheParameterGroup != nil { + f10 := &svcapitypes.CacheParameterGroupStatus_SDK{} + if elem.CacheParameterGroup.CacheNodeIdsToReboot != nil { + f10.CacheNodeIDsToReboot = aws.StringSlice(elem.CacheParameterGroup.CacheNodeIdsToReboot) + } + if elem.CacheParameterGroup.CacheParameterGroupName != nil { + f10.CacheParameterGroupName = elem.CacheParameterGroup.CacheParameterGroupName + } + if elem.CacheParameterGroup.ParameterApplyStatus != nil { + f10.ParameterApplyStatus = elem.CacheParameterGroup.ParameterApplyStatus + } + ko.Status.CacheParameterGroup = f10 + } else { + ko.Status.CacheParameterGroup = nil + } + if elem.CacheSecurityGroups != nil { + f11 := []*svcapitypes.CacheSecurityGroupMembership{} + for _, f11iter := range elem.CacheSecurityGroups { + f11elem := &svcapitypes.CacheSecurityGroupMembership{} + if f11iter.CacheSecurityGroupName != nil { + f11elem.CacheSecurityGroupName = f11iter.CacheSecurityGroupName + } + if f11iter.Status != nil { + f11elem.Status = f11iter.Status + } + f11 = append(f11, f11elem) + } + ko.Status.CacheSecurityGroups = f11 + } else { + ko.Status.CacheSecurityGroups = nil + } + if elem.CacheSubnetGroupName != nil { + ko.Spec.CacheSubnetGroupName = elem.CacheSubnetGroupName + } else { + ko.Spec.CacheSubnetGroupName = nil + } + if elem.ClientDownloadLandingPage != nil { + ko.Status.ClientDownloadLandingPage = elem.ClientDownloadLandingPage + } else { + ko.Status.ClientDownloadLandingPage = nil + } + if elem.ConfigurationEndpoint != nil { + f14 := &svcapitypes.Endpoint{} + if elem.ConfigurationEndpoint.Address != nil { + f14.Address = elem.ConfigurationEndpoint.Address + } + if elem.ConfigurationEndpoint.Port != nil { + portCopy := int64(*elem.ConfigurationEndpoint.Port) + f14.Port = &portCopy + } + ko.Status.ConfigurationEndpoint = f14 + } else { + ko.Status.ConfigurationEndpoint = nil + } + if elem.Engine != nil { + ko.Spec.Engine = elem.Engine + } else { + ko.Spec.Engine = nil + } + if elem.EngineVersion != nil { + ko.Spec.EngineVersion = elem.EngineVersion + } else { + ko.Spec.EngineVersion = nil + } + if elem.IpDiscovery != "" { + ko.Spec.IPDiscovery = aws.String(string(elem.IpDiscovery)) + } else { + ko.Spec.IPDiscovery = nil + } + if elem.NetworkType != "" { + ko.Spec.NetworkType = aws.String(string(elem.NetworkType)) + } else { + ko.Spec.NetworkType = nil + } + if elem.NotificationConfiguration != nil { + f19 := &svcapitypes.NotificationConfiguration{} + if elem.NotificationConfiguration.TopicArn != nil { + f19.TopicARN = elem.NotificationConfiguration.TopicArn + } + if elem.NotificationConfiguration.TopicStatus != nil { + f19.TopicStatus = elem.NotificationConfiguration.TopicStatus + } + ko.Status.NotificationConfiguration = f19 + } else { + ko.Status.NotificationConfiguration = nil + } + if elem.NumCacheNodes != nil { + numCacheNodesCopy := int64(*elem.NumCacheNodes) + ko.Spec.NumCacheNodes = &numCacheNodesCopy + } else { + ko.Spec.NumCacheNodes = nil + } + if elem.PendingModifiedValues != nil { + f21 := &svcapitypes.PendingModifiedValues{} + if elem.PendingModifiedValues.AuthTokenStatus != "" { + f21.AuthTokenStatus = aws.String(string(elem.PendingModifiedValues.AuthTokenStatus)) + } + if elem.PendingModifiedValues.CacheNodeIdsToRemove != nil { + f21.CacheNodeIDsToRemove = aws.StringSlice(elem.PendingModifiedValues.CacheNodeIdsToRemove) + } + if elem.PendingModifiedValues.CacheNodeType != nil { + f21.CacheNodeType = elem.PendingModifiedValues.CacheNodeType + } + if 
elem.PendingModifiedValues.EngineVersion != nil { + f21.EngineVersion = elem.PendingModifiedValues.EngineVersion + } + if elem.PendingModifiedValues.NumCacheNodes != nil { + numCacheNodesCopy := int64(*elem.PendingModifiedValues.NumCacheNodes) + f21.NumCacheNodes = &numCacheNodesCopy + } + if elem.PendingModifiedValues.TransitEncryptionEnabled != nil { + f21.TransitEncryptionEnabled = elem.PendingModifiedValues.TransitEncryptionEnabled + } + if elem.PendingModifiedValues.TransitEncryptionMode != "" { + f21.TransitEncryptionMode = aws.String(string(elem.PendingModifiedValues.TransitEncryptionMode)) + } + ko.Status.PendingModifiedValues = f21 + } else { + ko.Status.PendingModifiedValues = nil + } + if elem.PreferredAvailabilityZone != nil { + ko.Spec.PreferredAvailabilityZone = elem.PreferredAvailabilityZone + } else { + ko.Spec.PreferredAvailabilityZone = nil + } + if elem.PreferredMaintenanceWindow != nil { + ko.Spec.PreferredMaintenanceWindow = elem.PreferredMaintenanceWindow + } else { + ko.Spec.PreferredMaintenanceWindow = nil + } + if elem.PreferredOutpostArn != nil { + ko.Spec.PreferredOutpostARN = elem.PreferredOutpostArn + } else { + ko.Spec.PreferredOutpostARN = nil + } + if elem.ReplicationGroupId != nil { + ko.Spec.ReplicationGroupID = elem.ReplicationGroupId + } else { + ko.Spec.ReplicationGroupID = nil + } + if elem.ReplicationGroupLogDeliveryEnabled != nil { + ko.Status.ReplicationGroupLogDeliveryEnabled = elem.ReplicationGroupLogDeliveryEnabled + } else { + ko.Status.ReplicationGroupLogDeliveryEnabled = nil + } + if elem.SecurityGroups != nil { + f27 := []*svcapitypes.SecurityGroupMembership{} + for _, f27iter := range elem.SecurityGroups { + f27elem := &svcapitypes.SecurityGroupMembership{} + if f27iter.SecurityGroupId != nil { + f27elem.SecurityGroupID = f27iter.SecurityGroupId + } + if f27iter.Status != nil { + f27elem.Status = f27iter.Status + } + f27 = append(f27, f27elem) + } + ko.Status.SecurityGroups = f27 + } else { + ko.Status.SecurityGroups = nil + } + if elem.SnapshotRetentionLimit != nil { + snapshotRetentionLimitCopy := int64(*elem.SnapshotRetentionLimit) + ko.Spec.SnapshotRetentionLimit = &snapshotRetentionLimitCopy + } else { + ko.Spec.SnapshotRetentionLimit = nil + } + if elem.SnapshotWindow != nil { + ko.Spec.SnapshotWindow = elem.SnapshotWindow + } else { + ko.Spec.SnapshotWindow = nil + } + if elem.TransitEncryptionEnabled != nil { + ko.Spec.TransitEncryptionEnabled = elem.TransitEncryptionEnabled + } else { + ko.Spec.TransitEncryptionEnabled = nil + } + if elem.TransitEncryptionMode != "" { + ko.Status.TransitEncryptionMode = aws.String(string(elem.TransitEncryptionMode)) + } else { + ko.Status.TransitEncryptionMode = nil + } + found = true + break + } + if !found { + return nil, ackerr.NotFound + } + + rm.setStatusDefaults(ko) + if pendingModifications := ko.Status.PendingModifiedValues; pendingModifications != nil { + if pendingModifications.NumCacheNodes != nil { + ko.Spec.NumCacheNodes = pendingModifications.NumCacheNodes + } + if pendingModifications.CacheNodeType != nil { + ko.Spec.CacheNodeType = pendingModifications.CacheNodeType + } + if pendingModifications.TransitEncryptionEnabled != nil { + ko.Spec.TransitEncryptionEnabled = pendingModifications.TransitEncryptionEnabled + } + } + if isAvailable(r) { + ackcondition.SetSynced(&resource{ko}, corev1.ConditionTrue, nil, nil) + } else { + // Setting resource synced condition to false will trigger a requeue of + // the resource. No need to return a requeue error here. 
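+		// Returning early below also skips the tag read (rm.getTags) at the
+		// bottom of sdkFind, so Spec.Tags is only refreshed from the API once
+		// isAvailable reports the cluster as ready.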
+ ackcondition.SetSynced(&resource{ko}, corev1.ConditionFalse, nil, nil) + return &resource{ko}, nil + } + if ko.Status.ACKResourceMetadata != nil && ko.Status.ACKResourceMetadata.ARN != nil { + resourceARN := (*string)(ko.Status.ACKResourceMetadata.ARN) + tags, err := rm.getTags(ctx, *resourceARN) + if err != nil { + return nil, err + } + ko.Spec.Tags = tags + } + + return &resource{ko}, nil +} + +// requiredFieldsMissingFromReadManyInput returns true if there are any fields +// for the ReadMany Input shape that are required but not present in the +// resource's Spec or Status +func (rm *resourceManager) requiredFieldsMissingFromReadManyInput( + r *resource, +) bool { + return r.ko.Spec.CacheClusterID == nil + +} + +// newListRequestPayload returns SDK-specific struct for the HTTP request +// payload of the List API call for the resource +func (rm *resourceManager) newListRequestPayload( + r *resource, +) (*svcsdk.DescribeCacheClustersInput, error) { + res := &svcsdk.DescribeCacheClustersInput{} + + if r.ko.Spec.CacheClusterID != nil { + res.CacheClusterId = r.ko.Spec.CacheClusterID + } + + return res, nil +} + +// sdkCreate creates the supplied resource in the backend AWS service API and +// returns a copy of the resource with resource fields (in both Spec and +// Status) filled in with values from the CREATE API operation's Output shape. +func (rm *resourceManager) sdkCreate( + ctx context.Context, + desired *resource, +) (created *resource, err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.sdkCreate") + defer func() { + exit(err) + }() + input, err := rm.newCreateRequestPayload(ctx, desired) + if err != nil { + return nil, err + } + + var resp *svcsdk.CreateCacheClusterOutput + _ = resp + resp, err = rm.sdkapi.CreateCacheCluster(ctx, input) + rm.metrics.RecordAPICall("CREATE", "CreateCacheCluster", err) + if err != nil { + return nil, err + } + // Merge in the information we read from the API call above to the copy of + // the original Kubernetes object we passed to the function + ko := desired.ko.DeepCopy() + + if ko.Status.ACKResourceMetadata == nil { + ko.Status.ACKResourceMetadata = &ackv1alpha1.ResourceMetadata{} + } + if resp.CacheCluster.ARN != nil { + arn := ackv1alpha1.AWSResourceName(*resp.CacheCluster.ARN) + ko.Status.ACKResourceMetadata.ARN = &arn + } + if resp.CacheCluster.AtRestEncryptionEnabled != nil { + ko.Status.AtRestEncryptionEnabled = resp.CacheCluster.AtRestEncryptionEnabled + } else { + ko.Status.AtRestEncryptionEnabled = nil + } + if resp.CacheCluster.AuthTokenEnabled != nil { + ko.Status.AuthTokenEnabled = resp.CacheCluster.AuthTokenEnabled + } else { + ko.Status.AuthTokenEnabled = nil + } + if resp.CacheCluster.AuthTokenLastModifiedDate != nil { + ko.Status.AuthTokenLastModifiedDate = &metav1.Time{*resp.CacheCluster.AuthTokenLastModifiedDate} + } else { + ko.Status.AuthTokenLastModifiedDate = nil + } + if resp.CacheCluster.AutoMinorVersionUpgrade != nil { + ko.Spec.AutoMinorVersionUpgrade = resp.CacheCluster.AutoMinorVersionUpgrade + } else { + ko.Spec.AutoMinorVersionUpgrade = nil + } + if resp.CacheCluster.CacheClusterCreateTime != nil { + ko.Status.CacheClusterCreateTime = &metav1.Time{*resp.CacheCluster.CacheClusterCreateTime} + } else { + ko.Status.CacheClusterCreateTime = nil + } + if resp.CacheCluster.CacheClusterId != nil { + ko.Spec.CacheClusterID = resp.CacheCluster.CacheClusterId + } else { + ko.Spec.CacheClusterID = nil + } + if resp.CacheCluster.CacheClusterStatus != nil { + ko.Status.CacheClusterStatus = 
resp.CacheCluster.CacheClusterStatus + } else { + ko.Status.CacheClusterStatus = nil + } + if resp.CacheCluster.CacheNodeType != nil { + ko.Spec.CacheNodeType = resp.CacheCluster.CacheNodeType + } else { + ko.Spec.CacheNodeType = nil + } + if resp.CacheCluster.CacheNodes != nil { + f9 := []*svcapitypes.CacheNode{} + for _, f9iter := range resp.CacheCluster.CacheNodes { + f9elem := &svcapitypes.CacheNode{} + if f9iter.CacheNodeCreateTime != nil { + f9elem.CacheNodeCreateTime = &metav1.Time{*f9iter.CacheNodeCreateTime} + } + if f9iter.CacheNodeId != nil { + f9elem.CacheNodeID = f9iter.CacheNodeId + } + if f9iter.CacheNodeStatus != nil { + f9elem.CacheNodeStatus = f9iter.CacheNodeStatus + } + if f9iter.CustomerAvailabilityZone != nil { + f9elem.CustomerAvailabilityZone = f9iter.CustomerAvailabilityZone + } + if f9iter.CustomerOutpostArn != nil { + f9elem.CustomerOutpostARN = f9iter.CustomerOutpostArn + } + if f9iter.Endpoint != nil { + f9elemf5 := &svcapitypes.Endpoint{} + if f9iter.Endpoint.Address != nil { + f9elemf5.Address = f9iter.Endpoint.Address + } + if f9iter.Endpoint.Port != nil { + portCopy := int64(*f9iter.Endpoint.Port) + f9elemf5.Port = &portCopy + } + f9elem.Endpoint = f9elemf5 + } + if f9iter.ParameterGroupStatus != nil { + f9elem.ParameterGroupStatus = f9iter.ParameterGroupStatus + } + if f9iter.SourceCacheNodeId != nil { + f9elem.SourceCacheNodeID = f9iter.SourceCacheNodeId + } + f9 = append(f9, f9elem) + } + ko.Status.CacheNodes = f9 + } else { + ko.Status.CacheNodes = nil + } + if resp.CacheCluster.CacheParameterGroup != nil { + f10 := &svcapitypes.CacheParameterGroupStatus_SDK{} + if resp.CacheCluster.CacheParameterGroup.CacheNodeIdsToReboot != nil { + f10.CacheNodeIDsToReboot = aws.StringSlice(resp.CacheCluster.CacheParameterGroup.CacheNodeIdsToReboot) + } + if resp.CacheCluster.CacheParameterGroup.CacheParameterGroupName != nil { + f10.CacheParameterGroupName = resp.CacheCluster.CacheParameterGroup.CacheParameterGroupName + } + if resp.CacheCluster.CacheParameterGroup.ParameterApplyStatus != nil { + f10.ParameterApplyStatus = resp.CacheCluster.CacheParameterGroup.ParameterApplyStatus + } + ko.Status.CacheParameterGroup = f10 + } else { + ko.Status.CacheParameterGroup = nil + } + if resp.CacheCluster.CacheSecurityGroups != nil { + f11 := []*svcapitypes.CacheSecurityGroupMembership{} + for _, f11iter := range resp.CacheCluster.CacheSecurityGroups { + f11elem := &svcapitypes.CacheSecurityGroupMembership{} + if f11iter.CacheSecurityGroupName != nil { + f11elem.CacheSecurityGroupName = f11iter.CacheSecurityGroupName + } + if f11iter.Status != nil { + f11elem.Status = f11iter.Status + } + f11 = append(f11, f11elem) + } + ko.Status.CacheSecurityGroups = f11 + } else { + ko.Status.CacheSecurityGroups = nil + } + if resp.CacheCluster.CacheSubnetGroupName != nil { + ko.Spec.CacheSubnetGroupName = resp.CacheCluster.CacheSubnetGroupName + } else { + ko.Spec.CacheSubnetGroupName = nil + } + if resp.CacheCluster.ClientDownloadLandingPage != nil { + ko.Status.ClientDownloadLandingPage = resp.CacheCluster.ClientDownloadLandingPage + } else { + ko.Status.ClientDownloadLandingPage = nil + } + if resp.CacheCluster.ConfigurationEndpoint != nil { + f14 := &svcapitypes.Endpoint{} + if resp.CacheCluster.ConfigurationEndpoint.Address != nil { + f14.Address = resp.CacheCluster.ConfigurationEndpoint.Address + } + if resp.CacheCluster.ConfigurationEndpoint.Port != nil { + portCopy := int64(*resp.CacheCluster.ConfigurationEndpoint.Port) + f14.Port = &portCopy + } + ko.Status.ConfigurationEndpoint 
= f14 + } else { + ko.Status.ConfigurationEndpoint = nil + } + if resp.CacheCluster.Engine != nil { + ko.Spec.Engine = resp.CacheCluster.Engine + } else { + ko.Spec.Engine = nil + } + if resp.CacheCluster.EngineVersion != nil { + ko.Spec.EngineVersion = resp.CacheCluster.EngineVersion + } else { + ko.Spec.EngineVersion = nil + } + if resp.CacheCluster.IpDiscovery != "" { + ko.Spec.IPDiscovery = aws.String(string(resp.CacheCluster.IpDiscovery)) + } else { + ko.Spec.IPDiscovery = nil + } + if resp.CacheCluster.NetworkType != "" { + ko.Spec.NetworkType = aws.String(string(resp.CacheCluster.NetworkType)) + } else { + ko.Spec.NetworkType = nil + } + if resp.CacheCluster.NotificationConfiguration != nil { + f19 := &svcapitypes.NotificationConfiguration{} + if resp.CacheCluster.NotificationConfiguration.TopicArn != nil { + f19.TopicARN = resp.CacheCluster.NotificationConfiguration.TopicArn + } + if resp.CacheCluster.NotificationConfiguration.TopicStatus != nil { + f19.TopicStatus = resp.CacheCluster.NotificationConfiguration.TopicStatus + } + ko.Status.NotificationConfiguration = f19 + } else { + ko.Status.NotificationConfiguration = nil + } + if resp.CacheCluster.NumCacheNodes != nil { + numCacheNodesCopy := int64(*resp.CacheCluster.NumCacheNodes) + ko.Spec.NumCacheNodes = &numCacheNodesCopy + } else { + ko.Spec.NumCacheNodes = nil + } + if resp.CacheCluster.PendingModifiedValues != nil { + f21 := &svcapitypes.PendingModifiedValues{} + if resp.CacheCluster.PendingModifiedValues.AuthTokenStatus != "" { + f21.AuthTokenStatus = aws.String(string(resp.CacheCluster.PendingModifiedValues.AuthTokenStatus)) + } + if resp.CacheCluster.PendingModifiedValues.CacheNodeIdsToRemove != nil { + f21.CacheNodeIDsToRemove = aws.StringSlice(resp.CacheCluster.PendingModifiedValues.CacheNodeIdsToRemove) + } + if resp.CacheCluster.PendingModifiedValues.CacheNodeType != nil { + f21.CacheNodeType = resp.CacheCluster.PendingModifiedValues.CacheNodeType + } + if resp.CacheCluster.PendingModifiedValues.EngineVersion != nil { + f21.EngineVersion = resp.CacheCluster.PendingModifiedValues.EngineVersion + } + if resp.CacheCluster.PendingModifiedValues.NumCacheNodes != nil { + numCacheNodesCopy := int64(*resp.CacheCluster.PendingModifiedValues.NumCacheNodes) + f21.NumCacheNodes = &numCacheNodesCopy + } + if resp.CacheCluster.PendingModifiedValues.TransitEncryptionEnabled != nil { + f21.TransitEncryptionEnabled = resp.CacheCluster.PendingModifiedValues.TransitEncryptionEnabled + } + if resp.CacheCluster.PendingModifiedValues.TransitEncryptionMode != "" { + f21.TransitEncryptionMode = aws.String(string(resp.CacheCluster.PendingModifiedValues.TransitEncryptionMode)) + } + ko.Status.PendingModifiedValues = f21 + } else { + ko.Status.PendingModifiedValues = nil + } + if resp.CacheCluster.PreferredAvailabilityZone != nil { + ko.Spec.PreferredAvailabilityZone = resp.CacheCluster.PreferredAvailabilityZone + } else { + ko.Spec.PreferredAvailabilityZone = nil + } + if resp.CacheCluster.PreferredMaintenanceWindow != nil { + ko.Spec.PreferredMaintenanceWindow = resp.CacheCluster.PreferredMaintenanceWindow + } else { + ko.Spec.PreferredMaintenanceWindow = nil + } + if resp.CacheCluster.PreferredOutpostArn != nil { + ko.Spec.PreferredOutpostARN = resp.CacheCluster.PreferredOutpostArn + } else { + ko.Spec.PreferredOutpostARN = nil + } + if resp.CacheCluster.ReplicationGroupId != nil { + ko.Spec.ReplicationGroupID = resp.CacheCluster.ReplicationGroupId + } else { + ko.Spec.ReplicationGroupID = nil + } + if 
resp.CacheCluster.ReplicationGroupLogDeliveryEnabled != nil { + ko.Status.ReplicationGroupLogDeliveryEnabled = resp.CacheCluster.ReplicationGroupLogDeliveryEnabled + } else { + ko.Status.ReplicationGroupLogDeliveryEnabled = nil + } + if resp.CacheCluster.SecurityGroups != nil { + f27 := []*svcapitypes.SecurityGroupMembership{} + for _, f27iter := range resp.CacheCluster.SecurityGroups { + f27elem := &svcapitypes.SecurityGroupMembership{} + if f27iter.SecurityGroupId != nil { + f27elem.SecurityGroupID = f27iter.SecurityGroupId + } + if f27iter.Status != nil { + f27elem.Status = f27iter.Status + } + f27 = append(f27, f27elem) + } + ko.Status.SecurityGroups = f27 + } else { + ko.Status.SecurityGroups = nil + } + if resp.CacheCluster.SnapshotRetentionLimit != nil { + snapshotRetentionLimitCopy := int64(*resp.CacheCluster.SnapshotRetentionLimit) + ko.Spec.SnapshotRetentionLimit = &snapshotRetentionLimitCopy + } else { + ko.Spec.SnapshotRetentionLimit = nil + } + if resp.CacheCluster.SnapshotWindow != nil { + ko.Spec.SnapshotWindow = resp.CacheCluster.SnapshotWindow + } else { + ko.Spec.SnapshotWindow = nil + } + if resp.CacheCluster.TransitEncryptionEnabled != nil { + ko.Spec.TransitEncryptionEnabled = resp.CacheCluster.TransitEncryptionEnabled + } else { + ko.Spec.TransitEncryptionEnabled = nil + } + if resp.CacheCluster.TransitEncryptionMode != "" { + ko.Status.TransitEncryptionMode = aws.String(string(resp.CacheCluster.TransitEncryptionMode)) + } else { + ko.Status.TransitEncryptionMode = nil + } + + rm.setStatusDefaults(ko) + // custom set output from response + ko, err = rm.customCreateCacheClusterSetOutput(ctx, desired, resp, ko) + if err != nil { + return nil, err + } + if isCreating(&resource{ko}) { + // Setting resource synced condition to false will trigger a requeue of + // the resource. No need to return a requeue error here. 
+ ackcondition.SetSynced(&resource{ko}, corev1.ConditionFalse, nil, nil) + return &resource{ko}, nil + } + + return &resource{ko}, nil +} + +// newCreateRequestPayload returns an SDK-specific struct for the HTTP request +// payload of the Create API call for the resource +func (rm *resourceManager) newCreateRequestPayload( + ctx context.Context, + r *resource, +) (*svcsdk.CreateCacheClusterInput, error) { + res := &svcsdk.CreateCacheClusterInput{} + + if r.ko.Spec.AZMode != nil { + res.AZMode = svcsdktypes.AZMode(*r.ko.Spec.AZMode) + } + if r.ko.Spec.AuthToken != nil { + tmpSecret, err := rm.rr.SecretValueFromReference(ctx, r.ko.Spec.AuthToken) + if err != nil { + return nil, ackrequeue.Needed(err) + } + if tmpSecret != "" { + res.AuthToken = aws.String(tmpSecret) + } + } + if r.ko.Spec.AutoMinorVersionUpgrade != nil { + res.AutoMinorVersionUpgrade = r.ko.Spec.AutoMinorVersionUpgrade + } + if r.ko.Spec.CacheClusterID != nil { + res.CacheClusterId = r.ko.Spec.CacheClusterID + } + if r.ko.Spec.CacheNodeType != nil { + res.CacheNodeType = r.ko.Spec.CacheNodeType + } + if r.ko.Spec.CacheParameterGroupName != nil { + res.CacheParameterGroupName = r.ko.Spec.CacheParameterGroupName + } + if r.ko.Spec.CacheSecurityGroupNames != nil { + res.CacheSecurityGroupNames = aws.ToStringSlice(r.ko.Spec.CacheSecurityGroupNames) + } + if r.ko.Spec.CacheSubnetGroupName != nil { + res.CacheSubnetGroupName = r.ko.Spec.CacheSubnetGroupName + } + if r.ko.Spec.Engine != nil { + res.Engine = r.ko.Spec.Engine + } + if r.ko.Spec.EngineVersion != nil { + res.EngineVersion = r.ko.Spec.EngineVersion + } + if r.ko.Spec.IPDiscovery != nil { + res.IpDiscovery = svcsdktypes.IpDiscovery(*r.ko.Spec.IPDiscovery) + } + if r.ko.Spec.LogDeliveryConfigurations != nil { + f11 := []svcsdktypes.LogDeliveryConfigurationRequest{} + for _, f11iter := range r.ko.Spec.LogDeliveryConfigurations { + f11elem := &svcsdktypes.LogDeliveryConfigurationRequest{} + if f11iter.DestinationDetails != nil { + f11elemf0 := &svcsdktypes.DestinationDetails{} + if f11iter.DestinationDetails.CloudWatchLogsDetails != nil { + f11elemf0f0 := &svcsdktypes.CloudWatchLogsDestinationDetails{} + if f11iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f11elemf0f0.LogGroup = f11iter.DestinationDetails.CloudWatchLogsDetails.LogGroup + } + f11elemf0.CloudWatchLogsDetails = f11elemf0f0 + } + if f11iter.DestinationDetails.KinesisFirehoseDetails != nil { + f11elemf0f1 := &svcsdktypes.KinesisFirehoseDestinationDetails{} + if f11iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f11elemf0f1.DeliveryStream = f11iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream + } + f11elemf0.KinesisFirehoseDetails = f11elemf0f1 + } + f11elem.DestinationDetails = f11elemf0 + } + if f11iter.DestinationType != nil { + f11elem.DestinationType = svcsdktypes.DestinationType(*f11iter.DestinationType) + } + if f11iter.Enabled != nil { + f11elem.Enabled = f11iter.Enabled + } + if f11iter.LogFormat != nil { + f11elem.LogFormat = svcsdktypes.LogFormat(*f11iter.LogFormat) + } + if f11iter.LogType != nil { + f11elem.LogType = svcsdktypes.LogType(*f11iter.LogType) + } + f11 = append(f11, *f11elem) + } + res.LogDeliveryConfigurations = f11 + } + if r.ko.Spec.NetworkType != nil { + res.NetworkType = svcsdktypes.NetworkType(*r.ko.Spec.NetworkType) + } + if r.ko.Spec.NotificationTopicARN != nil { + res.NotificationTopicArn = r.ko.Spec.NotificationTopicARN + } + if r.ko.Spec.NumCacheNodes != nil { + numCacheNodesCopy0 := *r.ko.Spec.NumCacheNodes + if 
numCacheNodesCopy0 > math.MaxInt32 || numCacheNodesCopy0 < math.MinInt32 { + return nil, fmt.Errorf("error: field NumCacheNodes is of type int32") + } + numCacheNodesCopy := int32(numCacheNodesCopy0) + res.NumCacheNodes = &numCacheNodesCopy + } + if r.ko.Spec.OutpostMode != nil { + res.OutpostMode = svcsdktypes.OutpostMode(*r.ko.Spec.OutpostMode) + } + if r.ko.Spec.Port != nil { + portCopy0 := *r.ko.Spec.Port + if portCopy0 > math.MaxInt32 || portCopy0 < math.MinInt32 { + return nil, fmt.Errorf("error: field Port is of type int32") + } + portCopy := int32(portCopy0) + res.Port = &portCopy + } + if r.ko.Spec.PreferredAvailabilityZone != nil { + res.PreferredAvailabilityZone = r.ko.Spec.PreferredAvailabilityZone + } + if r.ko.Spec.PreferredAvailabilityZones != nil { + res.PreferredAvailabilityZones = aws.ToStringSlice(r.ko.Spec.PreferredAvailabilityZones) + } + if r.ko.Spec.PreferredMaintenanceWindow != nil { + res.PreferredMaintenanceWindow = r.ko.Spec.PreferredMaintenanceWindow + } + if r.ko.Spec.PreferredOutpostARN != nil { + res.PreferredOutpostArn = r.ko.Spec.PreferredOutpostARN + } + if r.ko.Spec.PreferredOutpostARNs != nil { + res.PreferredOutpostArns = aws.ToStringSlice(r.ko.Spec.PreferredOutpostARNs) + } + if r.ko.Spec.ReplicationGroupID != nil { + res.ReplicationGroupId = r.ko.Spec.ReplicationGroupID + } + if r.ko.Spec.SecurityGroupIDs != nil { + res.SecurityGroupIds = aws.ToStringSlice(r.ko.Spec.SecurityGroupIDs) + } + if r.ko.Spec.SnapshotARNs != nil { + res.SnapshotArns = aws.ToStringSlice(r.ko.Spec.SnapshotARNs) + } + if r.ko.Spec.SnapshotName != nil { + res.SnapshotName = r.ko.Spec.SnapshotName + } + if r.ko.Spec.SnapshotRetentionLimit != nil { + snapshotRetentionLimitCopy0 := *r.ko.Spec.SnapshotRetentionLimit + if snapshotRetentionLimitCopy0 > math.MaxInt32 || snapshotRetentionLimitCopy0 < math.MinInt32 { + return nil, fmt.Errorf("error: field SnapshotRetentionLimit is of type int32") + } + snapshotRetentionLimitCopy := int32(snapshotRetentionLimitCopy0) + res.SnapshotRetentionLimit = &snapshotRetentionLimitCopy + } + if r.ko.Spec.SnapshotWindow != nil { + res.SnapshotWindow = r.ko.Spec.SnapshotWindow + } + if r.ko.Spec.Tags != nil { + f28 := []svcsdktypes.Tag{} + for _, f28iter := range r.ko.Spec.Tags { + f28elem := &svcsdktypes.Tag{} + if f28iter.Key != nil { + f28elem.Key = f28iter.Key + } + if f28iter.Value != nil { + f28elem.Value = f28iter.Value + } + f28 = append(f28, *f28elem) + } + res.Tags = f28 + } + if r.ko.Spec.TransitEncryptionEnabled != nil { + res.TransitEncryptionEnabled = r.ko.Spec.TransitEncryptionEnabled + } + + return res, nil +} + +// sdkUpdate patches the supplied resource in the backend AWS service API and +// returns a new resource with updated fields. +func (rm *resourceManager) sdkUpdate( + ctx context.Context, + desired *resource, + latest *resource, + delta *ackcompare.Delta, +) (updated *resource, err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.sdkUpdate") + defer func() { + exit(err) + }() + if delta.DifferentAt("Spec.Tags") { + if err = rm.syncTags(ctx, desired, latest); err != nil { + return nil, err + } + } else if !delta.DifferentExcept("Spec.Tags") { + // If the only difference between the desired and latest is in the + // Spec.Tags field, we can skip the ModifyCacheCluster call. 
+ return desired, nil + } + + input, err := rm.newUpdateRequestPayload(ctx, desired, delta) + if err != nil { + return nil, err + } + if err := rm.updateCacheClusterPayload(input, desired, latest, delta); err != nil { + return nil, ackerr.NewTerminalError(err) + } + + var resp *svcsdk.ModifyCacheClusterOutput + _ = resp + resp, err = rm.sdkapi.ModifyCacheCluster(ctx, input) + rm.metrics.RecordAPICall("UPDATE", "ModifyCacheCluster", err) + if err != nil { + return nil, err + } + // Merge in the information we read from the API call above to the copy of + // the original Kubernetes object we passed to the function + ko := desired.ko.DeepCopy() + + if ko.Status.ACKResourceMetadata == nil { + ko.Status.ACKResourceMetadata = &ackv1alpha1.ResourceMetadata{} + } + if resp.CacheCluster.ARN != nil { + arn := ackv1alpha1.AWSResourceName(*resp.CacheCluster.ARN) + ko.Status.ACKResourceMetadata.ARN = &arn + } + if resp.CacheCluster.AtRestEncryptionEnabled != nil { + ko.Status.AtRestEncryptionEnabled = resp.CacheCluster.AtRestEncryptionEnabled + } else { + ko.Status.AtRestEncryptionEnabled = nil + } + if resp.CacheCluster.AuthTokenEnabled != nil { + ko.Status.AuthTokenEnabled = resp.CacheCluster.AuthTokenEnabled + } else { + ko.Status.AuthTokenEnabled = nil + } + if resp.CacheCluster.AuthTokenLastModifiedDate != nil { + ko.Status.AuthTokenLastModifiedDate = &metav1.Time{*resp.CacheCluster.AuthTokenLastModifiedDate} + } else { + ko.Status.AuthTokenLastModifiedDate = nil + } + if resp.CacheCluster.AutoMinorVersionUpgrade != nil { + ko.Spec.AutoMinorVersionUpgrade = resp.CacheCluster.AutoMinorVersionUpgrade + } else { + ko.Spec.AutoMinorVersionUpgrade = nil + } + if resp.CacheCluster.CacheClusterCreateTime != nil { + ko.Status.CacheClusterCreateTime = &metav1.Time{*resp.CacheCluster.CacheClusterCreateTime} + } else { + ko.Status.CacheClusterCreateTime = nil + } + if resp.CacheCluster.CacheClusterId != nil { + ko.Spec.CacheClusterID = resp.CacheCluster.CacheClusterId + } else { + ko.Spec.CacheClusterID = nil + } + if resp.CacheCluster.CacheClusterStatus != nil { + ko.Status.CacheClusterStatus = resp.CacheCluster.CacheClusterStatus + } else { + ko.Status.CacheClusterStatus = nil + } + if resp.CacheCluster.CacheNodeType != nil { + ko.Spec.CacheNodeType = resp.CacheCluster.CacheNodeType + } else { + ko.Spec.CacheNodeType = nil + } + if resp.CacheCluster.CacheNodes != nil { + f9 := []*svcapitypes.CacheNode{} + for _, f9iter := range resp.CacheCluster.CacheNodes { + f9elem := &svcapitypes.CacheNode{} + if f9iter.CacheNodeCreateTime != nil { + f9elem.CacheNodeCreateTime = &metav1.Time{*f9iter.CacheNodeCreateTime} + } + if f9iter.CacheNodeId != nil { + f9elem.CacheNodeID = f9iter.CacheNodeId + } + if f9iter.CacheNodeStatus != nil { + f9elem.CacheNodeStatus = f9iter.CacheNodeStatus + } + if f9iter.CustomerAvailabilityZone != nil { + f9elem.CustomerAvailabilityZone = f9iter.CustomerAvailabilityZone + } + if f9iter.CustomerOutpostArn != nil { + f9elem.CustomerOutpostARN = f9iter.CustomerOutpostArn + } + if f9iter.Endpoint != nil { + f9elemf5 := &svcapitypes.Endpoint{} + if f9iter.Endpoint.Address != nil { + f9elemf5.Address = f9iter.Endpoint.Address + } + if f9iter.Endpoint.Port != nil { + portCopy := int64(*f9iter.Endpoint.Port) + f9elemf5.Port = &portCopy + } + f9elem.Endpoint = f9elemf5 + } + if f9iter.ParameterGroupStatus != nil { + f9elem.ParameterGroupStatus = f9iter.ParameterGroupStatus + } + if f9iter.SourceCacheNodeId != nil { + f9elem.SourceCacheNodeID = f9iter.SourceCacheNodeId + } + f9 = append(f9, 
f9elem) + } + ko.Status.CacheNodes = f9 + } else { + ko.Status.CacheNodes = nil + } + if resp.CacheCluster.CacheParameterGroup != nil { + f10 := &svcapitypes.CacheParameterGroupStatus_SDK{} + if resp.CacheCluster.CacheParameterGroup.CacheNodeIdsToReboot != nil { + f10.CacheNodeIDsToReboot = aws.StringSlice(resp.CacheCluster.CacheParameterGroup.CacheNodeIdsToReboot) + } + if resp.CacheCluster.CacheParameterGroup.CacheParameterGroupName != nil { + f10.CacheParameterGroupName = resp.CacheCluster.CacheParameterGroup.CacheParameterGroupName + } + if resp.CacheCluster.CacheParameterGroup.ParameterApplyStatus != nil { + f10.ParameterApplyStatus = resp.CacheCluster.CacheParameterGroup.ParameterApplyStatus + } + ko.Status.CacheParameterGroup = f10 + } else { + ko.Status.CacheParameterGroup = nil + } + if resp.CacheCluster.CacheSecurityGroups != nil { + f11 := []*svcapitypes.CacheSecurityGroupMembership{} + for _, f11iter := range resp.CacheCluster.CacheSecurityGroups { + f11elem := &svcapitypes.CacheSecurityGroupMembership{} + if f11iter.CacheSecurityGroupName != nil { + f11elem.CacheSecurityGroupName = f11iter.CacheSecurityGroupName + } + if f11iter.Status != nil { + f11elem.Status = f11iter.Status + } + f11 = append(f11, f11elem) + } + ko.Status.CacheSecurityGroups = f11 + } else { + ko.Status.CacheSecurityGroups = nil + } + if resp.CacheCluster.CacheSubnetGroupName != nil { + ko.Spec.CacheSubnetGroupName = resp.CacheCluster.CacheSubnetGroupName + } else { + ko.Spec.CacheSubnetGroupName = nil + } + if resp.CacheCluster.ClientDownloadLandingPage != nil { + ko.Status.ClientDownloadLandingPage = resp.CacheCluster.ClientDownloadLandingPage + } else { + ko.Status.ClientDownloadLandingPage = nil + } + if resp.CacheCluster.ConfigurationEndpoint != nil { + f14 := &svcapitypes.Endpoint{} + if resp.CacheCluster.ConfigurationEndpoint.Address != nil { + f14.Address = resp.CacheCluster.ConfigurationEndpoint.Address + } + if resp.CacheCluster.ConfigurationEndpoint.Port != nil { + portCopy := int64(*resp.CacheCluster.ConfigurationEndpoint.Port) + f14.Port = &portCopy + } + ko.Status.ConfigurationEndpoint = f14 + } else { + ko.Status.ConfigurationEndpoint = nil + } + if resp.CacheCluster.Engine != nil { + ko.Spec.Engine = resp.CacheCluster.Engine + } else { + ko.Spec.Engine = nil + } + if resp.CacheCluster.EngineVersion != nil { + ko.Spec.EngineVersion = resp.CacheCluster.EngineVersion + } else { + ko.Spec.EngineVersion = nil + } + if resp.CacheCluster.IpDiscovery != "" { + ko.Spec.IPDiscovery = aws.String(string(resp.CacheCluster.IpDiscovery)) + } else { + ko.Spec.IPDiscovery = nil + } + if resp.CacheCluster.NetworkType != "" { + ko.Spec.NetworkType = aws.String(string(resp.CacheCluster.NetworkType)) + } else { + ko.Spec.NetworkType = nil + } + if resp.CacheCluster.NotificationConfiguration != nil { + f19 := &svcapitypes.NotificationConfiguration{} + if resp.CacheCluster.NotificationConfiguration.TopicArn != nil { + f19.TopicARN = resp.CacheCluster.NotificationConfiguration.TopicArn + } + if resp.CacheCluster.NotificationConfiguration.TopicStatus != nil { + f19.TopicStatus = resp.CacheCluster.NotificationConfiguration.TopicStatus + } + ko.Status.NotificationConfiguration = f19 + } else { + ko.Status.NotificationConfiguration = nil + } + if resp.CacheCluster.NumCacheNodes != nil { + numCacheNodesCopy := int64(*resp.CacheCluster.NumCacheNodes) + ko.Spec.NumCacheNodes = &numCacheNodesCopy + } else { + ko.Spec.NumCacheNodes = nil + } + if resp.CacheCluster.PendingModifiedValues != nil { + f21 := 
&svcapitypes.PendingModifiedValues{} + if resp.CacheCluster.PendingModifiedValues.AuthTokenStatus != "" { + f21.AuthTokenStatus = aws.String(string(resp.CacheCluster.PendingModifiedValues.AuthTokenStatus)) + } + if resp.CacheCluster.PendingModifiedValues.CacheNodeIdsToRemove != nil { + f21.CacheNodeIDsToRemove = aws.StringSlice(resp.CacheCluster.PendingModifiedValues.CacheNodeIdsToRemove) + } + if resp.CacheCluster.PendingModifiedValues.CacheNodeType != nil { + f21.CacheNodeType = resp.CacheCluster.PendingModifiedValues.CacheNodeType + } + if resp.CacheCluster.PendingModifiedValues.EngineVersion != nil { + f21.EngineVersion = resp.CacheCluster.PendingModifiedValues.EngineVersion + } + if resp.CacheCluster.PendingModifiedValues.NumCacheNodes != nil { + numCacheNodesCopy := int64(*resp.CacheCluster.PendingModifiedValues.NumCacheNodes) + f21.NumCacheNodes = &numCacheNodesCopy + } + if resp.CacheCluster.PendingModifiedValues.TransitEncryptionEnabled != nil { + f21.TransitEncryptionEnabled = resp.CacheCluster.PendingModifiedValues.TransitEncryptionEnabled + } + if resp.CacheCluster.PendingModifiedValues.TransitEncryptionMode != "" { + f21.TransitEncryptionMode = aws.String(string(resp.CacheCluster.PendingModifiedValues.TransitEncryptionMode)) + } + ko.Status.PendingModifiedValues = f21 + } else { + ko.Status.PendingModifiedValues = nil + } + if resp.CacheCluster.PreferredAvailabilityZone != nil { + ko.Spec.PreferredAvailabilityZone = resp.CacheCluster.PreferredAvailabilityZone + } else { + ko.Spec.PreferredAvailabilityZone = nil + } + if resp.CacheCluster.PreferredMaintenanceWindow != nil { + ko.Spec.PreferredMaintenanceWindow = resp.CacheCluster.PreferredMaintenanceWindow + } else { + ko.Spec.PreferredMaintenanceWindow = nil + } + if resp.CacheCluster.PreferredOutpostArn != nil { + ko.Spec.PreferredOutpostARN = resp.CacheCluster.PreferredOutpostArn + } else { + ko.Spec.PreferredOutpostARN = nil + } + if resp.CacheCluster.ReplicationGroupId != nil { + ko.Spec.ReplicationGroupID = resp.CacheCluster.ReplicationGroupId + } else { + ko.Spec.ReplicationGroupID = nil + } + if resp.CacheCluster.ReplicationGroupLogDeliveryEnabled != nil { + ko.Status.ReplicationGroupLogDeliveryEnabled = resp.CacheCluster.ReplicationGroupLogDeliveryEnabled + } else { + ko.Status.ReplicationGroupLogDeliveryEnabled = nil + } + if resp.CacheCluster.SecurityGroups != nil { + f27 := []*svcapitypes.SecurityGroupMembership{} + for _, f27iter := range resp.CacheCluster.SecurityGroups { + f27elem := &svcapitypes.SecurityGroupMembership{} + if f27iter.SecurityGroupId != nil { + f27elem.SecurityGroupID = f27iter.SecurityGroupId + } + if f27iter.Status != nil { + f27elem.Status = f27iter.Status + } + f27 = append(f27, f27elem) + } + ko.Status.SecurityGroups = f27 + } else { + ko.Status.SecurityGroups = nil + } + if resp.CacheCluster.SnapshotRetentionLimit != nil { + snapshotRetentionLimitCopy := int64(*resp.CacheCluster.SnapshotRetentionLimit) + ko.Spec.SnapshotRetentionLimit = &snapshotRetentionLimitCopy + } else { + ko.Spec.SnapshotRetentionLimit = nil + } + if resp.CacheCluster.SnapshotWindow != nil { + ko.Spec.SnapshotWindow = resp.CacheCluster.SnapshotWindow + } else { + ko.Spec.SnapshotWindow = nil + } + if resp.CacheCluster.TransitEncryptionEnabled != nil { + ko.Spec.TransitEncryptionEnabled = resp.CacheCluster.TransitEncryptionEnabled + } else { + ko.Spec.TransitEncryptionEnabled = nil + } + if resp.CacheCluster.TransitEncryptionMode != "" { + ko.Status.TransitEncryptionMode = 
aws.String(string(resp.CacheCluster.TransitEncryptionMode)) + } else { + ko.Status.TransitEncryptionMode = nil + } + + rm.setStatusDefaults(ko) + // custom set output from response + ko, err = rm.customModifyCacheClusterSetOutput(ctx, desired, resp, ko) + if err != nil { + return nil, err + } + if pendingModifications := resp.CacheCluster.PendingModifiedValues; pendingModifications != nil { + if pendingModifications.NumCacheNodes != nil { + ko.Spec.NumCacheNodes = Int64OrNil(pendingModifications.NumCacheNodes) + } + if pendingModifications.CacheNodeType != nil { + ko.Spec.CacheNodeType = pendingModifications.CacheNodeType + } + if pendingModifications.TransitEncryptionEnabled != nil { + ko.Spec.TransitEncryptionEnabled = pendingModifications.TransitEncryptionEnabled + } + } + + return &resource{ko}, nil +} + +// newUpdateRequestPayload returns an SDK-specific struct for the HTTP request +// payload of the Update API call for the resource +func (rm *resourceManager) newUpdateRequestPayload( + ctx context.Context, + r *resource, + delta *ackcompare.Delta, +) (*svcsdk.ModifyCacheClusterInput, error) { + res := &svcsdk.ModifyCacheClusterInput{} + + if r.ko.Spec.AZMode != nil { + res.AZMode = svcsdktypes.AZMode(*r.ko.Spec.AZMode) + } + res.ApplyImmediately = aws.Bool(true) + if r.ko.Spec.AuthToken != nil { + tmpSecret, err := rm.rr.SecretValueFromReference(ctx, r.ko.Spec.AuthToken) + if err != nil { + return nil, ackrequeue.Needed(err) + } + if tmpSecret != "" { + res.AuthToken = aws.String(tmpSecret) + } + } + if r.ko.Spec.AutoMinorVersionUpgrade != nil { + res.AutoMinorVersionUpgrade = r.ko.Spec.AutoMinorVersionUpgrade + } + if r.ko.Spec.CacheClusterID != nil { + res.CacheClusterId = r.ko.Spec.CacheClusterID + } + if r.ko.Spec.CacheNodeType != nil { + res.CacheNodeType = r.ko.Spec.CacheNodeType + } + if r.ko.Spec.CacheParameterGroupName != nil { + res.CacheParameterGroupName = r.ko.Spec.CacheParameterGroupName + } + if r.ko.Spec.CacheSecurityGroupNames != nil { + res.CacheSecurityGroupNames = aws.ToStringSlice(r.ko.Spec.CacheSecurityGroupNames) + } + if r.ko.Spec.Engine != nil { + res.Engine = r.ko.Spec.Engine + } + if r.ko.Spec.EngineVersion != nil { + res.EngineVersion = r.ko.Spec.EngineVersion + } + if r.ko.Spec.IPDiscovery != nil { + res.IpDiscovery = svcsdktypes.IpDiscovery(*r.ko.Spec.IPDiscovery) + } + if r.ko.Spec.LogDeliveryConfigurations != nil { + f13 := []svcsdktypes.LogDeliveryConfigurationRequest{} + for _, f13iter := range r.ko.Spec.LogDeliveryConfigurations { + f13elem := &svcsdktypes.LogDeliveryConfigurationRequest{} + if f13iter.DestinationDetails != nil { + f13elemf0 := &svcsdktypes.DestinationDetails{} + if f13iter.DestinationDetails.CloudWatchLogsDetails != nil { + f13elemf0f0 := &svcsdktypes.CloudWatchLogsDestinationDetails{} + if f13iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f13elemf0f0.LogGroup = f13iter.DestinationDetails.CloudWatchLogsDetails.LogGroup + } + f13elemf0.CloudWatchLogsDetails = f13elemf0f0 + } + if f13iter.DestinationDetails.KinesisFirehoseDetails != nil { + f13elemf0f1 := &svcsdktypes.KinesisFirehoseDestinationDetails{} + if f13iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f13elemf0f1.DeliveryStream = f13iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream + } + f13elemf0.KinesisFirehoseDetails = f13elemf0f1 + } + f13elem.DestinationDetails = f13elemf0 + } + if f13iter.DestinationType != nil { + f13elem.DestinationType = svcsdktypes.DestinationType(*f13iter.DestinationType) + } + if 
f13iter.Enabled != nil { + f13elem.Enabled = f13iter.Enabled + } + if f13iter.LogFormat != nil { + f13elem.LogFormat = svcsdktypes.LogFormat(*f13iter.LogFormat) + } + if f13iter.LogType != nil { + f13elem.LogType = svcsdktypes.LogType(*f13iter.LogType) + } + f13 = append(f13, *f13elem) + } + res.LogDeliveryConfigurations = f13 + } + if r.ko.Spec.NotificationTopicARN != nil { + res.NotificationTopicArn = r.ko.Spec.NotificationTopicARN + } + if r.ko.Spec.NumCacheNodes != nil { + numCacheNodesCopy0 := *r.ko.Spec.NumCacheNodes + if numCacheNodesCopy0 > math.MaxInt32 || numCacheNodesCopy0 < math.MinInt32 { + return nil, fmt.Errorf("error: field NumCacheNodes is of type int32") + } + numCacheNodesCopy := int32(numCacheNodesCopy0) + res.NumCacheNodes = &numCacheNodesCopy + } + if r.ko.Spec.PreferredMaintenanceWindow != nil { + res.PreferredMaintenanceWindow = r.ko.Spec.PreferredMaintenanceWindow + } + if r.ko.Spec.SecurityGroupIDs != nil { + res.SecurityGroupIds = aws.ToStringSlice(r.ko.Spec.SecurityGroupIDs) + } + if r.ko.Spec.SnapshotRetentionLimit != nil { + snapshotRetentionLimitCopy0 := *r.ko.Spec.SnapshotRetentionLimit + if snapshotRetentionLimitCopy0 > math.MaxInt32 || snapshotRetentionLimitCopy0 < math.MinInt32 { + return nil, fmt.Errorf("error: field SnapshotRetentionLimit is of type int32") + } + snapshotRetentionLimitCopy := int32(snapshotRetentionLimitCopy0) + res.SnapshotRetentionLimit = &snapshotRetentionLimitCopy + } + if r.ko.Spec.SnapshotWindow != nil { + res.SnapshotWindow = r.ko.Spec.SnapshotWindow + } + + return res, nil +} + +// sdkDelete deletes the supplied resource in the backend AWS service API +func (rm *resourceManager) sdkDelete( + ctx context.Context, + r *resource, +) (latest *resource, err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.sdkDelete") + defer func() { + exit(err) + }() + if isDeleting(r) { + // Setting resource synced condition to false will trigger a requeue of + // the resource. + ackcondition.SetSynced( + r, + corev1.ConditionFalse, + &condMsgCurrentlyDeleting, + nil, + ) + // Need to return a requeue error here, otherwise: + // - reconciler.deleteResource() marks the resource unmanaged + // - reconciler.HandleReconcileError() does not update status for unmanaged resource + // - reconciler.handleRequeues() is not invoked for delete code path. + // TODO: return err as nil when reconciler is updated. + return r, requeueWaitWhileDeleting + } + if isModifying(r) { + // Setting resource synced condition to false will trigger a requeue of + // the resource. + ackcondition.SetSynced( + r, + corev1.ConditionFalse, + &condMsgNoDeleteWhileModifying, + nil, + ) + // Need to return a requeue error here, otherwise: + // - reconciler.deleteResource() marks the resource unmanaged + // - reconciler.HandleReconcileError() does not update status for unmanaged resource + // - reconciler.handleRequeues() is not invoked for delete code path. + // TODO: return err as nil when reconciler is updated. 
+ return r, requeueWaitWhileModifying + } + + input, err := rm.newDeleteRequestPayload(r) + if err != nil { + return nil, err + } + var resp *svcsdk.DeleteCacheClusterOutput + _ = resp + resp, err = rm.sdkapi.DeleteCacheCluster(ctx, input) + rm.metrics.RecordAPICall("DELETE", "DeleteCacheCluster", err) + return nil, err +} + +// newDeleteRequestPayload returns an SDK-specific struct for the HTTP request +// payload of the Delete API call for the resource +func (rm *resourceManager) newDeleteRequestPayload( + r *resource, +) (*svcsdk.DeleteCacheClusterInput, error) { + res := &svcsdk.DeleteCacheClusterInput{} + + if r.ko.Spec.CacheClusterID != nil { + res.CacheClusterId = r.ko.Spec.CacheClusterID + } + + return res, nil +} + +// setStatusDefaults sets default properties into supplied custom resource +func (rm *resourceManager) setStatusDefaults( + ko *svcapitypes.CacheCluster, +) { + if ko.Status.ACKResourceMetadata == nil { + ko.Status.ACKResourceMetadata = &ackv1alpha1.ResourceMetadata{} + } + if ko.Status.ACKResourceMetadata.Region == nil { + ko.Status.ACKResourceMetadata.Region = &rm.awsRegion + } + if ko.Status.ACKResourceMetadata.OwnerAccountID == nil { + ko.Status.ACKResourceMetadata.OwnerAccountID = &rm.awsAccountID + } + if ko.Status.Conditions == nil { + ko.Status.Conditions = []*ackv1alpha1.Condition{} + } +} + +// updateConditions returns updated resource, true; if conditions were updated +// else it returns nil, false +func (rm *resourceManager) updateConditions( + r *resource, + onSuccess bool, + err error, +) (*resource, bool) { + ko := r.ko.DeepCopy() + rm.setStatusDefaults(ko) + + // Terminal condition + var terminalCondition *ackv1alpha1.Condition = nil + var recoverableCondition *ackv1alpha1.Condition = nil + var syncCondition *ackv1alpha1.Condition = nil + for _, condition := range ko.Status.Conditions { + if condition.Type == ackv1alpha1.ConditionTypeTerminal { + terminalCondition = condition + } + if condition.Type == ackv1alpha1.ConditionTypeRecoverable { + recoverableCondition = condition + } + if condition.Type == ackv1alpha1.ConditionTypeResourceSynced { + syncCondition = condition + } + } + var termError *ackerr.TerminalError + if rm.terminalAWSError(err) || err == ackerr.SecretTypeNotSupported || err == ackerr.SecretNotFound || errors.As(err, &termError) { + if terminalCondition == nil { + terminalCondition = &ackv1alpha1.Condition{ + Type: ackv1alpha1.ConditionTypeTerminal, + } + ko.Status.Conditions = append(ko.Status.Conditions, terminalCondition) + } + var errorMessage = "" + if err == ackerr.SecretTypeNotSupported || err == ackerr.SecretNotFound || errors.As(err, &termError) { + errorMessage = err.Error() + } else { + awsErr, _ := ackerr.AWSError(err) + errorMessage = awsErr.Error() + } + terminalCondition.Status = corev1.ConditionTrue + terminalCondition.Message = &errorMessage + } else { + // Clear the terminal condition if no longer present + if terminalCondition != nil { + terminalCondition.Status = corev1.ConditionFalse + terminalCondition.Message = nil + } + // Handling Recoverable Conditions + if err != nil { + if recoverableCondition == nil { + // Add a new Condition containing a non-terminal error + recoverableCondition = &ackv1alpha1.Condition{ + Type: ackv1alpha1.ConditionTypeRecoverable, + } + ko.Status.Conditions = append(ko.Status.Conditions, recoverableCondition) + } + recoverableCondition.Status = corev1.ConditionTrue + awsErr, _ := ackerr.AWSError(err) + errorMessage := err.Error() + if awsErr != nil { + errorMessage = awsErr.Error() + } + 
recoverableCondition.Message = &errorMessage + } else if recoverableCondition != nil { + recoverableCondition.Status = corev1.ConditionFalse + recoverableCondition.Message = nil + } + } + // Required to avoid the "declared but not used" error in the default case + _ = syncCondition + if terminalCondition != nil || recoverableCondition != nil || syncCondition != nil { + return &resource{ko}, true // updated + } + return nil, false // not updated +} + +// terminalAWSError returns awserr, true; if the supplied error is an aws Error type +// and if the exception indicates that it is a Terminal exception +// 'Terminal' exception are specified in generator configuration +func (rm *resourceManager) terminalAWSError(err error) bool { + if err == nil { + return false + } + + var terminalErr smithy.APIError + if !errors.As(err, &terminalErr) { + return false + } + switch terminalErr.ErrorCode() { + case "ReplicationGroupNotFoundFault", + "InvalidReplicationGroupStateFault", + "CacheClusterAlreadyExistsFault", + "InsufficientCacheClusterCapacityFault", + "CacheSecurityGroupNotFoundFault", + "CacheSubnetGroupNotFoundFault", + "ClusterQuotaForCustomerExceededFault", + "NodeQuotaForClusterExceededFault", + "NodeQuotaForCustomerExceededFault", + "CacheParameterGroupNotFoundFault", + "InvalidVPCNetworkStateFault", + "TagQuotaPerResource", + "InvalidParameterValue", + "InvalidParameterCombination": + return true + default: + return false + } +} diff --git a/pkg/resource/cache_cluster/tags.go b/pkg/resource/cache_cluster/tags.go new file mode 100644 index 00000000..0095e01b --- /dev/null +++ b/pkg/resource/cache_cluster/tags.go @@ -0,0 +1,119 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package cache_cluster + +import ( + "slices" + "strings" + + acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" + + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" +) + +var ( + _ = svcapitypes.CacheCluster{} + _ = acktags.NewTags() + ACKSystemTags = []string{"services.k8s.aws/namespace", "services.k8s.aws/controller-version"} +) + +// convertToOrderedACKTags converts the tags parameter into 'acktags.Tags' shape. +// This method helps in creating the hub(acktags.Tags) for merging +// default controller tags with existing resource tags. It also returns a slice +// of keys maintaining the original key Order when the tags are a list +func convertToOrderedACKTags(tags []*svcapitypes.Tag) (acktags.Tags, []string) { + result := acktags.NewTags() + keyOrder := []string{} + + if len(tags) == 0 { + return result, keyOrder + } + for _, t := range tags { + if t.Key != nil { + keyOrder = append(keyOrder, *t.Key) + if t.Value != nil { + result[*t.Key] = *t.Value + } else { + result[*t.Key] = "" + } + } + } + + return result, keyOrder +} + +// fromACKTags converts the tags parameter into []*svcapitypes.Tag shape. 
+// This method helps in setting the tags back inside AWSResource after merging +// default controller tags with existing resource tags. When a list, +// it maintains the order from original +func fromACKTags(tags acktags.Tags, keyOrder []string) []*svcapitypes.Tag { + result := []*svcapitypes.Tag{} + + for _, k := range keyOrder { + v, ok := tags[k] + if ok { + tag := svcapitypes.Tag{Key: &k, Value: &v} + result = append(result, &tag) + delete(tags, k) + } + } + for k, v := range tags { + tag := svcapitypes.Tag{Key: &k, Value: &v} + result = append(result, &tag) + } + + return result +} + +// ignoreSystemTags ignores tags that have keys that start with "aws:" +// and ACKSystemTags, to avoid patching them to the resourceSpec. +// Eg. resources created with cloudformation have tags that cannot be +// removed by an ACK controller +func ignoreSystemTags(tags acktags.Tags) { + for k := range tags { + if strings.HasPrefix(k, "aws:") || + slices.Contains(ACKSystemTags, k) { + delete(tags, k) + } + } +} + +// syncAWSTags ensures AWS-managed tags (prefixed with "aws:") from the latest resource state +// are preserved in the desired state. This prevents the controller from attempting to +// modify AWS-managed tags, which would result in an error. +// +// AWS-managed tags are automatically added by AWS services (e.g., CloudFormation, Service Catalog) +// and cannot be modified or deleted through normal tag operations. Common examples include: +// - aws:cloudformation:stack-name +// - aws:servicecatalog:productArn +// +// Parameters: +// - a: The target Tags map to be updated (typically desired state) +// - b: The source Tags map containing AWS-managed tags (typically latest state) +// +// Example: +// +// latest := Tags{"aws:cloudformation:stack-name": "my-stack", "environment": "prod"} +// desired := Tags{"environment": "dev"} +// SyncAWSTags(desired, latest) +// desired now contains {"aws:cloudformation:stack-name": "my-stack", "environment": "dev"} +func syncAWSTags(a acktags.Tags, b acktags.Tags) { + for k := range b { + if strings.HasPrefix(k, "aws:") { + a[k] = b[k] + } + } +} diff --git a/pkg/resource/cache_parameter_group/custom_set_output.go b/pkg/resource/cache_parameter_group/custom_set_output.go deleted file mode 100644 index 76a3bf96..00000000 --- a/pkg/resource/cache_parameter_group/custom_set_output.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. 
- -package cache_parameter_group - -import ( - "context" - svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" - ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" - corev1 "k8s.io/api/core/v1" -) - -func (rm *resourceManager) CustomDescribeCacheParameterGroupsSetOutput( - ctx context.Context, - r *resource, - resp *svcsdk.DescribeCacheParameterGroupsOutput, - ko *svcapitypes.CacheParameterGroup, -) (*svcapitypes.CacheParameterGroup, error) { - // Retrieve parameters using DescribeCacheParameters API and populate ko.Status.ParameterNameValues - if len(resp.CacheParameterGroups) == 0 { - return ko, nil - } - cpg := resp.CacheParameterGroups[0] - // Populate latest.ko.Spec.ParameterNameValues with latest parameter values - // Populate latest.ko.Status.Parameters with latest detailed parameters - error := rm.customSetOutputDescribeCacheParameters(ctx, cpg.CacheParameterGroupName, ko) - if error != nil { - return nil, error - } - return ko, nil -} - -func (rm *resourceManager) CustomCreateCacheParameterGroupSetOutput( - ctx context.Context, - r *resource, - resp *svcsdk.CreateCacheParameterGroupOutput, - ko *svcapitypes.CacheParameterGroup, -) (*svcapitypes.CacheParameterGroup, error) { - if r.ko.Spec.ParameterNameValues != nil && len(r.ko.Spec.ParameterNameValues) != 0 { - // Spec has parameters name and values. Create API does not save these, but Modify API does. - // Thus, Create needs to be followed by Modify call to save parameters from Spec. - // Setting synched condition to false, so that reconciler gets invoked again - // and modify logic gets executed. - rm.setCondition(ko, ackv1alpha1.ConditionTypeResourceSynced, corev1.ConditionFalse) - } - return ko, nil -} diff --git a/pkg/resource/cache_parameter_group/custom_update_api.go b/pkg/resource/cache_parameter_group/custom_update_api.go deleted file mode 100644 index fdb91f14..00000000 --- a/pkg/resource/cache_parameter_group/custom_update_api.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package cache_parameter_group - -import ( - "context" - svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" - ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" -) - -// Implements specialized logic for update CacheParameterGroup. 
-func (rm *resourceManager) customUpdateCacheParameterGroup( - ctx context.Context, - desired *resource, - latest *resource, - delta *ackcompare.Delta, -) (*resource, error) { - desiredParameters := desired.ko.Spec.ParameterNameValues - latestParameters := latest.ko.Spec.ParameterNameValues - - updated := false - var err error - // Update - if (desiredParameters == nil || len(desiredParameters) == 0) && - (latestParameters != nil && len(latestParameters) > 0) { - updated, err = rm.resetAllParameters(ctx, desired) - if !updated || err != nil { - return nil, err - } - } else { - removedParameters, modifiedParameters, addedParameters := rm.provideDelta(desiredParameters, latestParameters) - if removedParameters != nil && len(removedParameters) > 0 { - updated, err = rm.resetParameters(ctx, desired, removedParameters) - if !updated || err != nil { - return nil, err - } - } - if modifiedParameters != nil && len(modifiedParameters) > 0 { - updated, err = rm.saveParameters(ctx, desired, modifiedParameters) - if !updated || err != nil { - return nil, err - } - } - if addedParameters != nil && len(addedParameters) > 0 { - updated, err = rm.saveParameters(ctx, desired, addedParameters) - if !updated || err != nil { - return nil, err - } - } - } - if updated { - rm.setStatusDefaults(latest.ko) - // Populate latest.ko.Spec.ParameterNameValues with latest parameter values - // Populate latest.ko.Status.Parameters with latest detailed parameters - error := rm.customSetOutputDescribeCacheParameters(ctx, desired.ko.Spec.CacheParameterGroupName, latest.ko) - if error != nil { - return nil, error - } - } - return latest, nil -} - -// provideDelta compares given desired and latest Parameters and returns -// removedParameters, modifiedParameters, addedParameters -func (rm *resourceManager) provideDelta( - desiredParameters []*svcapitypes.ParameterNameValue, - latestParameters []*svcapitypes.ParameterNameValue, -) ([]*svcapitypes.ParameterNameValue, []*svcapitypes.ParameterNameValue, []*svcapitypes.ParameterNameValue) { - - desiredPametersMap := map[string]*svcapitypes.ParameterNameValue{} - for _, parameter := range desiredParameters { - p := *parameter - desiredPametersMap[*p.ParameterName] = &p - } - latestPametersMap := map[string]*svcapitypes.ParameterNameValue{} - for _, parameter := range latestParameters { - p := *parameter - latestPametersMap[*p.ParameterName] = &p - } - - removedParameters := []*svcapitypes.ParameterNameValue{} // available in latest but not found in desired - modifiedParameters := []*svcapitypes.ParameterNameValue{} // available in both desired, latest but values differ - addedParameters := []*svcapitypes.ParameterNameValue{} // available in desired but not found in latest - for latestParameterName, latestParameterNameValue := range latestPametersMap { - desiredParameterNameValue, found := desiredPametersMap[latestParameterName] - if found && desiredParameterNameValue != nil && - desiredParameterNameValue.ParameterValue != nil && *desiredParameterNameValue.ParameterValue != "" { - if *desiredParameterNameValue.ParameterValue != *latestParameterNameValue.ParameterValue { - // available in both desired, latest but values differ - modified := *desiredParameterNameValue - modifiedParameters = append(modifiedParameters, &modified) - } - } else { - // available in latest but not found in desired - removed := *latestParameterNameValue - removedParameters = append(removedParameters, &removed) - } - } - for desiredParameterName, desiredParameterNameValue := range desiredPametersMap { - _, 
found := latestPametersMap[desiredParameterName] - if !found && desiredParameterNameValue != nil { - // available in desired but not found in latest - added := *desiredParameterNameValue - if added.ParameterValue != nil && *added.ParameterValue != "" { - addedParameters = append(addedParameters, &added) - } - } - } - return removedParameters, modifiedParameters, addedParameters -} diff --git a/pkg/resource/cache_parameter_group/delta.go b/pkg/resource/cache_parameter_group/delta.go index 44b1d613..45e19535 100644 --- a/pkg/resource/cache_parameter_group/delta.go +++ b/pkg/resource/cache_parameter_group/delta.go @@ -64,10 +64,16 @@ func newResourceDelta( delta.Add("Spec.Description", a.ko.Spec.Description, b.ko.Spec.Description) } } - if !reflect.DeepEqual(a.ko.Spec.ParameterNameValues, b.ko.Spec.ParameterNameValues) { + if len(a.ko.Spec.ParameterNameValues) != len(b.ko.Spec.ParameterNameValues) { delta.Add("Spec.ParameterNameValues", a.ko.Spec.ParameterNameValues, b.ko.Spec.ParameterNameValues) + } else if len(a.ko.Spec.ParameterNameValues) > 0 { + if !reflect.DeepEqual(a.ko.Spec.ParameterNameValues, b.ko.Spec.ParameterNameValues) { + delta.Add("Spec.ParameterNameValues", a.ko.Spec.ParameterNameValues, b.ko.Spec.ParameterNameValues) + } } - if !ackcompare.MapStringStringEqual(ToACKTags(a.ko.Spec.Tags), ToACKTags(b.ko.Spec.Tags)) { + desiredACKTags, _ := convertToOrderedACKTags(a.ko.Spec.Tags) + latestACKTags, _ := convertToOrderedACKTags(b.ko.Spec.Tags) + if !ackcompare.MapStringStringEqual(desiredACKTags, latestACKTags) { delta.Add("Spec.Tags", a.ko.Spec.Tags, b.ko.Spec.Tags) } diff --git a/pkg/resource/cache_parameter_group/descriptor.go b/pkg/resource/cache_parameter_group/descriptor.go index 4310707d..95307743 100644 --- a/pkg/resource/cache_parameter_group/descriptor.go +++ b/pkg/resource/cache_parameter_group/descriptor.go @@ -20,6 +20,7 @@ import ( ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" rtclient "sigs.k8s.io/controller-runtime/pkg/client" k8sctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -27,7 +28,7 @@ import ( ) const ( - finalizerString = "finalizers.elasticache.services.k8s.aws/CacheParameterGroup" + FinalizerString = "finalizers.elasticache.services.k8s.aws/CacheParameterGroup" ) var ( @@ -43,10 +44,10 @@ var ( type resourceDescriptor struct { } -// GroupKind returns a Kubernetes metav1.GroupKind struct that describes the -// API Group and Kind of CRs described by the descriptor -func (d *resourceDescriptor) GroupKind() *metav1.GroupKind { - return &GroupKind +// GroupVersionKind returns a Kubernetes schema.GroupVersionKind struct that +// describes the API Group, Version and Kind of CRs described by the descriptor +func (d *resourceDescriptor) GroupVersionKind() schema.GroupVersionKind { + return svcapitypes.GroupVersion.WithKind(GroupKind.Kind) } // EmptyRuntimeObject returns an empty object prototype that may be used in @@ -87,8 +88,8 @@ func (d *resourceDescriptor) IsManaged( // https://github.com/kubernetes-sigs/controller-runtime/issues/994 is // fixed. 
This should be able to be: // - // return k8sctrlutil.ContainsFinalizer(obj, finalizerString) - return containsFinalizer(obj, finalizerString) + // return k8sctrlutil.ContainsFinalizer(obj, FinalizerString) + return containsFinalizer(obj, FinalizerString) } // Remove once https://github.com/kubernetes-sigs/controller-runtime/issues/994 @@ -117,7 +118,7 @@ func (d *resourceDescriptor) MarkManaged( // Should not happen. If it does, there is a bug in the code panic("nil RuntimeMetaObject in AWSResource") } - k8sctrlutil.AddFinalizer(obj, finalizerString) + k8sctrlutil.AddFinalizer(obj, FinalizerString) } // MarkUnmanaged removes the supplied resource from management by ACK. What @@ -132,7 +133,7 @@ func (d *resourceDescriptor) MarkUnmanaged( // Should not happen. If it does, there is a bug in the code panic("nil RuntimeMetaObject in AWSResource") } - k8sctrlutil.RemoveFinalizer(obj, finalizerString) + k8sctrlutil.RemoveFinalizer(obj, FinalizerString) } // MarkAdopted places descriptors on the custom resource that indicate the diff --git a/pkg/resource/cache_parameter_group/custom_api.go b/pkg/resource/cache_parameter_group/hooks.go similarity index 54% rename from pkg/resource/cache_parameter_group/custom_api.go rename to pkg/resource/cache_parameter_group/hooks.go index 4919560e..67b7e1d9 100644 --- a/pkg/resource/cache_parameter_group/custom_api.go +++ b/pkg/resource/cache_parameter_group/hooks.go @@ -15,10 +15,14 @@ package cache_parameter_group import ( "context" + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + svcsdktypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -85,11 +89,11 @@ func (rm *resourceManager) provideEvents( maxRecords int64, ) ([]*svcapitypes.Event, error) { input := &svcsdk.DescribeEventsInput{} - input.SetSourceType("cache-parameter-group") - input.SetSourceIdentifier(*cacheParameterGroupName) - input.SetMaxRecords(maxRecords) - input.SetDuration(eventsDuration) - resp, err := rm.sdkapi.DescribeEventsWithContext(ctx, input) + input.SourceType = svcsdktypes.SourceTypeCacheParameterGroup + input.SourceIdentifier = cacheParameterGroupName + input.MaxRecords = aws.Int32(int32(maxRecords)) + input.Duration = aws.Int32(eventsDuration) + resp, err := rm.sdkapi.DescribeEvents(ctx, input) rm.metrics.RecordAPICall("READ_MANY", "DescribeEvents-CacheParameterGroup", err) if err != nil { rm.log.V(1).Info("Error during DescribeEvents-CacheParameterGroup", "error", err) @@ -128,10 +132,10 @@ func (rm *resourceManager) describeCacheParameters( if err != nil { return nil, err } - response, respErr := rm.sdkapi.DescribeCacheParametersWithContext(ctx, input) + response, respErr := rm.sdkapi.DescribeCacheParameters(ctx, input) rm.metrics.RecordAPICall("READ_MANY", "DescribeCacheParameters", respErr) if respErr != nil { - if awsErr, ok := ackerr.AWSError(respErr); ok && awsErr.Code() == "CacheParameterGroupNotFound" { + if awsErr, ok := ackerr.AWSError(respErr); ok && awsErr.ErrorCode() == "CacheParameterGroupNotFound" { return nil, ackerr.NotFound } rm.log.V(1).Info("Error during DescribeCacheParameters", "error", 
respErr) @@ -175,13 +179,13 @@ func (rm *resourceManager) newDescribeCacheParametersRequestPayload( res := &svcsdk.DescribeCacheParametersInput{} if cacheParameterGroupName != nil { - res.SetCacheParameterGroupName(*cacheParameterGroupName) + res.CacheParameterGroupName = cacheParameterGroupName } if source != nil { - res.SetSource(*source) + res.Source = source } if paginationMarker != nil { - res.SetMarker(*paginationMarker) + res.Marker = paginationMarker } return res, nil } @@ -193,11 +197,11 @@ func (rm *resourceManager) resetAllParameters( ) (bool, error) { input := &svcsdk.ResetCacheParameterGroupInput{} if desired.ko.Spec.CacheParameterGroupName != nil { - input.SetCacheParameterGroupName(*desired.ko.Spec.CacheParameterGroupName) + input.CacheParameterGroupName = desired.ko.Spec.CacheParameterGroupName } - input.SetResetAllParameters(true) + input.ResetAllParameters = aws.Bool(true) - _, err := rm.sdkapi.ResetCacheParameterGroupWithContext(ctx, input) + _, err := rm.sdkapi.ResetCacheParameterGroup(ctx, input) rm.metrics.RecordAPICall("UPDATE", "ResetCacheParameterGroup-ResetAllParameters", err) if err != nil { rm.log.V(1).Info("Error during ResetCacheParameterGroup-ResetAllParameters", "error", err) @@ -214,21 +218,30 @@ func (rm *resourceManager) resetParameters( ) (bool, error) { input := &svcsdk.ResetCacheParameterGroupInput{} if desired.ko.Spec.CacheParameterGroupName != nil { - input.SetCacheParameterGroupName(*desired.ko.Spec.CacheParameterGroupName) + input.CacheParameterGroupName = desired.ko.Spec.CacheParameterGroupName } if parameters != nil && len(parameters) > 0 { - parametersToReset := []*svcsdk.ParameterNameValue{} + parametersToReset := []*svcsdktypes.ParameterNameValue{} for _, parameter := range parameters { - parameterToReset := &svcsdk.ParameterNameValue{} + parameterToReset := &svcsdktypes.ParameterNameValue{} if parameter.ParameterName != nil { - parameterToReset.SetParameterName(*parameter.ParameterName) + parameterToReset.ParameterName = parameter.ParameterName + } + if parameter.ParameterValue != nil { + parameterToReset.ParameterValue = parameter.ParameterValue } parametersToReset = append(parametersToReset, parameterToReset) } - input.SetParameterNameValues(parametersToReset) + parameterNameValues := make([]svcsdktypes.ParameterNameValue, len(parametersToReset)) + for i, parameter := range parametersToReset { + if parameter != nil { + parameterNameValues[i] = *parameter + } + } + input.ParameterNameValues = parameterNameValues } - _, err := rm.sdkapi.ResetCacheParameterGroupWithContext(ctx, input) + _, err := rm.sdkapi.ResetCacheParameterGroup(ctx, input) rm.metrics.RecordAPICall("UPDATE", "ResetCacheParameterGroup", err) if err != nil { rm.log.V(1).Info("Error during ResetCacheParameterGroup", "error", err) @@ -246,14 +259,14 @@ func (rm *resourceManager) saveParameters( ) (bool, error) { modifyApiBatchSize := 20 // Paginated save: 20 parameters in single api call - parametersToSave := []*svcsdk.ParameterNameValue{} + parametersToSave := []svcsdktypes.ParameterNameValue{} for _, parameter := range parameters { - parameterToSave := &svcsdk.ParameterNameValue{} + parameterToSave := svcsdktypes.ParameterNameValue{} if parameter.ParameterName != nil { - parameterToSave.SetParameterName(*parameter.ParameterName) + parameterToSave.ParameterName = parameter.ParameterName } if parameter.ParameterValue != nil { - parameterToSave.SetParameterValue(*parameter.ParameterValue) + parameterToSave.ParameterValue = parameter.ParameterValue } parametersToSave = 
append(parametersToSave, parameterToSave) @@ -263,7 +276,7 @@ func (rm *resourceManager) saveParameters( return false, err } // re-init to save next set of parameters - parametersToSave = []*svcsdk.ParameterNameValue{} + parametersToSave = []svcsdktypes.ParameterNameValue{} } } if len(parametersToSave) > 0 { // when len(parameters) % modifyApiBatchSize != 0 @@ -280,16 +293,14 @@ func (rm *resourceManager) saveParameters( func (rm *resourceManager) modifyCacheParameterGroup( ctx context.Context, desired *resource, - parameters []*svcsdk.ParameterNameValue, + parameters []svcsdktypes.ParameterNameValue, ) (bool, error) { input := &svcsdk.ModifyCacheParameterGroupInput{} if desired.ko.Spec.CacheParameterGroupName != nil { - input.SetCacheParameterGroupName(*desired.ko.Spec.CacheParameterGroupName) + input.CacheParameterGroupName = desired.ko.Spec.CacheParameterGroupName } - if parameters != nil && len(parameters) > 0 { - input.SetParameterNameValues(parameters) - } - _, err := rm.sdkapi.ModifyCacheParameterGroupWithContext(ctx, input) + input.ParameterNameValues = parameters + _, err := rm.sdkapi.ModifyCacheParameterGroup(ctx, input) rm.metrics.RecordAPICall("UPDATE", "ModifyCacheParameterGroup", err) if err != nil { rm.log.V(1).Info("Error during ModifyCacheParameterGroup", "error", err) @@ -324,3 +335,147 @@ func (rm *resourceManager) setCondition( condition.Status = cStatus } } + +func (rm *resourceManager) CustomDescribeCacheParameterGroupsSetOutput( + ctx context.Context, + r *resource, + resp *svcsdk.DescribeCacheParameterGroupsOutput, + ko *svcapitypes.CacheParameterGroup, +) (*svcapitypes.CacheParameterGroup, error) { + // Retrieve parameters using DescribeCacheParameters API and populate ko.Status.ParameterNameValues + if len(resp.CacheParameterGroups) == 0 { + return ko, nil + } + cpg := resp.CacheParameterGroups[0] + // Populate latest.ko.Spec.ParameterNameValues with latest parameter values + // Populate latest.ko.Status.Parameters with latest detailed parameters + error := rm.customSetOutputDescribeCacheParameters(ctx, cpg.CacheParameterGroupName, ko) + if error != nil { + return nil, error + } + return ko, nil +} + +func (rm *resourceManager) CustomCreateCacheParameterGroupSetOutput( + ctx context.Context, + r *resource, + resp *svcsdk.CreateCacheParameterGroupOutput, + ko *svcapitypes.CacheParameterGroup, +) (*svcapitypes.CacheParameterGroup, error) { + if r.ko.Spec.ParameterNameValues != nil && len(r.ko.Spec.ParameterNameValues) != 0 { + // Spec has parameters name and values. Create API does not save these, but Modify API does. + // Thus, Create needs to be followed by Modify call to save parameters from Spec. + // Setting synched condition to false, so that reconciler gets invoked again + // and modify logic gets executed. + rm.setCondition(ko, ackv1alpha1.ConditionTypeResourceSynced, corev1.ConditionFalse) + } + return ko, nil +} + +// Implements specialized logic for update CacheParameterGroup. 
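The rewritten saveParameters above pages its work because ModifyCacheParameterGroup accepts at most 20 parameters per request (hence modifyApiBatchSize = 20). A minimal, standalone sketch of that chunking pattern; the chunk helper and the sample slice are illustrative and not part of this controller:

package main

import "fmt"

// chunk splits items into consecutive batches of at most size elements,
// mirroring how saveParameters groups ParameterNameValue entries before
// each ModifyCacheParameterGroup call.
func chunk[T any](items []T, size int) [][]T {
	var batches [][]T
	for size < len(items) {
		batches = append(batches, items[:size])
		items = items[size:]
	}
	if len(items) > 0 {
		batches = append(batches, items)
	}
	return batches
}

func main() {
	params := make([]string, 45) // e.g. 45 parameter name/value pairs
	for i := range params {
		params[i] = fmt.Sprintf("param-%d", i)
	}
	for i, batch := range chunk(params, 20) {
		fmt.Printf("ModifyCacheParameterGroup call %d: %d parameters\n", i+1, len(batch))
	}
	// ModifyCacheParameterGroup call 1: 20 parameters
	// ModifyCacheParameterGroup call 2: 20 parameters
	// ModifyCacheParameterGroup call 3: 5 parameters
}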
+func (rm *resourceManager) customUpdateCacheParameterGroup( + ctx context.Context, + desired *resource, + latest *resource, + delta *ackcompare.Delta, +) (*resource, error) { + desiredParameters := desired.ko.Spec.ParameterNameValues + latestParameters := latest.ko.Spec.ParameterNameValues + + updated := false + var err error + // Update + if (desiredParameters == nil || len(desiredParameters) == 0) && + (latestParameters != nil && len(latestParameters) > 0) { + updated, err = rm.resetAllParameters(ctx, desired) + if !updated || err != nil { + return nil, err + } + } else { + removedParameters, modifiedParameters, addedParameters := rm.provideDelta(desiredParameters, latestParameters) + if removedParameters != nil && len(removedParameters) > 0 { + updated, err = rm.resetParameters(ctx, desired, removedParameters) + if !updated || err != nil { + return nil, err + } + } + if modifiedParameters != nil && len(modifiedParameters) > 0 { + updated, err = rm.saveParameters(ctx, desired, modifiedParameters) + if !updated || err != nil { + return nil, err + } + } + if addedParameters != nil && len(addedParameters) > 0 { + updated, err = rm.saveParameters(ctx, desired, addedParameters) + if !updated || err != nil { + return nil, err + } + } + } + if updated { + rm.setStatusDefaults(latest.ko) + // Populate latest.ko.Spec.ParameterNameValues with latest parameter values + // Populate latest.ko.Status.Parameters with latest detailed parameters + error := rm.customSetOutputDescribeCacheParameters(ctx, desired.ko.Spec.CacheParameterGroupName, latest.ko) + if error != nil { + return nil, error + } + } + return latest, nil +} + +// provideDelta compares given desired and latest Parameters and returns +// removedParameters, modifiedParameters, addedParameters +func (rm *resourceManager) provideDelta( + desiredParameters []*svcapitypes.ParameterNameValue, + latestParameters []*svcapitypes.ParameterNameValue, +) ([]*svcapitypes.ParameterNameValue, []*svcapitypes.ParameterNameValue, []*svcapitypes.ParameterNameValue) { + + desiredPametersMap := map[string]*svcapitypes.ParameterNameValue{} + for _, parameter := range desiredParameters { + p := *parameter + desiredPametersMap[*p.ParameterName] = &p + } + latestPametersMap := map[string]*svcapitypes.ParameterNameValue{} + for _, parameter := range latestParameters { + p := *parameter + latestPametersMap[*p.ParameterName] = &p + } + + removedParameters := []*svcapitypes.ParameterNameValue{} // available in latest but not found in desired + modifiedParameters := []*svcapitypes.ParameterNameValue{} // available in both desired, latest but values differ + addedParameters := []*svcapitypes.ParameterNameValue{} // available in desired but not found in latest + for latestParameterName, latestParameterNameValue := range latestPametersMap { + desiredParameterNameValue, found := desiredPametersMap[latestParameterName] + if found && desiredParameterNameValue != nil && + desiredParameterNameValue.ParameterValue != nil && *desiredParameterNameValue.ParameterValue != "" { + if *desiredParameterNameValue.ParameterValue != *latestParameterNameValue.ParameterValue { + // available in both desired, latest but values differ + modified := *desiredParameterNameValue + modifiedParameters = append(modifiedParameters, &modified) + } + } else { + // available in latest but not found in desired + removed := *latestParameterNameValue + removedParameters = append(removedParameters, &removed) + } + } + for desiredParameterName, desiredParameterNameValue := range desiredPametersMap { + _, 
found := latestPametersMap[desiredParameterName] + if !found && desiredParameterNameValue != nil { + // available in desired but not found in latest + added := *desiredParameterNameValue + if added.ParameterValue != nil && *added.ParameterValue != "" { + addedParameters = append(addedParameters, &added) + } + } + } + return removedParameters, modifiedParameters, addedParameters +} + +func Int32OrNil(i *int64) *int32 { + if i == nil { + return nil + } + return aws.Int32(int32(*i)) +} diff --git a/pkg/resource/cache_parameter_group/manager.go b/pkg/resource/cache_parameter_group/manager.go index 7f96de9e..b80739d9 100644 --- a/pkg/resource/cache_parameter_group/manager.go +++ b/pkg/resource/cache_parameter_group/manager.go @@ -32,9 +32,8 @@ import ( acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" ackutil "github.com/aws-controllers-k8s/runtime/pkg/util" - "github.com/aws/aws-sdk-go/aws/session" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" - svcsdkapi "github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" @@ -59,6 +58,9 @@ type resourceManager struct { // cfg is a copy of the ackcfg.Config object passed on start of the service // controller cfg ackcfg.Config + // clientcfg is a copy of the client configuration passed on start of the + // service controller + clientcfg aws.Config // log refers to the logr.Logger object handling logging for the service // controller log logr.Logger @@ -73,12 +75,9 @@ type resourceManager struct { awsAccountID ackv1alpha1.AWSAccountID // The AWS Region that this resource manager targets awsRegion ackv1alpha1.AWSRegion - // sess is the AWS SDK Session object used to communicate with the backend - // AWS service API - sess *session.Session - // sdk is a pointer to the AWS service API interface exposed by the - // aws-sdk-go/services/{alias}/{alias}iface package. - sdkapi svcsdkapi.ElastiCacheAPI + // sdk is a pointer to the AWS service API client exposed by the + // aws-sdk-go-v2/services/{alias} package. + sdkapi *svcsdk.Client } // concreteResource returns a pointer to a resource from the supplied @@ -103,6 +102,7 @@ func (rm *resourceManager) ReadOne( panic("resource manager's ReadOne() method received resource with nil CR object") } observed, err := rm.sdkFind(ctx, r) + mirrorAWSTags(r, observed) if err != nil { if observed != nil { return rm.onError(observed, err) @@ -291,32 +291,76 @@ func (rm *resourceManager) EnsureTags( defaultTags := ackrt.GetDefaultTags(&rm.cfg, r.ko, md) var existingTags []*svcapitypes.Tag existingTags = r.ko.Spec.Tags - resourceTags := ToACKTags(existingTags) + resourceTags, keyOrder := convertToOrderedACKTags(existingTags) tags := acktags.Merge(resourceTags, defaultTags) - r.ko.Spec.Tags = FromACKTags(tags) + r.ko.Spec.Tags = fromACKTags(tags, keyOrder) return nil } +// FilterAWSTags ignores tags that have keys that start with "aws:" +// is needed to ensure the controller does not attempt to remove +// tags set by AWS. This function needs to be called after each Read +// operation. +// Eg. 
resources created with cloudformation have tags that cannot be +// removed by an ACK controller +func (rm *resourceManager) FilterSystemTags(res acktypes.AWSResource) { + r := rm.concreteResource(res) + if r == nil || r.ko == nil { + return + } + var existingTags []*svcapitypes.Tag + existingTags = r.ko.Spec.Tags + resourceTags, tagKeyOrder := convertToOrderedACKTags(existingTags) + ignoreSystemTags(resourceTags) + r.ko.Spec.Tags = fromACKTags(resourceTags, tagKeyOrder) +} + +// mirrorAWSTags ensures that AWS tags are included in the desired resource +// if they are present in the latest resource. This will ensure that the +// aws tags are not present in a diff. The logic of the controller will +// ensure these tags aren't patched to the resource in the cluster, and +// will only be present to make sure we don't try to remove these tags. +// +// Although there are a lot of similarities between this function and +// EnsureTags, they are very much different. +// While EnsureTags tries to make sure the resource contains the controller +// tags, mirrowAWSTags tries to make sure tags injected by AWS are mirrored +// from the latest resoruce to the desired resource. +func mirrorAWSTags(a *resource, b *resource) { + if a == nil || a.ko == nil || b == nil || b.ko == nil { + return + } + var existingLatestTags []*svcapitypes.Tag + var existingDesiredTags []*svcapitypes.Tag + existingDesiredTags = a.ko.Spec.Tags + existingLatestTags = b.ko.Spec.Tags + desiredTags, desiredTagKeyOrder := convertToOrderedACKTags(existingDesiredTags) + latestTags, _ := convertToOrderedACKTags(existingLatestTags) + syncAWSTags(desiredTags, latestTags) + a.ko.Spec.Tags = fromACKTags(desiredTags, desiredTagKeyOrder) +} + // newResourceManager returns a new struct implementing // acktypes.AWSResourceManager +// This is for AWS-SDK-GO-V2 - Created newResourceManager With AWS sdk-Go-ClientV2 func newResourceManager( cfg ackcfg.Config, + clientcfg aws.Config, log logr.Logger, metrics *ackmetrics.Metrics, rr acktypes.Reconciler, - sess *session.Session, id ackv1alpha1.AWSAccountID, region ackv1alpha1.AWSRegion, ) (*resourceManager, error) { return &resourceManager{ cfg: cfg, + clientcfg: clientcfg, log: log, metrics: metrics, rr: rr, awsAccountID: id, awsRegion: region, - sess: sess, - sdkapi: svcsdk.New(sess), + sdkapi: svcsdk.NewFromConfig(clientcfg), }, nil } diff --git a/pkg/resource/cache_parameter_group/manager_factory.go b/pkg/resource/cache_parameter_group/manager_factory.go index 76549b75..2eaca521 100644 --- a/pkg/resource/cache_parameter_group/manager_factory.go +++ b/pkg/resource/cache_parameter_group/manager_factory.go @@ -23,7 +23,7 @@ import ( ackcfg "github.com/aws-controllers-k8s/runtime/pkg/config" ackmetrics "github.com/aws-controllers-k8s/runtime/pkg/metrics" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" - "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/go-logr/logr" svcresource "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource" @@ -47,14 +47,18 @@ func (f *resourceManagerFactory) ResourceDescriptor() acktypes.AWSResourceDescri // supplied AWS account func (f *resourceManagerFactory) ManagerFor( cfg ackcfg.Config, + clientcfg aws.Config, log logr.Logger, metrics *ackmetrics.Metrics, rr acktypes.Reconciler, - sess *session.Session, id ackv1alpha1.AWSAccountID, region ackv1alpha1.AWSRegion, + roleARN ackv1alpha1.AWSResourceName, ) (acktypes.AWSResourceManager, error) { - rmId := fmt.Sprintf("%s/%s", id, region) + // We use the account ID, 
region, and role ARN to uniquely identify a + // resource manager. This helps us to avoid creating multiple resource + // managers for the same account/region/roleARN combination. + rmId := fmt.Sprintf("%s/%s/%s", id, region, roleARN) f.RLock() rm, found := f.rmCache[rmId] f.RUnlock() @@ -66,7 +70,7 @@ func (f *resourceManagerFactory) ManagerFor( f.Lock() defer f.Unlock() - rm, err := newResourceManager(cfg, log, metrics, rr, sess, id, region) + rm, err := newResourceManager(cfg, clientcfg, log, metrics, rr, id, region) if err != nil { return nil, err } diff --git a/pkg/resource/cache_parameter_group/references.go b/pkg/resource/cache_parameter_group/references.go index eef3cf6b..727b5ded 100644 --- a/pkg/resource/cache_parameter_group/references.go +++ b/pkg/resource/cache_parameter_group/references.go @@ -17,6 +17,7 @@ package cache_parameter_group import ( "context" + "sigs.k8s.io/controller-runtime/pkg/client" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" @@ -24,19 +25,29 @@ import ( svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" ) +// ClearResolvedReferences removes any reference values that were made +// concrete in the spec. It returns a copy of the input AWSResource which +// contains the original *Ref values, but none of their respective concrete +// values. +func (rm *resourceManager) ClearResolvedReferences(res acktypes.AWSResource) acktypes.AWSResource { + ko := rm.concreteResource(res).ko.DeepCopy() + + return &resource{ko} +} + // ResolveReferences finds if there are any Reference field(s) present -// inside AWSResource passed in the parameter and attempts to resolve -// those reference field(s) into target field(s). -// It returns an AWSResource with resolved reference(s), and an error if the -// passed AWSResource's reference field(s) cannot be resolved. -// This method also adds/updates the ConditionTypeReferencesResolved for the -// AWSResource. +// inside AWSResource passed in the parameter and attempts to resolve those +// reference field(s) into their respective target field(s). It returns a +// copy of the input AWSResource with resolved reference(s), a boolean which +// is set to true if the resource contains any references (regardless of if +// they are resolved successfully) and an error if the passed AWSResource's +// reference field(s) could not be resolved. 
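The doc comment above describes the new ResolveReferences contract used throughout this change: it returns a copy of the resource, a boolean that is true whenever any *Ref field is present, and an error. CacheParameterGroup has no reference fields, so the implementation just below returns (res, false, nil). A rough sketch of the caller side, with a local interface standing in for whatever the runtime actually exposes:

package example

import (
	"context"

	acktypes "github.com/aws-controllers-k8s/runtime/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// referenceResolver matches the generated method's new signature.
type referenceResolver interface {
	ResolveReferences(ctx context.Context, apiReader client.Reader, res acktypes.AWSResource) (acktypes.AWSResource, bool, error)
}

// resolveAndReport is an illustrative caller: it distinguishes "no references
// declared" from "references declared but not resolvable yet".
func resolveAndReport(
	ctx context.Context,
	rm referenceResolver,
	reader client.Reader,
	res acktypes.AWSResource,
) (resolved acktypes.AWSResource, hasRefs bool, err error) {
	resolved, hasRefs, err = rm.ResolveReferences(ctx, reader, res)
	if err != nil {
		// hasRefs may still be true here: the references exist but could not
		// be resolved yet (target missing or not synced), so the caller can
		// requeue instead of treating the resource as reference-free.
		return res, hasRefs, err
	}
	return resolved, hasRefs, nil
}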
func (rm *resourceManager) ResolveReferences( ctx context.Context, apiReader client.Reader, res acktypes.AWSResource, -) (acktypes.AWSResource, error) { - return res, nil +) (acktypes.AWSResource, bool, error) { + return res, false, nil } // validateReferenceFields validates the reference field and corresponding @@ -44,9 +55,3 @@ func (rm *resourceManager) ResolveReferences( func validateReferenceFields(ko *svcapitypes.CacheParameterGroup) error { return nil } - -// hasNonNilReferences returns true if resource contains a reference to another -// resource -func hasNonNilReferences(ko *svcapitypes.CacheParameterGroup) bool { - return false -} diff --git a/pkg/resource/cache_parameter_group/resource.go b/pkg/resource/cache_parameter_group/resource.go index a187eb16..af682551 100644 --- a/pkg/resource/cache_parameter_group/resource.go +++ b/pkg/resource/cache_parameter_group/resource.go @@ -16,6 +16,8 @@ package cache_parameter_group import ( + "fmt" + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" ackerrors "github.com/aws-controllers-k8s/runtime/pkg/errors" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" @@ -45,7 +47,7 @@ func (r *resource) Identifiers() acktypes.AWSResourceIdentifiers { } // IsBeingDeleted returns true if the Kubernetes resource has a non-zero -// deletion timestemp +// deletion timestamp func (r *resource) IsBeingDeleted() bool { return !r.ko.DeletionTimestamp.IsZero() } @@ -93,6 +95,17 @@ func (r *resource) SetIdentifiers(identifier *ackv1alpha1.AWSIdentifiers) error return nil } +// PopulateResourceFromAnnotation populates the fields passed from adoption annotation +func (r *resource) PopulateResourceFromAnnotation(fields map[string]string) error { + f0, ok := fields["cacheParameterGroupName"] + if !ok { + return ackerrors.NewTerminalError(fmt.Errorf("required field missing: cacheParameterGroupName")) + } + r.ko.Spec.CacheParameterGroupName = &f0 + + return nil +} + // DeepCopy will return a copy of the resource func (r *resource) DeepCopy() acktypes.AWSResource { koCopy := r.ko.DeepCopy() diff --git a/pkg/resource/cache_parameter_group/sdk.go b/pkg/resource/cache_parameter_group/sdk.go index d263271a..cc020f9d 100644 --- a/pkg/resource/cache_parameter_group/sdk.go +++ b/pkg/resource/cache_parameter_group/sdk.go @@ -28,8 +28,10 @@ import ( ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" - "github.com/aws/aws-sdk-go/aws" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + svcsdktypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" + smithy "github.com/aws/smithy-go" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -40,8 +42,7 @@ import ( var ( _ = &metav1.Time{} _ = strings.ToLower("") - _ = &aws.JSONValue{} - _ = &svcsdk.ElastiCache{} + _ = &svcsdk.Client{} _ = &svcapitypes.CacheParameterGroup{} _ = ackv1alpha1.AWSAccountID("") _ = &ackerr.NotFound @@ -49,6 +50,7 @@ var ( _ = &reflect.Value{} _ = fmt.Sprintf("") _ = &ackrequeue.NoRequeue{} + _ = &aws.Config{} ) // sdkFind returns SDK-specific information about a supplied resource @@ -73,10 +75,11 @@ func (rm *resourceManager) sdkFind( return nil, err } var resp *svcsdk.DescribeCacheParameterGroupsOutput - resp, err = rm.sdkapi.DescribeCacheParameterGroupsWithContext(ctx, input) + resp, err = 
rm.sdkapi.DescribeCacheParameterGroups(ctx, input) rm.metrics.RecordAPICall("READ_MANY", "DescribeCacheParameterGroups", err) if err != nil { - if awsErr, ok := ackerr.AWSError(err); ok && awsErr.Code() == "CacheParameterGroupNotFound" { + var awsErr smithy.APIError + if errors.As(err, &awsErr) && awsErr.ErrorCode() == "CacheParameterGroupNotFound" { return nil, ackerr.NotFound } return nil, err @@ -149,7 +152,7 @@ func (rm *resourceManager) newListRequestPayload( res := &svcsdk.DescribeCacheParameterGroupsInput{} if r.ko.Spec.CacheParameterGroupName != nil { - res.SetCacheParameterGroupName(*r.ko.Spec.CacheParameterGroupName) + res.CacheParameterGroupName = r.ko.Spec.CacheParameterGroupName } return res, nil @@ -174,7 +177,7 @@ func (rm *resourceManager) sdkCreate( var resp *svcsdk.CreateCacheParameterGroupOutput _ = resp - resp, err = rm.sdkapi.CreateCacheParameterGroupWithContext(ctx, input) + resp, err = rm.sdkapi.CreateCacheParameterGroup(ctx, input) rm.metrics.RecordAPICall("CREATE", "CreateCacheParameterGroup", err) if err != nil { return nil, err @@ -229,27 +232,27 @@ func (rm *resourceManager) newCreateRequestPayload( res := &svcsdk.CreateCacheParameterGroupInput{} if r.ko.Spec.CacheParameterGroupFamily != nil { - res.SetCacheParameterGroupFamily(*r.ko.Spec.CacheParameterGroupFamily) + res.CacheParameterGroupFamily = r.ko.Spec.CacheParameterGroupFamily } if r.ko.Spec.CacheParameterGroupName != nil { - res.SetCacheParameterGroupName(*r.ko.Spec.CacheParameterGroupName) + res.CacheParameterGroupName = r.ko.Spec.CacheParameterGroupName } if r.ko.Spec.Description != nil { - res.SetDescription(*r.ko.Spec.Description) + res.Description = r.ko.Spec.Description } if r.ko.Spec.Tags != nil { - f3 := []*svcsdk.Tag{} + f3 := []svcsdktypes.Tag{} for _, f3iter := range r.ko.Spec.Tags { - f3elem := &svcsdk.Tag{} + f3elem := &svcsdktypes.Tag{} if f3iter.Key != nil { - f3elem.SetKey(*f3iter.Key) + f3elem.Key = f3iter.Key } if f3iter.Value != nil { - f3elem.SetValue(*f3iter.Value) + f3elem.Value = f3iter.Value } - f3 = append(f3, f3elem) + f3 = append(f3, *f3elem) } - res.SetTags(f3) + res.Tags = f3 } return res, nil @@ -282,7 +285,7 @@ func (rm *resourceManager) sdkDelete( } var resp *svcsdk.DeleteCacheParameterGroupOutput _ = resp - resp, err = rm.sdkapi.DeleteCacheParameterGroupWithContext(ctx, input) + resp, err = rm.sdkapi.DeleteCacheParameterGroup(ctx, input) rm.metrics.RecordAPICall("DELETE", "DeleteCacheParameterGroup", err) return nil, err } @@ -295,7 +298,7 @@ func (rm *resourceManager) newDeleteRequestPayload( res := &svcsdk.DeleteCacheParameterGroupInput{} if r.ko.Spec.CacheParameterGroupName != nil { - res.SetCacheParameterGroupName(*r.ko.Spec.CacheParameterGroupName) + res.CacheParameterGroupName = r.ko.Spec.CacheParameterGroupName } return res, nil @@ -403,14 +406,14 @@ func (rm *resourceManager) terminalAWSError(err error) bool { if err == nil { return false } - awsErr, ok := ackerr.AWSError(err) - if !ok { + + var terminalErr smithy.APIError + if !errors.As(err, &terminalErr) { return false } - switch awsErr.Code() { + switch terminalErr.ErrorCode() { case "CacheParameterGroupAlreadyExists", "CacheParameterGroupQuotaExceeded", - "InvalidCacheParameterGroupState", "InvalidGlobalReplicationGroupState", "InvalidParameterCombination", "InvalidParameterValue": diff --git a/pkg/resource/cache_parameter_group/tags.go b/pkg/resource/cache_parameter_group/tags.go index b550a785..65bdb0c7 100644 --- a/pkg/resource/cache_parameter_group/tags.go +++ b/pkg/resource/cache_parameter_group/tags.go 
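The tags.go hunk that follows (repeated for every resource in this controller) threads a key-order slice through the tag conversion so that merging default controller tags no longer reorders Spec.Tags on each reconcile. A small self-contained sketch of the round trip, with plain types standing in for acktags.Tags and svcapitypes.Tag:

package main

import "fmt"

type tag struct{ Key, Value string }

// toOrdered converts a tag list into a map plus the original key order,
// the same idea as convertToOrderedACKTags.
func toOrdered(tags []tag) (map[string]string, []string) {
	m := map[string]string{}
	order := []string{}
	for _, t := range tags {
		order = append(order, t.Key)
		m[t.Key] = t.Value
	}
	return m, order
}

// fromOrdered rebuilds the list, emitting known keys in their original order
// first and any newly merged keys afterwards, as fromACKTags does.
func fromOrdered(m map[string]string, order []string) []tag {
	out := []tag{}
	for _, k := range order {
		if v, ok := m[k]; ok {
			out = append(out, tag{k, v})
			delete(m, k)
		}
	}
	for k, v := range m {
		out = append(out, tag{k, v})
	}
	return out
}

func main() {
	m, order := toOrdered([]tag{{"team", "data"}, {"env", "prod"}})
	m["services.k8s.aws/controller-version"] = "v1.0.0" // default controller tag merged in
	fmt.Println(fromOrdered(m, order))
	// Original keys come back first, in their original order:
	// [{team data} {env prod} {services.k8s.aws/controller-version v1.0.0}]
}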
@@ -16,48 +16,104 @@ package cache_parameter_group import ( + "slices" + "strings" + acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" ) var ( - _ = svcapitypes.CacheParameterGroup{} - _ = acktags.NewTags() + _ = svcapitypes.CacheParameterGroup{} + _ = acktags.NewTags() + ACKSystemTags = []string{"services.k8s.aws/namespace", "services.k8s.aws/controller-version"} ) -// ToACKTags converts the tags parameter into 'acktags.Tags' shape. +// convertToOrderedACKTags converts the tags parameter into 'acktags.Tags' shape. // This method helps in creating the hub(acktags.Tags) for merging -// default controller tags with existing resource tags. -func ToACKTags(tags []*svcapitypes.Tag) acktags.Tags { +// default controller tags with existing resource tags. It also returns a slice +// of keys maintaining the original key Order when the tags are a list +func convertToOrderedACKTags(tags []*svcapitypes.Tag) (acktags.Tags, []string) { result := acktags.NewTags() - if tags == nil || len(tags) == 0 { - return result - } + keyOrder := []string{} + if len(tags) == 0 { + return result, keyOrder + } for _, t := range tags { if t.Key != nil { - if t.Value == nil { - result[*t.Key] = "" - } else { + keyOrder = append(keyOrder, *t.Key) + if t.Value != nil { result[*t.Key] = *t.Value + } else { + result[*t.Key] = "" } } } - return result + return result, keyOrder } -// FromACKTags converts the tags parameter into []*svcapitypes.Tag shape. +// fromACKTags converts the tags parameter into []*svcapitypes.Tag shape. // This method helps in setting the tags back inside AWSResource after merging -// default controller tags with existing resource tags. -func FromACKTags(tags acktags.Tags) []*svcapitypes.Tag { +// default controller tags with existing resource tags. When a list, +// it maintains the order from original +func fromACKTags(tags acktags.Tags, keyOrder []string) []*svcapitypes.Tag { result := []*svcapitypes.Tag{} + + for _, k := range keyOrder { + v, ok := tags[k] + if ok { + tag := svcapitypes.Tag{Key: &k, Value: &v} + result = append(result, &tag) + delete(tags, k) + } + } for k, v := range tags { - kCopy := k - vCopy := v - tag := svcapitypes.Tag{Key: &kCopy, Value: &vCopy} + tag := svcapitypes.Tag{Key: &k, Value: &v} result = append(result, &tag) } + return result } + +// ignoreSystemTags ignores tags that have keys that start with "aws:" +// and ACKSystemTags, to avoid patching them to the resourceSpec. +// Eg. resources created with cloudformation have tags that cannot be +// removed by an ACK controller +func ignoreSystemTags(tags acktags.Tags) { + for k := range tags { + if strings.HasPrefix(k, "aws:") || + slices.Contains(ACKSystemTags, k) { + delete(tags, k) + } + } +} + +// syncAWSTags ensures AWS-managed tags (prefixed with "aws:") from the latest resource state +// are preserved in the desired state. This prevents the controller from attempting to +// modify AWS-managed tags, which would result in an error. +// +// AWS-managed tags are automatically added by AWS services (e.g., CloudFormation, Service Catalog) +// and cannot be modified or deleted through normal tag operations. 
Common examples include: +// - aws:cloudformation:stack-name +// - aws:servicecatalog:productArn +// +// Parameters: +// - a: The target Tags map to be updated (typically desired state) +// - b: The source Tags map containing AWS-managed tags (typically latest state) +// +// Example: +// +// latest := Tags{"aws:cloudformation:stack-name": "my-stack", "environment": "prod"} +// desired := Tags{"environment": "dev"} +// SyncAWSTags(desired, latest) +// desired now contains {"aws:cloudformation:stack-name": "my-stack", "environment": "dev"} +func syncAWSTags(a acktags.Tags, b acktags.Tags) { + for k := range b { + if strings.HasPrefix(k, "aws:") { + a[k] = b[k] + } + } +} diff --git a/pkg/resource/cache_subnet_group/delta.go b/pkg/resource/cache_subnet_group/delta.go index d2783896..6cd99bbe 100644 --- a/pkg/resource/cache_subnet_group/delta.go +++ b/pkg/resource/cache_subnet_group/delta.go @@ -57,10 +57,19 @@ func newResourceDelta( delta.Add("Spec.CacheSubnetGroupName", a.ko.Spec.CacheSubnetGroupName, b.ko.Spec.CacheSubnetGroupName) } } - if !ackcompare.SliceStringPEqual(a.ko.Spec.SubnetIDs, b.ko.Spec.SubnetIDs) { + if len(a.ko.Spec.SubnetIDs) != len(b.ko.Spec.SubnetIDs) { delta.Add("Spec.SubnetIDs", a.ko.Spec.SubnetIDs, b.ko.Spec.SubnetIDs) + } else if len(a.ko.Spec.SubnetIDs) > 0 { + if !ackcompare.SliceStringPEqual(a.ko.Spec.SubnetIDs, b.ko.Spec.SubnetIDs) { + delta.Add("Spec.SubnetIDs", a.ko.Spec.SubnetIDs, b.ko.Spec.SubnetIDs) + } + } + if !reflect.DeepEqual(a.ko.Spec.SubnetRefs, b.ko.Spec.SubnetRefs) { + delta.Add("Spec.SubnetRefs", a.ko.Spec.SubnetRefs, b.ko.Spec.SubnetRefs) } - if !ackcompare.MapStringStringEqual(ToACKTags(a.ko.Spec.Tags), ToACKTags(b.ko.Spec.Tags)) { + desiredACKTags, _ := convertToOrderedACKTags(a.ko.Spec.Tags) + latestACKTags, _ := convertToOrderedACKTags(b.ko.Spec.Tags) + if !ackcompare.MapStringStringEqual(desiredACKTags, latestACKTags) { delta.Add("Spec.Tags", a.ko.Spec.Tags, b.ko.Spec.Tags) } diff --git a/pkg/resource/cache_subnet_group/descriptor.go b/pkg/resource/cache_subnet_group/descriptor.go index b3d853d8..7ffe0f4f 100644 --- a/pkg/resource/cache_subnet_group/descriptor.go +++ b/pkg/resource/cache_subnet_group/descriptor.go @@ -20,6 +20,7 @@ import ( ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" rtclient "sigs.k8s.io/controller-runtime/pkg/client" k8sctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -27,7 +28,7 @@ import ( ) const ( - finalizerString = "finalizers.elasticache.services.k8s.aws/CacheSubnetGroup" + FinalizerString = "finalizers.elasticache.services.k8s.aws/CacheSubnetGroup" ) var ( @@ -43,10 +44,10 @@ var ( type resourceDescriptor struct { } -// GroupKind returns a Kubernetes metav1.GroupKind struct that describes the -// API Group and Kind of CRs described by the descriptor -func (d *resourceDescriptor) GroupKind() *metav1.GroupKind { - return &GroupKind +// GroupVersionKind returns a Kubernetes schema.GroupVersionKind struct that +// describes the API Group, Version and Kind of CRs described by the descriptor +func (d *resourceDescriptor) GroupVersionKind() schema.GroupVersionKind { + return svcapitypes.GroupVersion.WithKind(GroupKind.Kind) } // EmptyRuntimeObject returns an empty object prototype that may be used in @@ -87,8 +88,8 @@ func (d *resourceDescriptor) IsManaged( // https://github.com/kubernetes-sigs/controller-runtime/issues/994 is // 
fixed. This should be able to be: // - // return k8sctrlutil.ContainsFinalizer(obj, finalizerString) - return containsFinalizer(obj, finalizerString) + // return k8sctrlutil.ContainsFinalizer(obj, FinalizerString) + return containsFinalizer(obj, FinalizerString) } // Remove once https://github.com/kubernetes-sigs/controller-runtime/issues/994 @@ -117,7 +118,7 @@ func (d *resourceDescriptor) MarkManaged( // Should not happen. If it does, there is a bug in the code panic("nil RuntimeMetaObject in AWSResource") } - k8sctrlutil.AddFinalizer(obj, finalizerString) + k8sctrlutil.AddFinalizer(obj, FinalizerString) } // MarkUnmanaged removes the supplied resource from management by ACK. What @@ -132,7 +133,7 @@ func (d *resourceDescriptor) MarkUnmanaged( // Should not happen. If it does, there is a bug in the code panic("nil RuntimeMetaObject in AWSResource") } - k8sctrlutil.RemoveFinalizer(obj, finalizerString) + k8sctrlutil.RemoveFinalizer(obj, FinalizerString) } // MarkAdopted places descriptors on the custom resource that indicate the diff --git a/pkg/resource/cache_subnet_group/custom_set_output.go b/pkg/resource/cache_subnet_group/hooks.go similarity index 76% rename from pkg/resource/cache_subnet_group/custom_set_output.go rename to pkg/resource/cache_subnet_group/hooks.go index 96ae1900..56246c67 100644 --- a/pkg/resource/cache_subnet_group/custom_set_output.go +++ b/pkg/resource/cache_subnet_group/hooks.go @@ -15,8 +15,11 @@ package cache_subnet_group import ( "context" + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" - "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + svcsdktypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -29,14 +32,14 @@ const ( func (rm *resourceManager) CustomDescribeCacheSubnetGroupsSetOutput( ctx context.Context, r *resource, - resp *elasticache.DescribeCacheSubnetGroupsOutput, + resp *svcsdk.DescribeCacheSubnetGroupsOutput, ko *svcapitypes.CacheSubnetGroup, ) (*svcapitypes.CacheSubnetGroup, error) { if len(resp.CacheSubnetGroups) == 0 { return ko, nil } elem := resp.CacheSubnetGroups[0] - err := rm.customSetOutputSupplementAPIs(ctx, r, elem, ko) + err := rm.customSetOutputSupplementAPIs(ctx, r, &elem, ko) if err != nil { return nil, err } @@ -46,7 +49,7 @@ func (rm *resourceManager) CustomDescribeCacheSubnetGroupsSetOutput( func (rm *resourceManager) customSetOutputSupplementAPIs( ctx context.Context, r *resource, - subnetGroup *elasticache.CacheSubnetGroup, + subnetGroup *svcsdktypes.CacheSubnetGroup, ko *svcapitypes.CacheSubnetGroup, ) error { events, err := rm.provideEvents(ctx, r.ko.Spec.CacheSubnetGroupName, 20) @@ -62,12 +65,12 @@ func (rm *resourceManager) provideEvents( subnetGroupName *string, maxRecords int64, ) ([]*svcapitypes.Event, error) { - input := &elasticache.DescribeEventsInput{} - input.SetSourceType("cache-subnet-group") - input.SetSourceIdentifier(*subnetGroupName) - input.SetMaxRecords(maxRecords) - input.SetDuration(eventsDuration) - resp, err := rm.sdkapi.DescribeEventsWithContext(ctx, input) + input := &svcsdk.DescribeEventsInput{} + input.SourceType = svcsdktypes.SourceTypeCacheSubnetGroup + input.SourceIdentifier = subnetGroupName + input.MaxRecords = aws.Int32(int32(maxRecords)) + input.Duration = aws.Int32(eventsDuration) + resp, err := rm.sdkapi.DescribeEvents(ctx, input) rm.metrics.RecordAPICall("READ_MANY", 
"DescribeEvents-CacheSubnetGroup", err) if err != nil { rm.log.V(1).Info("Error during DescribeEvents-CacheSubnetGroup", "error", err) @@ -92,3 +95,10 @@ func (rm *resourceManager) provideEvents( } return events, nil } + +func Int32OrNil(i *int64) *int32 { + if i == nil { + return nil + } + return aws.Int32(int32(*i)) +} diff --git a/pkg/resource/cache_subnet_group/manager.go b/pkg/resource/cache_subnet_group/manager.go index ae0ce3f2..e962804d 100644 --- a/pkg/resource/cache_subnet_group/manager.go +++ b/pkg/resource/cache_subnet_group/manager.go @@ -32,9 +32,8 @@ import ( acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" ackutil "github.com/aws-controllers-k8s/runtime/pkg/util" - "github.com/aws/aws-sdk-go/aws/session" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" - svcsdkapi "github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" @@ -59,6 +58,9 @@ type resourceManager struct { // cfg is a copy of the ackcfg.Config object passed on start of the service // controller cfg ackcfg.Config + // clientcfg is a copy of the client configuration passed on start of the + // service controller + clientcfg aws.Config // log refers to the logr.Logger object handling logging for the service // controller log logr.Logger @@ -73,12 +75,9 @@ type resourceManager struct { awsAccountID ackv1alpha1.AWSAccountID // The AWS Region that this resource manager targets awsRegion ackv1alpha1.AWSRegion - // sess is the AWS SDK Session object used to communicate with the backend - // AWS service API - sess *session.Session - // sdk is a pointer to the AWS service API interface exposed by the - // aws-sdk-go/services/{alias}/{alias}iface package. - sdkapi svcsdkapi.ElastiCacheAPI + // sdk is a pointer to the AWS service API client exposed by the + // aws-sdk-go-v2/services/{alias} package. + sdkapi *svcsdk.Client } // concreteResource returns a pointer to a resource from the supplied @@ -103,6 +102,7 @@ func (rm *resourceManager) ReadOne( panic("resource manager's ReadOne() method received resource with nil CR object") } observed, err := rm.sdkFind(ctx, r) + mirrorAWSTags(r, observed) if err != nil { if observed != nil { return rm.onError(observed, err) @@ -291,32 +291,76 @@ func (rm *resourceManager) EnsureTags( defaultTags := ackrt.GetDefaultTags(&rm.cfg, r.ko, md) var existingTags []*svcapitypes.Tag existingTags = r.ko.Spec.Tags - resourceTags := ToACKTags(existingTags) + resourceTags, keyOrder := convertToOrderedACKTags(existingTags) tags := acktags.Merge(resourceTags, defaultTags) - r.ko.Spec.Tags = FromACKTags(tags) + r.ko.Spec.Tags = fromACKTags(tags, keyOrder) return nil } +// FilterAWSTags ignores tags that have keys that start with "aws:" +// is needed to ensure the controller does not attempt to remove +// tags set by AWS. This function needs to be called after each Read +// operation. +// Eg. 
resources created with cloudformation have tags that cannot be +// removed by an ACK controller +func (rm *resourceManager) FilterSystemTags(res acktypes.AWSResource) { + r := rm.concreteResource(res) + if r == nil || r.ko == nil { + return + } + var existingTags []*svcapitypes.Tag + existingTags = r.ko.Spec.Tags + resourceTags, tagKeyOrder := convertToOrderedACKTags(existingTags) + ignoreSystemTags(resourceTags) + r.ko.Spec.Tags = fromACKTags(resourceTags, tagKeyOrder) +} + +// mirrorAWSTags ensures that AWS tags are included in the desired resource +// if they are present in the latest resource. This will ensure that the +// aws tags are not present in a diff. The logic of the controller will +// ensure these tags aren't patched to the resource in the cluster, and +// will only be present to make sure we don't try to remove these tags. +// +// Although there are a lot of similarities between this function and +// EnsureTags, they are very much different. +// While EnsureTags tries to make sure the resource contains the controller +// tags, mirrowAWSTags tries to make sure tags injected by AWS are mirrored +// from the latest resoruce to the desired resource. +func mirrorAWSTags(a *resource, b *resource) { + if a == nil || a.ko == nil || b == nil || b.ko == nil { + return + } + var existingLatestTags []*svcapitypes.Tag + var existingDesiredTags []*svcapitypes.Tag + existingDesiredTags = a.ko.Spec.Tags + existingLatestTags = b.ko.Spec.Tags + desiredTags, desiredTagKeyOrder := convertToOrderedACKTags(existingDesiredTags) + latestTags, _ := convertToOrderedACKTags(existingLatestTags) + syncAWSTags(desiredTags, latestTags) + a.ko.Spec.Tags = fromACKTags(desiredTags, desiredTagKeyOrder) +} + // newResourceManager returns a new struct implementing // acktypes.AWSResourceManager +// This is for AWS-SDK-GO-V2 - Created newResourceManager With AWS sdk-Go-ClientV2 func newResourceManager( cfg ackcfg.Config, + clientcfg aws.Config, log logr.Logger, metrics *ackmetrics.Metrics, rr acktypes.Reconciler, - sess *session.Session, id ackv1alpha1.AWSAccountID, region ackv1alpha1.AWSRegion, ) (*resourceManager, error) { return &resourceManager{ cfg: cfg, + clientcfg: clientcfg, log: log, metrics: metrics, rr: rr, awsAccountID: id, awsRegion: region, - sess: sess, - sdkapi: svcsdk.New(sess), + sdkapi: svcsdk.NewFromConfig(clientcfg), }, nil } diff --git a/pkg/resource/cache_subnet_group/manager_factory.go b/pkg/resource/cache_subnet_group/manager_factory.go index 9296270e..016ed5cb 100644 --- a/pkg/resource/cache_subnet_group/manager_factory.go +++ b/pkg/resource/cache_subnet_group/manager_factory.go @@ -23,7 +23,7 @@ import ( ackcfg "github.com/aws-controllers-k8s/runtime/pkg/config" ackmetrics "github.com/aws-controllers-k8s/runtime/pkg/metrics" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" - "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/go-logr/logr" svcresource "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource" @@ -47,14 +47,18 @@ func (f *resourceManagerFactory) ResourceDescriptor() acktypes.AWSResourceDescri // supplied AWS account func (f *resourceManagerFactory) ManagerFor( cfg ackcfg.Config, + clientcfg aws.Config, log logr.Logger, metrics *ackmetrics.Metrics, rr acktypes.Reconciler, - sess *session.Session, id ackv1alpha1.AWSAccountID, region ackv1alpha1.AWSRegion, + roleARN ackv1alpha1.AWSResourceName, ) (acktypes.AWSResourceManager, error) { - rmId := fmt.Sprintf("%s/%s", id, region) + // We use the account ID, region, and 
role ARN to uniquely identify a + // resource manager. This helps us to avoid creating multiple resource + // managers for the same account/region/roleARN combination. + rmId := fmt.Sprintf("%s/%s/%s", id, region, roleARN) f.RLock() rm, found := f.rmCache[rmId] f.RUnlock() @@ -66,7 +70,7 @@ func (f *resourceManagerFactory) ManagerFor( f.Lock() defer f.Unlock() - rm, err := newResourceManager(cfg, log, metrics, rr, sess, id, region) + rm, err := newResourceManager(cfg, clientcfg, log, metrics, rr, id, region) if err != nil { return nil, err } diff --git a/pkg/resource/cache_subnet_group/references.go b/pkg/resource/cache_subnet_group/references.go index d4c87d15..70fa70ff 100644 --- a/pkg/resource/cache_subnet_group/references.go +++ b/pkg/resource/cache_subnet_group/references.go @@ -17,36 +17,159 @@ package cache_subnet_group import ( "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" + ec2apitypes "github.com/aws-controllers-k8s/ec2-controller/apis/v1alpha1" + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" ) +// +kubebuilder:rbac:groups=ec2.services.k8s.aws,resources=subnets,verbs=get;list +// +kubebuilder:rbac:groups=ec2.services.k8s.aws,resources=subnets/status,verbs=get;list + +// ClearResolvedReferences removes any reference values that were made +// concrete in the spec. It returns a copy of the input AWSResource which +// contains the original *Ref values, but none of their respective concrete +// values. +func (rm *resourceManager) ClearResolvedReferences(res acktypes.AWSResource) acktypes.AWSResource { + ko := rm.concreteResource(res).ko.DeepCopy() + + if len(ko.Spec.SubnetRefs) > 0 { + ko.Spec.SubnetIDs = nil + } + + return &resource{ko} +} + // ResolveReferences finds if there are any Reference field(s) present -// inside AWSResource passed in the parameter and attempts to resolve -// those reference field(s) into target field(s). -// It returns an AWSResource with resolved reference(s), and an error if the -// passed AWSResource's reference field(s) cannot be resolved. -// This method also adds/updates the ConditionTypeReferencesResolved for the -// AWSResource. +// inside AWSResource passed in the parameter and attempts to resolve those +// reference field(s) into their respective target field(s). It returns a +// copy of the input AWSResource with resolved reference(s), a boolean which +// is set to true if the resource contains any references (regardless of if +// they are resolved successfully) and an error if the passed AWSResource's +// reference field(s) could not be resolved. 
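For CacheSubnetGroup, the resolution below is paired with ClearResolvedReferences above: resolution copies the referenced Subnets' observed IDs into Spec.SubnetIDs for the AWS call, and the clear step drops them again so only the user-authored SubnetRefs are written back to the cluster. A stripped-down sketch of that flow; the spec struct and IDs are illustrative:

package main

import "fmt"

// miniSpec mirrors just the two CacheSubnetGroup fields involved.
type miniSpec struct {
	SubnetIDs  []string
	SubnetRefs []string // stands in for the *Ref wrapper type
}

// resolve fills SubnetIDs from the referenced resources' observed IDs,
// as resolveReferenceForSubnetIDs does from Subnet.Status.SubnetID.
func resolve(s *miniSpec, observedIDs map[string]string) {
	for _, ref := range s.SubnetRefs {
		s.SubnetIDs = append(s.SubnetIDs, observedIDs[ref])
	}
}

// clearResolved drops the concrete IDs whenever references were used,
// matching ClearResolvedReferences above.
func clearResolved(s *miniSpec) {
	if len(s.SubnetRefs) > 0 {
		s.SubnetIDs = nil
	}
}

func main() {
	spec := &miniSpec{SubnetRefs: []string{"subnet-a", "subnet-b"}}
	resolve(spec, map[string]string{"subnet-a": "subnet-0aaa", "subnet-b": "subnet-0bbb"})
	fmt.Println(spec.SubnetIDs) // [subnet-0aaa subnet-0bbb], used for the AWS API call
	clearResolved(spec)
	fmt.Println(spec.SubnetIDs) // [], nothing concrete is persisted back to the CR
}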
func (rm *resourceManager) ResolveReferences( ctx context.Context, apiReader client.Reader, res acktypes.AWSResource, -) (acktypes.AWSResource, error) { - return res, nil +) (acktypes.AWSResource, bool, error) { + ko := rm.concreteResource(res).ko + + resourceHasReferences := false + err := validateReferenceFields(ko) + if fieldHasReferences, err := rm.resolveReferenceForSubnetIDs(ctx, apiReader, ko); err != nil { + return &resource{ko}, (resourceHasReferences || fieldHasReferences), err + } else { + resourceHasReferences = resourceHasReferences || fieldHasReferences + } + + return &resource{ko}, resourceHasReferences, err } // validateReferenceFields validates the reference field and corresponding // identifier field. func validateReferenceFields(ko *svcapitypes.CacheSubnetGroup) error { + + if len(ko.Spec.SubnetRefs) > 0 && len(ko.Spec.SubnetIDs) > 0 { + return ackerr.ResourceReferenceAndIDNotSupportedFor("SubnetIDs", "SubnetRefs") + } + if len(ko.Spec.SubnetRefs) == 0 && len(ko.Spec.SubnetIDs) == 0 { + return ackerr.ResourceReferenceOrIDRequiredFor("SubnetIDs", "SubnetRefs") + } return nil } -// hasNonNilReferences returns true if resource contains a reference to another -// resource -func hasNonNilReferences(ko *svcapitypes.CacheSubnetGroup) bool { - return false +// resolveReferenceForSubnetIDs reads the resource referenced +// from SubnetRefs field and sets the SubnetIDs +// from referenced resource. Returns a boolean indicating whether a reference +// contains references, or an error +func (rm *resourceManager) resolveReferenceForSubnetIDs( + ctx context.Context, + apiReader client.Reader, + ko *svcapitypes.CacheSubnetGroup, +) (hasReferences bool, err error) { + for _, f0iter := range ko.Spec.SubnetRefs { + if f0iter != nil && f0iter.From != nil { + hasReferences = true + arr := f0iter.From + if arr.Name == nil || *arr.Name == "" { + return hasReferences, fmt.Errorf("provided resource reference is nil or empty: SubnetRefs") + } + namespace := ko.ObjectMeta.GetNamespace() + if arr.Namespace != nil && *arr.Namespace != "" { + namespace = *arr.Namespace + } + obj := &ec2apitypes.Subnet{} + if err := getReferencedResourceState_Subnet(ctx, apiReader, obj, *arr.Name, namespace); err != nil { + return hasReferences, err + } + if ko.Spec.SubnetIDs == nil { + ko.Spec.SubnetIDs = make([]*string, 0, 1) + } + ko.Spec.SubnetIDs = append(ko.Spec.SubnetIDs, (*string)(obj.Status.SubnetID)) + } + } + + return hasReferences, nil +} + +// getReferencedResourceState_Subnet looks up whether a referenced resource +// exists and is in a ACK.ResourceSynced=True state. If the referenced resource does exist and is +// in a Synced state, returns nil, otherwise returns `ackerr.ResourceReferenceTerminalFor` or +// `ResourceReferenceNotSyncedFor` depending on if the resource is in a Terminal state. 
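getReferencedResourceState_Subnet, defined next, scans Status.Conditions once for the Terminal condition and once for ResourceSynced before checking the target field. The helper below is only a sketch of that same decision order using the runtime's condition types; it is not code from this change:

package example

import (
	ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1"
	corev1 "k8s.io/api/core/v1"
)

// conditionTrue reports whether the condition of the given type is present
// and set to True, the check performed for both ConditionTypeTerminal and
// ConditionTypeResourceSynced.
func conditionTrue(conds []*ackv1alpha1.Condition, t ackv1alpha1.ConditionType) bool {
	for _, c := range conds {
		if c != nil && c.Type == t && c.Status == corev1.ConditionTrue {
			return true
		}
	}
	return false
}

// referencedSubnetUsable mirrors the generated logic's ordering: a Terminal
// referenced resource fails first, then the resource must be synced, and the
// target field (Status.SubnetID) must be populated.
func referencedSubnetUsable(conds []*ackv1alpha1.Condition, subnetID *string) (ok bool, reason string) {
	switch {
	case conditionTrue(conds, ackv1alpha1.ConditionTypeTerminal):
		return false, "referenced Subnet is in a Terminal state"
	case !conditionTrue(conds, ackv1alpha1.ConditionTypeResourceSynced):
		return false, "referenced Subnet is not synced yet"
	case subnetID == nil:
		return false, "referenced Subnet has no Status.SubnetID"
	default:
		return true, ""
	}
}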
+func getReferencedResourceState_Subnet( + ctx context.Context, + apiReader client.Reader, + obj *ec2apitypes.Subnet, + name string, // the Kubernetes name of the referenced resource + namespace string, // the Kubernetes namespace of the referenced resource +) error { + namespacedName := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + err := apiReader.Get(ctx, namespacedName, obj) + if err != nil { + return err + } + var refResourceTerminal bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeTerminal && + cond.Status == corev1.ConditionTrue { + return ackerr.ResourceReferenceTerminalFor( + "Subnet", + namespace, name) + } + } + if refResourceTerminal { + return ackerr.ResourceReferenceTerminalFor( + "Subnet", + namespace, name) + } + var refResourceSynced bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && + cond.Status == corev1.ConditionTrue { + refResourceSynced = true + } + } + if !refResourceSynced { + return ackerr.ResourceReferenceNotSyncedFor( + "Subnet", + namespace, name) + } + if obj.Status.SubnetID == nil { + return ackerr.ResourceReferenceMissingTargetFieldFor( + "Subnet", + namespace, name, + "Status.SubnetID") + } + return nil } diff --git a/pkg/resource/cache_subnet_group/resource.go b/pkg/resource/cache_subnet_group/resource.go index 96d45184..83ccf562 100644 --- a/pkg/resource/cache_subnet_group/resource.go +++ b/pkg/resource/cache_subnet_group/resource.go @@ -16,6 +16,8 @@ package cache_subnet_group import ( + "fmt" + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" ackerrors "github.com/aws-controllers-k8s/runtime/pkg/errors" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" @@ -45,7 +47,7 @@ func (r *resource) Identifiers() acktypes.AWSResourceIdentifiers { } // IsBeingDeleted returns true if the Kubernetes resource has a non-zero -// deletion timestemp +// deletion timestamp func (r *resource) IsBeingDeleted() bool { return !r.ko.DeletionTimestamp.IsZero() } @@ -93,6 +95,17 @@ func (r *resource) SetIdentifiers(identifier *ackv1alpha1.AWSIdentifiers) error return nil } +// PopulateResourceFromAnnotation populates the fields passed from adoption annotation +func (r *resource) PopulateResourceFromAnnotation(fields map[string]string) error { + f0, ok := fields["cacheSubnetGroupName"] + if !ok { + return ackerrors.NewTerminalError(fmt.Errorf("required field missing: cacheSubnetGroupName")) + } + r.ko.Spec.CacheSubnetGroupName = &f0 + + return nil +} + // DeepCopy will return a copy of the resource func (r *resource) DeepCopy() acktypes.AWSResource { koCopy := r.ko.DeepCopy() diff --git a/pkg/resource/cache_subnet_group/sdk.go b/pkg/resource/cache_subnet_group/sdk.go index 3c9f6b05..74690536 100644 --- a/pkg/resource/cache_subnet_group/sdk.go +++ b/pkg/resource/cache_subnet_group/sdk.go @@ -28,8 +28,10 @@ import ( ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" - "github.com/aws/aws-sdk-go/aws" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + svcsdktypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" + smithy "github.com/aws/smithy-go" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -40,8 +42,7 @@ import ( var ( _ = &metav1.Time{} _ = 
strings.ToLower("") - _ = &aws.JSONValue{} - _ = &svcsdk.ElastiCache{} + _ = &svcsdk.Client{} _ = &svcapitypes.CacheSubnetGroup{} _ = ackv1alpha1.AWSAccountID("") _ = &ackerr.NotFound @@ -49,6 +50,7 @@ var ( _ = &reflect.Value{} _ = fmt.Sprintf("") _ = &ackrequeue.NoRequeue{} + _ = &aws.Config{} ) // sdkFind returns SDK-specific information about a supplied resource @@ -73,10 +75,11 @@ func (rm *resourceManager) sdkFind( return nil, err } var resp *svcsdk.DescribeCacheSubnetGroupsOutput - resp, err = rm.sdkapi.DescribeCacheSubnetGroupsWithContext(ctx, input) + resp, err = rm.sdkapi.DescribeCacheSubnetGroups(ctx, input) rm.metrics.RecordAPICall("READ_MANY", "DescribeCacheSubnetGroups", err) if err != nil { - if awsErr, ok := ackerr.AWSError(err); ok && awsErr.Code() == "CacheSubnetGroupNotFoundFault" { + var awsErr smithy.APIError + if errors.As(err, &awsErr) && awsErr.ErrorCode() == "CacheSubnetGroupNotFoundFault" { return nil, ackerr.NotFound } return nil, err @@ -150,6 +153,15 @@ func (rm *resourceManager) sdkFind( if err != nil { return nil, err } + + subnets := make([]*string, 0, len(ko.Status.Subnets)) + for _, subnetIdIter := range ko.Status.Subnets { + if subnetIdIter.SubnetIdentifier != nil { + subnets = append(subnets, subnetIdIter.SubnetIdentifier) + } + } + ko.Spec.SubnetIDs = subnets + return &resource{ko}, nil } @@ -171,7 +183,7 @@ func (rm *resourceManager) newListRequestPayload( res := &svcsdk.DescribeCacheSubnetGroupsInput{} if r.ko.Spec.CacheSubnetGroupName != nil { - res.SetCacheSubnetGroupName(*r.ko.Spec.CacheSubnetGroupName) + res.CacheSubnetGroupName = r.ko.Spec.CacheSubnetGroupName } return res, nil @@ -196,7 +208,7 @@ func (rm *resourceManager) sdkCreate( var resp *svcsdk.CreateCacheSubnetGroupOutput _ = resp - resp, err = rm.sdkapi.CreateCacheSubnetGroupWithContext(ctx, input) + resp, err = rm.sdkapi.CreateCacheSubnetGroup(ctx, input) rm.metrics.RecordAPICall("CREATE", "CreateCacheSubnetGroup", err) if err != nil { return nil, err @@ -268,33 +280,27 @@ func (rm *resourceManager) newCreateRequestPayload( res := &svcsdk.CreateCacheSubnetGroupInput{} if r.ko.Spec.CacheSubnetGroupDescription != nil { - res.SetCacheSubnetGroupDescription(*r.ko.Spec.CacheSubnetGroupDescription) + res.CacheSubnetGroupDescription = r.ko.Spec.CacheSubnetGroupDescription } if r.ko.Spec.CacheSubnetGroupName != nil { - res.SetCacheSubnetGroupName(*r.ko.Spec.CacheSubnetGroupName) + res.CacheSubnetGroupName = r.ko.Spec.CacheSubnetGroupName } if r.ko.Spec.SubnetIDs != nil { - f2 := []*string{} - for _, f2iter := range r.ko.Spec.SubnetIDs { - var f2elem string - f2elem = *f2iter - f2 = append(f2, &f2elem) - } - res.SetSubnetIds(f2) + res.SubnetIds = aws.ToStringSlice(r.ko.Spec.SubnetIDs) } if r.ko.Spec.Tags != nil { - f3 := []*svcsdk.Tag{} + f3 := []svcsdktypes.Tag{} for _, f3iter := range r.ko.Spec.Tags { - f3elem := &svcsdk.Tag{} + f3elem := &svcsdktypes.Tag{} if f3iter.Key != nil { - f3elem.SetKey(*f3iter.Key) + f3elem.Key = f3iter.Key } if f3iter.Value != nil { - f3elem.SetValue(*f3iter.Value) + f3elem.Value = f3iter.Value } - f3 = append(f3, f3elem) + f3 = append(f3, *f3elem) } - res.SetTags(f3) + res.Tags = f3 } return res, nil @@ -313,14 +319,14 @@ func (rm *resourceManager) sdkUpdate( defer func() { exit(err) }() - input, err := rm.newUpdateRequestPayload(ctx, desired) + input, err := rm.newUpdateRequestPayload(ctx, desired, delta) if err != nil { return nil, err } var resp *svcsdk.ModifyCacheSubnetGroupOutput _ = resp - resp, err = rm.sdkapi.ModifyCacheSubnetGroupWithContext(ctx, input) + 
resp, err = rm.sdkapi.ModifyCacheSubnetGroup(ctx, input) rm.metrics.RecordAPICall("UPDATE", "ModifyCacheSubnetGroup", err) if err != nil { return nil, err @@ -388,23 +394,18 @@ func (rm *resourceManager) sdkUpdate( func (rm *resourceManager) newUpdateRequestPayload( ctx context.Context, r *resource, + delta *ackcompare.Delta, ) (*svcsdk.ModifyCacheSubnetGroupInput, error) { res := &svcsdk.ModifyCacheSubnetGroupInput{} if r.ko.Spec.CacheSubnetGroupDescription != nil { - res.SetCacheSubnetGroupDescription(*r.ko.Spec.CacheSubnetGroupDescription) + res.CacheSubnetGroupDescription = r.ko.Spec.CacheSubnetGroupDescription } if r.ko.Spec.CacheSubnetGroupName != nil { - res.SetCacheSubnetGroupName(*r.ko.Spec.CacheSubnetGroupName) + res.CacheSubnetGroupName = r.ko.Spec.CacheSubnetGroupName } if r.ko.Spec.SubnetIDs != nil { - f2 := []*string{} - for _, f2iter := range r.ko.Spec.SubnetIDs { - var f2elem string - f2elem = *f2iter - f2 = append(f2, &f2elem) - } - res.SetSubnetIds(f2) + res.SubnetIds = aws.ToStringSlice(r.ko.Spec.SubnetIDs) } return res, nil @@ -426,7 +427,7 @@ func (rm *resourceManager) sdkDelete( } var resp *svcsdk.DeleteCacheSubnetGroupOutput _ = resp - resp, err = rm.sdkapi.DeleteCacheSubnetGroupWithContext(ctx, input) + resp, err = rm.sdkapi.DeleteCacheSubnetGroup(ctx, input) rm.metrics.RecordAPICall("DELETE", "DeleteCacheSubnetGroup", err) return nil, err } @@ -439,7 +440,7 @@ func (rm *resourceManager) newDeleteRequestPayload( res := &svcsdk.DeleteCacheSubnetGroupInput{} if r.ko.Spec.CacheSubnetGroupName != nil { - res.SetCacheSubnetGroupName(*r.ko.Spec.CacheSubnetGroupName) + res.CacheSubnetGroupName = r.ko.Spec.CacheSubnetGroupName } return res, nil @@ -547,11 +548,12 @@ func (rm *resourceManager) terminalAWSError(err error) bool { if err == nil { return false } - awsErr, ok := ackerr.AWSError(err) - if !ok { + + var terminalErr smithy.APIError + if !errors.As(err, &terminalErr) { return false } - switch awsErr.Code() { + switch terminalErr.ErrorCode() { case "CacheSubnetGroupQuotaExceeded", "CacheSubnetQuotaExceededFault", "SubnetInUse", diff --git a/pkg/resource/cache_subnet_group/tags.go b/pkg/resource/cache_subnet_group/tags.go index f722c7c7..0f2e959c 100644 --- a/pkg/resource/cache_subnet_group/tags.go +++ b/pkg/resource/cache_subnet_group/tags.go @@ -16,48 +16,104 @@ package cache_subnet_group import ( + "slices" + "strings" + acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" ) var ( - _ = svcapitypes.CacheSubnetGroup{} - _ = acktags.NewTags() + _ = svcapitypes.CacheSubnetGroup{} + _ = acktags.NewTags() + ACKSystemTags = []string{"services.k8s.aws/namespace", "services.k8s.aws/controller-version"} ) -// ToACKTags converts the tags parameter into 'acktags.Tags' shape. +// convertToOrderedACKTags converts the tags parameter into 'acktags.Tags' shape. // This method helps in creating the hub(acktags.Tags) for merging -// default controller tags with existing resource tags. -func ToACKTags(tags []*svcapitypes.Tag) acktags.Tags { +// default controller tags with existing resource tags. 
It also returns a slice +// of keys maintaining the original key Order when the tags are a list +func convertToOrderedACKTags(tags []*svcapitypes.Tag) (acktags.Tags, []string) { result := acktags.NewTags() - if tags == nil || len(tags) == 0 { - return result - } + keyOrder := []string{} + if len(tags) == 0 { + return result, keyOrder + } for _, t := range tags { if t.Key != nil { - if t.Value == nil { - result[*t.Key] = "" - } else { + keyOrder = append(keyOrder, *t.Key) + if t.Value != nil { result[*t.Key] = *t.Value + } else { + result[*t.Key] = "" } } } - return result + return result, keyOrder } -// FromACKTags converts the tags parameter into []*svcapitypes.Tag shape. +// fromACKTags converts the tags parameter into []*svcapitypes.Tag shape. // This method helps in setting the tags back inside AWSResource after merging -// default controller tags with existing resource tags. -func FromACKTags(tags acktags.Tags) []*svcapitypes.Tag { +// default controller tags with existing resource tags. When a list, +// it maintains the order from original +func fromACKTags(tags acktags.Tags, keyOrder []string) []*svcapitypes.Tag { result := []*svcapitypes.Tag{} + + for _, k := range keyOrder { + v, ok := tags[k] + if ok { + tag := svcapitypes.Tag{Key: &k, Value: &v} + result = append(result, &tag) + delete(tags, k) + } + } for k, v := range tags { - kCopy := k - vCopy := v - tag := svcapitypes.Tag{Key: &kCopy, Value: &vCopy} + tag := svcapitypes.Tag{Key: &k, Value: &v} result = append(result, &tag) } + return result } + +// ignoreSystemTags ignores tags that have keys that start with "aws:" +// and ACKSystemTags, to avoid patching them to the resourceSpec. +// Eg. resources created with cloudformation have tags that cannot be +// removed by an ACK controller +func ignoreSystemTags(tags acktags.Tags) { + for k := range tags { + if strings.HasPrefix(k, "aws:") || + slices.Contains(ACKSystemTags, k) { + delete(tags, k) + } + } +} + +// syncAWSTags ensures AWS-managed tags (prefixed with "aws:") from the latest resource state +// are preserved in the desired state. This prevents the controller from attempting to +// modify AWS-managed tags, which would result in an error. +// +// AWS-managed tags are automatically added by AWS services (e.g., CloudFormation, Service Catalog) +// and cannot be modified or deleted through normal tag operations. Common examples include: +// - aws:cloudformation:stack-name +// - aws:servicecatalog:productArn +// +// Parameters: +// - a: The target Tags map to be updated (typically desired state) +// - b: The source Tags map containing AWS-managed tags (typically latest state) +// +// Example: +// +// latest := Tags{"aws:cloudformation:stack-name": "my-stack", "environment": "prod"} +// desired := Tags{"environment": "dev"} +// SyncAWSTags(desired, latest) +// desired now contains {"aws:cloudformation:stack-name": "my-stack", "environment": "dev"} +func syncAWSTags(a acktags.Tags, b acktags.Tags) { + for k := range b { + if strings.HasPrefix(k, "aws:") { + a[k] = b[k] + } + } +} diff --git a/pkg/resource/replication_group/annotations.go b/pkg/resource/replication_group/annotations.go deleted file mode 100644 index 8fb03abf..00000000 --- a/pkg/resource/replication_group/annotations.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. 
A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package replication_group - -import ( - svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" -) - -const ( - // AnnotationLastRequestedLDCs is an annotation whose value is the marshaled list of pointers to - // LogDeliveryConfigurationRequest structs passed in as input to either the create or modify API called most - // recently - AnnotationLastRequestedLDCs = svcapitypes.AnnotationPrefix + "last-requested-log-delivery-configurations" - // AnnotationLastRequestedCNT is an annotation whose value is passed in as input to either the create or modify API - // called most recently - AnnotationLastRequestedCNT = svcapitypes.AnnotationPrefix + "last-requested-cache-node-type" - // AnnotationLastRequestedNNG is an annotation whose value is passed in as input to either the create or modify API - // called most recently - AnnotationLastRequestedNNG = svcapitypes.AnnotationPrefix + "last-requested-num-node-groups" - // AnnotationLastRequestedNGC is an annotation whose value is the marshaled list of pointers to - // NodeGroupConfiguration structs passed in as input to either the create or modify API called most - // recently - AnnotationLastRequestedNGC = svcapitypes.AnnotationPrefix + "last-requested-node-group-configuration" -) diff --git a/pkg/resource/replication_group/custom_set_output.go b/pkg/resource/replication_group/custom_set_output.go deleted file mode 100644 index 9aa8b320..00000000 --- a/pkg/resource/replication_group/custom_set_output.go +++ /dev/null @@ -1,361 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package replication_group - -import ( - "context" - "encoding/json" - "strconv" - - svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" - ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" - "github.com/aws/aws-sdk-go/service/elasticache" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - // The number of minutes worth of events to retrieve. 
- // 14 days in minutes - eventsDuration = 20160 -) - -func (rm *resourceManager) CustomDescribeReplicationGroupsSetOutput( - ctx context.Context, - r *resource, - resp *elasticache.DescribeReplicationGroupsOutput, - ko *svcapitypes.ReplicationGroup, -) (*svcapitypes.ReplicationGroup, error) { - if len(resp.ReplicationGroups) == 0 { - return ko, nil - } - elem := resp.ReplicationGroups[0] - rm.customSetOutput(elem, ko) - err := rm.customSetOutputSupplementAPIs(ctx, r, elem, ko) - if err != nil { - return nil, err - } - return ko, nil -} - -func (rm *resourceManager) CustomCreateReplicationGroupSetOutput( - ctx context.Context, - r *resource, - resp *elasticache.CreateReplicationGroupOutput, - ko *svcapitypes.ReplicationGroup, -) (*svcapitypes.ReplicationGroup, error) { - rm.customSetOutput(resp.ReplicationGroup, ko) - rm.setAnnotationsFields(r, ko) - rm.setLastRequestedNodeGroupConfiguration(r, ko) - rm.setLastRequestedNumNodeGroups(r, ko) - return ko, nil -} - -func (rm *resourceManager) CustomModifyReplicationGroupSetOutput( - ctx context.Context, - r *resource, - resp *elasticache.ModifyReplicationGroupOutput, - ko *svcapitypes.ReplicationGroup, -) (*svcapitypes.ReplicationGroup, error) { - rm.customSetOutput(resp.ReplicationGroup, ko) - - // reset latest.spec.LDC to original value in desired to prevent stale data - // from the modify API being merged back into desired upon spec patching - var logDeliveryConfig []*svcapitypes.LogDeliveryConfigurationRequest - for _, ldc := range r.ko.Spec.LogDeliveryConfigurations { - logDeliveryConfig = append(logDeliveryConfig, ldc.DeepCopy()) - } - ko.Spec.LogDeliveryConfigurations = logDeliveryConfig - - // Keep the value of desired for CacheNodeType. - ko.Spec.CacheNodeType = r.ko.Spec.CacheNodeType - - rm.setAnnotationsFields(r, ko) - return ko, nil -} - -func (rm *resourceManager) customSetOutput( - respRG *elasticache.ReplicationGroup, - ko *svcapitypes.ReplicationGroup, -) { - if ko.Status.Conditions == nil { - ko.Status.Conditions = []*ackv1alpha1.Condition{} - } - - allNodeGroupsAvailable := true - nodeGroupMembersCount := 0 - memberClustersCount := 0 - if respRG.NodeGroups != nil { - for _, nodeGroup := range respRG.NodeGroups { - if nodeGroup.Status == nil || *nodeGroup.Status != "available" { - allNodeGroupsAvailable = false - break - } - } - for _, nodeGroup := range respRG.NodeGroups { - if nodeGroup.NodeGroupMembers == nil { - continue - } - nodeGroupMembersCount = nodeGroupMembersCount + len(nodeGroup.NodeGroupMembers) - } - } - if respRG.MemberClusters != nil { - memberClustersCount = len(respRG.MemberClusters) - } - - rgStatus := respRG.Status - syncConditionStatus := corev1.ConditionUnknown - if rgStatus != nil { - if (*rgStatus == "available" && allNodeGroupsAvailable && memberClustersCount == nodeGroupMembersCount) || - *rgStatus == "create-failed" { - syncConditionStatus = corev1.ConditionTrue - } else { - // resource in "creating", "modifying" , "deleting", "snapshotting" - // states is being modified at server end - // thus current status is considered out of sync. 
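				// (Full mapping, as implemented in this block: "available" with every node
				// group available and the member cluster count matching the node group
				// member count, or "create-failed", maps to ConditionTrue; any other
				// reported status maps to ConditionFalse; a missing status leaves the
				// condition Unknown.)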
- syncConditionStatus = corev1.ConditionFalse - } - } - var resourceSyncedCondition *ackv1alpha1.Condition = nil - for _, condition := range ko.Status.Conditions { - if condition.Type == ackv1alpha1.ConditionTypeResourceSynced { - resourceSyncedCondition = condition - break - } - } - if resourceSyncedCondition == nil { - resourceSyncedCondition = &ackv1alpha1.Condition{ - Type: ackv1alpha1.ConditionTypeResourceSynced, - Status: syncConditionStatus, - } - ko.Status.Conditions = append(ko.Status.Conditions, resourceSyncedCondition) - } else { - resourceSyncedCondition.Status = syncConditionStatus - } - - if rgStatus != nil && (*rgStatus == "available" || *rgStatus == "snapshotting") { - input, err := rm.newListAllowedNodeTypeModificationsPayLoad(respRG) - - if err == nil { - resp, apiErr := rm.sdkapi.ListAllowedNodeTypeModifications(input) - rm.metrics.RecordAPICall("READ_MANY", "ListAllowedNodeTypeModifications", apiErr) - // Overwrite the values for ScaleUp and ScaleDown - if apiErr == nil { - ko.Status.AllowedScaleDownModifications = resp.ScaleDownModifications - ko.Status.AllowedScaleUpModifications = resp.ScaleUpModifications - } - } - } else { - ko.Status.AllowedScaleDownModifications = nil - ko.Status.AllowedScaleUpModifications = nil - } - - // populate status logDeliveryConfigurations struct - if respRG.LogDeliveryConfigurations != nil { - var f11 []*svcapitypes.LogDeliveryConfiguration - for _, f11iter := range respRG.LogDeliveryConfigurations { - f11elem := &svcapitypes.LogDeliveryConfiguration{} - if f11iter.DestinationDetails != nil { - f11elemf0 := &svcapitypes.DestinationDetails{} - if f11iter.DestinationDetails.CloudWatchLogsDetails != nil { - f11elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} - if f11iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f11elemf0f0.LogGroup = f11iter.DestinationDetails.CloudWatchLogsDetails.LogGroup - } - f11elemf0.CloudWatchLogsDetails = f11elemf0f0 - } - if f11iter.DestinationDetails.KinesisFirehoseDetails != nil { - f11elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} - if f11iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - f11elemf0f1.DeliveryStream = f11iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream - } - f11elemf0.KinesisFirehoseDetails = f11elemf0f1 - } - f11elem.DestinationDetails = f11elemf0 - } - if f11iter.DestinationType != nil { - f11elem.DestinationType = f11iter.DestinationType - } - if f11iter.LogFormat != nil { - f11elem.LogFormat = f11iter.LogFormat - } - if f11iter.LogType != nil { - f11elem.LogType = f11iter.LogType - } - if f11iter.Status != nil { - f11elem.Status = f11iter.Status - } - if f11iter.Message != nil { - f11elem.Message = f11iter.Message - } - f11 = append(f11, f11elem) - } - ko.Status.LogDeliveryConfigurations = f11 - } else { - ko.Status.LogDeliveryConfigurations = nil - } -} - -// newListAllowedNodeTypeModificationsPayLoad returns an SDK-specific struct for the HTTP request -// payload of the ListAllowedNodeTypeModifications API call. 
-func (rm *resourceManager) newListAllowedNodeTypeModificationsPayLoad(respRG *elasticache.ReplicationGroup) ( - *svcsdk.ListAllowedNodeTypeModificationsInput, error) { - res := &svcsdk.ListAllowedNodeTypeModificationsInput{} - - if respRG.ReplicationGroupId != nil { - res.SetReplicationGroupId(*respRG.ReplicationGroupId) - } - - return res, nil -} - -func (rm *resourceManager) customSetOutputSupplementAPIs( - ctx context.Context, - r *resource, - respRG *elasticache.ReplicationGroup, - ko *svcapitypes.ReplicationGroup, -) error { - events, err := rm.provideEvents(ctx, r.ko.Spec.ReplicationGroupID, 20) - if err != nil { - return err - } - ko.Status.Events = events - return nil -} - -func (rm *resourceManager) provideEvents( - ctx context.Context, - replicationGroupId *string, - maxRecords int64, -) ([]*svcapitypes.Event, error) { - input := &elasticache.DescribeEventsInput{} - input.SetSourceType("replication-group") - input.SetSourceIdentifier(*replicationGroupId) - input.SetMaxRecords(maxRecords) - input.SetDuration(eventsDuration) - resp, err := rm.sdkapi.DescribeEventsWithContext(ctx, input) - rm.metrics.RecordAPICall("READ_MANY", "DescribeEvents-ReplicationGroup", err) - if err != nil { - rm.log.V(1).Info("Error during DescribeEvents-ReplicationGroup", "error", err) - return nil, err - } - events := []*svcapitypes.Event{} - if resp.Events != nil { - for _, respEvent := range resp.Events { - event := &svcapitypes.Event{} - if respEvent.Message != nil { - event.Message = respEvent.Message - } - if respEvent.Date != nil { - eventDate := metav1.NewTime(*respEvent.Date) - event.Date = &eventDate - } - // Not copying redundant source id (replication id) - // and source type (replication group) - // into each event object - events = append(events, event) - } - } - return events, nil -} - -// setAnnotationsFields copies the desired object's annotations, populates any -// relevant fields, and sets the latest object's annotations to this newly populated map. -// Fields that are handled by custom modify implementation are not set here. -// This should only be called upon a successful create or modify call. -func (rm *resourceManager) setAnnotationsFields( - r *resource, - ko *svcapitypes.ReplicationGroup, -) { - annotations := getAnnotationsFields(r, ko) - - rm.setLastRequestedLogDeliveryConfigurations(r, annotations) - rm.setLastRequestedCacheNodeType(r, annotations) - ko.ObjectMeta.Annotations = annotations -} - -// getAnnotationsFields return the annotations map that would be used to set the fields -func getAnnotationsFields( - r *resource, - ko *svcapitypes.ReplicationGroup) map[string]string { - - if ko.ObjectMeta.Annotations != nil { - return ko.ObjectMeta.Annotations - } - - desiredAnnotations := r.ko.ObjectMeta.GetAnnotations() - annotations := make(map[string]string) - for k, v := range desiredAnnotations { - annotations[k] = v - } - - ko.ObjectMeta.Annotations = annotations - return annotations -} - -// setLastRequestedLogDeliveryConfigurations copies desired.Spec.LogDeliveryConfigurations -// into the annotations of the object. 
-// r is the desired resource, and annotations is the annotations map modified by this method -func (rm *resourceManager) setLastRequestedLogDeliveryConfigurations( - r *resource, - annotations map[string]string, -) { - lastRequestedConfigs, err := json.Marshal(r.ko.Spec.LogDeliveryConfigurations) - if err != nil { - annotations[AnnotationLastRequestedLDCs] = "null" - } else { - annotations[AnnotationLastRequestedLDCs] = string(lastRequestedConfigs) - } -} - -// setLastRequestedCacheNodeType copies desired.Spec.CacheNodeType into the annotation -// of the object. -func (rm *resourceManager) setLastRequestedCacheNodeType( - r *resource, - annotations map[string]string, -) { - if r.ko.Spec.CacheNodeType != nil { - annotations[AnnotationLastRequestedCNT] = *r.ko.Spec.CacheNodeType - } -} - -// setLastRequestedNodeGroupConfiguration copies desired.spec.NodeGroupConfiguration into the -// annotation of the object -func (rm *resourceManager) setLastRequestedNodeGroupConfiguration( - r *resource, - ko *svcapitypes.ReplicationGroup, -) { - annotations := getAnnotationsFields(r, ko) - lastRequestedConfigs, err := json.Marshal(r.ko.Spec.NodeGroupConfiguration) - if err != nil { - annotations[AnnotationLastRequestedNGC] = "null" - } else { - annotations[AnnotationLastRequestedNGC] = string(lastRequestedConfigs) - } -} - -// setLastRequestedNumNodeGroups copies desired.spec.NumNodeGroups into the -// annotation of the object -func (rm *resourceManager) setLastRequestedNumNodeGroups( - r *resource, - ko *svcapitypes.ReplicationGroup, -) { - annotations := getAnnotationsFields(r, ko) - if r.ko.Spec.NumNodeGroups != nil { - annotations[AnnotationLastRequestedNNG] = strconv.Itoa(int(*r.ko.Spec.NumNodeGroups)) - } else { - annotations[AnnotationLastRequestedNNG] = "null" - } -} diff --git a/pkg/resource/replication_group/custom_update_api.go b/pkg/resource/replication_group/custom_update_api.go deleted file mode 100644 index e4c92896..00000000 --- a/pkg/resource/replication_group/custom_update_api.go +++ /dev/null @@ -1,771 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package replication_group - -import ( - "context" - "encoding/json" - "fmt" - "github.com/aws-controllers-k8s/runtime/pkg/requeue" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/pkg/errors" - "reflect" - "sort" - "strconv" - - svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" - ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" -) - -// Implements specialized logic for replication group updates. 
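// It requeues while the group or any of its node groups is not yet "available",
// surfaces terminal errors when a CacheNodeType or node group change was rolled
// back asynchronously by the service, and otherwise dispatches to the narrowest
// applicable call: ModifyReplicationGroup (disabling automatic failover or multi-AZ,
// security group or engine version changes), Increase/DecreaseReplicaCount, or
// ModifyReplicationGroupShardConfiguration.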
-func (rm *resourceManager) CustomModifyReplicationGroup( - ctx context.Context, - desired *resource, - latest *resource, - delta *ackcompare.Delta, -) (*resource, error) { - - latestRGStatus := latest.ko.Status.Status - - allNodeGroupsAvailable := true - nodeGroupMembersCount := 0 - if latest.ko.Status.NodeGroups != nil { - for _, nodeGroup := range latest.ko.Status.NodeGroups { - if nodeGroup.Status == nil || *nodeGroup.Status != "available" { - allNodeGroupsAvailable = false - break - } - } - for _, nodeGroup := range latest.ko.Status.NodeGroups { - if nodeGroup.NodeGroupMembers == nil { - continue - } - nodeGroupMembersCount = nodeGroupMembersCount + len(nodeGroup.NodeGroupMembers) - } - } - - if latestRGStatus == nil || *latestRGStatus != "available" || !allNodeGroupsAvailable { - return nil, requeue.NeededAfter( - errors.New("Replication Group can not be modified, it is not in 'available' state."), - requeue.DefaultRequeueAfterDuration) - } - - memberClustersCount := 0 - if latest.ko.Status.MemberClusters != nil { - memberClustersCount = len(latest.ko.Status.MemberClusters) - } - if memberClustersCount != nodeGroupMembersCount { - return nil, requeue.NeededAfter( - errors.New("Replication Group can not be modified, "+ - "need to wait for member clusters and node group members."), - requeue.DefaultRequeueAfterDuration) - } - - // Handle the asynchronous rollback case for while Scaling down. - // This means that we have already attempted to apply the CacheNodeType once and - // were not successful hence we will set a terminal condition. - if !cacheNodeTypeRequiresUpdate(desired) && delta.DifferentAt("Spec.CacheNodeType") { - return nil, awserr.New("InvalidParameterCombination", "Cannot update CacheNodeType, "+ - "Please refer to Events for more details", nil) - - } - - // Handle the asynchronous rollback for Resharding. - if !nodeGroupRequiresUpdate(desired) && rm.shardConfigurationsDiffer(desired, latest) { - - return nil, awserr.New("InvalidParameterCombination", "Cannot update NodeGroups, "+ - "Please refer to Events for more details", nil) - } - - // Handle NodeGroupConfiguration asynchronous rollback situations other than Resharding. - if !nodeGroupRequiresUpdate(desired) && (rm.replicaCountDifference(desired, latest) != 0 && !delta.DifferentAt("Spec.ReplicasPerNodeGroup")) { - return nil, awserr.New("InvalidParameterCombination", "Cannot update NodeGroupConfiguration, "+ - "Please refer to Events for more details", nil) - } - - // Order of operations when diffs map to multiple updates APIs: - // 1. When automaticFailoverEnabled differs: - // if automaticFailoverEnabled == false; do nothing in this custom logic, let the modify execute first. - // else if automaticFailoverEnabled == true then following logic should execute first. - // 2. When multiAZ differs - // if multiAZ = true then below is fine. - // else if multiAZ = false ; do nothing in custom logic, let the modify execute. - // 3. updateReplicaCount() is invoked Before updateShardConfiguration() - // because both accept availability zones, however the number of - // values depend on replica count. 
- if desired.ko.Spec.AutomaticFailoverEnabled != nil && *desired.ko.Spec.AutomaticFailoverEnabled == false { - latestAutomaticFailoverEnabled := latest.ko.Status.AutomaticFailover != nil && *latest.ko.Status.AutomaticFailover == "enabled" - if latestAutomaticFailoverEnabled != *desired.ko.Spec.AutomaticFailoverEnabled { - return rm.modifyReplicationGroup(ctx, desired, latest, delta) - } - } - if desired.ko.Spec.MultiAZEnabled != nil && *desired.ko.Spec.MultiAZEnabled == false { - latestMultiAZEnabled := latest.ko.Status.MultiAZ != nil && *latest.ko.Status.MultiAZ == "enabled" - if latestMultiAZEnabled != *desired.ko.Spec.MultiAZEnabled { - return rm.modifyReplicationGroup(ctx, desired, latest, delta) - } - } - - // increase/decrease replica count - if diff := rm.replicaCountDifference(desired, latest); diff != 0 { - if diff > 0 { - return rm.increaseReplicaCount(ctx, desired, latest) - } - return rm.decreaseReplicaCount(ctx, desired, latest) - } - - // If there is a scale up modification, then we would prioritize it - // over increase/decrease shards. This is important since performing - // scale in without scale up might fail due to insufficient memory. - if delta.DifferentAt("Spec.CacheNodeType") && desired.ko.Status.AllowedScaleUpModifications != nil { - if desired.ko.Spec.CacheNodeType != nil { - for _, scaleUpInstance := range desired.ko.Status.AllowedScaleUpModifications { - if *scaleUpInstance == *desired.ko.Spec.CacheNodeType { - return nil, nil - } - } - } - } - - // increase/decrease shards - if rm.shardConfigurationsDiffer(desired, latest) { - return rm.updateShardConfiguration(ctx, desired, latest) - } - - return rm.modifyReplicationGroup(ctx, desired, latest, delta) -} - -// modifyReplicationGroup updates replication group -// it handles properties that put replication group in -// modifying state if these are supplied to modify API -// irrespective of apply immediately. -func (rm *resourceManager) modifyReplicationGroup( - ctx context.Context, - desired *resource, - latest *resource, - delta *ackcompare.Delta, -) (*resource, error) { - // Method currently handles SecurityGroupIDs, EngineVersion - // Avoid making unnecessary DescribeCacheCluster API call if both fields are nil in spec. 
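	// Security group membership is read from a member cache cluster (see
	// securityGroupIdsDiffer), hence the DescribeCacheClusters lookup below
	// before the diff can be computed.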
- if desired.ko.Spec.SecurityGroupIDs == nil && desired.ko.Spec.EngineVersion == nil { - // no updates done - return nil, nil - } - - // Get details using describe cache cluster to compute diff - latestCacheCluster, err := rm.describeCacheCluster(ctx, latest) - if err != nil { - return nil, err - } - - // SecurityGroupIds, EngineVersion - if rm.securityGroupIdsDiffer(desired, latest, latestCacheCluster) || - delta.DifferentAt("Spec.EngineVersion") { - input := rm.newModifyReplicationGroupRequestPayload(desired, latest, latestCacheCluster, delta) - resp, respErr := rm.sdkapi.ModifyReplicationGroupWithContext(ctx, input) - rm.metrics.RecordAPICall("UPDATE", "ModifyReplicationGroup", respErr) - if respErr != nil { - rm.log.V(1).Info("Error during ModifyReplicationGroup", "error", respErr) - return nil, respErr - } - - return rm.setReplicationGroupOutput(desired, resp.ReplicationGroup) - } - - // no updates done - return nil, nil -} - -// replicaConfigurationsDifference returns -// positive number if desired replica count is greater than latest replica count -// negative number if desired replica count is less than latest replica count -// 0 otherwise -func (rm *resourceManager) replicaCountDifference( - desired *resource, - latest *resource, -) int { - desiredSpec := desired.ko.Spec - - // There are two ways of setting replica counts for NodeGroups in Elasticache ReplicationGroup. - // - The first way is to have the same replica count for all node groups. - // In this case, the Spec.ReplicasPerNodeGroup field is set to a non-nil-value integer pointer. - // - The second way is to set different replica counts per node group. - // In this case, the Spec.NodeGroupConfiguration field is set to a non-nil NodeGroupConfiguration slice - // of NodeGroupConfiguration structs that each have a ReplicaCount non-nil-value integer pointer field - // that contains the number of replicas for that particular node group. 
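	// For illustration only (hypothetical values), the two desired-spec shapes this
	// comparison handles:
	//
	//   spec:
	//     replicasPerNodeGroup: 2          # one uniform replica count for all shards
	//
	//   spec:
	//     nodeGroupConfiguration:          # or a replica count per node group
	//     - nodeGroupID: "0001"
	//       replicaCount: 1
	//     - nodeGroupID: "0002"
	//       replicaCount: 3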
- if desiredSpec.ReplicasPerNodeGroup != nil { - return int(*desiredSpec.ReplicasPerNodeGroup - *latest.ko.Spec.ReplicasPerNodeGroup) - } else if desiredSpec.NodeGroupConfiguration != nil { - return rm.diffReplicasNodeGroupConfiguration(desired, latest) - } - return 0 -} - -// diffReplicasNodeGroupConfiguration takes desired Spec.NodeGroupConfiguration slice field into account to return -// positive number if desired replica count is greater than latest replica count -// negative number if desired replica count is less than latest replica count -// 0 otherwise -func (rm *resourceManager) diffReplicasNodeGroupConfiguration( - desired *resource, - latest *resource, -) int { - desiredSpec := desired.ko.Spec - latestStatus := latest.ko.Status - // each shard could have different value for replica count - latestReplicaCounts := map[string]int{} - for _, latestShard := range latestStatus.NodeGroups { - if latestShard.NodeGroupID == nil { - continue - } - latestReplicaCount := 0 - if latestShard.NodeGroupMembers != nil { - if len(latestShard.NodeGroupMembers) > 0 { - latestReplicaCount = len(latestShard.NodeGroupMembers) - 1 - } - } - latestReplicaCounts[*latestShard.NodeGroupID] = latestReplicaCount - } - for _, desiredShard := range desiredSpec.NodeGroupConfiguration { - if desiredShard.NodeGroupID == nil || desiredShard.ReplicaCount == nil { - // no specs to compare for this shard - continue - } - latestShardReplicaCount, found := latestReplicaCounts[*desiredShard.NodeGroupID] - if !found { - // shard not present in status - continue - } - if desiredShardReplicaCount := int(*desiredShard.ReplicaCount); desiredShardReplicaCount != latestShardReplicaCount { - rm.log.V(1).Info( - "ReplicaCount differs", - "NodeGroup", *desiredShard.NodeGroupID, - "desired", int(*desiredShard.ReplicaCount), - "latest", latestShardReplicaCount, - ) - return desiredShardReplicaCount - latestShardReplicaCount - } - } - return 0 -} - -// shardConfigurationsDiffer returns true if shard -// configuration differs between desired, latest resource. 
-func (rm *resourceManager) shardConfigurationsDiffer( - desired *resource, - latest *resource, -) bool { - desiredSpec := desired.ko.Spec - latestStatus := latest.ko.Status - - // desired shards - var desiredShardsCount *int64 = desiredSpec.NumNodeGroups - if desiredShardsCount == nil && desiredSpec.NodeGroupConfiguration != nil { - numShards := int64(len(desiredSpec.NodeGroupConfiguration)) - desiredShardsCount = &numShards - } - if desiredShardsCount == nil { - // no shards config in desired specs - return false - } - - // latest shards - var latestShardsCount *int64 = nil - if latestStatus.NodeGroups != nil { - numShards := int64(len(latestStatus.NodeGroups)) - latestShardsCount = &numShards - } - - return latestShardsCount == nil || *desiredShardsCount != *latestShardsCount -} - -func (rm *resourceManager) increaseReplicaCount( - ctx context.Context, - desired *resource, - latest *resource, -) (*resource, error) { - input, err := rm.newIncreaseReplicaCountRequestPayload(desired, latest) - if err != nil { - return nil, err - } - resp, respErr := rm.sdkapi.IncreaseReplicaCountWithContext(ctx, input) - rm.metrics.RecordAPICall("UPDATE", "IncreaseReplicaCount", respErr) - if respErr != nil { - rm.log.V(1).Info("Error during IncreaseReplicaCount", "error", respErr) - return nil, respErr - } - return rm.setReplicationGroupOutput(desired, resp.ReplicationGroup) -} - -func (rm *resourceManager) decreaseReplicaCount( - ctx context.Context, - desired *resource, - latest *resource, -) (*resource, error) { - input, err := rm.newDecreaseReplicaCountRequestPayload(desired, latest) - if err != nil { - return nil, err - } - resp, respErr := rm.sdkapi.DecreaseReplicaCountWithContext(ctx, input) - rm.metrics.RecordAPICall("UPDATE", "DecreaseReplicaCount", respErr) - if respErr != nil { - rm.log.V(1).Info("Error during DecreaseReplicaCount", "error", respErr) - return nil, respErr - } - return rm.setReplicationGroupOutput(desired, resp.ReplicationGroup) -} - -func (rm *resourceManager) updateShardConfiguration( - ctx context.Context, - desired *resource, - latest *resource, -) (*resource, error) { - input, err := rm.newUpdateShardConfigurationRequestPayload(desired, latest) - if err != nil { - return nil, err - } - resp, respErr := rm.sdkapi.ModifyReplicationGroupShardConfigurationWithContext(ctx, input) - rm.metrics.RecordAPICall("UPDATE", "ModifyReplicationGroupShardConfiguration", respErr) - if respErr != nil { - rm.log.V(1).Info("Error during ModifyReplicationGroupShardConfiguration", "error", respErr) - return nil, respErr - } - - r, err := rm.setReplicationGroupOutput(desired, resp.ReplicationGroup) - - if err != nil { - return r, err - } - - ko := r.ko.DeepCopy() - // Update the annotations since API call was successful - rm.setLastRequestedNodeGroupConfiguration(desired, ko) - rm.setLastRequestedNumNodeGroups(desired, ko) - return &resource{ko}, nil -} - -// newIncreaseReplicaCountRequestPayload returns an SDK-specific struct for the HTTP request -// payload of the Create API call for the resource -func (rm *resourceManager) newIncreaseReplicaCountRequestPayload( - desired *resource, - latest *resource, -) (*svcsdk.IncreaseReplicaCountInput, error) { - res := &svcsdk.IncreaseReplicaCountInput{} - desiredSpec := desired.ko.Spec - - res.SetApplyImmediately(true) - if desiredSpec.ReplicationGroupID != nil { - res.SetReplicationGroupId(*desiredSpec.ReplicationGroupID) - } - if desiredSpec.ReplicasPerNodeGroup != nil { - res.SetNewReplicaCount(*desiredSpec.ReplicasPerNodeGroup) - } - - latestStatus 
:= latest.ko.Status - // each shard could have different value for replica count - latestReplicaCounts := map[string]int{} - for _, latestShard := range latestStatus.NodeGroups { - if latestShard.NodeGroupID == nil { - continue - } - latestReplicaCount := 0 - if latestShard.NodeGroupMembers != nil { - if len(latestShard.NodeGroupMembers) > 0 { - latestReplicaCount = len(latestShard.NodeGroupMembers) - 1 - } - } - latestReplicaCounts[*latestShard.NodeGroupID] = latestReplicaCount - } - - if desiredSpec.NodeGroupConfiguration != nil { - shardsConfig := []*svcsdk.ConfigureShard{} - for _, desiredShard := range desiredSpec.NodeGroupConfiguration { - if desiredShard.NodeGroupID == nil { - continue - } - _, found := latestReplicaCounts[*desiredShard.NodeGroupID] - if !found { - continue - } - // shard has an Id and it is present on server. - shardConfig := &svcsdk.ConfigureShard{} - shardConfig.SetNodeGroupId(*desiredShard.NodeGroupID) - if desiredShard.ReplicaCount != nil { - shardConfig.SetNewReplicaCount(*desiredShard.ReplicaCount) - } - shardAZs := []*string{} - if desiredShard.PrimaryAvailabilityZone != nil { - shardAZs = append(shardAZs, desiredShard.PrimaryAvailabilityZone) - } - if desiredShard.ReplicaAvailabilityZones != nil { - for _, desiredAZ := range desiredShard.ReplicaAvailabilityZones { - shardAZs = append(shardAZs, desiredAZ) - } - } - if len(shardAZs) > 0 { - shardConfig.SetPreferredAvailabilityZones(shardAZs) - } - shardsConfig = append(shardsConfig, shardConfig) - } - res.SetReplicaConfiguration(shardsConfig) - } - - return res, nil -} - -// newDecreaseReplicaCountRequestPayload returns an SDK-specific struct for the HTTP request -// payload of the Create API call for the resource -func (rm *resourceManager) newDecreaseReplicaCountRequestPayload( - desired *resource, - latest *resource, -) (*svcsdk.DecreaseReplicaCountInput, error) { - res := &svcsdk.DecreaseReplicaCountInput{} - desiredSpec := desired.ko.Spec - - res.SetApplyImmediately(true) - if desiredSpec.ReplicationGroupID != nil { - res.SetReplicationGroupId(*desiredSpec.ReplicationGroupID) - } - if desiredSpec.ReplicasPerNodeGroup != nil { - res.SetNewReplicaCount(*desiredSpec.ReplicasPerNodeGroup) - } - - latestStatus := latest.ko.Status - // each shard could have different value for replica count - latestReplicaCounts := map[string]int{} - for _, latestShard := range latestStatus.NodeGroups { - if latestShard.NodeGroupID == nil { - continue - } - latestReplicaCount := 0 - if latestShard.NodeGroupMembers != nil { - if len(latestShard.NodeGroupMembers) > 0 { - latestReplicaCount = len(latestShard.NodeGroupMembers) - 1 - } - } - latestReplicaCounts[*latestShard.NodeGroupID] = latestReplicaCount - } - - if desiredSpec.NodeGroupConfiguration != nil { - shardsConfig := []*svcsdk.ConfigureShard{} - for _, desiredShard := range desiredSpec.NodeGroupConfiguration { - if desiredShard.NodeGroupID == nil { - continue - } - _, found := latestReplicaCounts[*desiredShard.NodeGroupID] - if !found { - continue - } - // shard has an Id and it is present on server. 
- shardConfig := &svcsdk.ConfigureShard{} - shardConfig.SetNodeGroupId(*desiredShard.NodeGroupID) - if desiredShard.ReplicaCount != nil { - shardConfig.SetNewReplicaCount(*desiredShard.ReplicaCount) - } - shardAZs := []*string{} - if desiredShard.PrimaryAvailabilityZone != nil { - shardAZs = append(shardAZs, desiredShard.PrimaryAvailabilityZone) - } - if desiredShard.ReplicaAvailabilityZones != nil { - for _, desiredAZ := range desiredShard.ReplicaAvailabilityZones { - shardAZs = append(shardAZs, desiredAZ) - } - } - if len(shardAZs) > 0 { - shardConfig.SetPreferredAvailabilityZones(shardAZs) - } - shardsConfig = append(shardsConfig, shardConfig) - } - res.SetReplicaConfiguration(shardsConfig) - } - - return res, nil -} - -// newUpdateShardConfigurationRequestPayload returns an SDK-specific struct for the HTTP request -// payload of the Update API call for the resource -func (rm *resourceManager) newUpdateShardConfigurationRequestPayload( - desired *resource, - latest *resource, -) (*svcsdk.ModifyReplicationGroupShardConfigurationInput, error) { - res := &svcsdk.ModifyReplicationGroupShardConfigurationInput{} - - desiredSpec := desired.ko.Spec - latestStatus := latest.ko.Status - - // Mandatory arguments - // - ApplyImmediately - // - ReplicationGroupId - // - NodeGroupCount - res.SetApplyImmediately(true) - if desiredSpec.ReplicationGroupID != nil { - res.SetReplicationGroupId(*desiredSpec.ReplicationGroupID) - } - var desiredShardsCount *int64 = desiredSpec.NumNodeGroups - if desiredShardsCount == nil && desiredSpec.NodeGroupConfiguration != nil { - numShards := int64(len(desiredSpec.NodeGroupConfiguration)) - desiredShardsCount = &numShards - } - if desiredShardsCount != nil { - res.SetNodeGroupCount(*desiredShardsCount) - } - - // If desired nodegroup count (number of shards): - // - increases, then (optional) provide ReshardingConfiguration - // - decreases, then (mandatory) provide - // either NodeGroupsToRemove - // or NodeGroupsToRetain - var latestShardsCount *int64 = nil - if latestStatus.NodeGroups != nil { - numShards := int64(len(latestStatus.NodeGroups)) - latestShardsCount = &numShards - } - - increase := (desiredShardsCount != nil && latestShardsCount != nil && *desiredShardsCount > *latestShardsCount) || - (desiredShardsCount != nil && latestShardsCount == nil) - decrease := desiredShardsCount != nil && latestShardsCount != nil && *desiredShardsCount < *latestShardsCount - // Additional arguments - shardsConfig := []*svcsdk.ReshardingConfiguration{} - shardsToRetain := []*string{} - if desiredSpec.NodeGroupConfiguration != nil { - for _, desiredShard := range desiredSpec.NodeGroupConfiguration { - shardConfig := &svcsdk.ReshardingConfiguration{} - if desiredShard.NodeGroupID != nil { - shardConfig.SetNodeGroupId(*desiredShard.NodeGroupID) - shardsToRetain = append(shardsToRetain, desiredShard.NodeGroupID) - } - shardAZs := []*string{} - if desiredShard.PrimaryAvailabilityZone != nil { - shardAZs = append(shardAZs, desiredShard.PrimaryAvailabilityZone) - } - if desiredShard.ReplicaAvailabilityZones != nil { - for _, desiredAZ := range desiredShard.ReplicaAvailabilityZones { - shardAZs = append(shardAZs, desiredAZ) - } - shardConfig.SetPreferredAvailabilityZones(shardAZs) - } - shardsConfig = append(shardsConfig, shardConfig) - } - } else if decrease { - for i := 0; i < int(*desiredShardsCount); i++ { - shardsToRetain = append(shardsToRetain, desired.ko.Status.NodeGroups[i].NodeGroupID) - } - } - - if increase { - if len(shardsConfig) > 0 { - 
res.SetReshardingConfiguration(shardsConfig) - } - } else if decrease { - if len(shardsToRetain) == 0 { - return nil, awserr.New("InvalidParameterValue", "At least one node group should be present.", nil) - } - res.SetNodeGroupsToRetain(shardsToRetain) - } - - return res, nil -} - -// getAnyCacheClusterIDFromNodeGroups returns a cache cluster ID from supplied node groups. -// Any cache cluster Id which is not nil is returned. -func (rm *resourceManager) getAnyCacheClusterIDFromNodeGroups( - nodeGroups []*svcapitypes.NodeGroup, -) *string { - if nodeGroups == nil { - return nil - } - - var cacheClusterId *string = nil - for _, nodeGroup := range nodeGroups { - if nodeGroup.NodeGroupMembers == nil { - continue - } - for _, nodeGroupMember := range nodeGroup.NodeGroupMembers { - if nodeGroupMember.CacheClusterID == nil { - continue - } - cacheClusterId = nodeGroupMember.CacheClusterID - break - } - if cacheClusterId != nil { - break - } - } - return cacheClusterId -} - -// describeCacheCluster provides CacheCluster object -// per the supplied latest Replication Group Id -// it invokes DescribeCacheClusters API to do so -func (rm *resourceManager) describeCacheCluster( - ctx context.Context, - resource *resource, -) (*svcsdk.CacheCluster, error) { - input := &svcsdk.DescribeCacheClustersInput{} - - ko := resource.ko - latestStatus := ko.Status - if latestStatus.NodeGroups == nil { - return nil, nil - } - cacheClusterId := rm.getAnyCacheClusterIDFromNodeGroups(latestStatus.NodeGroups) - if cacheClusterId == nil { - return nil, nil - } - - input.SetCacheClusterId(*cacheClusterId) - resp, respErr := rm.sdkapi.DescribeCacheClustersWithContext(ctx, input) - rm.metrics.RecordAPICall("READ_MANY", "DescribeCacheClusters", respErr) - if respErr != nil { - rm.log.V(1).Info("Error during DescribeCacheClusters", "error", respErr) - return nil, respErr - } - if resp.CacheClusters == nil { - return nil, nil - } - - for _, cc := range resp.CacheClusters { - if cc == nil { - continue - } - return cc, nil - } - return nil, fmt.Errorf("could not find a non-nil cache cluster from API response") -} - -// securityGroupIdsDiffer return true if -// Security Group Ids differ between desired spec and latest (from cache cluster) status -func (rm *resourceManager) securityGroupIdsDiffer( - desired *resource, - latest *resource, - latestCacheCluster *svcsdk.CacheCluster, -) bool { - if desired.ko.Spec.SecurityGroupIDs == nil { - return false - } - - desiredIds := []*string{} - for _, id := range desired.ko.Spec.SecurityGroupIDs { - if id == nil { - continue - } - var value string - value = *id - desiredIds = append(desiredIds, &value) - } - sort.Slice(desiredIds, func(i, j int) bool { - return *desiredIds[i] < *desiredIds[j] - }) - - latestIds := []*string{} - if latestCacheCluster != nil && latestCacheCluster.SecurityGroups != nil { - for _, latestSG := range latestCacheCluster.SecurityGroups { - if latestSG == nil { - continue - } - var value string - value = *latestSG.SecurityGroupId - latestIds = append(latestIds, &value) - } - } - sort.Slice(latestIds, func(i, j int) bool { - return *latestIds[i] < *latestIds[j] - }) - - if len(desiredIds) != len(latestIds) { - return true // differ - } - for index, desiredId := range desiredIds { - if *desiredId != *latestIds[index] { - return true // differ - } - } - // no difference - return false -} - -// newModifyReplicationGroupRequestPayload provides request input object -func (rm *resourceManager) newModifyReplicationGroupRequestPayload( - desired *resource, - latest 
*resource, - latestCacheCluster *svcsdk.CacheCluster, - delta *ackcompare.Delta, -) *svcsdk.ModifyReplicationGroupInput { - input := &svcsdk.ModifyReplicationGroupInput{} - - input.SetApplyImmediately(true) - if desired.ko.Spec.ReplicationGroupID != nil { - input.SetReplicationGroupId(*desired.ko.Spec.ReplicationGroupID) - } - - if rm.securityGroupIdsDiffer(desired, latest, latestCacheCluster) && - desired.ko.Spec.SecurityGroupIDs != nil { - ids := []*string{} - for _, id := range desired.ko.Spec.SecurityGroupIDs { - var value string - value = *id - ids = append(ids, &value) - } - input.SetSecurityGroupIds(ids) - } - - if delta.DifferentAt("Spec.EngineVersion") && - desired.ko.Spec.EngineVersion != nil { - input.SetEngineVersion(*desired.ko.Spec.EngineVersion) - } - - return input -} - -// cacheNodeTypeRequiresUpdate retrieves the last requested cacheNodeType saved in annotations and compares them -// to the current desired cacheNodeType -func cacheNodeTypeRequiresUpdate(desired *resource) bool { - annotations := desired.ko.ObjectMeta.GetAnnotations() - if val, ok := annotations[AnnotationLastRequestedCNT]; ok && desired.ko.Spec.CacheNodeType != nil { - return val != *desired.ko.Spec.CacheNodeType - } - - // This means there is delta and no value in annotation or in Spec - return true -} - -// nodeGroupRequiresUpdate retrieves the last applied NumNodeGroups and NodeGroupConfiguration and compares them -// to the current desired NumNodeGroups and NodeGroupConfiguration -func nodeGroupRequiresUpdate(desired *resource) bool { - annotations := desired.ko.ObjectMeta.GetAnnotations() - - if val, ok := annotations[AnnotationLastRequestedNNG]; ok && val != "null" { - numNodes, err := strconv.ParseInt(val, 10, 64) - - if err != nil { - return false - } - - if numNodes != *desired.ko.Spec.NumNodeGroups { - return true - } - - return false - } - - desiredNodeGroupConfig := desired.ko.Spec.NodeGroupConfiguration - if val, ok := annotations[AnnotationLastRequestedNGC]; ok && val != "null" { - var lastRequestedNodeGroupConfig []*svcapitypes.NodeGroupConfiguration - _ = json.Unmarshal([]byte(val), &lastRequestedNodeGroupConfig) - return !reflect.DeepEqual(desiredNodeGroupConfig, lastRequestedNodeGroupConfig) - } - - // This means there is delta and no value in annotation or in Spec - return true -} diff --git a/pkg/resource/replication_group/custom_update_api_test.go b/pkg/resource/replication_group/custom_update_api_test.go deleted file mode 100644 index 619f3f82..00000000 --- a/pkg/resource/replication_group/custom_update_api_test.go +++ /dev/null @@ -1,1058 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. 
- -package replication_group - -import ( - "context" - "fmt" - "path/filepath" - "strconv" - "testing" - - "github.com/aws-controllers-k8s/elasticache-controller/pkg/testutil" - ackmetrics "github.com/aws-controllers-k8s/runtime/pkg/metrics" - "github.com/aws-controllers-k8s/runtime/pkg/requeue" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/pkg/errors" - "github.com/stretchr/testify/mock" - "go.uber.org/zap/zapcore" - ctrlrtzap "sigs.k8s.io/controller-runtime/pkg/log/zap" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" - mocksvcsdkapi "github.com/aws-controllers-k8s/elasticache-controller/mocks/aws-sdk-go/elasticache" - ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" -) - -// Helper methods to setup tests -// provideResourceManager returns pointer to resourceManager -func provideResourceManager() *resourceManager { - return provideResourceManagerWithMockSDKAPI(&mocksvcsdkapi.ElastiCacheAPI{}) -} - -// provideResourceManagerWithMockSDKAPI accepts MockElastiCacheAPI and returns pointer to resourceManager -// the returned resourceManager is configured to use mockapi api. -func provideResourceManagerWithMockSDKAPI(mockElastiCacheAPI *mocksvcsdkapi.ElastiCacheAPI) *resourceManager { - zapOptions := ctrlrtzap.Options{ - Development: true, - Level: zapcore.InfoLevel, - } - fakeLogger := ctrlrtzap.New(ctrlrtzap.UseFlagOptions(&zapOptions)) - return &resourceManager{ - rr: nil, - awsAccountID: "", - awsRegion: "", - sess: nil, - sdkapi: mockElastiCacheAPI, - log: fakeLogger, - metrics: ackmetrics.NewMetrics("elasticache"), - } -} - -// provideResource returns pointer to resource -func provideResource() *resource { - return provideResourceWithStatus("available") -} - -// provideCacheCluster returns pointer to CacheCluster -func provideCacheCluster() *svcsdk.CacheCluster { - return &svcsdk.CacheCluster{} -} - -// provideResource returns pointer to resource -func provideResourceWithStatus(rgStatus string) *resource { - return &resource{ - ko: &svcapitypes.ReplicationGroup{ - Status: svcapitypes.ReplicationGroupStatus{ - Status: &rgStatus, - }, - }, - } -} - -// provideNodeGroups provides NodeGroups array for given node IDs -func provideNodeGroups(IDs ...string) []*svcapitypes.NodeGroup { - return provideNodeGroupsWithReplicas(3, IDs...) -} - -// provideMemberClusters returns the member cluster Ids from given node groups -func provideMemberClusters(nodeGroups []*svcapitypes.NodeGroup) []*string { - if nodeGroups == nil { - return nil - } - memberClusters := []*string{} - for _, nodeGroup := range nodeGroups { - for _, member := range nodeGroup.NodeGroupMembers { - cacheClusterId := *member.CacheClusterID - memberClusters = append(memberClusters, &cacheClusterId) - } - } - return memberClusters -} - -// provideNodeGroupsWithReplicas provides NodeGroups array for given node IDs -// each node group is populated with supplied numbers of replica nodes and a primary node. 
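// For example, provideNodeGroupsWithReplicas(2, "0001", "0002") returns two node
// groups whose NodeGroupMembers each hold one primary plus two replica members.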
-func provideNodeGroupsWithReplicas(replicasCount int, IDs ...string) []*svcapitypes.NodeGroup { - nodeGroups := []*svcapitypes.NodeGroup{} - for _, ID := range IDs { - nodeId := ID - nodeGroups = append(nodeGroups, &svcapitypes.NodeGroup{ - NodeGroupID: &nodeId, - NodeGroupMembers: provideNodeGroupMembers(&nodeId, replicasCount+1), // primary node + replicas - PrimaryEndpoint: nil, - ReaderEndpoint: nil, - Slots: nil, - Status: nil, - }) - } - return nodeGroups -} - -// provideNodeGroupMembers returns array of NodeGroupMember (replicas and a primary node) for given shard id -func provideNodeGroupMembers(nodeID *string, membersCount int) []*svcapitypes.NodeGroupMember { - if membersCount <= 0 { - return nil - } - rolePrimary := "primary" - roleReplica := "replica" - availabilityZones := provideAvailabilityZones(*nodeID, membersCount) - - members := []*svcapitypes.NodeGroupMember{} - // primary - primary := &svcapitypes.NodeGroupMember{} - cacheClusterId := fmt.Sprintf("RG-%s-00%d", *nodeID, 1) - primary.CacheClusterID = &cacheClusterId - primary.CurrentRole = &rolePrimary - primary.PreferredAvailabilityZone = availabilityZones[0] - members = append(members, primary) - // replicas - for i := 1; i <= membersCount-1; i++ { - replica := &svcapitypes.NodeGroupMember{} - cacheClusterId := fmt.Sprintf("RG-%s-00%d", *nodeID, i+1) - replica.CacheClusterID = &cacheClusterId - replica.CacheNodeID = nodeID - replica.CurrentRole = &roleReplica - replica.PreferredAvailabilityZone = availabilityZones[i] - members = append(members, replica) - } - return members -} - -func provideNodeGroupConfiguration(IDs ...string) []*svcapitypes.NodeGroupConfiguration { - replicasCount := 3 - return provideNodeGroupConfigurationWithReplicas(replicasCount, IDs...) -} - -// provideNodeGroupConfiguration provides NodeGroupConfiguration array for given node IDs and replica count -func provideNodeGroupConfigurationWithReplicas( - replicaCount int, IDs ...string, -) []*svcapitypes.NodeGroupConfiguration { - nodeGroupConfig := []*svcapitypes.NodeGroupConfiguration{} - for _, ID := range IDs { - nodeId := ID - azCount := replicaCount + 1 // replicas + a primary node - numberOfReplicas := int64(replicaCount) - availabilityZones := provideAvailabilityZones(nodeId, azCount) - nodeGroupConfig = append(nodeGroupConfig, &svcapitypes.NodeGroupConfiguration{ - NodeGroupID: &nodeId, - PrimaryAvailabilityZone: availabilityZones[0], - ReplicaAvailabilityZones: availabilityZones[1:], - ReplicaCount: &numberOfReplicas, - Slots: nil, - }) - } - - return nodeGroupConfig -} - -// provideAvailabilityZones returns availability zones array for given nodeId -func provideAvailabilityZones(nodeId string, count int) []*string { - if count <= 0 { - return nil - } - - availabilityZones := []*string{} - for i := 1; i <= count; i++ { - az := fmt.Sprintf("%s_%s%d", nodeId, "az", i) - availabilityZones = append(availabilityZones, &az) - } - return availabilityZones -} - -// validatePayloadReshardingConfig validates given payloadReshardingConfigs against given desiredNodeGroupConfigs -// this is used for tests that are related to shard configuration (scale in/out) -func validatePayloadReshardingConfig( - desiredNodeGroupConfigs []*svcapitypes.NodeGroupConfiguration, - payloadReshardingConfigs []*svcsdk.ReshardingConfiguration, - assert *assert.Assertions, - require *require.Assertions, -) { - assert.NotNil(desiredNodeGroupConfigs) - require.NotNil(payloadReshardingConfigs) // built as provided in desired object NodeGroupConfiguration - for _, 
desiredNodeGroup := range desiredNodeGroupConfigs { - found := false - for _, payloadReshardConfig := range payloadReshardingConfigs { - require.NotNil(payloadReshardConfig.PreferredAvailabilityZones) - if *desiredNodeGroup.NodeGroupID == *payloadReshardConfig.NodeGroupId { - found = true - expectedShardAZs := []*string{desiredNodeGroup.PrimaryAvailabilityZone} - for _, expectedAZ := range desiredNodeGroup.ReplicaAvailabilityZones { - expectedShardAZs = append(expectedShardAZs, expectedAZ) - } - assert.Equal(len(expectedShardAZs), len(payloadReshardConfig.PreferredAvailabilityZones), - "Node group id %s", *desiredNodeGroup.NodeGroupID) - for i := 0; i < len(expectedShardAZs); i++ { - assert.Equal(*expectedShardAZs[i], *payloadReshardConfig.PreferredAvailabilityZones[i], - "Node group id %s", *desiredNodeGroup.NodeGroupID) - } - break - } - } - assert.True(found, "Expected node group id %s not found in payload", *desiredNodeGroup.NodeGroupID) - } - assert.Equal(len(desiredNodeGroupConfigs), len(payloadReshardingConfigs)) -} - -// validatePayloadReplicaConfig validates given payloadReplicaConfigs against given desiredNodeGroupConfigs -// this is used for tests that are related to increase/decrease replica count. -func validatePayloadReplicaConfig( - desiredNodeGroupConfigs []*svcapitypes.NodeGroupConfiguration, - payloadReplicaConfigs []*svcsdk.ConfigureShard, - assert *assert.Assertions, - require *require.Assertions, -) { - assert.NotNil(desiredNodeGroupConfigs) - require.NotNil(payloadReplicaConfigs) // built as provided in desired object NodeGroupConfiguration - for _, desiredNodeGroup := range desiredNodeGroupConfigs { - found := false - for _, payloadShard := range payloadReplicaConfigs { - require.NotNil(payloadShard.PreferredAvailabilityZones) - if *desiredNodeGroup.NodeGroupID == *payloadShard.NodeGroupId { - found = true - // validate replica count - assert.Equal(*desiredNodeGroup.ReplicaCount, *payloadShard.NewReplicaCount) - - // validate AZs - expectedShardAZs := []*string{desiredNodeGroup.PrimaryAvailabilityZone} - for _, expectedAZ := range desiredNodeGroup.ReplicaAvailabilityZones { - expectedShardAZs = append(expectedShardAZs, expectedAZ) - } - assert.Equal(len(expectedShardAZs), len(payloadShard.PreferredAvailabilityZones), - "Node group id %s", *desiredNodeGroup.NodeGroupID) - for i := 0; i < len(expectedShardAZs); i++ { - assert.Equal(*expectedShardAZs[i], *payloadShard.PreferredAvailabilityZones[i], - "Node group id %s", *desiredNodeGroup.NodeGroupID) - } - break - } - } - assert.True(found, "Expected node group id %s not found in payload", *desiredNodeGroup.NodeGroupID) - } - assert.Equal(len(desiredNodeGroupConfigs), len(payloadReplicaConfigs)) -} - -func TestDecreaseReplicaCountMock(t *testing.T) { - assert := assert.New(t) - // Setup mock API response - var mockDescription = "mock_replication_group_description" - var mockOutput svcsdk.DecreaseReplicaCountOutput - testutil.LoadFromFixture(filepath.Join("testdata", "DecreaseReplicaCountOutput.json"), &mockOutput) - mocksdkapi := &mocksvcsdkapi.ElastiCacheAPI{} - mocksdkapi.On("DecreaseReplicaCountWithContext", mock.Anything, mock.Anything).Return(&mockOutput, nil) - rm := provideResourceManagerWithMockSDKAPI(mocksdkapi) - // Tests - t.Run("MockAPI=DecreaseReplicaCount", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - res, _ := rm.decreaseReplicaCount(context.Background(), desired, latest) - assert.Equal(mockDescription, *res.ko.Spec.Description) - }) -} - -func 
TestCustomModifyReplicationGroup(t *testing.T) { - assert := assert.New(t) - // Setup - rm := provideResourceManager() - // Tests - t.Run("NoAction=NoDiff", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - var delta ackcompare.Delta - res, err := rm.CustomModifyReplicationGroup(context.TODO(), desired, latest, &delta) - assert.Nil(res) - assert.Nil(err) - }) -} - -func TestCustomModifyReplicationGroup_Unavailable(t *testing.T) { - assert := assert.New(t) - // Setup - rm := provideResourceManager() - // Tests - t.Run("UnavailableRG=Requeue", func(t *testing.T) { - desired := provideResource() - latest := provideResourceWithStatus("modifying") - var delta ackcompare.Delta - res, err := rm.CustomModifyReplicationGroup(context.TODO(), desired, latest, &delta) - assert.Nil(res) - assert.NotNil(err) - var requeueNeededAfter *requeue.RequeueNeededAfter - assert.True(errors.As(err, &requeueNeededAfter)) - }) -} - -func TestCustomModifyReplicationGroup_NodeGroup_Unvailable(t *testing.T) { - assert := assert.New(t) - // Setup - rm := provideResourceManager() - // Tests - t.Run("UnavailableNodeGroup=Requeue", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - latest.ko.Status.NodeGroups = provideNodeGroups("1001") - unavailableStatus := "modifying" - for _, nodeGroup := range latest.ko.Status.NodeGroups { - nodeGroup.Status = &unavailableStatus - } - var delta ackcompare.Delta - res, err := rm.CustomModifyReplicationGroup(context.TODO(), desired, latest, &delta) - assert.Nil(res) - assert.NotNil(err) - var requeueNeededAfter *requeue.RequeueNeededAfter - assert.True(errors.As(err, &requeueNeededAfter)) - }) -} - -func TestCustomModifyReplicationGroup_NodeGroup_MemberClusters_mismatch(t *testing.T) { - assert := assert.New(t) - require := require.New(t) - // Setup - rm := provideResourceManager() - // Tests - t.Run("nodeGroup_memberClustersMismatch=Diff", func(t *testing.T) { - desired := provideResource() - desired.ko.Status.NodeGroups = provideNodeGroups("1001", "1002") - latest := provideResource() - latest.ko.Status.NodeGroups = provideNodeGroups("1001", "1002") - latest.ko.Status.MemberClusters = provideMemberClusters(latest.ko.Status.NodeGroups) - surplusMemberCluster := "RG-Surplus-Member-Cluster" - latest.ko.Status.MemberClusters = append(latest.ko.Status.MemberClusters, &surplusMemberCluster) - availableStatus := "available" - for _, nodeGroup := range latest.ko.Status.NodeGroups { - nodeGroup.Status = &availableStatus - } - var delta ackcompare.Delta - require.NotNil(latest.ko.Status.MemberClusters) - res, err := rm.CustomModifyReplicationGroup(context.TODO(), desired, latest, &delta) - assert.Nil(res) - assert.NotNil(err) // due to surplus member cluster - var requeueNeededAfter *requeue.RequeueNeededAfter - assert.True(errors.As(err, &requeueNeededAfter)) - }) -} - -func TestCustomModifyReplicationGroup_NodeGroup_available(t *testing.T) { - assert := assert.New(t) - require := require.New(t) - // Setup - rm := provideResourceManager() - // Tests - t.Run("availableNodeGroup=NoDiff", func(t *testing.T) { - desired := provideResource() - desired.ko.Status.NodeGroups = provideNodeGroups("1001") - latest := provideResource() - latest.ko.Status.NodeGroups = provideNodeGroups("1001") - latest.ko.Status.MemberClusters = provideMemberClusters(latest.ko.Status.NodeGroups) - availableStatus := "available" - for _, nodeGroup := range latest.ko.Status.NodeGroups { - nodeGroup.Status = &availableStatus - } - var delta ackcompare.Delta - 
require.NotNil(latest.ko.Status.MemberClusters) - res, err := rm.CustomModifyReplicationGroup(context.TODO(), desired, latest, &delta) - assert.Nil(res) - assert.Nil(err) - }) -} - -func TestCustomModifyReplicationGroup_Scaling_Async_Rollback(t *testing.T) { - assert := assert.New(t) - t.Run("ScaleDownRollback=Diff", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - rgId := "RGID" - desired.ko.Spec.ReplicationGroupID = &rgId - latest.ko.Spec.ReplicationGroupID = &rgId - desired.ko.ObjectMeta.Annotations = make(map[string]string) - desiredCacheNodeType := "cache.t3.micro" - currentCacheNodeType := "cache.m5.large" - desired.ko.Annotations[AnnotationLastRequestedCNT] = desiredCacheNodeType - desired.ko.Spec.CacheNodeType = &desiredCacheNodeType - - rm := provideResourceManager() - - var delta ackcompare.Delta - delta.Add("Spec.CacheNodeType", currentCacheNodeType, desiredCacheNodeType) - - res, err := rm.CustomModifyReplicationGroup(context.TODO(), desired, latest, &delta) - assert.Nil(res) - assert.NotNil(err) - assert.Equal("InvalidParameterCombination: Cannot update CacheNodeType, Please refer to Events for more details", err.Error()) - }) - - t.Run("ScaleInRollback=Diff", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - rgId := "RGID" - desired.ko.Spec.ReplicationGroupID = &rgId - latest.ko.Spec.ReplicationGroupID = &rgId - desired.ko.ObjectMeta.Annotations = make(map[string]string) - - desiredNodeGroup := int64(4) - currentNodeGroup := int64(3) - desired.ko.Annotations[AnnotationLastRequestedNNG] = strconv.Itoa(int(desiredNodeGroup)) - desired.ko.Spec.NumNodeGroups = &desiredNodeGroup - rm := provideResourceManager() - - var delta ackcompare.Delta - delta.Add("Spec.NumNodeGroups", currentNodeGroup, desiredNodeGroup) - - res, err := rm.CustomModifyReplicationGroup(context.TODO(), desired, latest, &delta) - assert.Nil(res) - assert.NotNil(err) - assert.Equal("InvalidParameterCombination: Cannot update NodeGroups, Please refer to Events for more details", err.Error()) - }) -} -func TestCustomModifyReplicationGroup_ScaleUpAndDown_And_Resharding(t *testing.T) { - assert := assert.New(t) - - // Tests - t.Run("ScaleInAndScaleUp=Diff", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - rgId := "RGID" - desired.ko.Spec.ReplicationGroupID = &rgId - latest.ko.Spec.ReplicationGroupID = &rgId - desired.ko.ObjectMeta.Annotations = make(map[string]string) - desiredCacheNodeType := "cache.m5.large" - currentCacheNodeType := "cache.t3.small" - desired.ko.Annotations[AnnotationLastRequestedCNT] = currentCacheNodeType - desired.ko.Spec.CacheNodeType = &desiredCacheNodeType - - desiredNodeGroup := int64(4) - currentNodeGroup := int64(3) - desired.ko.Annotations[AnnotationLastRequestedNNG] = strconv.Itoa(int(currentNodeGroup)) - desired.ko.Spec.NumNodeGroups = &desiredNodeGroup - allowedNodeModifications := []*string{&desiredCacheNodeType} - desired.ko.Status.AllowedScaleUpModifications = allowedNodeModifications - mocksdkapi := &mocksvcsdkapi.ElastiCacheAPI{} - rm := provideResourceManagerWithMockSDKAPI(mocksdkapi) - - var delta ackcompare.Delta - delta.Add("Spec.CacheNodeType", currentCacheNodeType, desiredCacheNodeType) - delta.Add("Spec.NumNodeGroups", currentNodeGroup, desiredNodeGroup) - - res, err := rm.CustomModifyReplicationGroup(context.TODO(), desired, latest, &delta) - assert.Nil(res) - assert.Nil(err) - assert.Empty(mocksdkapi.Calls) - }) - - t.Run("ScaleOutAndScaleDown=Diff", func(t 
*testing.T) { - desired := provideResource() - latest := provideResource() - rgId := "RGID" - desired.ko.Spec.ReplicationGroupID = &rgId - latest.ko.Spec.ReplicationGroupID = &rgId - - mocksdkapi := &mocksvcsdkapi.ElastiCacheAPI{} - rm := provideResourceManagerWithMockSDKAPI(mocksdkapi) - - var delta ackcompare.Delta - delta.Add("Spec.CacheNodeType", "cache.t3.small", "cache.t3.micro") - desired.ko.ObjectMeta.Annotations = make(map[string]string) - cacheNodeType := "cache.t3.small" - desired.ko.Annotations[AnnotationLastRequestedCNT] = "cache.t3.micro" - desired.ko.Spec.CacheNodeType = &cacheNodeType - oldshardCount := int64(4) - newShardCount := int64(10) - delta.Add("Spec.NumNodeGroups", oldshardCount, newShardCount) - desired.ko.Spec.NumNodeGroups = &newShardCount - latest.ko.Spec.NumNodeGroups = &oldshardCount - desired.ko.Annotations[AnnotationLastRequestedNNG] = strconv.Itoa(int(oldshardCount)) - mocksdkapi.On("ModifyReplicationGroupShardConfigurationWithContext", mock.Anything, mock.Anything).Return(nil, - awserr.New("Invalid", "Invalid error", nil)) - res, err := rm.CustomModifyReplicationGroup(context.TODO(), desired, latest, &delta) - assert.Nil(res) - assert.NotNil(err) - assert.NotEmpty(mocksdkapi.Calls) - assert.Equal("ModifyReplicationGroupShardConfigurationWithContext", mocksdkapi.Calls[0].Method) - }) - -} - -// TestReplicaCountDifference tests scenarios to check if desired, latest replica count -// configurations differ -func TestReplicaCountDifference(t *testing.T) { - assert := assert.New(t) - require := require.New(t) - // setup - rm := provideResourceManager() - // Tests - t.Run("NoDiff=DesiredNil_LatestNil", func(t *testing.T) { - // neither desired nor latest have either ReplicasPerNodeGroup nor NodeGroupConfiguration set - desired := provideResource() - latest := provideResource() - diff := rm.replicaCountDifference(desired, latest) - assert.Nil(desired.ko.Spec.ReplicasPerNodeGroup) - assert.Nil(desired.ko.Spec.NodeGroupConfiguration) - assert.Nil(latest.ko.Spec.ReplicasPerNodeGroup) - assert.Nil(latest.ko.Spec.NodeGroupConfiguration) - assert.Equal(0, diff) - }) - t.Run("NoDiff=DesiredNonNil_LatestNonNil", func(t *testing.T) { - // both desired and latest have ReplicasPerNodeGroup set (but not NodeGroupConfiguration) - desired := provideResource() - latest := provideResource() - desiredReplicaCount := int64(2) - latestReplicaCount := int64(2) - desired.ko.Spec.ReplicasPerNodeGroup = &desiredReplicaCount - latest.ko.Spec.ReplicasPerNodeGroup = &latestReplicaCount - diff := rm.replicaCountDifference(desired, latest) - assert.Nil(desired.ko.Spec.NodeGroupConfiguration) - assert.Nil(latest.ko.Spec.NodeGroupConfiguration) - assert.NotNil(desired.ko.Spec.ReplicasPerNodeGroup) - assert.NotNil(latest.ko.Spec.ReplicasPerNodeGroup) - assert.Equal(0, diff) - }) - t.Run("NoDiff=Spec.NodeGroupConfiguration_Status.NodeGroups", func(t *testing.T) { - // no 'ReplicasPerNodeGroup' in spec but spec has 'NodeGroupConfiguration' with replicas details - // status has matching number of replicas - desired := provideResource() - latest := provideResource() - replicaCount := 2 - desired.ko.Spec.ReplicasPerNodeGroup = nil - desired.ko.Spec.NodeGroupConfiguration = provideNodeGroupConfigurationWithReplicas(replicaCount, "1001", "1002") - latest.ko.Status.NodeGroups = provideNodeGroupsWithReplicas(replicaCount, "1001") - diff := rm.replicaCountDifference(desired, latest) - assert.NotNil(desired.ko.Spec.NodeGroupConfiguration) - for _, nodeGroupConfig := range 
desired.ko.Spec.NodeGroupConfiguration { - require.NotNil(nodeGroupConfig.ReplicaCount) - assert.Equal(replicaCount, int(*nodeGroupConfig.ReplicaCount)) - } - assert.NotNil(latest.ko.Status.NodeGroups) - for _, nodeGroup := range latest.ko.Status.NodeGroups { - require.NotNil(nodeGroup.NodeGroupMembers) - assert.Equal(replicaCount+1, len(nodeGroup.NodeGroupMembers)) // replica + primary node - } - assert.Equal(0, diff) - }) - t.Run("NoDiff=Prefer_Spec.ReplicasPerNodeGroup", func(t *testing.T) { - // prefer 'ReplicasPerNodeGroup over 'NodeGroupConfiguration' in desired configuration: - // 'ReplicasPerNodeGroup' in desired spec as well as 'NodeGroupConfiguration' with different desired replicas details. - // latest status has matching number of replicas with desired 'ReplicasPerNodeGroup' - desired := provideResource() - latest := provideResource() - desiredReplicaCount := int64(2) - latestReplicaCount := int64(2) - desired.ko.Spec.ReplicasPerNodeGroup = &desiredReplicaCount - latest.ko.Spec.ReplicasPerNodeGroup = &latestReplicaCount - desired.ko.Spec.NodeGroupConfiguration = provideNodeGroupConfigurationWithReplicas(int(desiredReplicaCount)+1, "1001", "1002") - latest.ko.Status.NodeGroups = provideNodeGroupsWithReplicas(int(latestReplicaCount), "1001") - - diff := rm.replicaCountDifference(desired, latest) - assert.NotNil(desired.ko.Spec.NodeGroupConfiguration) - for _, nodeGroupConfig := range desired.ko.Spec.NodeGroupConfiguration { - require.NotNil(nodeGroupConfig.ReplicaCount) - assert.Equal(int(desiredReplicaCount)+1, int(*nodeGroupConfig.ReplicaCount)) - } - assert.NotNil(latest.ko.Status.NodeGroups) - for _, nodeGroup := range latest.ko.Status.NodeGroups { - require.NotNil(nodeGroup.NodeGroupMembers) - assert.Equal(int(desiredReplicaCount)+1, len(nodeGroup.NodeGroupMembers)) // replica + primary node - } - assert.Equal(0, diff) - }) - t.Run("DiffIncreaseReplica=Spec.ReplicasPerNodeGroup", func(t *testing.T) { - // desired ReplicasPerNodeGroup is greater than latest.ReplicasPerNodeGroup, NodeGroupConfiguration nil - desired := provideResource() - latest := provideResource() - desiredReplicaCount := int64(2) - latestReplicaCount := int64(1) - desired.ko.Spec.ReplicasPerNodeGroup = &desiredReplicaCount - latest.ko.Spec.ReplicasPerNodeGroup = &latestReplicaCount - diff := rm.replicaCountDifference(desired, latest) - assert.Nil(desired.ko.Spec.NodeGroupConfiguration) - assert.True(diff > 0) // desired replicas > latest replicas - }) - t.Run("DiffIncreaseReplica=Spec.NodeGroupConfiguration_Status.NodeGroups", func(t *testing.T) { - // no 'ReplicasPerNodeGroup' in spec but spec has 'NodeGroupConfiguration' with replicas details - // status has matching number of replicas - desired := provideResource() - latest := provideResource() - desiredReplicaCount := 2 - latestReplicaCount := 1 - desired.ko.Spec.ReplicasPerNodeGroup = nil - desired.ko.Spec.NodeGroupConfiguration = provideNodeGroupConfigurationWithReplicas(desiredReplicaCount, "1001", "1002") - latest.ko.Status.NodeGroups = provideNodeGroupsWithReplicas(latestReplicaCount, "1001") - diff := rm.replicaCountDifference(desired, latest) - assert.NotNil(desired.ko.Spec.NodeGroupConfiguration) - for _, nodeGroupConfig := range desired.ko.Spec.NodeGroupConfiguration { - require.NotNil(nodeGroupConfig.ReplicaCount) - assert.Equal(desiredReplicaCount, int(*nodeGroupConfig.ReplicaCount)) - } - assert.NotNil(latest.ko.Status.NodeGroups) - for _, nodeGroup := range latest.ko.Status.NodeGroups { - require.NotNil(nodeGroup.NodeGroupMembers) - 
assert.Equal(latestReplicaCount+1, len(nodeGroup.NodeGroupMembers)) // replicas + primary node - } - assert.True(diff > 0) // desired replicas > latest replicas - }) - t.Run("DiffDecreaseReplica=Spec.ReplicasPerNodeGroup", func(t *testing.T) { - // desired ReplicasPerNodeGroup is lesser than latest.ReplicasPerNodeGroup, NodeGroupConfiguration nil - desired := provideResource() - latest := provideResource() - desiredReplicaCount := int64(2) - latestReplicaCount := int64(3) - desired.ko.Spec.ReplicasPerNodeGroup = &desiredReplicaCount - latest.ko.Spec.ReplicasPerNodeGroup = &latestReplicaCount - diff := rm.replicaCountDifference(desired, latest) - assert.Nil(desired.ko.Spec.NodeGroupConfiguration) - assert.True(diff < 0) // desired replicas < latest replicas - }) - t.Run("DiffDecreaseReplica=Spec.NodeGroupConfiguration_Status.NodeGroups", func(t *testing.T) { - // no 'ReplicasPerNodeGroup' in spec but spec has 'NodeGroupConfiguration' with replicas details - // status has matching number of replicas - desired := provideResource() - latest := provideResource() - desiredReplicaCount := 2 - latestReplicaCount := 3 - desired.ko.Spec.ReplicasPerNodeGroup = nil - desired.ko.Spec.NodeGroupConfiguration = provideNodeGroupConfigurationWithReplicas(desiredReplicaCount, "1001", "1002") - latest.ko.Status.NodeGroups = provideNodeGroupsWithReplicas(latestReplicaCount, "1001") - diff := rm.replicaCountDifference(desired, latest) - assert.NotNil(desired.ko.Spec.NodeGroupConfiguration) - for _, nodeGroupConfig := range desired.ko.Spec.NodeGroupConfiguration { - require.NotNil(nodeGroupConfig.ReplicaCount) - assert.Equal(desiredReplicaCount, int(*nodeGroupConfig.ReplicaCount)) - } - assert.NotNil(latest.ko.Status.NodeGroups) - for _, nodeGroup := range latest.ko.Status.NodeGroups { - require.NotNil(nodeGroup.NodeGroupMembers) - assert.Equal(latestReplicaCount+1, len(nodeGroup.NodeGroupMembers)) // replicas + primary node - } - assert.True(diff < 0) // desired replicas < latest replicas - }) -} - -// TestNewIncreaseReplicaCountRequestPayload tests scenarios to -// check request payload by providing desired spec details for increase replica count. -func TestNewIncreaseReplicaCountRequestPayload(t *testing.T) { - assert := assert.New(t) - require := require.New(t) - // setup - rm := provideResourceManager() - // Tests - t.Run("EmptyPayload=NoSpec", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - payload, err := rm.newIncreaseReplicaCountRequestPayload(desired, latest) - require.NotNil(payload) - require.NotNil(payload.ApplyImmediately) - assert.True(*payload.ApplyImmediately) - assert.Nil(payload.ReplicationGroupId) - assert.Nil(payload.NewReplicaCount) - assert.Nil(payload.ReplicaConfiguration) - assert.Nil(err) - }) - t.Run("Payload=Spec", func(t *testing.T) { - desired := provideResource() - replicationGroupID := "test-rg" - desired.ko.Spec.ReplicationGroupID = &replicationGroupID - desiredReplicaCount := int64(2) - desired.ko.Spec.ReplicasPerNodeGroup = &desiredReplicaCount - desired.ko.Spec.NodeGroupConfiguration = provideNodeGroupConfigurationWithReplicas( - int(desiredReplicaCount), "1001", "1002") - // expected: only node groups that are present on server are included in payload. 
- expectedPayloadReplicaConfiguration := provideNodeGroupConfigurationWithReplicas( - int(desiredReplicaCount), "1001") - latest := provideResource() - latestReplicaCount := int64(3) - latest.ko.Status.NodeGroups = provideNodeGroupsWithReplicas( - int(latestReplicaCount), "1001", "1003") - payload, err := rm.newIncreaseReplicaCountRequestPayload(desired, latest) - require.NotNil(payload) - require.NotNil(payload.ApplyImmediately) - assert.True(*payload.ApplyImmediately) - assert.Equal(replicationGroupID, *payload.ReplicationGroupId) - assert.Equal(desiredReplicaCount, *payload.NewReplicaCount) - assert.NotNil(payload.ReplicaConfiguration) - validatePayloadReplicaConfig(expectedPayloadReplicaConfiguration, payload.ReplicaConfiguration, assert, require) - assert.Nil(err) - }) -} - -// TestNewDecreaseReplicaCountRequestPayload tests scenarios to -// check request payload by providing desired spec details for decrease replica count. -func TestNewDecreaseReplicaCountRequestPayload(t *testing.T) { - assert := assert.New(t) - require := require.New(t) - // setup - rm := provideResourceManager() - // Tests - t.Run("EmptyPayload=NoSpec", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - payload, err := rm.newDecreaseReplicaCountRequestPayload(desired, latest) - require.NotNil(payload) - require.NotNil(payload.ApplyImmediately) - assert.True(*payload.ApplyImmediately) - assert.Nil(payload.ReplicationGroupId) - assert.Nil(payload.NewReplicaCount) - assert.Nil(payload.ReplicaConfiguration) - assert.Nil(err) - }) - t.Run("Payload=Spec_Server", func(t *testing.T) { - desired := provideResource() - replicationGroupID := "test-rg" - desired.ko.Spec.ReplicationGroupID = &replicationGroupID - desiredReplicaCount := int64(2) - desired.ko.Spec.ReplicasPerNodeGroup = &desiredReplicaCount - desired.ko.Spec.NodeGroupConfiguration = provideNodeGroupConfigurationWithReplicas( - int(desiredReplicaCount), "1001", "1002") - // expected: only node groups that are present on server are included in payload. - expectedPayloadReplicaConfiguration := provideNodeGroupConfigurationWithReplicas( - int(desiredReplicaCount), "1001") - latest := provideResource() - latestReplicaCount := int64(1) - latest.ko.Status.NodeGroups = provideNodeGroupsWithReplicas( - int(latestReplicaCount), "1001") - payload, err := rm.newDecreaseReplicaCountRequestPayload(desired, latest) - require.NotNil(payload) - require.NotNil(payload.ApplyImmediately) - assert.True(*payload.ApplyImmediately) - assert.Equal(replicationGroupID, *payload.ReplicationGroupId) - assert.Equal(desiredReplicaCount, *payload.NewReplicaCount) - assert.NotNil(payload.ReplicaConfiguration) - validatePayloadReplicaConfig(expectedPayloadReplicaConfiguration, payload.ReplicaConfiguration, assert, require) - assert.Nil(err) - }) -} - -// TestShardConfigurationsDiffer tests scenarios to check if desired, latest shards -// configurations differ. 
-func TestShardConfigurationsDiffer(t *testing.T) { - assert := assert.New(t) - // setup - rm := provideResourceManager() - // Tests - t.Run("NoDiff=NoSpec_NoStatus", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - differ := rm.shardConfigurationsDiffer(desired, latest) - assert.False(differ) - }) - t.Run("NoDiff=NoSpec_Status.NodeGroups", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - latest.ko.Status.NodeGroups = provideNodeGroups("1001") - differ := rm.shardConfigurationsDiffer(desired, latest) - assert.False(differ) - }) - t.Run("Diff=Spec.NumNodeGroups_NoStatus", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - desiredShards := int64(2) - desired.ko.Spec.NumNodeGroups = &desiredShards - differ := rm.shardConfigurationsDiffer(desired, latest) - assert.True(differ) - }) - t.Run("Diff=Spec.NodeGroupConfiguration_NoStatus", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - desired.ko.Spec.NodeGroupConfiguration = provideNodeGroupConfiguration("1001") - differ := rm.shardConfigurationsDiffer(desired, latest) - assert.True(differ) - }) - t.Run("NoDiff=Spec.NodeGroupConfiguration_Status.NodeGroups", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - desired.ko.Spec.NodeGroupConfiguration = provideNodeGroupConfiguration("1001") - latest.ko.Status.NodeGroups = provideNodeGroups("1001") - differ := rm.shardConfigurationsDiffer(desired, latest) - assert.False(differ) - }) - t.Run("Diff=ScaleIn_Spec.NodeGroupConfiguration_Status.NodeGroups", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - desired.ko.Spec.NodeGroupConfiguration = provideNodeGroupConfiguration("1001", "1002") - latest.ko.Status.NodeGroups = provideNodeGroups("1001", "1002", "1003") - differ := rm.shardConfigurationsDiffer(desired, latest) - assert.True(differ) - }) - t.Run("Diff=ScaleOut_Spec.NodeGroupConfiguration_Status.NodeGroups", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - desired.ko.Spec.NodeGroupConfiguration = provideNodeGroupConfiguration("1001", "1002") - latest.ko.Status.NodeGroups = provideNodeGroups("1001") - differ := rm.shardConfigurationsDiffer(desired, latest) - assert.True(differ) - }) - t.Run("NoDiff=Spec.NumNodeGroups_Status.NodeGroups", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - desiredShards := int64(1) - desired.ko.Spec.NumNodeGroups = &desiredShards - latest.ko.Status.NodeGroups = provideNodeGroups("1001") - differ := rm.shardConfigurationsDiffer(desired, latest) - assert.False(differ) - }) - t.Run("Diff=Spec.NumNodeGroups_Status.NodeGroups", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - desiredShards := int64(2) - desired.ko.Spec.NumNodeGroups = &desiredShards - latest.ko.Status.NodeGroups = provideNodeGroups("1001") - differ := rm.shardConfigurationsDiffer(desired, latest) - assert.True(differ) - }) - - t.Run("NoDiff=Prefer_Spec.NumNodeGroups", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - desiredShards := int64(2) - desired.ko.Spec.NumNodeGroups = &desiredShards - desired.ko.Spec.NodeGroupConfiguration = provideNodeGroupConfiguration("1001", "1002", "1003") - latest.ko.Status.NodeGroups = provideNodeGroups("1001", "1002") - differ := rm.shardConfigurationsDiffer(desired, latest) - assert.False(differ) - }) -} - -// 
TestNewUpdateShardConfigurationRequestPayload tests scenarios to -// check request payload by providing desired, latest details -func TestNewUpdateShardConfigurationRequestPayload(t *testing.T) { - assert := assert.New(t) - require := require.New(t) - // setup - rm := provideResourceManager() - // Tests - t.Run("EmptyPayload=NoSpec_NoStatus", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - payload, err := rm.newUpdateShardConfigurationRequestPayload(desired, latest) - require.NotNil(payload) - require.NotNil(payload.ApplyImmediately) - assert.True(*payload.ApplyImmediately) - assert.Nil(payload.ReplicationGroupId) - assert.Nil(payload.NodeGroupCount) - assert.Nil(payload.ReshardingConfiguration) - assert.Nil(payload.NodeGroupsToRetain) - assert.Nil(payload.NodeGroupsToRemove) - assert.Nil(err) - }) - t.Run("EmptyPayload=NoSpec_Status.NodeGroups", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - latest.ko.Status.NodeGroups = provideNodeGroups("1001") - payload, err := rm.newUpdateShardConfigurationRequestPayload(desired, latest) - require.NotNil(payload) - require.NotNil(payload.ApplyImmediately) - assert.True(*payload.ApplyImmediately) - assert.Nil(payload.ReplicationGroupId) - assert.Nil(payload.NodeGroupCount) - assert.Nil(payload.ReshardingConfiguration) - assert.Nil(payload.NodeGroupsToRetain) - assert.Nil(payload.NodeGroupsToRemove) - assert.Nil(err) - }) - t.Run("ScaleOutPayload=Prefer_Spec.NumNodeGroups_NoStatus", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - desiredShards := int64(2) - desired.ko.Spec.NumNodeGroups = &desiredShards - desired.ko.Spec.NodeGroupConfiguration = provideNodeGroupConfiguration("1001", "1002", "1003") - payload, err := rm.newUpdateShardConfigurationRequestPayload(desired, latest) - require.NotNil(payload) - require.NotNil(payload.ApplyImmediately) - assert.True(*payload.ApplyImmediately) - require.NotNil(payload.NodeGroupCount) - assert.Equal(*desired.ko.Spec.NumNodeGroups, *payload.NodeGroupCount) // preferred NumNodeGroups over len(NodeGroupConfiguration) - require.NotNil(payload.ReshardingConfiguration) // built as provided in desired object NodeGroupConfiguration - assert.Equal(len(desired.ko.Spec.NodeGroupConfiguration), len(payload.ReshardingConfiguration)) - assert.Nil(payload.NodeGroupsToRetain) - assert.Nil(payload.NodeGroupsToRemove) - assert.Nil(err) - }) - t.Run("ScaleOutPayload=Computed_Spec.NodeGroupConfiguration_NoStatus", func(t *testing.T) { - desired := provideResource() - desired.ko.Spec.NodeGroupConfiguration = provideNodeGroupConfiguration("1001", "1002", "1003") - latest := provideResource() - payload, err := rm.newUpdateShardConfigurationRequestPayload(desired, latest) - require.NotNil(payload) - require.NotNil(payload.ApplyImmediately) - assert.True(*payload.ApplyImmediately) - require.NotNil(payload.NodeGroupCount) - assert.Equal(int64(len(desired.ko.Spec.NodeGroupConfiguration)), *payload.NodeGroupCount) - require.NotNil(payload.ReshardingConfiguration) // increase scenario as no-status - assert.Equal(len(desired.ko.Spec.NodeGroupConfiguration), len(payload.ReshardingConfiguration)) - assert.Nil(payload.NodeGroupsToRetain) - assert.Nil(payload.NodeGroupsToRemove) - assert.Nil(err) - }) - t.Run("ScaleOutPayload=Prefer_Spec.NumNodeGroups_Status.NodeGroups", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - replicationGroupID := "test-rg" - desired.ko.Spec.ReplicationGroupID = 
&replicationGroupID - desiredShards := int64(2) - desired.ko.Spec.NumNodeGroups = &desiredShards - desired.ko.Spec.NodeGroupConfiguration = provideNodeGroupConfiguration("1001", "1002", "1003") - latest.ko.Status.NodeGroups = provideNodeGroups("1001") - payload, err := rm.newUpdateShardConfigurationRequestPayload(desired, latest) - require.NotNil(payload) - require.NotNil(payload.ApplyImmediately) - assert.True(*payload.ApplyImmediately) - assert.Equal(*desired.ko.Spec.ReplicationGroupID, *payload.ReplicationGroupId) - require.NotNil(payload.NodeGroupCount) - assert.Equal(*desired.ko.Spec.NumNodeGroups, *payload.NodeGroupCount) - validatePayloadReshardingConfig(desired.ko.Spec.NodeGroupConfiguration, payload.ReshardingConfiguration, assert, require) - assert.Nil(payload.NodeGroupsToRetain) - assert.Nil(payload.NodeGroupsToRemove) - assert.Nil(err) - }) - t.Run("ScaleOutPayload=Spec.NodeGroupConfiguration_Status.NodeGroups", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - replicationGroupID := "test-rg" - desired.ko.Spec.ReplicationGroupID = &replicationGroupID - desired.ko.Spec.NodeGroupConfiguration = provideNodeGroupConfiguration("1001", "1002", "1003") - latest.ko.Status.NodeGroups = provideNodeGroups("1001") - payload, err := rm.newUpdateShardConfigurationRequestPayload(desired, latest) - require.NotNil(payload) - require.NotNil(payload.ApplyImmediately) - assert.True(*payload.ApplyImmediately) - assert.Equal(*desired.ko.Spec.ReplicationGroupID, *payload.ReplicationGroupId) - require.NotNil(payload.NodeGroupCount) - assert.Equal(int64(len(desired.ko.Spec.NodeGroupConfiguration)), *payload.NodeGroupCount) - require.NotNil(payload.ReshardingConfiguration) - validatePayloadReshardingConfig(desired.ko.Spec.NodeGroupConfiguration, payload.ReshardingConfiguration, assert, require) - assert.Nil(payload.NodeGroupsToRetain) - assert.Nil(payload.NodeGroupsToRemove) - assert.Nil(err) - }) - t.Run("ScaleInPayload=Spec.NodeGroupConfiguration_Status.NodeGroups", func(t *testing.T) { - desired := provideResource() - latest := provideResource() - replicationGroupID := "test-rg" - desired.ko.Spec.ReplicationGroupID = &replicationGroupID - desired.ko.Spec.NodeGroupConfiguration = provideNodeGroupConfiguration("1001") - latest.ko.Status.NodeGroups = provideNodeGroups("1001", "1002", "1003") - payload, err := rm.newUpdateShardConfigurationRequestPayload(desired, latest) - require.NotNil(payload) - require.NotNil(payload.ApplyImmediately) - assert.True(*payload.ApplyImmediately) - assert.Equal(*desired.ko.Spec.ReplicationGroupID, *payload.ReplicationGroupId) - require.NotNil(payload.NodeGroupCount) - assert.Equal(int64(len(desired.ko.Spec.NodeGroupConfiguration)), *payload.NodeGroupCount) - assert.Nil(payload.ReshardingConfiguration) - require.NotNil(payload.NodeGroupsToRetain) - assert.Equal(len(desired.ko.Spec.NodeGroupConfiguration), len(payload.NodeGroupsToRetain)) - for _, desiredNodeGroup := range desired.ko.Spec.NodeGroupConfiguration { - found := false - for _, nodeGroupId := range payload.NodeGroupsToRetain { - if *desiredNodeGroup.NodeGroupID == *nodeGroupId { - found = true - break - } - } - assert.True(found, "Expected node group id %s not found in payload", desiredNodeGroup.NodeGroupID) - } - assert.Nil(payload.NodeGroupsToRemove) - assert.Nil(err) - }) -} - -// TestSecurityGroupIdsDiffer tests scenarios to check if desired, latest (from cache cluster) -// security group ids configuration differs. 
-func TestSecurityGroupIdsDiffer(t *testing.T) { - assert := assert.New(t) - require := require.New(t) - // setup - rm := provideResourceManager() - // Tests - t.Run("NoDiff=NoSpec_NoStatus", func(t *testing.T) { - desiredRG := provideResource() - latestRG := provideResource() - latestCacheCluster := provideCacheCluster() - require.Nil(desiredRG.ko.Spec.SecurityGroupIDs) - require.Nil(latestCacheCluster.SecurityGroups) - differ := rm.securityGroupIdsDiffer(desiredRG, latestRG, latestCacheCluster) - assert.False(differ) - }) - t.Run("NoDiff=NoSpec_HasStatus", func(t *testing.T) { - desiredRG := provideResource() - latestRG := provideResource() - latestCacheCluster := provideCacheCluster() - latestCacheCluster.SecurityGroups = provideCacheClusterSecurityGroups("sg-001, sg-002") - require.Nil(desiredRG.ko.Spec.SecurityGroupIDs) - require.NotNil(latestCacheCluster.SecurityGroups) - differ := rm.securityGroupIdsDiffer(desiredRG, latestRG, latestCacheCluster) - assert.False(differ) - }) - t.Run("NoDiff=Spec_Status_Match", func(t *testing.T) { - desiredRG := provideResource() - sg1 := "sg-001" - sg2 := "sg-002" - desiredRG.ko.Spec.SecurityGroupIDs = []*string{&sg1, &sg2} - latestRG := provideResource() - latestCacheCluster := provideCacheCluster() - latestCacheCluster.SecurityGroups = provideCacheClusterSecurityGroups(sg2, sg1) // same but out of order - require.NotNil(desiredRG.ko.Spec.SecurityGroupIDs) - require.NotNil(latestCacheCluster.SecurityGroups) - differ := rm.securityGroupIdsDiffer(desiredRG, latestRG, latestCacheCluster) - assert.False(differ) - }) - t.Run("Diff=Spec_Status_MisMatch", func(t *testing.T) { - desiredRG := provideResource() - sg1 := "sg-001" - sg2 := "sg-002" - desiredRG.ko.Spec.SecurityGroupIDs = []*string{&sg1} - latestRG := provideResource() - latestCacheCluster := provideCacheCluster() - latestCacheCluster.SecurityGroups = provideCacheClusterSecurityGroups(sg2, sg1) // sg2 is additional - require.NotNil(desiredRG.ko.Spec.SecurityGroupIDs) - require.NotNil(latestCacheCluster.SecurityGroups) - differ := rm.securityGroupIdsDiffer(desiredRG, latestRG, latestCacheCluster) - assert.True(differ) - }) -} - -// provideNodeGroupsWithReplicas provides NodeGroups array for given node IDs -// each node group is populated with supplied numbers of replica nodes and a primary node. 
-func provideCacheClusterSecurityGroups(IDs ...string) []*svcsdk.SecurityGroupMembership { - securityGroups := []*svcsdk.SecurityGroupMembership{} - for _, ID := range IDs { - securityGroupId := ID - status := "available" - securityGroups = append(securityGroups, &svcsdk.SecurityGroupMembership{ - SecurityGroupId: &securityGroupId, - Status: &status, - }) - } - return securityGroups -} diff --git a/pkg/resource/replication_group/delta.go b/pkg/resource/replication_group/delta.go index a7ea05a3..ce61adda 100644 --- a/pkg/resource/replication_group/delta.go +++ b/pkg/resource/replication_group/delta.go @@ -71,8 +71,15 @@ func newResourceDelta( delta.Add("Spec.CacheParameterGroupName", a.ko.Spec.CacheParameterGroupName, b.ko.Spec.CacheParameterGroupName) } } - if !ackcompare.SliceStringPEqual(a.ko.Spec.CacheSecurityGroupNames, b.ko.Spec.CacheSecurityGroupNames) { + if !reflect.DeepEqual(a.ko.Spec.CacheParameterGroupRef, b.ko.Spec.CacheParameterGroupRef) { + delta.Add("Spec.CacheParameterGroupRef", a.ko.Spec.CacheParameterGroupRef, b.ko.Spec.CacheParameterGroupRef) + } + if len(a.ko.Spec.CacheSecurityGroupNames) != len(b.ko.Spec.CacheSecurityGroupNames) { delta.Add("Spec.CacheSecurityGroupNames", a.ko.Spec.CacheSecurityGroupNames, b.ko.Spec.CacheSecurityGroupNames) + } else if len(a.ko.Spec.CacheSecurityGroupNames) > 0 { + if !ackcompare.SliceStringPEqual(a.ko.Spec.CacheSecurityGroupNames, b.ko.Spec.CacheSecurityGroupNames) { + delta.Add("Spec.CacheSecurityGroupNames", a.ko.Spec.CacheSecurityGroupNames, b.ko.Spec.CacheSecurityGroupNames) + } } if ackcompare.HasNilDifference(a.ko.Spec.CacheSubnetGroupName, b.ko.Spec.CacheSubnetGroupName) { delta.Add("Spec.CacheSubnetGroupName", a.ko.Spec.CacheSubnetGroupName, b.ko.Spec.CacheSubnetGroupName) @@ -81,6 +88,9 @@ func newResourceDelta( delta.Add("Spec.CacheSubnetGroupName", a.ko.Spec.CacheSubnetGroupName, b.ko.Spec.CacheSubnetGroupName) } } + if !reflect.DeepEqual(a.ko.Spec.CacheSubnetGroupRef, b.ko.Spec.CacheSubnetGroupRef) { + delta.Add("Spec.CacheSubnetGroupRef", a.ko.Spec.CacheSubnetGroupRef, b.ko.Spec.CacheSubnetGroupRef) + } if ackcompare.HasNilDifference(a.ko.Spec.DataTieringEnabled, b.ko.Spec.DataTieringEnabled) { delta.Add("Spec.DataTieringEnabled", a.ko.Spec.DataTieringEnabled, b.ko.Spec.DataTieringEnabled) } else if a.ko.Spec.DataTieringEnabled != nil && b.ko.Spec.DataTieringEnabled != nil { @@ -109,6 +119,13 @@ func newResourceDelta( delta.Add("Spec.EngineVersion", a.ko.Spec.EngineVersion, b.ko.Spec.EngineVersion) } } + if ackcompare.HasNilDifference(a.ko.Spec.IPDiscovery, b.ko.Spec.IPDiscovery) { + delta.Add("Spec.IPDiscovery", a.ko.Spec.IPDiscovery, b.ko.Spec.IPDiscovery) + } else if a.ko.Spec.IPDiscovery != nil && b.ko.Spec.IPDiscovery != nil { + if *a.ko.Spec.IPDiscovery != *b.ko.Spec.IPDiscovery { + delta.Add("Spec.IPDiscovery", a.ko.Spec.IPDiscovery, b.ko.Spec.IPDiscovery) + } + } if ackcompare.HasNilDifference(a.ko.Spec.KMSKeyID, b.ko.Spec.KMSKeyID) { delta.Add("Spec.KMSKeyID", a.ko.Spec.KMSKeyID, b.ko.Spec.KMSKeyID) } else if a.ko.Spec.KMSKeyID != nil && b.ko.Spec.KMSKeyID != nil { @@ -116,8 +133,19 @@ func newResourceDelta( delta.Add("Spec.KMSKeyID", a.ko.Spec.KMSKeyID, b.ko.Spec.KMSKeyID) } } - if !reflect.DeepEqual(a.ko.Spec.NodeGroupConfiguration, b.ko.Spec.NodeGroupConfiguration) { + if ackcompare.HasNilDifference(a.ko.Spec.NetworkType, b.ko.Spec.NetworkType) { + delta.Add("Spec.NetworkType", a.ko.Spec.NetworkType, b.ko.Spec.NetworkType) + } else if a.ko.Spec.NetworkType != nil && b.ko.Spec.NetworkType != nil { + if 
*a.ko.Spec.NetworkType != *b.ko.Spec.NetworkType { + delta.Add("Spec.NetworkType", a.ko.Spec.NetworkType, b.ko.Spec.NetworkType) + } + } + if len(a.ko.Spec.NodeGroupConfiguration) != len(b.ko.Spec.NodeGroupConfiguration) { delta.Add("Spec.NodeGroupConfiguration", a.ko.Spec.NodeGroupConfiguration, b.ko.Spec.NodeGroupConfiguration) + } else if len(a.ko.Spec.NodeGroupConfiguration) > 0 { + if !reflect.DeepEqual(a.ko.Spec.NodeGroupConfiguration, b.ko.Spec.NodeGroupConfiguration) { + delta.Add("Spec.NodeGroupConfiguration", a.ko.Spec.NodeGroupConfiguration, b.ko.Spec.NodeGroupConfiguration) + } } if ackcompare.HasNilDifference(a.ko.Spec.NotificationTopicARN, b.ko.Spec.NotificationTopicARN) { delta.Add("Spec.NotificationTopicARN", a.ko.Spec.NotificationTopicARN, b.ko.Spec.NotificationTopicARN) @@ -140,8 +168,12 @@ func newResourceDelta( delta.Add("Spec.Port", a.ko.Spec.Port, b.ko.Spec.Port) } } - if !ackcompare.SliceStringPEqual(a.ko.Spec.PreferredCacheClusterAZs, b.ko.Spec.PreferredCacheClusterAZs) { + if len(a.ko.Spec.PreferredCacheClusterAZs) != len(b.ko.Spec.PreferredCacheClusterAZs) { delta.Add("Spec.PreferredCacheClusterAZs", a.ko.Spec.PreferredCacheClusterAZs, b.ko.Spec.PreferredCacheClusterAZs) + } else if len(a.ko.Spec.PreferredCacheClusterAZs) > 0 { + if !ackcompare.SliceStringPEqual(a.ko.Spec.PreferredCacheClusterAZs, b.ko.Spec.PreferredCacheClusterAZs) { + delta.Add("Spec.PreferredCacheClusterAZs", a.ko.Spec.PreferredCacheClusterAZs, b.ko.Spec.PreferredCacheClusterAZs) + } } if ackcompare.HasNilDifference(a.ko.Spec.PreferredMaintenanceWindow, b.ko.Spec.PreferredMaintenanceWindow) { delta.Add("Spec.PreferredMaintenanceWindow", a.ko.Spec.PreferredMaintenanceWindow, b.ko.Spec.PreferredMaintenanceWindow) @@ -164,11 +196,22 @@ func newResourceDelta( delta.Add("Spec.ReplicationGroupID", a.ko.Spec.ReplicationGroupID, b.ko.Spec.ReplicationGroupID) } } - if !ackcompare.SliceStringPEqual(a.ko.Spec.SecurityGroupIDs, b.ko.Spec.SecurityGroupIDs) { + if len(a.ko.Spec.SecurityGroupIDs) != len(b.ko.Spec.SecurityGroupIDs) { delta.Add("Spec.SecurityGroupIDs", a.ko.Spec.SecurityGroupIDs, b.ko.Spec.SecurityGroupIDs) + } else if len(a.ko.Spec.SecurityGroupIDs) > 0 { + if !ackcompare.SliceStringPEqual(a.ko.Spec.SecurityGroupIDs, b.ko.Spec.SecurityGroupIDs) { + delta.Add("Spec.SecurityGroupIDs", a.ko.Spec.SecurityGroupIDs, b.ko.Spec.SecurityGroupIDs) + } } - if !ackcompare.SliceStringPEqual(a.ko.Spec.SnapshotARNs, b.ko.Spec.SnapshotARNs) { + if !reflect.DeepEqual(a.ko.Spec.SecurityGroupRefs, b.ko.Spec.SecurityGroupRefs) { + delta.Add("Spec.SecurityGroupRefs", a.ko.Spec.SecurityGroupRefs, b.ko.Spec.SecurityGroupRefs) + } + if len(a.ko.Spec.SnapshotARNs) != len(b.ko.Spec.SnapshotARNs) { delta.Add("Spec.SnapshotARNs", a.ko.Spec.SnapshotARNs, b.ko.Spec.SnapshotARNs) + } else if len(a.ko.Spec.SnapshotARNs) > 0 { + if !ackcompare.SliceStringPEqual(a.ko.Spec.SnapshotARNs, b.ko.Spec.SnapshotARNs) { + delta.Add("Spec.SnapshotARNs", a.ko.Spec.SnapshotARNs, b.ko.Spec.SnapshotARNs) + } } if ackcompare.HasNilDifference(a.ko.Spec.SnapshotName, b.ko.Spec.SnapshotName) { delta.Add("Spec.SnapshotName", a.ko.Spec.SnapshotName, b.ko.Spec.SnapshotName) @@ -191,7 +234,9 @@ func newResourceDelta( delta.Add("Spec.SnapshotWindow", a.ko.Spec.SnapshotWindow, b.ko.Spec.SnapshotWindow) } } - if !ackcompare.MapStringStringEqual(ToACKTags(a.ko.Spec.Tags), ToACKTags(b.ko.Spec.Tags)) { + desiredACKTags, _ := convertToOrderedACKTags(a.ko.Spec.Tags) + latestACKTags, _ := convertToOrderedACKTags(b.ko.Spec.Tags) + if 
!ackcompare.MapStringStringEqual(desiredACKTags, latestACKTags) { delta.Add("Spec.Tags", a.ko.Spec.Tags, b.ko.Spec.Tags) } if ackcompare.HasNilDifference(a.ko.Spec.TransitEncryptionEnabled, b.ko.Spec.TransitEncryptionEnabled) { @@ -201,8 +246,12 @@ func newResourceDelta( delta.Add("Spec.TransitEncryptionEnabled", a.ko.Spec.TransitEncryptionEnabled, b.ko.Spec.TransitEncryptionEnabled) } } - if !ackcompare.SliceStringPEqual(a.ko.Spec.UserGroupIDs, b.ko.Spec.UserGroupIDs) { + if len(a.ko.Spec.UserGroupIDs) != len(b.ko.Spec.UserGroupIDs) { delta.Add("Spec.UserGroupIDs", a.ko.Spec.UserGroupIDs, b.ko.Spec.UserGroupIDs) + } else if len(a.ko.Spec.UserGroupIDs) > 0 { + if !ackcompare.SliceStringPEqual(a.ko.Spec.UserGroupIDs, b.ko.Spec.UserGroupIDs) { + delta.Add("Spec.UserGroupIDs", a.ko.Spec.UserGroupIDs, b.ko.Spec.UserGroupIDs) + } } modifyDelta(delta, a, b) diff --git a/pkg/resource/replication_group/delta_util.go b/pkg/resource/replication_group/delta_util.go deleted file mode 100644 index df697c84..00000000 --- a/pkg/resource/replication_group/delta_util.go +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package replication_group - -import ( - "encoding/json" - "reflect" - "regexp" - "strings" - - ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" - - svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" - "github.com/aws-controllers-k8s/elasticache-controller/pkg/common" -) - -// modifyDelta removes non-meaningful differences from the delta and adds additional differences if necessary -func modifyDelta( - delta *ackcompare.Delta, - desired *resource, - latest *resource, -) { - - if delta.DifferentAt("Spec.EngineVersion") { - if desired.ko.Spec.EngineVersion != nil && latest.ko.Spec.EngineVersion != nil { - if engineVersionsMatch(*desired.ko.Spec.EngineVersion, *latest.ko.Spec.EngineVersion) { - common.RemoveFromDelta(delta, "Spec.EngineVersion") - } - } - // TODO: handle the case of a nil difference (especially when desired EV is nil) - } - - // if server has given PreferredMaintenanceWindow a default value, no action needs to be taken - if delta.DifferentAt("Spec.PreferredMaintenanceWindow") { - if desired.ko.Spec.PreferredMaintenanceWindow == nil && latest.ko.Spec.PreferredMaintenanceWindow != nil { - common.RemoveFromDelta(delta, "Spec.PreferredMaintenanceWindow") - } - } - - // note that the comparison is actually done between desired.Spec.LogDeliveryConfigurations and - // the last requested configurations saved in annotations (as opposed to latest.Spec.LogDeliveryConfigurations) - if logDeliveryRequiresUpdate(desired) { - delta.Add("Spec.LogDeliveryConfigurations", desired.ko.Spec.LogDeliveryConfigurations, - unmarshalLastRequestedLDCs(desired)) - } - - if multiAZRequiresUpdate(desired, latest) { - delta.Add("Spec.MultiAZEnabled", desired.ko.Spec.MultiAZEnabled, latest.ko.Status.MultiAZ) - } - - if autoFailoverRequiresUpdate(desired, latest) { - delta.Add("Spec.AutomaticFailoverEnabled", 
desired.ko.Spec.AutomaticFailoverEnabled, - latest.ko.Status.AutomaticFailover) - } - - if updateRequired, current := primaryClusterIDRequiresUpdate(desired, latest); updateRequired { - delta.Add("Spec.PrimaryClusterID", desired.ko.Spec.PrimaryClusterID, *current) - } -} - -// returns true if desired and latest engine versions match and false otherwise -// precondition: both desiredEV and latestEV are non-nil -// this handles the case where only the major EV is specified, e.g. "6.x" (or similar), but the latest -// -// version shows the minor version, e.g. "6.0.5" -func engineVersionsMatch( - desiredEV string, - latestEV string, -) bool { - if desiredEV == latestEV { - return true - } - - // if the last character of desiredEV is "x", only check for a major version match - last := len(desiredEV) - 1 - if desiredEV[last:] == "x" { - // cut off the "x" and replace all occurrences of '.' with '\.' (as '.' is a special regex character) - desired := strings.Replace(desiredEV[:last], ".", "\\.", -1) - r, _ := regexp.Compile(desired + ".*") - return r.MatchString(latestEV) - } - - return false -} - -// logDeliveryRequiresUpdate retrieves the last requested configurations saved in annotations and compares them -// to the current desired configurations -func logDeliveryRequiresUpdate(desired *resource) bool { - desiredConfigs := desired.ko.Spec.LogDeliveryConfigurations - lastRequestedConfigs := unmarshalLastRequestedLDCs(desired) - return !reflect.DeepEqual(desiredConfigs, lastRequestedConfigs) -} - -// unmarshal the value found in annotations for the LogDeliveryConfigurations field requested in the last -// successful create or modify call -func unmarshalLastRequestedLDCs(desired *resource) []*svcapitypes.LogDeliveryConfigurationRequest { - var lastRequestedConfigs []*svcapitypes.LogDeliveryConfigurationRequest - - annotations := desired.ko.ObjectMeta.GetAnnotations() - if val, ok := annotations[AnnotationLastRequestedLDCs]; ok { - _ = json.Unmarshal([]byte(val), &lastRequestedConfigs) - } - - return lastRequestedConfigs -} - -// multiAZRequiresUpdate returns true if the latest multi AZ status does not yet match the -// desired state, and false otherwise -func multiAZRequiresUpdate(desired *resource, latest *resource) bool { - // no preference for multi AZ specified; no update required - if desired.ko.Spec.MultiAZEnabled == nil { - return false - } - - // API should return a non-nil value, but if it doesn't then attempt to update - if latest.ko.Status.MultiAZ == nil { - return true - } - - // true maps to "enabled"; false maps to "disabled" - // this accounts for values such as "enabling" and "disabling" - if *desired.ko.Spec.MultiAZEnabled { - return *latest.ko.Status.MultiAZ != string(svcapitypes.MultiAZStatus_enabled) - } else { - return *latest.ko.Status.MultiAZ != string(svcapitypes.MultiAZStatus_disabled) - } -} - -// autoFailoverRequiresUpdate returns true if the latest auto failover status does not yet match the -// desired state, and false otherwise -func autoFailoverRequiresUpdate(desired *resource, latest *resource) bool { - // the logic is exactly analogous to multiAZRequiresUpdate above - if desired.ko.Spec.AutomaticFailoverEnabled == nil { - return false - } - - if latest.ko.Status.AutomaticFailover == nil { - return true - } - - if *desired.ko.Spec.AutomaticFailoverEnabled { - return *latest.ko.Status.AutomaticFailover != string(svcapitypes.AutomaticFailoverStatus_enabled) - } else { - return *latest.ko.Status.AutomaticFailover != string(svcapitypes.AutomaticFailoverStatus_disabled) 
- } -} - -// primaryClusterIDRequiresUpdate retrieves the current primary cluster ID and determines whether -// an update is required. If no desired state is specified or there is an issue retrieving the -// latest state, return false, nil. Otherwise, return false or true depending on equality of -// the latest and desired states, and a non-nil pointer to the latest value -func primaryClusterIDRequiresUpdate(desired *resource, latest *resource) (bool, *string) { - if desired.ko.Spec.PrimaryClusterID == nil { - return false, nil - } - - // primary cluster ID applies to cluster mode disabled only; if API returns multiple - // or no node groups, or the provided node group is nil, there is nothing that can be done - if len(latest.ko.Status.NodeGroups) != 1 || latest.ko.Status.NodeGroups[0] == nil { - return false, nil - } - - // attempt to find primary cluster in node group. If for some reason it is not present, we - // don't have a reliable latest state, so do nothing - ng := *latest.ko.Status.NodeGroups[0] - for _, member := range ng.NodeGroupMembers { - if member == nil { - continue - } - - if member.CurrentRole != nil && *member.CurrentRole == "primary" && member.CacheClusterID != nil { - val := *member.CacheClusterID - return val != *desired.ko.Spec.PrimaryClusterID, &val - } - } - - return false, nil -} diff --git a/pkg/resource/replication_group/delta_util_test.go b/pkg/resource/replication_group/delta_util_test.go deleted file mode 100644 index a1eaafdb..00000000 --- a/pkg/resource/replication_group/delta_util_test.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. 
- -package replication_group - -import "testing" -import "github.com/stretchr/testify/require" - -func TestEngineVersionsMatch(t *testing.T) { - require := require.New(t) - - require.True(engineVersionsMatch("6.x", "6.0.5")) - require.False(engineVersionsMatch("13.x", "6.0.6")) - require.True(engineVersionsMatch("5.0.3", "5.0.3")) - require.False(engineVersionsMatch("5.0.3", "5.0.4")) -} diff --git a/pkg/resource/replication_group/descriptor.go b/pkg/resource/replication_group/descriptor.go index 34e52447..973139ee 100644 --- a/pkg/resource/replication_group/descriptor.go +++ b/pkg/resource/replication_group/descriptor.go @@ -20,6 +20,7 @@ import ( ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" rtclient "sigs.k8s.io/controller-runtime/pkg/client" k8sctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -27,7 +28,7 @@ import ( ) const ( - finalizerString = "finalizers.elasticache.services.k8s.aws/ReplicationGroup" + FinalizerString = "finalizers.elasticache.services.k8s.aws/ReplicationGroup" ) var ( @@ -43,10 +44,10 @@ var ( type resourceDescriptor struct { } -// GroupKind returns a Kubernetes metav1.GroupKind struct that describes the -// API Group and Kind of CRs described by the descriptor -func (d *resourceDescriptor) GroupKind() *metav1.GroupKind { - return &GroupKind +// GroupVersionKind returns a Kubernetes schema.GroupVersionKind struct that +// describes the API Group, Version and Kind of CRs described by the descriptor +func (d *resourceDescriptor) GroupVersionKind() schema.GroupVersionKind { + return svcapitypes.GroupVersion.WithKind(GroupKind.Kind) } // EmptyRuntimeObject returns an empty object prototype that may be used in @@ -87,8 +88,8 @@ func (d *resourceDescriptor) IsManaged( // https://github.com/kubernetes-sigs/controller-runtime/issues/994 is // fixed. This should be able to be: // - // return k8sctrlutil.ContainsFinalizer(obj, finalizerString) - return containsFinalizer(obj, finalizerString) + // return k8sctrlutil.ContainsFinalizer(obj, FinalizerString) + return containsFinalizer(obj, FinalizerString) } // Remove once https://github.com/kubernetes-sigs/controller-runtime/issues/994 @@ -117,7 +118,7 @@ func (d *resourceDescriptor) MarkManaged( // Should not happen. If it does, there is a bug in the code panic("nil RuntimeMetaObject in AWSResource") } - k8sctrlutil.AddFinalizer(obj, finalizerString) + k8sctrlutil.AddFinalizer(obj, FinalizerString) } // MarkUnmanaged removes the supplied resource from management by ACK. What @@ -132,7 +133,7 @@ func (d *resourceDescriptor) MarkUnmanaged( // Should not happen. 
If it does, there is a bug in the code panic("nil RuntimeMetaObject in AWSResource") } - k8sctrlutil.RemoveFinalizer(obj, finalizerString) + k8sctrlutil.RemoveFinalizer(obj, FinalizerString) } // MarkAdopted places descriptors on the custom resource that indicate the diff --git a/pkg/resource/replication_group/hooks.go b/pkg/resource/replication_group/hooks.go index 5b9e1235..d987645d 100644 --- a/pkg/resource/replication_group/hooks.go +++ b/pkg/resource/replication_group/hooks.go @@ -14,17 +14,61 @@ package replication_group import ( + "context" + "encoding/json" "errors" + "fmt" + "reflect" + "sort" + "strconv" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + svcsdktypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" + "github.com/aws/aws-sdk-go/aws/awserr" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" + "github.com/aws-controllers-k8s/elasticache-controller/pkg/common" + "github.com/aws-controllers-k8s/elasticache-controller/pkg/util" + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + "github.com/aws-controllers-k8s/runtime/pkg/requeue" ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" ) +const ( + // AnnotationLastRequestedLDCs is an annotation whose value is the marshaled list of pointers to + // LogDeliveryConfigurationRequest structs passed in as input to either the create or modify API called most + // recently + AnnotationLastRequestedLDCs = svcapitypes.AnnotationPrefix + "last-requested-log-delivery-configurations" + // AnnotationLastRequestedCNT is an annotation whose value is passed in as input to either the create or modify API + // called most recently + AnnotationLastRequestedCNT = svcapitypes.AnnotationPrefix + "last-requested-cache-node-type" + // AnnotationLastRequestedNNG is an annotation whose value is passed in as input to either the create or modify API + // called most recently + AnnotationLastRequestedNNG = svcapitypes.AnnotationPrefix + "last-requested-num-node-groups" + // AnnotationLastRequestedNGC is an annotation whose value is the marshaled list of pointers to + // NodeGroupConfiguration structs passed in as input to either the create or modify API called most + // recently + AnnotationLastRequestedNGC = svcapitypes.AnnotationPrefix + "last-requested-node-group-configuration" +) + var ( + condMsgCurrentlyCreating string = "replication group currently being created." condMsgCurrentlyDeleting string = "replication group currently being deleted." condMsgNoDeleteWhileModifying string = "replication group currently being modified. cannot delete." condMsgTerminalCreateFailed string = "replication group in create-failed status." 
) +const ( + statusDeleting string = "deleting" + statusModifying string = "modifying" + statusCreating string = "creating" + statusCreateFailed string = "create-failed" +) + var ( requeueWaitWhileDeleting = ackrequeue.NeededAfter( errors.New("Delete is in progress."), @@ -42,7 +86,7 @@ func isDeleting(r *resource) bool { return false } status := *r.ko.Status.Status - return status == "deleting" + return status == statusDeleting } // isModifying returns true if supplied replication group resource state is 'modifying' @@ -51,7 +95,16 @@ func isModifying(r *resource) bool { return false } status := *r.ko.Status.Status - return status == "modifying" + return status == statusModifying +} + +// isCreating returns true if supplied replication group resource state is 'creating' +func isCreating(r *resource) bool { + if r == nil || r.ko.Status.Status == nil { + return false + } + status := *r.ko.Status.Status + return status == statusCreating } // isCreateFailed returns true if supplied replication group resource state is @@ -61,5 +114,1398 @@ func isCreateFailed(r *resource) bool { return false } status := *r.ko.Status.Status - return status == "create-failed" + return status == statusCreateFailed +} + +// getTags retrieves the resource's associated tags. +func (rm *resourceManager) getTags( + ctx context.Context, + resourceARN string, +) ([]*svcapitypes.Tag, error) { + return util.GetTags(ctx, rm.sdkapi, rm.metrics, resourceARN) +} + +// syncTags keeps the resource's tags in sync. +func (rm *resourceManager) syncTags( + ctx context.Context, + desired *resource, + latest *resource, +) (err error) { + return util.SyncTags(ctx, desired.ko.Spec.Tags, latest.ko.Spec.Tags, latest.ko.Status.ACKResourceMetadata, convertToOrderedACKTags, rm.sdkapi, rm.metrics) +} + +const ( + // The number of minutes worth of events to retrieve.
+ // 14 days in minutes + eventsDuration = 20160 +) + +func (rm *resourceManager) CustomDescribeReplicationGroupsSetOutput( + ctx context.Context, + r *resource, + resp *svcsdk.DescribeReplicationGroupsOutput, + ko *svcapitypes.ReplicationGroup, +) (*svcapitypes.ReplicationGroup, error) { + if len(resp.ReplicationGroups) == 0 { + return ko, nil + } + elem := resp.ReplicationGroups[0] + rm.customSetOutput(ctx, elem, ko) + err := rm.customSetOutputSupplementAPIs(ctx, r, &elem, ko) + if err != nil { + return nil, err + } + return ko, nil +} + +func (rm *resourceManager) CustomCreateReplicationGroupSetOutput( + ctx context.Context, + r *resource, + resp *svcsdk.CreateReplicationGroupOutput, + ko *svcapitypes.ReplicationGroup, +) (*svcapitypes.ReplicationGroup, error) { + rm.customSetOutput(ctx, *resp.ReplicationGroup, ko) + rm.setAnnotationsFields(r, ko) + rm.setLastRequestedNodeGroupConfiguration(r, ko) + rm.setLastRequestedNumNodeGroups(r, ko) + return ko, nil +} + +func (rm *resourceManager) CustomModifyReplicationGroupSetOutput( + ctx context.Context, + r *resource, + resp *svcsdk.ModifyReplicationGroupOutput, + ko *svcapitypes.ReplicationGroup, +) (*svcapitypes.ReplicationGroup, error) { + rm.customSetOutput(ctx, *resp.ReplicationGroup, ko) + + // reset latest.spec.LDC to original value in desired to prevent stale data + // from the modify API being merged back into desired upon spec patching + var logDeliveryConfig []*svcapitypes.LogDeliveryConfigurationRequest + for _, ldc := range r.ko.Spec.LogDeliveryConfigurations { + logDeliveryConfig = append(logDeliveryConfig, ldc.DeepCopy()) + } + ko.Spec.LogDeliveryConfigurations = logDeliveryConfig + + // Keep the value of desired for CacheNodeType. + ko.Spec.CacheNodeType = r.ko.Spec.CacheNodeType + + rm.setAnnotationsFields(r, ko) + return ko, nil +} + +func (rm *resourceManager) customSetOutput( + ctx context.Context, + respRG svcsdktypes.ReplicationGroup, + ko *svcapitypes.ReplicationGroup, +) { + if ko.Status.Conditions == nil { + ko.Status.Conditions = []*ackv1alpha1.Condition{} + } + + allNodeGroupsAvailable := true + nodeGroupMembersCount := 0 + memberClustersCount := 0 + if respRG.NodeGroups != nil { + for _, nodeGroup := range respRG.NodeGroups { + if nodeGroup.Status == nil || *nodeGroup.Status != "available" { + allNodeGroupsAvailable = false + break + } + } + for _, nodeGroup := range respRG.NodeGroups { + if nodeGroup.NodeGroupMembers == nil { + continue + } + nodeGroupMembersCount = nodeGroupMembersCount + len(nodeGroup.NodeGroupMembers) + } + } + if respRG.MemberClusters != nil { + memberClustersCount = len(respRG.MemberClusters) + } + + rgStatus := respRG.Status + syncConditionStatus := corev1.ConditionUnknown + if rgStatus != nil { + if (*rgStatus == "available" && allNodeGroupsAvailable && memberClustersCount == nodeGroupMembersCount) || + *rgStatus == "create-failed" { + syncConditionStatus = corev1.ConditionTrue + } else { + // resource in "creating", "modifying" , "deleting", "snapshotting" + // states is being modified at server end + // thus current status is considered out of sync. 
+ syncConditionStatus = corev1.ConditionFalse + } + } + + var resourceSyncedCondition *ackv1alpha1.Condition = nil + for _, condition := range ko.Status.Conditions { + if condition.Type == ackv1alpha1.ConditionTypeResourceSynced { + resourceSyncedCondition = condition + break + } + } + if resourceSyncedCondition == nil { + resourceSyncedCondition = &ackv1alpha1.Condition{ + Type: ackv1alpha1.ConditionTypeResourceSynced, + Status: syncConditionStatus, + } + ko.Status.Conditions = append(ko.Status.Conditions, resourceSyncedCondition) + } else { + resourceSyncedCondition.Status = syncConditionStatus + } + + if rgStatus != nil && (*rgStatus == "available" || *rgStatus == "snapshotting") { + input, err := rm.newListAllowedNodeTypeModificationsPayLoad(&respRG) + + if err == nil { + resp, apiErr := rm.sdkapi.ListAllowedNodeTypeModifications(ctx, input) + rm.metrics.RecordAPICall("READ_MANY", "ListAllowedNodeTypeModifications", apiErr) + // Overwrite the values for ScaleUp and ScaleDown + if apiErr == nil { + ko.Status.AllowedScaleDownModifications = aws.StringSlice(resp.ScaleDownModifications) + ko.Status.AllowedScaleUpModifications = aws.StringSlice(resp.ScaleUpModifications) + } + } + } else { + ko.Status.AllowedScaleDownModifications = nil + ko.Status.AllowedScaleUpModifications = nil + } + + // populate status logDeliveryConfigurations struct + if respRG.LogDeliveryConfigurations != nil { + var f11 []*svcapitypes.LogDeliveryConfiguration + for _, f11iter := range respRG.LogDeliveryConfigurations { + f11elem := &svcapitypes.LogDeliveryConfiguration{} + if f11iter.DestinationDetails != nil { + f11elemf0 := &svcapitypes.DestinationDetails{} + if f11iter.DestinationDetails.CloudWatchLogsDetails != nil { + f11elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} + if f11iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f11elemf0f0.LogGroup = f11iter.DestinationDetails.CloudWatchLogsDetails.LogGroup + } + f11elemf0.CloudWatchLogsDetails = f11elemf0f0 + } + if f11iter.DestinationDetails.KinesisFirehoseDetails != nil { + f11elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} + if f11iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f11elemf0f1.DeliveryStream = f11iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream + } + f11elemf0.KinesisFirehoseDetails = f11elemf0f1 + } + f11elem.DestinationDetails = f11elemf0 + } + if f11iter.DestinationType != "" { + f11elem.DestinationType = aws.String(string(f11iter.DestinationType)) + } + if f11iter.LogFormat != "" { + f11elem.LogFormat = aws.String(string(f11iter.LogFormat)) + } + if f11iter.LogType != "" { + f11elem.LogType = aws.String(string(f11iter.LogType)) + } + if f11iter.Status != "" { + f11elem.Status = aws.String(string(f11iter.Status)) + } + if f11iter.Message != nil && *f11iter.Message != "" { + f11elem.Message = f11iter.Message + } + f11 = append(f11, f11elem) + } + ko.Status.LogDeliveryConfigurations = f11 + } else { + ko.Status.LogDeliveryConfigurations = nil + } +} + +// newListAllowedNodeTypeModificationsPayLoad returns an SDK-specific struct for the HTTP request +// payload of the ListAllowedNodeTypeModifications API call. 
+func (rm *resourceManager) newListAllowedNodeTypeModificationsPayLoad(respRG *svcsdktypes.ReplicationGroup) ( + *svcsdk.ListAllowedNodeTypeModificationsInput, error) { + res := &svcsdk.ListAllowedNodeTypeModificationsInput{} + + if respRG.ReplicationGroupId != nil { + res.ReplicationGroupId = respRG.ReplicationGroupId + } + + return res, nil +} + +func (rm *resourceManager) customSetOutputSupplementAPIs( + ctx context.Context, + r *resource, + respRG *svcsdktypes.ReplicationGroup, + ko *svcapitypes.ReplicationGroup, +) error { + events, err := rm.provideEvents(ctx, r.ko.Spec.ReplicationGroupID, 20) + if err != nil { + return err + } + ko.Status.Events = events + return nil +} + +func (rm *resourceManager) provideEvents( + ctx context.Context, + replicationGroupId *string, + maxRecords int64, +) ([]*svcapitypes.Event, error) { + input := &svcsdk.DescribeEventsInput{} + input.SourceType = svcsdktypes.SourceTypeReplicationGroup + input.SourceIdentifier = replicationGroupId + input.MaxRecords = aws.Int32(int32(maxRecords)) + input.Duration = aws.Int32(eventsDuration) + resp, err := rm.sdkapi.DescribeEvents(ctx, input) + rm.metrics.RecordAPICall("READ_MANY", "DescribeEvents-ReplicationGroup", err) + if err != nil { + rm.log.V(1).Info("Error during DescribeEvents-ReplicationGroup", "error", err) + return nil, err + } + events := []*svcapitypes.Event{} + if resp.Events != nil { + for _, respEvent := range resp.Events { + event := &svcapitypes.Event{} + if respEvent.Message != nil { + event.Message = respEvent.Message + } + if respEvent.Date != nil { + eventDate := metav1.NewTime(*respEvent.Date) + event.Date = &eventDate + } + // Not copying redundant source id (replication id) + // and source type (replication group) + // into each event object + events = append(events, event) + } + } + return events, nil +} + +// setAnnotationsFields copies the desired object's annotations, populates any +// relevant fields, and sets the latest object's annotations to this newly populated map. +// Fields that are handled by custom modify implementation are not set here. +// This should only be called upon a successful create or modify call. +func (rm *resourceManager) setAnnotationsFields( + r *resource, + ko *svcapitypes.ReplicationGroup, +) { + annotations := getAnnotationsFields(r, ko) + + rm.setLastRequestedLogDeliveryConfigurations(r, annotations) + rm.setLastRequestedCacheNodeType(r, annotations) + ko.ObjectMeta.Annotations = annotations +} + +// getAnnotationsFields return the annotations map that would be used to set the fields +func getAnnotationsFields( + r *resource, + ko *svcapitypes.ReplicationGroup) map[string]string { + + if ko.ObjectMeta.Annotations != nil { + return ko.ObjectMeta.Annotations + } + + desiredAnnotations := r.ko.ObjectMeta.GetAnnotations() + annotations := make(map[string]string) + for k, v := range desiredAnnotations { + annotations[k] = v + } + + ko.ObjectMeta.Annotations = annotations + return annotations +} + +// setLastRequestedLogDeliveryConfigurations copies desired.Spec.LogDeliveryConfigurations +// into the annotations of the object. 
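// NOTE (illustrative sketch, not part of this diff): the "last requested" annotations hold a
// JSON snapshot of what was last sent to the ElastiCache API, so a later reconcile can compare
// the current spec against it. A self-contained sketch of that round trip, using a
// hypothetical config type in place of the generated LogDeliveryConfigurationRequest:
import "encoding/json"

type logDeliveryConfig struct {
	LogType   string `json:"logType"`
	LogFormat string `json:"logFormat"`
}

func snapshotToAnnotation(annotations map[string]string, key string, cfgs []logDeliveryConfig) {
	b, err := json.Marshal(cfgs)
	if err != nil {
		annotations[key] = "null" // mirror the fallback used by the controller code here
		return
	}
	annotations[key] = string(b)
}

func snapshotFromAnnotation(annotations map[string]string, key string) []logDeliveryConfig {
	var cfgs []logDeliveryConfig
	if val, ok := annotations[key]; ok {
		_ = json.Unmarshal([]byte(val), &cfgs)
	}
	return cfgs
}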
+// r is the desired resource, and annotations is the annotations map modified by this method +func (rm *resourceManager) setLastRequestedLogDeliveryConfigurations( + r *resource, + annotations map[string]string, +) { + lastRequestedConfigs, err := json.Marshal(r.ko.Spec.LogDeliveryConfigurations) + if err != nil { + annotations[AnnotationLastRequestedLDCs] = "null" + } else { + annotations[AnnotationLastRequestedLDCs] = string(lastRequestedConfigs) + } +} + +// setLastRequestedCacheNodeType copies desired.Spec.CacheNodeType into the annotation +// of the object. +func (rm *resourceManager) setLastRequestedCacheNodeType( + r *resource, + annotations map[string]string, +) { + if r.ko.Spec.CacheNodeType != nil { + annotations[AnnotationLastRequestedCNT] = *r.ko.Spec.CacheNodeType + } +} + +// setLastRequestedNodeGroupConfiguration copies desired.spec.NodeGroupConfiguration into the +// annotation of the object +func (rm *resourceManager) setLastRequestedNodeGroupConfiguration( + r *resource, + ko *svcapitypes.ReplicationGroup, +) { + annotations := getAnnotationsFields(r, ko) + lastRequestedConfigs, err := json.Marshal(r.ko.Spec.NodeGroupConfiguration) + if err != nil { + annotations[AnnotationLastRequestedNGC] = "null" + } else { + annotations[AnnotationLastRequestedNGC] = string(lastRequestedConfigs) + } +} + +// setLastRequestedNumNodeGroups copies desired.spec.NumNodeGroups into the +// annotation of the object +func (rm *resourceManager) setLastRequestedNumNodeGroups( + r *resource, + ko *svcapitypes.ReplicationGroup, +) { + annotations := getAnnotationsFields(r, ko) + if r.ko.Spec.NumNodeGroups != nil { + annotations[AnnotationLastRequestedNNG] = strconv.Itoa(int(*r.ko.Spec.NumNodeGroups)) + } else { + annotations[AnnotationLastRequestedNNG] = "null" + } +} + +// Implements specialized logic for replication group updates. +func (rm *resourceManager) CustomModifyReplicationGroup( + ctx context.Context, + desired *resource, + latest *resource, + delta *ackcompare.Delta, +) (*resource, error) { + + latestRGStatus := latest.ko.Status.Status + + allNodeGroupsAvailable := true + nodeGroupMembersCount := 0 + if latest.ko.Status.NodeGroups != nil { + for _, nodeGroup := range latest.ko.Status.NodeGroups { + if nodeGroup.Status == nil || *nodeGroup.Status != "available" { + allNodeGroupsAvailable = false + break + } + } + for _, nodeGroup := range latest.ko.Status.NodeGroups { + if nodeGroup.NodeGroupMembers == nil { + continue + } + nodeGroupMembersCount = nodeGroupMembersCount + len(nodeGroup.NodeGroupMembers) + } + } + + if latestRGStatus == nil || *latestRGStatus != "available" || !allNodeGroupsAvailable { + return nil, requeue.NeededAfter( + errors.New("Replication Group can not be modified, it is not in 'available' state."), + requeue.DefaultRequeueAfterDuration) + } + + memberClustersCount := 0 + if latest.ko.Status.MemberClusters != nil { + memberClustersCount = len(latest.ko.Status.MemberClusters) + } + if memberClustersCount != nodeGroupMembersCount { + return nil, requeue.NeededAfter( + errors.New("Replication Group can not be modified, "+ + "need to wait for member clusters and node group members."), + requeue.DefaultRequeueAfterDuration) + } + + // Handle the asynchronous rollback case for while Scaling down. + // This means that we have already attempted to apply the CacheNodeType once and + // were not successful hence we will set a terminal condition. 
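The rollback detection that follows leans on the setLastRequested* helpers above: the last successfully requested values are serialized into annotations so a later reconcile can tell a new user edit apart from a server-side rollback. A rough sketch of that round trip, using encoding/json and a placeholder annotation key (the controller's real Annotation* constants are defined elsewhere and their string values are not shown here):

package main

import (
	"encoding/json"
	"fmt"
	"reflect"
)

// Placeholder key; the controller defines its own Annotation* constants.
const lastRequestedNGCKey = "example.services.k8s.aws/last-requested-node-group-configuration"

// Simplified stand-in for svcapitypes.NodeGroupConfiguration.
type NodeGroupConfiguration struct {
	NodeGroupID  *string `json:"nodeGroupID,omitempty"`
	ReplicaCount *int64  `json:"replicaCount,omitempty"`
}

// save marshals the desired configuration into the annotations map,
// mirroring setLastRequestedNodeGroupConfiguration above.
func save(annotations map[string]string, desired []*NodeGroupConfiguration) {
	b, err := json.Marshal(desired)
	if err != nil {
		annotations[lastRequestedNGCKey] = "null"
		return
	}
	annotations[lastRequestedNGCKey] = string(b)
}

// requiresUpdate reports whether the current desired configuration differs
// from what was last requested, mirroring nodeGroupRequiresUpdate below.
func requiresUpdate(annotations map[string]string, desired []*NodeGroupConfiguration) bool {
	val, ok := annotations[lastRequestedNGCKey]
	if !ok || val == "null" {
		return true
	}
	var last []*NodeGroupConfiguration
	_ = json.Unmarshal([]byte(val), &last)
	return !reflect.DeepEqual(desired, last)
}

func main() {
	id, replicas := "0001", int64(2)
	desired := []*NodeGroupConfiguration{{NodeGroupID: &id, ReplicaCount: &replicas}}
	ann := map[string]string{}
	save(ann, desired)
	fmt.Println(requiresUpdate(ann, desired)) // false: nothing changed since the last request
}

The same pattern backs the CacheNodeType and NumNodeGroups annotations, which store a plain string and a stringified integer respectively.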
+ if !cacheNodeTypeRequiresUpdate(desired) && delta.DifferentAt("Spec.CacheNodeType") { + return nil, awserr.New("InvalidParameterCombination", "Cannot update CacheNodeType, "+ + "Please refer to Events for more details", nil) + + } + + // Handle the asynchronous rollback for Resharding. + if !nodeGroupRequiresUpdate(desired) && rm.shardConfigurationsDiffer(desired, latest) { + + return nil, awserr.New("InvalidParameterCombination", "Cannot update NodeGroups, "+ + "Please refer to Events for more details", nil) + } + + // Handle NodeGroupConfiguration asynchronous rollback situations other than Resharding. + if !nodeGroupRequiresUpdate(desired) && (rm.replicaCountDifference(desired, latest) != 0 && !delta.DifferentAt("Spec.ReplicasPerNodeGroup")) { + return nil, awserr.New("InvalidParameterCombination", "Cannot update NodeGroupConfiguration, "+ + "Please refer to Events for more details", nil) + } + + // Order of operations when diffs map to multiple updates APIs: + // 1. When automaticFailoverEnabled differs: + // if automaticFailoverEnabled == false; do nothing in this custom logic, let the modify execute first. + // else if automaticFailoverEnabled == true then following logic should execute first. + // 2. When multiAZ differs + // if multiAZ = true then below is fine. + // else if multiAZ = false ; do nothing in custom logic, let the modify execute. + // 3. updateReplicaCount() is invoked Before updateShardConfiguration() + // because both accept availability zones, however the number of + // values depend on replica count. + if desired.ko.Spec.AutomaticFailoverEnabled != nil && *desired.ko.Spec.AutomaticFailoverEnabled == false { + latestAutomaticFailoverEnabled := latest.ko.Status.AutomaticFailover != nil && *latest.ko.Status.AutomaticFailover == "enabled" + if latestAutomaticFailoverEnabled != *desired.ko.Spec.AutomaticFailoverEnabled { + return rm.modifyReplicationGroup(ctx, desired, latest, delta) + } + } + if desired.ko.Spec.MultiAZEnabled != nil && *desired.ko.Spec.MultiAZEnabled == false { + latestMultiAZEnabled := latest.ko.Status.MultiAZ != nil && *latest.ko.Status.MultiAZ == "enabled" + if latestMultiAZEnabled != *desired.ko.Spec.MultiAZEnabled { + return rm.modifyReplicationGroup(ctx, desired, latest, delta) + } + } + + // increase/decrease replica count + if diff := rm.replicaCountDifference(desired, latest); diff != 0 { + if diff > 0 { + return rm.increaseReplicaCount(ctx, desired, latest) + } + return rm.decreaseReplicaCount(ctx, desired, latest) + } + + // If there is a scale up modification, then we would prioritize it + // over increase/decrease shards. This is important since performing + // scale in without scale up might fail due to insufficient memory. + if delta.DifferentAt("Spec.CacheNodeType") && desired.ko.Status.AllowedScaleUpModifications != nil { + if desired.ko.Spec.CacheNodeType != nil { + for _, scaleUpInstance := range desired.ko.Status.AllowedScaleUpModifications { + if *scaleUpInstance == *desired.ko.Spec.CacheNodeType { + return nil, nil + } + } + } + } + + // increase/decrease shards + if rm.shardConfigurationsDiffer(desired, latest) { + return rm.updateShardConfiguration(ctx, desired, latest) + } + + return rm.modifyReplicationGroup(ctx, desired, latest, delta) +} + +// modifyReplicationGroup updates replication group +// it handles properties that put replication group in +// modifying state if these are supplied to modify API +// irrespective of apply immediately. 
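Taken together, CustomModifyReplicationGroup above behaves like a fixed-priority dispatcher: changes that disable automatic failover or multi-AZ are routed to the modify path first, replica count changes run next, a pending CacheNodeType scale-up is allowed to go ahead of resharding, and only then does resharding or the plain modify call run. A condensed, approximate sketch of that ordering with illustrative names (not the controller's real types):

package main

import "fmt"

// updateDecision captures the inputs CustomModifyReplicationGroup weighs;
// the field names here are illustrative only.
type updateDecision struct {
	disablingAutoFailover bool // desired false while latest reports enabled
	disablingMultiAZ      bool // desired false while latest reports enabled
	replicaCountDiff      int  // desired minus observed replicas
	cacheNodeTypeDiffers  bool // desired node type is an allowed scale-up target
	shardConfigDiffers    bool
}

// nextAction mirrors the documented priority: disabling flags go through the
// modify path first, then replica count changes, then a node-type scale-up
// is left to the generated update before resharding, then resharding,
// and finally the plain modify call.
func nextAction(d updateDecision) string {
	switch {
	case d.disablingAutoFailover || d.disablingMultiAZ:
		return "ModifyReplicationGroup"
	case d.replicaCountDiff > 0:
		return "IncreaseReplicaCount"
	case d.replicaCountDiff < 0:
		return "DecreaseReplicaCount"
	case d.cacheNodeTypeDiffers:
		return "defer: let the generated update scale the node type first"
	case d.shardConfigDiffers:
		return "ModifyReplicationGroupShardConfiguration"
	default:
		return "ModifyReplicationGroup"
	}
}

func main() {
	fmt.Println(nextAction(updateDecision{replicaCountDiff: 1, shardConfigDiffers: true}))
	// IncreaseReplicaCount: replica changes run before resharding
}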
+func (rm *resourceManager) modifyReplicationGroup( + ctx context.Context, + desired *resource, + latest *resource, + delta *ackcompare.Delta, +) (*resource, error) { + // Method currently handles SecurityGroupIDs, EngineVersion + // Avoid making unnecessary DescribeCacheCluster API call if both fields are nil in spec. + if desired.ko.Spec.SecurityGroupIDs == nil && desired.ko.Spec.EngineVersion == nil { + // no updates done + return nil, nil + } + + // Get details using describe cache cluster to compute diff + latestCacheCluster, err := rm.describeCacheCluster(ctx, latest) + if err != nil { + return nil, err + } + + // SecurityGroupIds, EngineVersion + if rm.securityGroupIdsDiffer(desired, latest, latestCacheCluster) || + delta.DifferentAt("Spec.EngineVersion") || delta.DifferentAt("Spec.Engine") || delta.DifferentAt("Spec.CacheParameterGroupName") { + input := rm.newModifyReplicationGroupRequestPayload(desired, latest, latestCacheCluster, delta) + resp, respErr := rm.sdkapi.ModifyReplicationGroup(ctx, input) + rm.metrics.RecordAPICall("UPDATE", "ModifyReplicationGroup", respErr) + if respErr != nil { + rm.log.V(1).Info("Error during ModifyReplicationGroup", "error", respErr) + return nil, respErr + } + + // The ModifyReplicationGroup API returns stale field Engine that don't + // immediately reflect the requested changes, causing the controller to detect false + // differences and trigger terminal conditions. Override these fields with the user's + // intended values before passing to the generated setReplicationGroupOutput function. + normalizedRG := *resp.ReplicationGroup + if desired.ko.Spec.Engine != nil { + normalizedRG.Engine = desired.ko.Spec.Engine + } + + return rm.setReplicationGroupOutput(ctx, desired, &normalizedRG) + } + + // no updates done + return nil, nil +} + +// replicaConfigurationsDifference returns +// positive number if desired replica count is greater than latest replica count +// negative number if desired replica count is less than latest replica count +// 0 otherwise +func (rm *resourceManager) replicaCountDifference( + desired *resource, + latest *resource, +) int { + desiredSpec := desired.ko.Spec + + // There are two ways of setting replica counts for NodeGroups in Elasticache ReplicationGroup. + // - The first way is to have the same replica count for all node groups. + // In this case, the Spec.ReplicasPerNodeGroup field is set to a non-nil-value integer pointer. + // - The second way is to set different replica counts per node group. + // In this case, the Spec.NodeGroupConfiguration field is set to a non-nil NodeGroupConfiguration slice + // of NodeGroupConfiguration structs that each have a ReplicaCount non-nil-value integer pointer field + // that contains the number of replicas for that particular node group. 
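As a concrete illustration of the two shapes described in the comment above, the following sketch uses simplified stand-in types (not the real svcapitypes structs) and shows the uniform-case subtraction performed in the branch below:

package main

import "fmt"

// Illustrative spec shapes only; the real fields live on the generated
// ReplicationGroup spec type.
type nodeGroupConfig struct {
	NodeGroupID  string
	ReplicaCount int64
}

type rgSpec struct {
	// Way 1: one replica count applied to every node group.
	ReplicasPerNodeGroup *int64
	// Way 2: a per-node-group configuration slice.
	NodeGroupConfiguration []nodeGroupConfig
}

func main() {
	two := int64(2)
	uniform := rgSpec{ReplicasPerNodeGroup: &two}
	perShard := rgSpec{NodeGroupConfiguration: []nodeGroupConfig{
		{NodeGroupID: "0001", ReplicaCount: 1},
		{NodeGroupID: "0002", ReplicaCount: 3},
	}}

	// Uniform case: the diff is a plain subtraction, as in the branch below.
	latest := int64(1)
	fmt.Println(int(*uniform.ReplicasPerNodeGroup - latest)) // 1: one replica to add per node group

	// Per-shard case: each entry is compared against the observed member count.
	fmt.Println(len(perShard.NodeGroupConfiguration)) // 2 shards, each with its own count
}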
+ if desiredSpec.ReplicasPerNodeGroup != nil { + return int(*desiredSpec.ReplicasPerNodeGroup - *latest.ko.Spec.ReplicasPerNodeGroup) + } else if desiredSpec.NodeGroupConfiguration != nil { + return rm.diffReplicasNodeGroupConfiguration(desired, latest) + } + return 0 +} + +// diffReplicasNodeGroupConfiguration takes desired Spec.NodeGroupConfiguration slice field into account to return +// positive number if desired replica count is greater than latest replica count +// negative number if desired replica count is less than latest replica count +// 0 otherwise +func (rm *resourceManager) diffReplicasNodeGroupConfiguration( + desired *resource, + latest *resource, +) int { + desiredSpec := desired.ko.Spec + latestStatus := latest.ko.Status + // each shard could have different value for replica count + latestReplicaCounts := map[string]int{} + for _, latestShard := range latestStatus.NodeGroups { + if latestShard.NodeGroupID == nil { + continue + } + latestReplicaCount := 0 + if latestShard.NodeGroupMembers != nil { + if len(latestShard.NodeGroupMembers) > 0 { + latestReplicaCount = len(latestShard.NodeGroupMembers) - 1 + } + } + latestReplicaCounts[*latestShard.NodeGroupID] = latestReplicaCount + } + for _, desiredShard := range desiredSpec.NodeGroupConfiguration { + if desiredShard.NodeGroupID == nil || desiredShard.ReplicaCount == nil { + // no specs to compare for this shard + continue + } + latestShardReplicaCount, found := latestReplicaCounts[*desiredShard.NodeGroupID] + if !found { + // shard not present in status + continue + } + if desiredShardReplicaCount := int(*desiredShard.ReplicaCount); desiredShardReplicaCount != latestShardReplicaCount { + rm.log.V(1).Info( + "ReplicaCount differs", + "NodeGroup", *desiredShard.NodeGroupID, + "desired", int(*desiredShard.ReplicaCount), + "latest", latestShardReplicaCount, + ) + return desiredShardReplicaCount - latestShardReplicaCount + } + } + return 0 +} + +// shardConfigurationsDiffer returns true if shard +// configuration differs between desired, latest resource. 
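Both diffReplicasNodeGroupConfiguration above and the payload builders further down derive a shard's observed replica count as the number of node group members minus one, on the assumption that exactly one member is the primary. A small standalone sketch of that derivation:

package main

import "fmt"

// Minimal stand-ins for the status types referenced above.
type nodeGroupMember struct{ CacheClusterID string }

type nodeGroup struct {
	NodeGroupID      string
	NodeGroupMembers []nodeGroupMember
}

// observedReplicaCounts maps node group ID to member count minus one,
// the same convention used by diffReplicasNodeGroupConfiguration.
func observedReplicaCounts(groups []nodeGroup) map[string]int {
	counts := map[string]int{}
	for _, g := range groups {
		n := 0
		if len(g.NodeGroupMembers) > 0 {
			n = len(g.NodeGroupMembers) - 1
		}
		counts[g.NodeGroupID] = n
	}
	return counts
}

func main() {
	groups := []nodeGroup{
		{NodeGroupID: "0001", NodeGroupMembers: make([]nodeGroupMember, 3)}, // primary + 2 replicas
		{NodeGroupID: "0002", NodeGroupMembers: make([]nodeGroupMember, 1)}, // primary only
	}
	fmt.Println(observedReplicaCounts(groups)) // map[0001:2 0002:0]
}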
+func (rm *resourceManager) shardConfigurationsDiffer( + desired *resource, + latest *resource, +) bool { + desiredSpec := desired.ko.Spec + latestStatus := latest.ko.Status + + // desired shards + var desiredShardsCount *int64 = desiredSpec.NumNodeGroups + if desiredShardsCount == nil && desiredSpec.NodeGroupConfiguration != nil { + numShards := int64(len(desiredSpec.NodeGroupConfiguration)) + desiredShardsCount = &numShards + } + if desiredShardsCount == nil { + // no shards config in desired specs + return false + } + + // latest shards + var latestShardsCount *int64 = nil + if latestStatus.NodeGroups != nil { + numShards := int64(len(latestStatus.NodeGroups)) + latestShardsCount = &numShards + } + + return latestShardsCount == nil || *desiredShardsCount != *latestShardsCount +} + +func (rm *resourceManager) increaseReplicaCount( + ctx context.Context, + desired *resource, + latest *resource, +) (*resource, error) { + input, err := rm.newIncreaseReplicaCountRequestPayload(desired, latest) + if err != nil { + return nil, err + } + resp, respErr := rm.sdkapi.IncreaseReplicaCount(ctx, input) + rm.metrics.RecordAPICall("UPDATE", "IncreaseReplicaCount", respErr) + if respErr != nil { + rm.log.V(1).Info("Error during IncreaseReplicaCount", "error", respErr) + return nil, respErr + } + return rm.setReplicationGroupOutput(ctx, desired, resp.ReplicationGroup) +} + +func (rm *resourceManager) decreaseReplicaCount( + ctx context.Context, + desired *resource, + latest *resource, +) (*resource, error) { + input, err := rm.newDecreaseReplicaCountRequestPayload(desired, latest) + if err != nil { + return nil, err + } + resp, respErr := rm.sdkapi.DecreaseReplicaCount(ctx, input) + rm.metrics.RecordAPICall("UPDATE", "DecreaseReplicaCount", respErr) + if respErr != nil { + rm.log.V(1).Info("Error during DecreaseReplicaCount", "error", respErr) + return nil, respErr + } + return rm.setReplicationGroupOutput(ctx, desired, resp.ReplicationGroup) +} + +func (rm *resourceManager) updateShardConfiguration( + ctx context.Context, + desired *resource, + latest *resource, +) (*resource, error) { + input, err := rm.newUpdateShardConfigurationRequestPayload(desired, latest) + if err != nil { + return nil, err + } + resp, respErr := rm.sdkapi.ModifyReplicationGroupShardConfiguration(ctx, input) + rm.metrics.RecordAPICall("UPDATE", "ModifyReplicationGroupShardConfiguration", respErr) + if respErr != nil { + rm.log.V(1).Info("Error during ModifyReplicationGroupShardConfiguration", "error", respErr) + return nil, respErr + } + + r, err := rm.setReplicationGroupOutput(ctx, desired, resp.ReplicationGroup) + + if err != nil { + return r, err + } + + ko := r.ko.DeepCopy() + // Update the annotations since API call was successful + rm.setLastRequestedNodeGroupConfiguration(desired, ko) + rm.setLastRequestedNumNodeGroups(desired, ko) + return &resource{ko}, nil +} + +// newIncreaseReplicaCountRequestPayload returns an SDK-specific struct for the HTTP request +// payload of the Create API call for the resource +func (rm *resourceManager) newIncreaseReplicaCountRequestPayload( + desired *resource, + latest *resource, +) (*svcsdk.IncreaseReplicaCountInput, error) { + res := &svcsdk.IncreaseReplicaCountInput{} + desiredSpec := desired.ko.Spec + + res.ApplyImmediately = aws.Bool(true) + if desiredSpec.ReplicationGroupID != nil { + res.ReplicationGroupId = desiredSpec.ReplicationGroupID + } + if desiredSpec.ReplicasPerNodeGroup != nil { + res.NewReplicaCount = Int32OrNil(desiredSpec.ReplicasPerNodeGroup) + } + + latestStatus := 
latest.ko.Status + // each shard could have different value for replica count + latestReplicaCounts := map[string]int{} + for _, latestShard := range latestStatus.NodeGroups { + if latestShard.NodeGroupID == nil { + continue + } + latestReplicaCount := 0 + if latestShard.NodeGroupMembers != nil { + if len(latestShard.NodeGroupMembers) > 0 { + latestReplicaCount = len(latestShard.NodeGroupMembers) - 1 + } + } + latestReplicaCounts[*latestShard.NodeGroupID] = latestReplicaCount + } + + if desiredSpec.NodeGroupConfiguration != nil { + shardsConfig := []*svcsdktypes.ConfigureShard{} + for _, desiredShard := range desiredSpec.NodeGroupConfiguration { + if desiredShard.NodeGroupID == nil { + continue + } + _, found := latestReplicaCounts[*desiredShard.NodeGroupID] + if !found { + continue + } + // shard has an Id and it is present on server. + shardConfig := &svcsdktypes.ConfigureShard{} + shardConfig.NodeGroupId = desiredShard.NodeGroupID + if desiredShard.ReplicaCount != nil { + shardConfig.NewReplicaCount = Int32OrNil(desiredShard.ReplicaCount) + } + shardAZs := []*string{} + if desiredShard.PrimaryAvailabilityZone != nil { + shardAZs = append(shardAZs, desiredShard.PrimaryAvailabilityZone) + } + if desiredShard.ReplicaAvailabilityZones != nil { + for _, desiredAZ := range desiredShard.ReplicaAvailabilityZones { + shardAZs = append(shardAZs, desiredAZ) + } + } + if len(shardAZs) > 0 { + stringAZs := make([]string, len(shardAZs)) + for i, az := range shardAZs { + if az != nil { + stringAZs[i] = *az + } + } + shardConfig.PreferredAvailabilityZones = stringAZs + } + shardsConfig = append(shardsConfig, shardConfig) + } + + // Convert []*ConfigureShard to []ConfigureShard + configShards := make([]svcsdktypes.ConfigureShard, len(shardsConfig)) + for i, config := range shardsConfig { + if config != nil { + configShards[i] = *config + } + } + res.ReplicaConfiguration = configShards + } + + return res, nil +} + +// newDecreaseReplicaCountRequestPayload returns an SDK-specific struct for the HTTP request +// payload of the Create API call for the resource +func (rm *resourceManager) newDecreaseReplicaCountRequestPayload( + desired *resource, + latest *resource, +) (*svcsdk.DecreaseReplicaCountInput, error) { + res := &svcsdk.DecreaseReplicaCountInput{} + desiredSpec := desired.ko.Spec + + res.ApplyImmediately = aws.Bool(true) + if desiredSpec.ReplicationGroupID != nil { + res.ReplicationGroupId = desiredSpec.ReplicationGroupID + } + if desiredSpec.ReplicasPerNodeGroup != nil { + res.NewReplicaCount = Int32OrNil(desiredSpec.ReplicasPerNodeGroup) + } + + latestStatus := latest.ko.Status + // each shard could have different value for replica count + latestReplicaCounts := map[string]int{} + for _, latestShard := range latestStatus.NodeGroups { + if latestShard.NodeGroupID == nil { + continue + } + latestReplicaCount := 0 + if latestShard.NodeGroupMembers != nil { + if len(latestShard.NodeGroupMembers) > 0 { + latestReplicaCount = len(latestShard.NodeGroupMembers) - 1 + } + } + latestReplicaCounts[*latestShard.NodeGroupID] = latestReplicaCount + } + + if desiredSpec.NodeGroupConfiguration != nil { + configShards := make([]svcsdktypes.ConfigureShard, len(desiredSpec.NodeGroupConfiguration)) + for i, config := range desiredSpec.NodeGroupConfiguration { + stringAZs := make([]string, len(config.ReplicaAvailabilityZones)) + for i, az := range config.ReplicaAvailabilityZones { + stringAZs[i] = *az + } + configShards[i] = svcsdktypes.ConfigureShard{ + NodeGroupId: config.NodeGroupID, + NewReplicaCount: 
Int32OrNil(config.ReplicaCount), + PreferredAvailabilityZones: stringAZs, + } + } + res.ReplicaConfiguration = configShards + } + + return res, nil +} + +// newUpdateShardConfigurationRequestPayload returns an SDK-specific struct for the HTTP request +// payload of the Update API call for the resource +func (rm *resourceManager) newUpdateShardConfigurationRequestPayload( + desired *resource, + latest *resource, +) (*svcsdk.ModifyReplicationGroupShardConfigurationInput, error) { + res := &svcsdk.ModifyReplicationGroupShardConfigurationInput{} + + desiredSpec := desired.ko.Spec + latestStatus := latest.ko.Status + + // Mandatory arguments + // - ApplyImmediately + // - ReplicationGroupId + // - NodeGroupCount + res.ApplyImmediately = aws.Bool(true) + if desiredSpec.ReplicationGroupID != nil { + res.ReplicationGroupId = desiredSpec.ReplicationGroupID + } + desiredShardsCount := desiredSpec.NumNodeGroups + if desiredShardsCount == nil && desiredSpec.NodeGroupConfiguration != nil { + numShards := int64(len(desiredSpec.NodeGroupConfiguration)) + desiredShardsCount = &numShards + } + if desiredShardsCount != nil { + res.NodeGroupCount = Int32OrNil(desiredShardsCount) + } + + // If desired nodegroup count (number of shards): + // - increases, then (optional) provide ReshardingConfiguration + // - decreases, then (mandatory) provide + // either NodeGroupsToRemove + // or NodeGroupsToRetain + var latestShardsCount *int64 = nil + if latestStatus.NodeGroups != nil { + numShards := int64(len(latestStatus.NodeGroups)) + latestShardsCount = &numShards + } + + increase := (desiredShardsCount != nil && latestShardsCount != nil && *desiredShardsCount > *latestShardsCount) || + (desiredShardsCount != nil && latestShardsCount == nil) + decrease := desiredShardsCount != nil && latestShardsCount != nil && *desiredShardsCount < *latestShardsCount + // Additional arguments + shardsConfig := []*svcsdktypes.ReshardingConfiguration{} + shardsToRetain := []string{} + if desiredSpec.NodeGroupConfiguration != nil { + for _, desiredShard := range desiredSpec.NodeGroupConfiguration { + shardConfig := &svcsdktypes.ReshardingConfiguration{} + if desiredShard.NodeGroupID != nil { + shardConfig.NodeGroupId = desiredShard.NodeGroupID + shardsToRetain = append(shardsToRetain, *desiredShard.NodeGroupID) + } + shardAZs := []string{} + if desiredShard.PrimaryAvailabilityZone != nil { + shardAZs = append(shardAZs, *desiredShard.PrimaryAvailabilityZone) + } + if desiredShard.ReplicaAvailabilityZones != nil { + for _, desiredAZ := range desiredShard.ReplicaAvailabilityZones { + shardAZs = append(shardAZs, *desiredAZ) + } + shardConfig.PreferredAvailabilityZones = shardAZs + } + shardsConfig = append(shardsConfig, shardConfig) + } + } else if decrease { + for i := 0; i < int(*desiredShardsCount); i++ { + if desired.ko.Status.NodeGroups[i] != nil && desired.ko.Status.NodeGroups[i].NodeGroupID != nil { + shardsToRetain = append(shardsToRetain, *desired.ko.Status.NodeGroups[i].NodeGroupID) + } + } + } + + if increase { + if len(shardsConfig) > 0 { + reshardConfig := make([]svcsdktypes.ReshardingConfiguration, len(shardsConfig)) + for i, config := range shardsConfig { + reshardConfig[i] = *config + } + res.ReshardingConfiguration = reshardConfig + } + } else if decrease { + if len(shardsToRetain) == 0 { + return nil, awserr.New("InvalidParameterValue", "At least one node group should be present.", nil) + } + res.NodeGroupsToRetain = shardsToRetain + } + + return res, nil +} + +// getAnyCacheClusterIDFromNodeGroups returns a cache cluster 
ID from supplied node groups. +// Any cache cluster Id which is not nil is returned. +func (rm *resourceManager) getAnyCacheClusterIDFromNodeGroups( + nodeGroups []*svcapitypes.NodeGroup, +) *string { + if nodeGroups == nil { + return nil + } + + var cacheClusterId *string = nil + for _, nodeGroup := range nodeGroups { + if nodeGroup.NodeGroupMembers == nil { + continue + } + for _, nodeGroupMember := range nodeGroup.NodeGroupMembers { + if nodeGroupMember.CacheClusterID == nil { + continue + } + cacheClusterId = nodeGroupMember.CacheClusterID + break + } + if cacheClusterId != nil { + break + } + } + return cacheClusterId +} + +// describeCacheCluster provides CacheCluster object +// per the supplied latest Replication Group Id +// it invokes DescribeCacheClusters API to do so +func (rm *resourceManager) describeCacheCluster( + ctx context.Context, + resource *resource, +) (*svcsdktypes.CacheCluster, error) { + input := &svcsdk.DescribeCacheClustersInput{} + + ko := resource.ko + latestStatus := ko.Status + if latestStatus.NodeGroups == nil { + return nil, nil + } + cacheClusterId := rm.getAnyCacheClusterIDFromNodeGroups(latestStatus.NodeGroups) + if cacheClusterId == nil { + return nil, nil + } + + input.CacheClusterId = cacheClusterId + resp, respErr := rm.sdkapi.DescribeCacheClusters(ctx, input) + rm.metrics.RecordAPICall("READ_MANY", "DescribeCacheClusters", respErr) + if respErr != nil { + rm.log.V(1).Info("Error during DescribeCacheClusters", "error", respErr) + return nil, respErr + } + if resp.CacheClusters == nil { + return nil, nil + } + + for _, cc := range resp.CacheClusters { + if cc.CacheClusterId == nil { + continue + } + return &cc, nil + } + return nil, fmt.Errorf("could not find a non-nil cache cluster from API response") +} + +// securityGroupIdsDiffer return true if +// Security Group Ids differ between desired spec and latest (from cache cluster) status +func (rm *resourceManager) securityGroupIdsDiffer( + desired *resource, + latest *resource, + latestCacheCluster *svcsdktypes.CacheCluster, +) bool { + if desired.ko.Spec.SecurityGroupIDs == nil { + return false + } + + desiredIds := []*string{} + for _, id := range desired.ko.Spec.SecurityGroupIDs { + if id == nil { + continue + } + var value string + value = *id + desiredIds = append(desiredIds, &value) + } + sort.Slice(desiredIds, func(i, j int) bool { + return *desiredIds[i] < *desiredIds[j] + }) + + latestIds := []*string{} + if latestCacheCluster != nil && latestCacheCluster.SecurityGroups != nil { + for _, latestSG := range latestCacheCluster.SecurityGroups { + if latestSG.SecurityGroupId == nil { + continue + } + var value string + value = *latestSG.SecurityGroupId + latestIds = append(latestIds, &value) + } + } + sort.Slice(latestIds, func(i, j int) bool { + return *latestIds[i] < *latestIds[j] + }) + + if len(desiredIds) != len(latestIds) { + return true // differ + } + for index, desiredId := range desiredIds { + if *desiredId != *latestIds[index] { + return true // differ + } + } + // no difference + return false +} + +// newModifyReplicationGroupRequestPayload provides request input object +func (rm *resourceManager) newModifyReplicationGroupRequestPayload( + desired *resource, + latest *resource, + latestCacheCluster *svcsdktypes.CacheCluster, + delta *ackcompare.Delta, +) *svcsdk.ModifyReplicationGroupInput { + input := &svcsdk.ModifyReplicationGroupInput{} + + input.ApplyImmediately = aws.Bool(true) + if desired.ko.Spec.ReplicationGroupID != nil { + input.ReplicationGroupId = 
desired.ko.Spec.ReplicationGroupID + } + + if rm.securityGroupIdsDiffer(desired, latest, latestCacheCluster) && + desired.ko.Spec.SecurityGroupIDs != nil { + ids := []string{} + for _, id := range desired.ko.Spec.SecurityGroupIDs { + var value string + value = *id + ids = append(ids, value) + } + input.SecurityGroupIds = ids + } + + if delta.DifferentAt("Spec.EngineVersion") && + desired.ko.Spec.EngineVersion != nil { + input.EngineVersion = desired.ko.Spec.EngineVersion + } + + if delta.DifferentAt("Spec.Engine") && + desired.ko.Spec.Engine != nil { + input.Engine = desired.ko.Spec.Engine + } + + if delta.DifferentAt("Spec.CacheParameterGroupName") && + desired.ko.Spec.CacheParameterGroupName != nil { + input.CacheParameterGroupName = desired.ko.Spec.CacheParameterGroupName + } + + return input +} + +// cacheNodeTypeRequiresUpdate retrieves the last requested cacheNodeType saved in annotations and compares them +// to the current desired cacheNodeType +func cacheNodeTypeRequiresUpdate(desired *resource) bool { + annotations := desired.ko.ObjectMeta.GetAnnotations() + if val, ok := annotations[AnnotationLastRequestedCNT]; ok && desired.ko.Spec.CacheNodeType != nil { + return val != *desired.ko.Spec.CacheNodeType + } + + // This means there is delta and no value in annotation or in Spec + return true +} + +// nodeGroupRequiresUpdate retrieves the last applied NumNodeGroups and NodeGroupConfiguration and compares them +// to the current desired NumNodeGroups and NodeGroupConfiguration +func nodeGroupRequiresUpdate(desired *resource) bool { + annotations := desired.ko.ObjectMeta.GetAnnotations() + + if val, ok := annotations[AnnotationLastRequestedNNG]; ok && val != "null" { + numNodes, err := strconv.ParseInt(val, 10, 64) + + if err != nil { + return false + } + + if numNodes != *desired.ko.Spec.NumNodeGroups { + return true + } + + return false + } + + desiredNodeGroupConfig := desired.ko.Spec.NodeGroupConfiguration + if val, ok := annotations[AnnotationLastRequestedNGC]; ok && val != "null" { + var lastRequestedNodeGroupConfig []*svcapitypes.NodeGroupConfiguration + _ = json.Unmarshal([]byte(val), &lastRequestedNodeGroupConfig) + return !reflect.DeepEqual(desiredNodeGroupConfig, lastRequestedNodeGroupConfig) + } + + // This means there is delta and no value in annotation or in Spec + return true +} + +/* +To be called in sdkFind, this function updates the replication group's Spec fields with the latest observed state +This requires extra processing of the API response as well as additional API calls, and is necessary because +sdkFind does not update many of these Spec fields by default. "resource" is a wrapper around "ko", the object +which will eventually be returned as "latest". 
+*/ +func (rm *resourceManager) updateSpecFields( + ctx context.Context, + respRG svcsdktypes.ReplicationGroup, + resource *resource, +) { + if isDeleting(resource) { + return + } + // populate relevant ko.Spec fields with observed state of respRG.NodeGroups + setReplicasPerNodeGroup(respRG, resource) + setNodeGroupConfiguration(respRG, resource) + + // updating some Spec fields requires a DescribeCacheClusters call + latestCacheCluster, err := rm.describeCacheCluster(ctx, resource) + if err == nil && latestCacheCluster != nil { + setEngineVersion(latestCacheCluster, resource) + setMaintenanceWindow(latestCacheCluster, resource) + setCacheParameterGroup(latestCacheCluster, resource) + } +} + +// if NodeGroupConfiguration was given in the desired.Spec, update ko.Spec with the latest observed value +func setNodeGroupConfiguration( + respRG svcsdktypes.ReplicationGroup, + resource *resource, +) { + ko := resource.ko + if respRG.NodeGroups != nil && ko.Spec.NodeGroupConfiguration != nil { + nodeGroupConfigurations := []*svcapitypes.NodeGroupConfiguration{} + for _, nodeGroup := range respRG.NodeGroups { + nodeGroupConfiguration := &svcapitypes.NodeGroupConfiguration{} + + if nodeGroup.NodeGroupId != nil { + nodeGroupConfiguration.NodeGroupID = nodeGroup.NodeGroupId + } + replicaAZs := []*string{} + + for _, nodeGroupMember := range nodeGroup.NodeGroupMembers { + if nodeGroupMember.CurrentRole != nil && *nodeGroupMember.CurrentRole == "primary" { + nodeGroupConfiguration.PrimaryAvailabilityZone = nodeGroupMember.PreferredAvailabilityZone + } + + // In this case we cannot say what is primary AZ and replica AZ. + if nodeGroupMember.CurrentRole == nil && nodeGroupConfiguration.PrimaryAvailabilityZone == nil { + // We cannot determine the correct AZ so we would use the first node group member as primary + nodeGroupConfiguration.PrimaryAvailabilityZone = nodeGroupMember.PreferredAvailabilityZone + } + + if nodeGroupConfiguration.PrimaryAvailabilityZone != nil || *nodeGroupMember.CurrentRole == "replica" { + replicaAZs = append(replicaAZs, nodeGroupMember.PreferredAvailabilityZone) + } + } + + if len(replicaAZs) > 0 { + nodeGroupConfiguration.ReplicaAvailabilityZones = replicaAZs + } + + replicaCount := int64(len(replicaAZs)) + nodeGroupConfiguration.ReplicaCount = &replicaCount + } + + ko.Spec.NodeGroupConfiguration = nodeGroupConfigurations + } + + if respRG.NodeGroups != nil && ko.Spec.NumNodeGroups != nil { + *ko.Spec.NumNodeGroups = int64(len(respRG.NodeGroups)) + } +} + +//TODO: for all the fields here, reevaluate if the latest observed state should always be populated, +// even if the corresponding field was not specified in desired + +// if ReplicasPerNodeGroup was given in desired.Spec, update ko.Spec with the latest observed value +func setReplicasPerNodeGroup( + respRG svcsdktypes.ReplicationGroup, + resource *resource, +) { + ko := resource.ko + if respRG.NodeGroups != nil && ko.Spec.ReplicasPerNodeGroup != nil { + // if ReplicasPerNodeGroup is specified, all node groups should have the same # replicas so use the first + nodeGroup := respRG.NodeGroups[0] + if nodeGroup.NodeGroupMembers != nil { + if len(nodeGroup.NodeGroupMembers) > 0 { + *ko.Spec.ReplicasPerNodeGroup = int64(len(nodeGroup.NodeGroupMembers) - 1) + } + } + } +} + +// if EngineVersion was specified in desired.Spec, update ko.Spec with the latest observed value (if non-nil) +func setEngineVersion( + latestCacheCluster *svcsdktypes.CacheCluster, + resource *resource, +) { + ko := resource.ko + if ko.Spec.EngineVersion != nil 
&& latestCacheCluster.EngineVersion != nil { + *ko.Spec.EngineVersion = *latestCacheCluster.EngineVersion + } +} + +// update maintenance window (if non-nil in API response) regardless of whether it was specified in desired +func setMaintenanceWindow( + latestCacheCluster *svcsdktypes.CacheCluster, + resource *resource, +) { + ko := resource.ko + if latestCacheCluster.PreferredMaintenanceWindow != nil { + pmw := *latestCacheCluster.PreferredMaintenanceWindow + ko.Spec.PreferredMaintenanceWindow = &pmw + } +} + +// setCacheParameterGroup updates the cache parameter group associated with the replication group +// +// (if non-nil in API response) regardless of whether it was specified in desired +func setCacheParameterGroup( + latestCacheCluster *svcsdktypes.CacheCluster, + resource *resource, +) { + ko := resource.ko + if latestCacheCluster.CacheParameterGroup != nil && latestCacheCluster.CacheParameterGroup.CacheParameterGroupName != nil { + cpgName := *latestCacheCluster.CacheParameterGroup.CacheParameterGroupName + ko.Spec.CacheParameterGroupName = &cpgName + } +} + +// modifyDelta removes non-meaningful differences from the delta and adds additional differences if necessary +func modifyDelta( + delta *ackcompare.Delta, + desired *resource, + latest *resource, +) { + + if delta.DifferentAt("Spec.EngineVersion") { + if desired.ko.Spec.EngineVersion != nil && latest.ko.Spec.EngineVersion != nil { + if util.EngineVersionsMatch(*desired.ko.Spec.EngineVersion, *latest.ko.Spec.EngineVersion) { + common.RemoveFromDelta(delta, "Spec.EngineVersion") + } + } + // TODO: handle the case of a nil difference (especially when desired EV is nil) + } + + // if server has given PreferredMaintenanceWindow a default value, no action needs to be taken + if delta.DifferentAt("Spec.PreferredMaintenanceWindow") { + if desired.ko.Spec.PreferredMaintenanceWindow == nil && latest.ko.Spec.PreferredMaintenanceWindow != nil { + common.RemoveFromDelta(delta, "Spec.PreferredMaintenanceWindow") + } + } + + // note that the comparison is actually done between desired.Spec.LogDeliveryConfigurations and + // the last requested configurations saved in annotations (as opposed to latest.Spec.LogDeliveryConfigurations) + if logDeliveryRequiresUpdate(desired) { + delta.Add("Spec.LogDeliveryConfigurations", desired.ko.Spec.LogDeliveryConfigurations, + unmarshalLastRequestedLDCs(desired)) + } + + if multiAZRequiresUpdate(desired, latest) { + delta.Add("Spec.MultiAZEnabled", desired.ko.Spec.MultiAZEnabled, latest.ko.Status.MultiAZ) + } + + if autoFailoverRequiresUpdate(desired, latest) { + delta.Add("Spec.AutomaticFailoverEnabled", desired.ko.Spec.AutomaticFailoverEnabled, + latest.ko.Status.AutomaticFailover) + } + + if updateRequired, current := primaryClusterIDRequiresUpdate(desired, latest); updateRequired { + delta.Add("Spec.PrimaryClusterID", desired.ko.Spec.PrimaryClusterID, *current) + } +} + +// logDeliveryRequiresUpdate retrieves the last requested configurations saved in annotations and compares them +// to the current desired configurations +func logDeliveryRequiresUpdate(desired *resource) bool { + desiredConfigs := desired.ko.Spec.LogDeliveryConfigurations + lastRequestedConfigs := unmarshalLastRequestedLDCs(desired) + return !reflect.DeepEqual(desiredConfigs, lastRequestedConfigs) +} + +// unmarshal the value found in annotations for the LogDeliveryConfigurations field requested in the last +// successful create or modify call +func unmarshalLastRequestedLDCs(desired *resource) 
[]*svcapitypes.LogDeliveryConfigurationRequest { + var lastRequestedConfigs []*svcapitypes.LogDeliveryConfigurationRequest + + annotations := desired.ko.ObjectMeta.GetAnnotations() + if val, ok := annotations[AnnotationLastRequestedLDCs]; ok { + _ = json.Unmarshal([]byte(val), &lastRequestedConfigs) + } + + return lastRequestedConfigs +} + +// multiAZRequiresUpdate returns true if the latest multi AZ status does not yet match the +// desired state, and false otherwise +func multiAZRequiresUpdate(desired *resource, latest *resource) bool { + // no preference for multi AZ specified; no update required + if desired.ko.Spec.MultiAZEnabled == nil { + return false + } + + // API should return a non-nil value, but if it doesn't then attempt to update + if latest.ko.Status.MultiAZ == nil { + return true + } + + // true maps to "enabled"; false maps to "disabled" + // this accounts for values such as "enabling" and "disabling" + if *desired.ko.Spec.MultiAZEnabled { + return *latest.ko.Status.MultiAZ != string(svcapitypes.MultiAZStatus_enabled) + } else { + return *latest.ko.Status.MultiAZ != string(svcapitypes.MultiAZStatus_disabled) + } +} + +// autoFailoverRequiresUpdate returns true if the latest auto failover status does not yet match the +// desired state, and false otherwise +func autoFailoverRequiresUpdate(desired *resource, latest *resource) bool { + // the logic is exactly analogous to multiAZRequiresUpdate above + if desired.ko.Spec.AutomaticFailoverEnabled == nil { + return false + } + + if latest.ko.Status.AutomaticFailover == nil { + return true + } + + if *desired.ko.Spec.AutomaticFailoverEnabled { + return *latest.ko.Status.AutomaticFailover != string(svcapitypes.AutomaticFailoverStatus_enabled) + } else { + return *latest.ko.Status.AutomaticFailover != string(svcapitypes.AutomaticFailoverStatus_disabled) + } +} + +// primaryClusterIDRequiresUpdate retrieves the current primary cluster ID and determines whether +// an update is required. If no desired state is specified or there is an issue retrieving the +// latest state, return false, nil. Otherwise, return false or true depending on equality of +// the latest and desired states, and a non-nil pointer to the latest value +func primaryClusterIDRequiresUpdate(desired *resource, latest *resource) (bool, *string) { + if desired.ko.Spec.PrimaryClusterID == nil { + return false, nil + } + + // primary cluster ID applies to cluster mode disabled only; if API returns multiple + // or no node groups, or the provided node group is nil, there is nothing that can be done + if len(latest.ko.Status.NodeGroups) != 1 || latest.ko.Status.NodeGroups[0] == nil { + return false, nil + } + + // attempt to find primary cluster in node group. 
If for some reason it is not present, we + // don't have a reliable latest state, so do nothing + ng := *latest.ko.Status.NodeGroups[0] + for _, member := range ng.NodeGroupMembers { + if member == nil { + continue + } + + if member.CurrentRole != nil && *member.CurrentRole == "primary" && member.CacheClusterID != nil { + val := *member.CacheClusterID + return val != *desired.ko.Spec.PrimaryClusterID, &val + } + } + + return false, nil +} + +func Int32OrNil(i *int64) *int32 { + if i == nil { + return nil + } + return aws.Int32(int32(*i)) } diff --git a/pkg/resource/replication_group/manager.go b/pkg/resource/replication_group/manager.go index 6f349203..da977e4b 100644 --- a/pkg/resource/replication_group/manager.go +++ b/pkg/resource/replication_group/manager.go @@ -32,9 +32,8 @@ import ( acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" ackutil "github.com/aws-controllers-k8s/runtime/pkg/util" - "github.com/aws/aws-sdk-go/aws/session" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" - svcsdkapi "github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" @@ -59,6 +58,9 @@ type resourceManager struct { // cfg is a copy of the ackcfg.Config object passed on start of the service // controller cfg ackcfg.Config + // clientcfg is a copy of the client configuration passed on start of the + // service controller + clientcfg aws.Config // log refers to the logr.Logger object handling logging for the service // controller log logr.Logger @@ -73,12 +75,9 @@ type resourceManager struct { awsAccountID ackv1alpha1.AWSAccountID // The AWS Region that this resource manager targets awsRegion ackv1alpha1.AWSRegion - // sess is the AWS SDK Session object used to communicate with the backend - // AWS service API - sess *session.Session - // sdk is a pointer to the AWS service API interface exposed by the - // aws-sdk-go/services/{alias}/{alias}iface package. - sdkapi svcsdkapi.ElastiCacheAPI + // sdk is a pointer to the AWS service API client exposed by the + // aws-sdk-go-v2/services/{alias} package. + sdkapi *svcsdk.Client } // concreteResource returns a pointer to a resource from the supplied @@ -103,6 +102,7 @@ func (rm *resourceManager) ReadOne( panic("resource manager's ReadOne() method received resource with nil CR object") } observed, err := rm.sdkFind(ctx, r) + mirrorAWSTags(r, observed) if err != nil { if observed != nil { return rm.onError(observed, err) @@ -291,32 +291,76 @@ func (rm *resourceManager) EnsureTags( defaultTags := ackrt.GetDefaultTags(&rm.cfg, r.ko, md) var existingTags []*svcapitypes.Tag existingTags = r.ko.Spec.Tags - resourceTags := ToACKTags(existingTags) + resourceTags, keyOrder := convertToOrderedACKTags(existingTags) tags := acktags.Merge(resourceTags, defaultTags) - r.ko.Spec.Tags = FromACKTags(tags) + r.ko.Spec.Tags = fromACKTags(tags, keyOrder) return nil } +// FilterAWSTags ignores tags that have keys that start with "aws:" +// is needed to ensure the controller does not attempt to remove +// tags set by AWS. This function needs to be called after each Read +// operation. +// Eg. 
resources created with cloudformation have tags that cannot be +// removed by an ACK controller +func (rm *resourceManager) FilterSystemTags(res acktypes.AWSResource) { + r := rm.concreteResource(res) + if r == nil || r.ko == nil { + return + } + var existingTags []*svcapitypes.Tag + existingTags = r.ko.Spec.Tags + resourceTags, tagKeyOrder := convertToOrderedACKTags(existingTags) + ignoreSystemTags(resourceTags) + r.ko.Spec.Tags = fromACKTags(resourceTags, tagKeyOrder) +} + +// mirrorAWSTags ensures that AWS tags are included in the desired resource +// if they are present in the latest resource. This will ensure that the +// aws tags are not present in a diff. The logic of the controller will +// ensure these tags aren't patched to the resource in the cluster, and +// will only be present to make sure we don't try to remove these tags. +// +// Although there are a lot of similarities between this function and +// EnsureTags, they are very much different. +// While EnsureTags tries to make sure the resource contains the controller +// tags, mirrowAWSTags tries to make sure tags injected by AWS are mirrored +// from the latest resoruce to the desired resource. +func mirrorAWSTags(a *resource, b *resource) { + if a == nil || a.ko == nil || b == nil || b.ko == nil { + return + } + var existingLatestTags []*svcapitypes.Tag + var existingDesiredTags []*svcapitypes.Tag + existingDesiredTags = a.ko.Spec.Tags + existingLatestTags = b.ko.Spec.Tags + desiredTags, desiredTagKeyOrder := convertToOrderedACKTags(existingDesiredTags) + latestTags, _ := convertToOrderedACKTags(existingLatestTags) + syncAWSTags(desiredTags, latestTags) + a.ko.Spec.Tags = fromACKTags(desiredTags, desiredTagKeyOrder) +} + // newResourceManager returns a new struct implementing // acktypes.AWSResourceManager +// This is for AWS-SDK-GO-V2 - Created newResourceManager With AWS sdk-Go-ClientV2 func newResourceManager( cfg ackcfg.Config, + clientcfg aws.Config, log logr.Logger, metrics *ackmetrics.Metrics, rr acktypes.Reconciler, - sess *session.Session, id ackv1alpha1.AWSAccountID, region ackv1alpha1.AWSRegion, ) (*resourceManager, error) { return &resourceManager{ cfg: cfg, + clientcfg: clientcfg, log: log, metrics: metrics, rr: rr, awsAccountID: id, awsRegion: region, - sess: sess, - sdkapi: svcsdk.New(sess), + sdkapi: svcsdk.NewFromConfig(clientcfg), }, nil } diff --git a/pkg/resource/replication_group/manager_factory.go b/pkg/resource/replication_group/manager_factory.go index 279b8c73..2de9f614 100644 --- a/pkg/resource/replication_group/manager_factory.go +++ b/pkg/resource/replication_group/manager_factory.go @@ -23,7 +23,7 @@ import ( ackcfg "github.com/aws-controllers-k8s/runtime/pkg/config" ackmetrics "github.com/aws-controllers-k8s/runtime/pkg/metrics" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" - "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/go-logr/logr" svcresource "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource" @@ -47,14 +47,18 @@ func (f *resourceManagerFactory) ResourceDescriptor() acktypes.AWSResourceDescri // supplied AWS account func (f *resourceManagerFactory) ManagerFor( cfg ackcfg.Config, + clientcfg aws.Config, log logr.Logger, metrics *ackmetrics.Metrics, rr acktypes.Reconciler, - sess *session.Session, id ackv1alpha1.AWSAccountID, region ackv1alpha1.AWSRegion, + roleARN ackv1alpha1.AWSResourceName, ) (acktypes.AWSResourceManager, error) { - rmId := fmt.Sprintf("%s/%s", id, region) + // We use the account ID, region, and 
role ARN to uniquely identify a + // resource manager. This helps us to avoid creating multiple resource + // managers for the same account/region/roleARN combination. + rmId := fmt.Sprintf("%s/%s/%s", id, region, roleARN) f.RLock() rm, found := f.rmCache[rmId] f.RUnlock() @@ -66,7 +70,7 @@ func (f *resourceManagerFactory) ManagerFor( f.Lock() defer f.Unlock() - rm, err := newResourceManager(cfg, log, metrics, rr, sess, id, region) + rm, err := newResourceManager(cfg, clientcfg, log, metrics, rr, id, region) if err != nil { return nil, err } diff --git a/pkg/resource/replication_group/manager_test_suite_test.go b/pkg/resource/replication_group/manager_test_suite_test.go deleted file mode 100644 index 823de2e2..00000000 --- a/pkg/resource/replication_group/manager_test_suite_test.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package replication_group - -import ( - "errors" - "fmt" - mocksvcsdkapi "github.com/aws-controllers-k8s/elasticache-controller/mocks/aws-sdk-go/elasticache" - "github.com/aws-controllers-k8s/elasticache-controller/pkg/testutil" - acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "path/filepath" - "testing" -) - -// TestDeclarativeTestSuite runs the test suite for replication group -func TestDeclarativeTestSuite(t *testing.T) { - var ts = testutil.TestSuite{} - testutil.LoadFromFixture(filepath.Join("testdata", "test_suite.yaml"), &ts) - var delegate = testRunnerDelegate{t: t} - var runner = testutil.TestSuiteRunner{TestSuite: &ts, Delegate: &delegate} - runner.RunTests() -} - -// testRunnerDelegate implements testutil.TestRunnerDelegate -type testRunnerDelegate struct { - t *testing.T -} - -func (d *testRunnerDelegate) ResourceDescriptor() acktypes.AWSResourceDescriptor { - return &resourceDescriptor{} -} - -func (d *testRunnerDelegate) ResourceManager(mocksdkapi *mocksvcsdkapi.ElastiCacheAPI) acktypes.AWSResourceManager { - return provideResourceManagerWithMockSDKAPI(mocksdkapi) -} - -func (d *testRunnerDelegate) GoTestRunner() *testing.T { - return d.t -} - -func (d *testRunnerDelegate) EmptyServiceAPIOutput(apiName string) (interface{}, error) { - if apiName == "" { - return nil, errors.New("no API name specified") - } - //TODO: use reflection, template to auto generate this block/method. 
- switch apiName { - case "DescribeReplicationGroupsWithContext": - var output svcsdk.DescribeReplicationGroupsOutput - return &output, nil - case "ListAllowedNodeTypeModifications": - var output svcsdk.ListAllowedNodeTypeModificationsOutput - return &output, nil - case "DescribeEventsWithContext": - var output svcsdk.DescribeEventsOutput - return &output, nil - case "CreateReplicationGroupWithContext": - var output svcsdk.CreateReplicationGroupOutput - return &output, nil - case "DecreaseReplicaCountWithContext": - var output svcsdk.DecreaseReplicaCountOutput - return &output, nil - case "DeleteReplicationGroupWithContext": - var output svcsdk.DeleteReplicationGroupOutput - return &output, nil - case "DescribeCacheClustersWithContext": - var output svcsdk.DescribeCacheClustersOutput - return &output, nil - case "IncreaseReplicaCountWithContext": - var output svcsdk.IncreaseReplicaCountOutput - return &output, nil - case "ModifyReplicationGroupShardConfigurationWithContext": - var output svcsdk.ModifyReplicationGroupShardConfigurationOutput - return &output, nil - case "ModifyReplicationGroupWithContext": - var output svcsdk.ModifyReplicationGroupOutput - return &output, nil - } - return nil, errors.New(fmt.Sprintf("no matching API name found for: %s", apiName)) -} - -func (d *testRunnerDelegate) Equal(a acktypes.AWSResource, b acktypes.AWSResource) bool { - ac := a.(*resource) - bc := b.(*resource) - opts := []cmp.Option{cmpopts.EquateEmpty()} - - var specMatch = false - if cmp.Equal(ac.ko.Spec, bc.ko.Spec, opts...) { - specMatch = true - } else { - fmt.Printf("Difference ko.Spec (-expected +actual):\n\n") - fmt.Println(cmp.Diff(ac.ko.Spec, bc.ko.Spec, opts...)) - specMatch = false - } - - var statusMatch = false - if cmp.Equal(ac.ko.Status, bc.ko.Status, opts...) { - statusMatch = true - } else { - fmt.Printf("Difference ko.Status (-expected +actual):\n\n") - fmt.Println(cmp.Diff(ac.ko.Status, bc.ko.Status, opts...)) - statusMatch = false - } - - return statusMatch && specMatch -} diff --git a/pkg/resource/replication_group/post_set_output.go b/pkg/resource/replication_group/post_set_output.go deleted file mode 100644 index 21ea6af7..00000000 --- a/pkg/resource/replication_group/post_set_output.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package replication_group - -import ( - "context" - svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" - - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" -) - -/* -To be called in sdkFind, this function updates the replication group's Spec fields with the latest observed state -This requires extra processing of the API response as well as additional API calls, and is necessary because -sdkFind does not update many of these Spec fields by default. "resource" is a wrapper around "ko", the object -which will eventually be returned as "latest". 
-*/ -func (rm *resourceManager) updateSpecFields( - ctx context.Context, - respRG *svcsdk.ReplicationGroup, - resource *resource, -) { - if isDeleting(resource) { - return - } - // populate relevant ko.Spec fields with observed state of respRG.NodeGroups - setReplicasPerNodeGroup(respRG, resource) - setNodeGroupConfiguration(respRG, resource) - - // updating some Spec fields requires a DescribeCacheClusters call - latestCacheCluster, err := rm.describeCacheCluster(ctx, resource) - if err == nil && latestCacheCluster != nil { - setEngineVersion(latestCacheCluster, resource) - setMaintenanceWindow(latestCacheCluster, resource) - setCacheParameterGroup(latestCacheCluster, resource) - } -} - -// if NodeGroupConfiguration was given in the desired.Spec, update ko.Spec with the latest observed value -func setNodeGroupConfiguration( - respRG *svcsdk.ReplicationGroup, - resource *resource, -) { - ko := resource.ko - if respRG.NodeGroups != nil && ko.Spec.NodeGroupConfiguration != nil { - nodeGroupConfigurations := []*svcapitypes.NodeGroupConfiguration{} - for _, nodeGroup := range respRG.NodeGroups { - nodeGroupConfiguration := &svcapitypes.NodeGroupConfiguration{} - - if nodeGroup.NodeGroupId != nil { - nodeGroupConfiguration.NodeGroupID = nodeGroup.NodeGroupId - } - replicaAZs := []*string{} - - for _, nodeGroupMember := range nodeGroup.NodeGroupMembers { - if nodeGroupMember.CurrentRole != nil && *nodeGroupMember.CurrentRole == "primary" { - nodeGroupConfiguration.PrimaryAvailabilityZone = nodeGroupMember.PreferredAvailabilityZone - } - - // In this case we cannot say what is primary AZ and replica AZ. - if nodeGroupMember.CurrentRole == nil && nodeGroupConfiguration.PrimaryAvailabilityZone == nil { - // We cannot determine the correct AZ so we would use the first node group member as primary - nodeGroupConfiguration.PrimaryAvailabilityZone = nodeGroupMember.PreferredAvailabilityZone - } - - if nodeGroupConfiguration.PrimaryAvailabilityZone != nil || *nodeGroupMember.CurrentRole == "replica" { - replicaAZs = append(replicaAZs, nodeGroupMember.PreferredAvailabilityZone) - } - } - - if len(replicaAZs) > 0 { - nodeGroupConfiguration.ReplicaAvailabilityZones = replicaAZs - } - - replicaCount := int64(len(replicaAZs)) - nodeGroupConfiguration.ReplicaCount = &replicaCount - } - - ko.Spec.NodeGroupConfiguration = nodeGroupConfigurations - } -} - -//TODO: for all the fields here, reevaluate if the latest observed state should always be populated, -// even if the corresponding field was not specified in desired - -// if ReplicasPerNodeGroup was given in desired.Spec, update ko.Spec with the latest observed value -func setReplicasPerNodeGroup( - respRG *svcsdk.ReplicationGroup, - resource *resource, -) { - ko := resource.ko - if respRG.NodeGroups != nil && ko.Spec.ReplicasPerNodeGroup != nil { - // if ReplicasPerNodeGroup is specified, all node groups should have the same # replicas so use the first - nodeGroup := respRG.NodeGroups[0] - if nodeGroup != nil && nodeGroup.NodeGroupMembers != nil { - if len(nodeGroup.NodeGroupMembers) > 0 { - *ko.Spec.ReplicasPerNodeGroup = int64(len(nodeGroup.NodeGroupMembers) - 1) - } - } - } -} - -// if EngineVersion was specified in desired.Spec, update ko.Spec with the latest observed value (if non-nil) -func setEngineVersion( - latestCacheCluster *svcsdk.CacheCluster, - resource *resource, -) { - ko := resource.ko - if ko.Spec.EngineVersion != nil && latestCacheCluster.EngineVersion != nil { - *ko.Spec.EngineVersion = *latestCacheCluster.EngineVersion - } -} - -// 
update maintenance window (if non-nil in API response) regardless of whether it was specified in desired -func setMaintenanceWindow( - latestCacheCluster *svcsdk.CacheCluster, - resource *resource, -) { - ko := resource.ko - if latestCacheCluster.PreferredMaintenanceWindow != nil { - pmw := *latestCacheCluster.PreferredMaintenanceWindow - ko.Spec.PreferredMaintenanceWindow = &pmw - } -} - -// setCacheParameterGroup updates the cache parameter group associated with the replication group -// -// (if non-nil in API response) regardless of whether it was specified in desired -func setCacheParameterGroup( - latestCacheCluster *svcsdk.CacheCluster, - resource *resource, -) { - ko := resource.ko - if latestCacheCluster.CacheParameterGroup != nil && latestCacheCluster.CacheParameterGroup.CacheParameterGroupName != nil { - cpgName := *latestCacheCluster.CacheParameterGroup.CacheParameterGroupName - ko.Spec.CacheParameterGroupName = &cpgName - } -} diff --git a/pkg/resource/replication_group/references.go b/pkg/resource/replication_group/references.go index 2816d92e..8e631e7b 100644 --- a/pkg/resource/replication_group/references.go +++ b/pkg/resource/replication_group/references.go @@ -17,36 +17,350 @@ package replication_group import ( "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" + ec2apitypes "github.com/aws-controllers-k8s/ec2-controller/apis/v1alpha1" + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" ) +// +kubebuilder:rbac:groups=ec2.services.k8s.aws,resources=securitygroups,verbs=get;list +// +kubebuilder:rbac:groups=ec2.services.k8s.aws,resources=securitygroups/status,verbs=get;list + +// ClearResolvedReferences removes any reference values that were made +// concrete in the spec. It returns a copy of the input AWSResource which +// contains the original *Ref values, but none of their respective concrete +// values. +func (rm *resourceManager) ClearResolvedReferences(res acktypes.AWSResource) acktypes.AWSResource { + ko := rm.concreteResource(res).ko.DeepCopy() + + if ko.Spec.CacheParameterGroupRef != nil { + ko.Spec.CacheParameterGroupName = nil + } + + if ko.Spec.CacheSubnetGroupRef != nil { + ko.Spec.CacheSubnetGroupName = nil + } + + if len(ko.Spec.SecurityGroupRefs) > 0 { + ko.Spec.SecurityGroupIDs = nil + } + + return &resource{ko} +} + // ResolveReferences finds if there are any Reference field(s) present -// inside AWSResource passed in the parameter and attempts to resolve -// those reference field(s) into target field(s). -// It returns an AWSResource with resolved reference(s), and an error if the -// passed AWSResource's reference field(s) cannot be resolved. -// This method also adds/updates the ConditionTypeReferencesResolved for the -// AWSResource. +// inside AWSResource passed in the parameter and attempts to resolve those +// reference field(s) into their respective target field(s). It returns a +// copy of the input AWSResource with resolved reference(s), a boolean which +// is set to true if the resource contains any references (regardless of if +// they are resolved successfully) and an error if the passed AWSResource's +// reference field(s) could not be resolved. 
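The resolver functions below, together with validateReferenceFields, treat each *Ref field and its concrete counterpart as mutually exclusive, resolve the reference into the concrete field, and report whether any references were present at all. A stripped-down sketch of the exclusivity check, with a hypothetical two-field spec standing in for the real one:

package main

import (
	"errors"
	"fmt"
)

// Hypothetical, simplified spec: a reference and the concrete value it resolves into.
type spec struct {
	CacheSubnetGroupRef  *string // name of a CacheSubnetGroup object in the cluster
	CacheSubnetGroupName *string // concrete ElastiCache subnet group name
}

// validate mirrors validateReferenceFields: setting both forms is rejected.
func validate(s spec) error {
	if s.CacheSubnetGroupRef != nil && s.CacheSubnetGroupName != nil {
		return errors.New("cannot set both CacheSubnetGroupName and CacheSubnetGroupRef")
	}
	return nil
}

func main() {
	ref, name := "my-subnet-group", "prod-subnets"
	fmt.Println(validate(spec{CacheSubnetGroupRef: &ref}))                              // <nil>
	fmt.Println(validate(spec{CacheSubnetGroupRef: &ref, CacheSubnetGroupName: &name})) // error
}

ClearResolvedReferences performs the inverse step, dropping the concrete values again so that only the references are persisted on the stored object.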
func (rm *resourceManager) ResolveReferences( ctx context.Context, apiReader client.Reader, res acktypes.AWSResource, -) (acktypes.AWSResource, error) { - return res, nil +) (acktypes.AWSResource, bool, error) { + ko := rm.concreteResource(res).ko + + resourceHasReferences := false + err := validateReferenceFields(ko) + if fieldHasReferences, err := rm.resolveReferenceForCacheParameterGroupName(ctx, apiReader, ko); err != nil { + return &resource{ko}, (resourceHasReferences || fieldHasReferences), err + } else { + resourceHasReferences = resourceHasReferences || fieldHasReferences + } + + if fieldHasReferences, err := rm.resolveReferenceForCacheSubnetGroupName(ctx, apiReader, ko); err != nil { + return &resource{ko}, (resourceHasReferences || fieldHasReferences), err + } else { + resourceHasReferences = resourceHasReferences || fieldHasReferences + } + + if fieldHasReferences, err := rm.resolveReferenceForSecurityGroupIDs(ctx, apiReader, ko); err != nil { + return &resource{ko}, (resourceHasReferences || fieldHasReferences), err + } else { + resourceHasReferences = resourceHasReferences || fieldHasReferences + } + + return &resource{ko}, resourceHasReferences, err } // validateReferenceFields validates the reference field and corresponding // identifier field. func validateReferenceFields(ko *svcapitypes.ReplicationGroup) error { + + if ko.Spec.CacheParameterGroupRef != nil && ko.Spec.CacheParameterGroupName != nil { + return ackerr.ResourceReferenceAndIDNotSupportedFor("CacheParameterGroupName", "CacheParameterGroupRef") + } + + if ko.Spec.CacheSubnetGroupRef != nil && ko.Spec.CacheSubnetGroupName != nil { + return ackerr.ResourceReferenceAndIDNotSupportedFor("CacheSubnetGroupName", "CacheSubnetGroupRef") + } + + if len(ko.Spec.SecurityGroupRefs) > 0 && len(ko.Spec.SecurityGroupIDs) > 0 { + return ackerr.ResourceReferenceAndIDNotSupportedFor("SecurityGroupIDs", "SecurityGroupRefs") + } return nil } -// hasNonNilReferences returns true if resource contains a reference to another -// resource -func hasNonNilReferences(ko *svcapitypes.ReplicationGroup) bool { - return false +// resolveReferenceForCacheParameterGroupName reads the resource referenced +// from CacheParameterGroupRef field and sets the CacheParameterGroupName +// from referenced resource. Returns a boolean indicating whether a reference +// contains references, or an error +func (rm *resourceManager) resolveReferenceForCacheParameterGroupName( + ctx context.Context, + apiReader client.Reader, + ko *svcapitypes.ReplicationGroup, +) (hasReferences bool, err error) { + if ko.Spec.CacheParameterGroupRef != nil && ko.Spec.CacheParameterGroupRef.From != nil { + hasReferences = true + arr := ko.Spec.CacheParameterGroupRef.From + if arr.Name == nil || *arr.Name == "" { + return hasReferences, fmt.Errorf("provided resource reference is nil or empty: CacheParameterGroupRef") + } + namespace := ko.ObjectMeta.GetNamespace() + if arr.Namespace != nil && *arr.Namespace != "" { + namespace = *arr.Namespace + } + obj := &svcapitypes.CacheParameterGroup{} + if err := getReferencedResourceState_CacheParameterGroup(ctx, apiReader, obj, *arr.Name, namespace); err != nil { + return hasReferences, err + } + ko.Spec.CacheParameterGroupName = (*string)(obj.Spec.CacheParameterGroupName) + } + + return hasReferences, nil +} + +// getReferencedResourceState_CacheParameterGroup looks up whether a referenced resource +// exists and is in a ACK.ResourceSynced=True state. 
If the referenced resource does exist and is +// in a Synced state, returns nil, otherwise returns `ackerr.ResourceReferenceTerminalFor` or +// `ResourceReferenceNotSyncedFor` depending on if the resource is in a Terminal state. +func getReferencedResourceState_CacheParameterGroup( + ctx context.Context, + apiReader client.Reader, + obj *svcapitypes.CacheParameterGroup, + name string, // the Kubernetes name of the referenced resource + namespace string, // the Kubernetes namespace of the referenced resource +) error { + namespacedName := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + err := apiReader.Get(ctx, namespacedName, obj) + if err != nil { + return err + } + var refResourceTerminal bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeTerminal && + cond.Status == corev1.ConditionTrue { + return ackerr.ResourceReferenceTerminalFor( + "CacheParameterGroup", + namespace, name) + } + } + if refResourceTerminal { + return ackerr.ResourceReferenceTerminalFor( + "CacheParameterGroup", + namespace, name) + } + var refResourceSynced bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && + cond.Status == corev1.ConditionTrue { + refResourceSynced = true + } + } + if !refResourceSynced { + return ackerr.ResourceReferenceNotSyncedFor( + "CacheParameterGroup", + namespace, name) + } + if obj.Spec.CacheParameterGroupName == nil { + return ackerr.ResourceReferenceMissingTargetFieldFor( + "CacheParameterGroup", + namespace, name, + "Spec.CacheParameterGroupName") + } + return nil +} + +// resolveReferenceForCacheSubnetGroupName reads the resource referenced +// from CacheSubnetGroupRef field and sets the CacheSubnetGroupName +// from referenced resource. Returns a boolean indicating whether a reference +// contains references, or an error +func (rm *resourceManager) resolveReferenceForCacheSubnetGroupName( + ctx context.Context, + apiReader client.Reader, + ko *svcapitypes.ReplicationGroup, +) (hasReferences bool, err error) { + if ko.Spec.CacheSubnetGroupRef != nil && ko.Spec.CacheSubnetGroupRef.From != nil { + hasReferences = true + arr := ko.Spec.CacheSubnetGroupRef.From + if arr.Name == nil || *arr.Name == "" { + return hasReferences, fmt.Errorf("provided resource reference is nil or empty: CacheSubnetGroupRef") + } + namespace := ko.ObjectMeta.GetNamespace() + if arr.Namespace != nil && *arr.Namespace != "" { + namespace = *arr.Namespace + } + obj := &svcapitypes.CacheSubnetGroup{} + if err := getReferencedResourceState_CacheSubnetGroup(ctx, apiReader, obj, *arr.Name, namespace); err != nil { + return hasReferences, err + } + ko.Spec.CacheSubnetGroupName = (*string)(obj.Spec.CacheSubnetGroupName) + } + + return hasReferences, nil +} + +// getReferencedResourceState_CacheSubnetGroup looks up whether a referenced resource +// exists and is in a ACK.ResourceSynced=True state. If the referenced resource does exist and is +// in a Synced state, returns nil, otherwise returns `ackerr.ResourceReferenceTerminalFor` or +// `ResourceReferenceNotSyncedFor` depending on if the resource is in a Terminal state. 
+func getReferencedResourceState_CacheSubnetGroup( + ctx context.Context, + apiReader client.Reader, + obj *svcapitypes.CacheSubnetGroup, + name string, // the Kubernetes name of the referenced resource + namespace string, // the Kubernetes namespace of the referenced resource +) error { + namespacedName := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + err := apiReader.Get(ctx, namespacedName, obj) + if err != nil { + return err + } + var refResourceTerminal bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeTerminal && + cond.Status == corev1.ConditionTrue { + return ackerr.ResourceReferenceTerminalFor( + "CacheSubnetGroup", + namespace, name) + } + } + if refResourceTerminal { + return ackerr.ResourceReferenceTerminalFor( + "CacheSubnetGroup", + namespace, name) + } + var refResourceSynced bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && + cond.Status == corev1.ConditionTrue { + refResourceSynced = true + } + } + if !refResourceSynced { + return ackerr.ResourceReferenceNotSyncedFor( + "CacheSubnetGroup", + namespace, name) + } + if obj.Spec.CacheSubnetGroupName == nil { + return ackerr.ResourceReferenceMissingTargetFieldFor( + "CacheSubnetGroup", + namespace, name, + "Spec.CacheSubnetGroupName") + } + return nil +} + +// resolveReferenceForSecurityGroupIDs reads the resource referenced +// from SecurityGroupRefs field and sets the SecurityGroupIDs +// from referenced resource. Returns a boolean indicating whether a reference +// contains references, or an error +func (rm *resourceManager) resolveReferenceForSecurityGroupIDs( + ctx context.Context, + apiReader client.Reader, + ko *svcapitypes.ReplicationGroup, +) (hasReferences bool, err error) { + for _, f0iter := range ko.Spec.SecurityGroupRefs { + if f0iter != nil && f0iter.From != nil { + hasReferences = true + arr := f0iter.From + if arr.Name == nil || *arr.Name == "" { + return hasReferences, fmt.Errorf("provided resource reference is nil or empty: SecurityGroupRefs") + } + namespace := ko.ObjectMeta.GetNamespace() + if arr.Namespace != nil && *arr.Namespace != "" { + namespace = *arr.Namespace + } + obj := &ec2apitypes.SecurityGroup{} + if err := getReferencedResourceState_SecurityGroup(ctx, apiReader, obj, *arr.Name, namespace); err != nil { + return hasReferences, err + } + if ko.Spec.SecurityGroupIDs == nil { + ko.Spec.SecurityGroupIDs = make([]*string, 0, 1) + } + ko.Spec.SecurityGroupIDs = append(ko.Spec.SecurityGroupIDs, (*string)(obj.Status.ID)) + } + } + + return hasReferences, nil +} + +// getReferencedResourceState_SecurityGroup looks up whether a referenced resource +// exists and is in a ACK.ResourceSynced=True state. If the referenced resource does exist and is +// in a Synced state, returns nil, otherwise returns `ackerr.ResourceReferenceTerminalFor` or +// `ResourceReferenceNotSyncedFor` depending on if the resource is in a Terminal state. 
+func getReferencedResourceState_SecurityGroup( + ctx context.Context, + apiReader client.Reader, + obj *ec2apitypes.SecurityGroup, + name string, // the Kubernetes name of the referenced resource + namespace string, // the Kubernetes namespace of the referenced resource +) error { + namespacedName := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + err := apiReader.Get(ctx, namespacedName, obj) + if err != nil { + return err + } + var refResourceTerminal bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeTerminal && + cond.Status == corev1.ConditionTrue { + return ackerr.ResourceReferenceTerminalFor( + "SecurityGroup", + namespace, name) + } + } + if refResourceTerminal { + return ackerr.ResourceReferenceTerminalFor( + "SecurityGroup", + namespace, name) + } + var refResourceSynced bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && + cond.Status == corev1.ConditionTrue { + refResourceSynced = true + } + } + if !refResourceSynced { + return ackerr.ResourceReferenceNotSyncedFor( + "SecurityGroup", + namespace, name) + } + if obj.Status.ID == nil { + return ackerr.ResourceReferenceMissingTargetFieldFor( + "SecurityGroup", + namespace, name, + "Status.ID") + } + return nil } diff --git a/pkg/resource/replication_group/resource.go b/pkg/resource/replication_group/resource.go index 550cb04f..4f8d44e8 100644 --- a/pkg/resource/replication_group/resource.go +++ b/pkg/resource/replication_group/resource.go @@ -16,6 +16,8 @@ package replication_group import ( + "fmt" + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" ackerrors "github.com/aws-controllers-k8s/runtime/pkg/errors" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" @@ -45,7 +47,7 @@ func (r *resource) Identifiers() acktypes.AWSResourceIdentifiers { } // IsBeingDeleted returns true if the Kubernetes resource has a non-zero -// deletion timestemp +// deletion timestamp func (r *resource) IsBeingDeleted() bool { return !r.ko.DeletionTimestamp.IsZero() } @@ -93,6 +95,17 @@ func (r *resource) SetIdentifiers(identifier *ackv1alpha1.AWSIdentifiers) error return nil } +// PopulateResourceFromAnnotation populates the fields passed from adoption annotation +func (r *resource) PopulateResourceFromAnnotation(fields map[string]string) error { + f2, ok := fields["replicationGroupID"] + if !ok { + return ackerrors.NewTerminalError(fmt.Errorf("required field missing: replicationGroupID")) + } + r.ko.Spec.ReplicationGroupID = &f2 + + return nil +} + // DeepCopy will return a copy of the resource func (r *resource) DeepCopy() acktypes.AWSResource { koCopy := r.ko.DeepCopy() diff --git a/pkg/resource/replication_group/sdk.go b/pkg/resource/replication_group/sdk.go index fc6150a1..7ffeb5a8 100644 --- a/pkg/resource/replication_group/sdk.go +++ b/pkg/resource/replication_group/sdk.go @@ -19,6 +19,7 @@ import ( "context" "errors" "fmt" + "math" "reflect" "strings" @@ -28,8 +29,10 @@ import ( ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" - "github.com/aws/aws-sdk-go/aws" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + svcsdktypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" + smithy "github.com/aws/smithy-go" corev1 "k8s.io/api/core/v1" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" @@ -40,8 +43,7 @@ import ( var ( _ = &metav1.Time{} _ = strings.ToLower("") - _ = &aws.JSONValue{} - _ = &svcsdk.ElastiCache{} + _ = &svcsdk.Client{} _ = &svcapitypes.ReplicationGroup{} _ = ackv1alpha1.AWSAccountID("") _ = &ackerr.NotFound @@ -49,6 +51,7 @@ var ( _ = &reflect.Value{} _ = fmt.Sprintf("") _ = &ackrequeue.NoRequeue{} + _ = &aws.Config{} ) // sdkFind returns SDK-specific information about a supplied resource @@ -73,10 +76,11 @@ func (rm *resourceManager) sdkFind( return nil, err } var resp *svcsdk.DescribeReplicationGroupsOutput - resp, err = rm.sdkapi.DescribeReplicationGroupsWithContext(ctx, input) + resp, err = rm.sdkapi.DescribeReplicationGroups(ctx, input) rm.metrics.RecordAPICall("READ_MANY", "DescribeReplicationGroups", err) if err != nil { - if awsErr, ok := ackerr.AWSError(err); ok && awsErr.Code() == "ReplicationGroupNotFoundFault" { + var awsErr smithy.APIError + if errors.As(err, &awsErr) && awsErr.ErrorCode() == "ReplicationGroupNotFoundFault" { return nil, ackerr.NotFound } return nil, err @@ -115,8 +119,8 @@ func (rm *resourceManager) sdkFind( } else { ko.Status.AutoMinorVersionUpgrade = nil } - if elem.AutomaticFailover != nil { - ko.Status.AutomaticFailover = elem.AutomaticFailover + if elem.AutomaticFailover != "" { + ko.Status.AutomaticFailover = aws.String(string(elem.AutomaticFailover)) } else { ko.Status.AutomaticFailover = nil } @@ -136,14 +140,15 @@ func (rm *resourceManager) sdkFind( f8.Address = elem.ConfigurationEndpoint.Address } if elem.ConfigurationEndpoint.Port != nil { - f8.Port = elem.ConfigurationEndpoint.Port + portCopy := int64(*elem.ConfigurationEndpoint.Port) + f8.Port = &portCopy } ko.Status.ConfigurationEndpoint = f8 } else { ko.Status.ConfigurationEndpoint = nil } - if elem.DataTiering != nil { - ko.Status.DataTiering = elem.DataTiering + if elem.DataTiering != "" { + ko.Status.DataTiering = aws.String(string(elem.DataTiering)) } else { ko.Status.DataTiering = nil } @@ -152,239 +157,233 @@ func (rm *resourceManager) sdkFind( } else { ko.Spec.Description = nil } + if elem.Engine != nil { + ko.Spec.Engine = elem.Engine + } else { + ko.Spec.Engine = nil + } if elem.GlobalReplicationGroupInfo != nil { - f11 := &svcapitypes.GlobalReplicationGroupInfo{} + f12 := &svcapitypes.GlobalReplicationGroupInfo{} if elem.GlobalReplicationGroupInfo.GlobalReplicationGroupId != nil { - f11.GlobalReplicationGroupID = elem.GlobalReplicationGroupInfo.GlobalReplicationGroupId + f12.GlobalReplicationGroupID = elem.GlobalReplicationGroupInfo.GlobalReplicationGroupId } if elem.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole != nil { - f11.GlobalReplicationGroupMemberRole = elem.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole + f12.GlobalReplicationGroupMemberRole = elem.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole } - ko.Status.GlobalReplicationGroupInfo = f11 + ko.Status.GlobalReplicationGroupInfo = f12 } else { ko.Status.GlobalReplicationGroupInfo = nil } + if elem.IpDiscovery != "" { + ko.Spec.IPDiscovery = aws.String(string(elem.IpDiscovery)) + } else { + ko.Spec.IPDiscovery = nil + } if elem.KmsKeyId != nil { ko.Spec.KMSKeyID = elem.KmsKeyId } else { ko.Spec.KMSKeyID = nil } if elem.LogDeliveryConfigurations != nil { - f13 := []*svcapitypes.LogDeliveryConfigurationRequest{} - for _, f13iter := range elem.LogDeliveryConfigurations { - f13elem := &svcapitypes.LogDeliveryConfigurationRequest{} - if f13iter.DestinationDetails != nil { - f13elemf0 := &svcapitypes.DestinationDetails{} - 
if f13iter.DestinationDetails.CloudWatchLogsDetails != nil { - f13elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} - if f13iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f13elemf0f0.LogGroup = f13iter.DestinationDetails.CloudWatchLogsDetails.LogGroup + f15 := []*svcapitypes.LogDeliveryConfigurationRequest{} + for _, f15iter := range elem.LogDeliveryConfigurations { + f15elem := &svcapitypes.LogDeliveryConfigurationRequest{} + if f15iter.DestinationDetails != nil { + f15elemf0 := &svcapitypes.DestinationDetails{} + if f15iter.DestinationDetails.CloudWatchLogsDetails != nil { + f15elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} + if f15iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f15elemf0f0.LogGroup = f15iter.DestinationDetails.CloudWatchLogsDetails.LogGroup } - f13elemf0.CloudWatchLogsDetails = f13elemf0f0 + f15elemf0.CloudWatchLogsDetails = f15elemf0f0 } - if f13iter.DestinationDetails.KinesisFirehoseDetails != nil { - f13elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} - if f13iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - f13elemf0f1.DeliveryStream = f13iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream + if f15iter.DestinationDetails.KinesisFirehoseDetails != nil { + f15elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} + if f15iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f15elemf0f1.DeliveryStream = f15iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream } - f13elemf0.KinesisFirehoseDetails = f13elemf0f1 + f15elemf0.KinesisFirehoseDetails = f15elemf0f1 } - f13elem.DestinationDetails = f13elemf0 + f15elem.DestinationDetails = f15elemf0 } - if f13iter.DestinationType != nil { - f13elem.DestinationType = f13iter.DestinationType + if f15iter.DestinationType != "" { + f15elem.DestinationType = aws.String(string(f15iter.DestinationType)) } - if f13iter.LogFormat != nil { - f13elem.LogFormat = f13iter.LogFormat + if f15iter.LogFormat != "" { + f15elem.LogFormat = aws.String(string(f15iter.LogFormat)) } - if f13iter.LogType != nil { - f13elem.LogType = f13iter.LogType + if f15iter.LogType != "" { + f15elem.LogType = aws.String(string(f15iter.LogType)) } - f13 = append(f13, f13elem) + f15 = append(f15, f15elem) } - ko.Spec.LogDeliveryConfigurations = f13 + ko.Spec.LogDeliveryConfigurations = f15 } else { ko.Spec.LogDeliveryConfigurations = nil } if elem.MemberClusters != nil { - f14 := []*string{} - for _, f14iter := range elem.MemberClusters { - var f14elem string - f14elem = *f14iter - f14 = append(f14, &f14elem) - } - ko.Status.MemberClusters = f14 + ko.Status.MemberClusters = aws.StringSlice(elem.MemberClusters) } else { ko.Status.MemberClusters = nil } if elem.MemberClustersOutpostArns != nil { - f15 := []*string{} - for _, f15iter := range elem.MemberClustersOutpostArns { - var f15elem string - f15elem = *f15iter - f15 = append(f15, &f15elem) - } - ko.Status.MemberClustersOutpostARNs = f15 + ko.Status.MemberClustersOutpostARNs = aws.StringSlice(elem.MemberClustersOutpostArns) } else { ko.Status.MemberClustersOutpostARNs = nil } - if elem.MultiAZ != nil { - ko.Status.MultiAZ = elem.MultiAZ + if elem.MultiAZ != "" { + ko.Status.MultiAZ = aws.String(string(elem.MultiAZ)) } else { ko.Status.MultiAZ = nil } + if elem.NetworkType != "" { + ko.Spec.NetworkType = aws.String(string(elem.NetworkType)) + } else { + ko.Spec.NetworkType = nil + } if elem.NodeGroups != nil { - f17 := []*svcapitypes.NodeGroup{} - for _, f17iter := range 
elem.NodeGroups { - f17elem := &svcapitypes.NodeGroup{} - if f17iter.NodeGroupId != nil { - f17elem.NodeGroupID = f17iter.NodeGroupId - } - if f17iter.NodeGroupMembers != nil { - f17elemf1 := []*svcapitypes.NodeGroupMember{} - for _, f17elemf1iter := range f17iter.NodeGroupMembers { - f17elemf1elem := &svcapitypes.NodeGroupMember{} - if f17elemf1iter.CacheClusterId != nil { - f17elemf1elem.CacheClusterID = f17elemf1iter.CacheClusterId + f20 := []*svcapitypes.NodeGroup{} + for _, f20iter := range elem.NodeGroups { + f20elem := &svcapitypes.NodeGroup{} + if f20iter.NodeGroupId != nil { + f20elem.NodeGroupID = f20iter.NodeGroupId + } + if f20iter.NodeGroupMembers != nil { + f20elemf1 := []*svcapitypes.NodeGroupMember{} + for _, f20elemf1iter := range f20iter.NodeGroupMembers { + f20elemf1elem := &svcapitypes.NodeGroupMember{} + if f20elemf1iter.CacheClusterId != nil { + f20elemf1elem.CacheClusterID = f20elemf1iter.CacheClusterId } - if f17elemf1iter.CacheNodeId != nil { - f17elemf1elem.CacheNodeID = f17elemf1iter.CacheNodeId + if f20elemf1iter.CacheNodeId != nil { + f20elemf1elem.CacheNodeID = f20elemf1iter.CacheNodeId } - if f17elemf1iter.CurrentRole != nil { - f17elemf1elem.CurrentRole = f17elemf1iter.CurrentRole + if f20elemf1iter.CurrentRole != nil { + f20elemf1elem.CurrentRole = f20elemf1iter.CurrentRole } - if f17elemf1iter.PreferredAvailabilityZone != nil { - f17elemf1elem.PreferredAvailabilityZone = f17elemf1iter.PreferredAvailabilityZone + if f20elemf1iter.PreferredAvailabilityZone != nil { + f20elemf1elem.PreferredAvailabilityZone = f20elemf1iter.PreferredAvailabilityZone } - if f17elemf1iter.PreferredOutpostArn != nil { - f17elemf1elem.PreferredOutpostARN = f17elemf1iter.PreferredOutpostArn + if f20elemf1iter.PreferredOutpostArn != nil { + f20elemf1elem.PreferredOutpostARN = f20elemf1iter.PreferredOutpostArn } - if f17elemf1iter.ReadEndpoint != nil { - f17elemf1elemf5 := &svcapitypes.Endpoint{} - if f17elemf1iter.ReadEndpoint.Address != nil { - f17elemf1elemf5.Address = f17elemf1iter.ReadEndpoint.Address + if f20elemf1iter.ReadEndpoint != nil { + f20elemf1elemf5 := &svcapitypes.Endpoint{} + if f20elemf1iter.ReadEndpoint.Address != nil { + f20elemf1elemf5.Address = f20elemf1iter.ReadEndpoint.Address } - if f17elemf1iter.ReadEndpoint.Port != nil { - f17elemf1elemf5.Port = f17elemf1iter.ReadEndpoint.Port + if f20elemf1iter.ReadEndpoint.Port != nil { + portCopy := int64(*f20elemf1iter.ReadEndpoint.Port) + f20elemf1elemf5.Port = &portCopy } - f17elemf1elem.ReadEndpoint = f17elemf1elemf5 + f20elemf1elem.ReadEndpoint = f20elemf1elemf5 } - f17elemf1 = append(f17elemf1, f17elemf1elem) + f20elemf1 = append(f20elemf1, f20elemf1elem) } - f17elem.NodeGroupMembers = f17elemf1 + f20elem.NodeGroupMembers = f20elemf1 } - if f17iter.PrimaryEndpoint != nil { - f17elemf2 := &svcapitypes.Endpoint{} - if f17iter.PrimaryEndpoint.Address != nil { - f17elemf2.Address = f17iter.PrimaryEndpoint.Address + if f20iter.PrimaryEndpoint != nil { + f20elemf2 := &svcapitypes.Endpoint{} + if f20iter.PrimaryEndpoint.Address != nil { + f20elemf2.Address = f20iter.PrimaryEndpoint.Address } - if f17iter.PrimaryEndpoint.Port != nil { - f17elemf2.Port = f17iter.PrimaryEndpoint.Port + if f20iter.PrimaryEndpoint.Port != nil { + portCopy := int64(*f20iter.PrimaryEndpoint.Port) + f20elemf2.Port = &portCopy } - f17elem.PrimaryEndpoint = f17elemf2 + f20elem.PrimaryEndpoint = f20elemf2 } - if f17iter.ReaderEndpoint != nil { - f17elemf3 := &svcapitypes.Endpoint{} - if f17iter.ReaderEndpoint.Address != nil { - f17elemf3.Address = 
f17iter.ReaderEndpoint.Address + if f20iter.ReaderEndpoint != nil { + f20elemf3 := &svcapitypes.Endpoint{} + if f20iter.ReaderEndpoint.Address != nil { + f20elemf3.Address = f20iter.ReaderEndpoint.Address } - if f17iter.ReaderEndpoint.Port != nil { - f17elemf3.Port = f17iter.ReaderEndpoint.Port + if f20iter.ReaderEndpoint.Port != nil { + portCopy := int64(*f20iter.ReaderEndpoint.Port) + f20elemf3.Port = &portCopy } - f17elem.ReaderEndpoint = f17elemf3 + f20elem.ReaderEndpoint = f20elemf3 } - if f17iter.Slots != nil { - f17elem.Slots = f17iter.Slots + if f20iter.Slots != nil { + f20elem.Slots = f20iter.Slots } - if f17iter.Status != nil { - f17elem.Status = f17iter.Status + if f20iter.Status != nil { + f20elem.Status = f20iter.Status } - f17 = append(f17, f17elem) + f20 = append(f20, f20elem) } - ko.Status.NodeGroups = f17 + ko.Status.NodeGroups = f20 } else { ko.Status.NodeGroups = nil } if elem.PendingModifiedValues != nil { - f18 := &svcapitypes.ReplicationGroupPendingModifiedValues{} - if elem.PendingModifiedValues.AuthTokenStatus != nil { - f18.AuthTokenStatus = elem.PendingModifiedValues.AuthTokenStatus + f21 := &svcapitypes.ReplicationGroupPendingModifiedValues{} + if elem.PendingModifiedValues.AuthTokenStatus != "" { + f21.AuthTokenStatus = aws.String(string(elem.PendingModifiedValues.AuthTokenStatus)) } - if elem.PendingModifiedValues.AutomaticFailoverStatus != nil { - f18.AutomaticFailoverStatus = elem.PendingModifiedValues.AutomaticFailoverStatus + if elem.PendingModifiedValues.AutomaticFailoverStatus != "" { + f21.AutomaticFailoverStatus = aws.String(string(elem.PendingModifiedValues.AutomaticFailoverStatus)) } if elem.PendingModifiedValues.LogDeliveryConfigurations != nil { - f18f2 := []*svcapitypes.PendingLogDeliveryConfiguration{} - for _, f18f2iter := range elem.PendingModifiedValues.LogDeliveryConfigurations { - f18f2elem := &svcapitypes.PendingLogDeliveryConfiguration{} - if f18f2iter.DestinationDetails != nil { - f18f2elemf0 := &svcapitypes.DestinationDetails{} - if f18f2iter.DestinationDetails.CloudWatchLogsDetails != nil { - f18f2elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} - if f18f2iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f18f2elemf0f0.LogGroup = f18f2iter.DestinationDetails.CloudWatchLogsDetails.LogGroup + f21f2 := []*svcapitypes.PendingLogDeliveryConfiguration{} + for _, f21f2iter := range elem.PendingModifiedValues.LogDeliveryConfigurations { + f21f2elem := &svcapitypes.PendingLogDeliveryConfiguration{} + if f21f2iter.DestinationDetails != nil { + f21f2elemf0 := &svcapitypes.DestinationDetails{} + if f21f2iter.DestinationDetails.CloudWatchLogsDetails != nil { + f21f2elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} + if f21f2iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f21f2elemf0f0.LogGroup = f21f2iter.DestinationDetails.CloudWatchLogsDetails.LogGroup } - f18f2elemf0.CloudWatchLogsDetails = f18f2elemf0f0 + f21f2elemf0.CloudWatchLogsDetails = f21f2elemf0f0 } - if f18f2iter.DestinationDetails.KinesisFirehoseDetails != nil { - f18f2elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} - if f18f2iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - f18f2elemf0f1.DeliveryStream = f18f2iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream + if f21f2iter.DestinationDetails.KinesisFirehoseDetails != nil { + f21f2elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} + if f21f2iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + 
f21f2elemf0f1.DeliveryStream = f21f2iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream } - f18f2elemf0.KinesisFirehoseDetails = f18f2elemf0f1 + f21f2elemf0.KinesisFirehoseDetails = f21f2elemf0f1 } - f18f2elem.DestinationDetails = f18f2elemf0 + f21f2elem.DestinationDetails = f21f2elemf0 } - if f18f2iter.DestinationType != nil { - f18f2elem.DestinationType = f18f2iter.DestinationType + if f21f2iter.DestinationType != "" { + f21f2elem.DestinationType = aws.String(string(f21f2iter.DestinationType)) } - if f18f2iter.LogFormat != nil { - f18f2elem.LogFormat = f18f2iter.LogFormat + if f21f2iter.LogFormat != "" { + f21f2elem.LogFormat = aws.String(string(f21f2iter.LogFormat)) } - if f18f2iter.LogType != nil { - f18f2elem.LogType = f18f2iter.LogType + if f21f2iter.LogType != "" { + f21f2elem.LogType = aws.String(string(f21f2iter.LogType)) } - f18f2 = append(f18f2, f18f2elem) + f21f2 = append(f21f2, f21f2elem) } - f18.LogDeliveryConfigurations = f18f2 + f21.LogDeliveryConfigurations = f21f2 } if elem.PendingModifiedValues.PrimaryClusterId != nil { - f18.PrimaryClusterID = elem.PendingModifiedValues.PrimaryClusterId + f21.PrimaryClusterID = elem.PendingModifiedValues.PrimaryClusterId } if elem.PendingModifiedValues.Resharding != nil { - f18f4 := &svcapitypes.ReshardingStatus{} + f21f4 := &svcapitypes.ReshardingStatus{} if elem.PendingModifiedValues.Resharding.SlotMigration != nil { - f18f4f0 := &svcapitypes.SlotMigration{} + f21f4f0 := &svcapitypes.SlotMigration{} if elem.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage != nil { - f18f4f0.ProgressPercentage = elem.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage + f21f4f0.ProgressPercentage = elem.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage } - f18f4.SlotMigration = f18f4f0 + f21f4.SlotMigration = f21f4f0 } - f18.Resharding = f18f4 + f21.Resharding = f21f4 } if elem.PendingModifiedValues.UserGroups != nil { - f18f5 := &svcapitypes.UserGroupsUpdateStatus{} + f21f5 := &svcapitypes.UserGroupsUpdateStatus{} if elem.PendingModifiedValues.UserGroups.UserGroupIdsToAdd != nil { - f18f5f0 := []*string{} - for _, f18f5f0iter := range elem.PendingModifiedValues.UserGroups.UserGroupIdsToAdd { - var f18f5f0elem string - f18f5f0elem = *f18f5f0iter - f18f5f0 = append(f18f5f0, &f18f5f0elem) - } - f18f5.UserGroupIDsToAdd = f18f5f0 + f21f5.UserGroupIDsToAdd = aws.StringSlice(elem.PendingModifiedValues.UserGroups.UserGroupIdsToAdd) } if elem.PendingModifiedValues.UserGroups.UserGroupIdsToRemove != nil { - f18f5f1 := []*string{} - for _, f18f5f1iter := range elem.PendingModifiedValues.UserGroups.UserGroupIdsToRemove { - var f18f5f1elem string - f18f5f1elem = *f18f5f1iter - f18f5f1 = append(f18f5f1, &f18f5f1elem) - } - f18f5.UserGroupIDsToRemove = f18f5f1 + f21f5.UserGroupIDsToRemove = aws.StringSlice(elem.PendingModifiedValues.UserGroups.UserGroupIdsToRemove) } - f18.UserGroups = f18f5 + f21.UserGroups = f21f5 } - ko.Status.PendingModifiedValues = f18 + ko.Status.PendingModifiedValues = f21 } else { ko.Status.PendingModifiedValues = nil } @@ -399,7 +398,8 @@ func (rm *resourceManager) sdkFind( ko.Spec.ReplicationGroupID = nil } if elem.SnapshotRetentionLimit != nil { - ko.Spec.SnapshotRetentionLimit = elem.SnapshotRetentionLimit + snapshotRetentionLimitCopy := int64(*elem.SnapshotRetentionLimit) + ko.Spec.SnapshotRetentionLimit = &snapshotRetentionLimitCopy } else { ko.Spec.SnapshotRetentionLimit = nil } @@ -424,13 +424,7 @@ func (rm *resourceManager) sdkFind( ko.Spec.TransitEncryptionEnabled = nil } if 
elem.UserGroupIds != nil { - f26 := []*string{} - for _, f26iter := range elem.UserGroupIds { - var f26elem string - f26elem = *f26iter - f26 = append(f26, &f26elem) - } - ko.Spec.UserGroupIDs = f26 + ko.Spec.UserGroupIDs = aws.StringSlice(elem.UserGroupIds) } else { ko.Spec.UserGroupIDs = nil } @@ -447,6 +441,7 @@ func (rm *resourceManager) sdkFind( if err != nil { return nil, err } + rm.updateSpecFields(ctx, resp.ReplicationGroups[0], &resource{ko}) if isDeleting(r) { // Setting resource synced condition to false will trigger a requeue of @@ -470,6 +465,19 @@ func (rm *resourceManager) sdkFind( ) return &resource{ko}, nil } + + if isCreating(r) { + // Setting resource synced condition to false will trigger a requeue of + // the resource. No need to return a requeue error here. + ackcondition.SetSynced( + &resource{ko}, + corev1.ConditionFalse, + &condMsgCurrentlyCreating, + nil, + ) + return &resource{ko}, nil + } + if isCreateFailed(r) { // This is a terminal state and by setting a Terminal condition on the // resource, we will prevent it from being requeued. @@ -482,6 +490,15 @@ func (rm *resourceManager) sdkFind( return &resource{ko}, nil } + if ko.Status.ACKResourceMetadata != nil && ko.Status.ACKResourceMetadata.ARN != nil { + resourceARN := (*string)(ko.Status.ACKResourceMetadata.ARN) + tags, err := rm.getTags(ctx, *resourceARN) + if err != nil { + return nil, err + } + ko.Spec.Tags = tags + } + return &resource{ko}, nil } @@ -503,7 +520,7 @@ func (rm *resourceManager) newListRequestPayload( res := &svcsdk.DescribeReplicationGroupsInput{} if r.ko.Spec.ReplicationGroupID != nil { - res.SetReplicationGroupId(*r.ko.Spec.ReplicationGroupID) + res.ReplicationGroupId = r.ko.Spec.ReplicationGroupID } return res, nil @@ -528,7 +545,7 @@ func (rm *resourceManager) sdkCreate( var resp *svcsdk.CreateReplicationGroupOutput _ = resp - resp, err = rm.sdkapi.CreateReplicationGroupWithContext(ctx, input) + resp, err = rm.sdkapi.CreateReplicationGroup(ctx, input) rm.metrics.RecordAPICall("CREATE", "CreateReplicationGroup", err) if err != nil { return nil, err @@ -564,8 +581,8 @@ func (rm *resourceManager) sdkCreate( } else { ko.Status.AutoMinorVersionUpgrade = nil } - if resp.ReplicationGroup.AutomaticFailover != nil { - ko.Status.AutomaticFailover = resp.ReplicationGroup.AutomaticFailover + if resp.ReplicationGroup.AutomaticFailover != "" { + ko.Status.AutomaticFailover = aws.String(string(resp.ReplicationGroup.AutomaticFailover)) } else { ko.Status.AutomaticFailover = nil } @@ -585,14 +602,15 @@ func (rm *resourceManager) sdkCreate( f8.Address = resp.ReplicationGroup.ConfigurationEndpoint.Address } if resp.ReplicationGroup.ConfigurationEndpoint.Port != nil { - f8.Port = resp.ReplicationGroup.ConfigurationEndpoint.Port + portCopy := int64(*resp.ReplicationGroup.ConfigurationEndpoint.Port) + f8.Port = &portCopy } ko.Status.ConfigurationEndpoint = f8 } else { ko.Status.ConfigurationEndpoint = nil } - if resp.ReplicationGroup.DataTiering != nil { - ko.Status.DataTiering = resp.ReplicationGroup.DataTiering + if resp.ReplicationGroup.DataTiering != "" { + ko.Status.DataTiering = aws.String(string(resp.ReplicationGroup.DataTiering)) } else { ko.Status.DataTiering = nil } @@ -601,239 +619,233 @@ func (rm *resourceManager) sdkCreate( } else { ko.Spec.Description = nil } + if resp.ReplicationGroup.Engine != nil { + ko.Spec.Engine = resp.ReplicationGroup.Engine + } else { + ko.Spec.Engine = nil + } if resp.ReplicationGroup.GlobalReplicationGroupInfo != nil { - f11 := 
&svcapitypes.GlobalReplicationGroupInfo{} + f12 := &svcapitypes.GlobalReplicationGroupInfo{} if resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupId != nil { - f11.GlobalReplicationGroupID = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupId + f12.GlobalReplicationGroupID = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupId } if resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole != nil { - f11.GlobalReplicationGroupMemberRole = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole + f12.GlobalReplicationGroupMemberRole = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole } - ko.Status.GlobalReplicationGroupInfo = f11 + ko.Status.GlobalReplicationGroupInfo = f12 } else { ko.Status.GlobalReplicationGroupInfo = nil } + if resp.ReplicationGroup.IpDiscovery != "" { + ko.Spec.IPDiscovery = aws.String(string(resp.ReplicationGroup.IpDiscovery)) + } else { + ko.Spec.IPDiscovery = nil + } if resp.ReplicationGroup.KmsKeyId != nil { ko.Spec.KMSKeyID = resp.ReplicationGroup.KmsKeyId } else { ko.Spec.KMSKeyID = nil } if resp.ReplicationGroup.LogDeliveryConfigurations != nil { - f13 := []*svcapitypes.LogDeliveryConfigurationRequest{} - for _, f13iter := range resp.ReplicationGroup.LogDeliveryConfigurations { - f13elem := &svcapitypes.LogDeliveryConfigurationRequest{} - if f13iter.DestinationDetails != nil { - f13elemf0 := &svcapitypes.DestinationDetails{} - if f13iter.DestinationDetails.CloudWatchLogsDetails != nil { - f13elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} - if f13iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f13elemf0f0.LogGroup = f13iter.DestinationDetails.CloudWatchLogsDetails.LogGroup + f15 := []*svcapitypes.LogDeliveryConfigurationRequest{} + for _, f15iter := range resp.ReplicationGroup.LogDeliveryConfigurations { + f15elem := &svcapitypes.LogDeliveryConfigurationRequest{} + if f15iter.DestinationDetails != nil { + f15elemf0 := &svcapitypes.DestinationDetails{} + if f15iter.DestinationDetails.CloudWatchLogsDetails != nil { + f15elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} + if f15iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f15elemf0f0.LogGroup = f15iter.DestinationDetails.CloudWatchLogsDetails.LogGroup } - f13elemf0.CloudWatchLogsDetails = f13elemf0f0 + f15elemf0.CloudWatchLogsDetails = f15elemf0f0 } - if f13iter.DestinationDetails.KinesisFirehoseDetails != nil { - f13elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} - if f13iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - f13elemf0f1.DeliveryStream = f13iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream + if f15iter.DestinationDetails.KinesisFirehoseDetails != nil { + f15elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} + if f15iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f15elemf0f1.DeliveryStream = f15iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream } - f13elemf0.KinesisFirehoseDetails = f13elemf0f1 + f15elemf0.KinesisFirehoseDetails = f15elemf0f1 } - f13elem.DestinationDetails = f13elemf0 + f15elem.DestinationDetails = f15elemf0 } - if f13iter.DestinationType != nil { - f13elem.DestinationType = f13iter.DestinationType + if f15iter.DestinationType != "" { + f15elem.DestinationType = aws.String(string(f15iter.DestinationType)) } - if f13iter.LogFormat != nil { - f13elem.LogFormat = f13iter.LogFormat + if 
f15iter.LogFormat != "" { + f15elem.LogFormat = aws.String(string(f15iter.LogFormat)) } - if f13iter.LogType != nil { - f13elem.LogType = f13iter.LogType + if f15iter.LogType != "" { + f15elem.LogType = aws.String(string(f15iter.LogType)) } - f13 = append(f13, f13elem) + f15 = append(f15, f15elem) } - ko.Spec.LogDeliveryConfigurations = f13 + ko.Spec.LogDeliveryConfigurations = f15 } else { ko.Spec.LogDeliveryConfigurations = nil } if resp.ReplicationGroup.MemberClusters != nil { - f14 := []*string{} - for _, f14iter := range resp.ReplicationGroup.MemberClusters { - var f14elem string - f14elem = *f14iter - f14 = append(f14, &f14elem) - } - ko.Status.MemberClusters = f14 + ko.Status.MemberClusters = aws.StringSlice(resp.ReplicationGroup.MemberClusters) } else { ko.Status.MemberClusters = nil } if resp.ReplicationGroup.MemberClustersOutpostArns != nil { - f15 := []*string{} - for _, f15iter := range resp.ReplicationGroup.MemberClustersOutpostArns { - var f15elem string - f15elem = *f15iter - f15 = append(f15, &f15elem) - } - ko.Status.MemberClustersOutpostARNs = f15 + ko.Status.MemberClustersOutpostARNs = aws.StringSlice(resp.ReplicationGroup.MemberClustersOutpostArns) } else { ko.Status.MemberClustersOutpostARNs = nil } - if resp.ReplicationGroup.MultiAZ != nil { - ko.Status.MultiAZ = resp.ReplicationGroup.MultiAZ + if resp.ReplicationGroup.MultiAZ != "" { + ko.Status.MultiAZ = aws.String(string(resp.ReplicationGroup.MultiAZ)) } else { ko.Status.MultiAZ = nil } + if resp.ReplicationGroup.NetworkType != "" { + ko.Spec.NetworkType = aws.String(string(resp.ReplicationGroup.NetworkType)) + } else { + ko.Spec.NetworkType = nil + } if resp.ReplicationGroup.NodeGroups != nil { - f17 := []*svcapitypes.NodeGroup{} - for _, f17iter := range resp.ReplicationGroup.NodeGroups { - f17elem := &svcapitypes.NodeGroup{} - if f17iter.NodeGroupId != nil { - f17elem.NodeGroupID = f17iter.NodeGroupId - } - if f17iter.NodeGroupMembers != nil { - f17elemf1 := []*svcapitypes.NodeGroupMember{} - for _, f17elemf1iter := range f17iter.NodeGroupMembers { - f17elemf1elem := &svcapitypes.NodeGroupMember{} - if f17elemf1iter.CacheClusterId != nil { - f17elemf1elem.CacheClusterID = f17elemf1iter.CacheClusterId + f20 := []*svcapitypes.NodeGroup{} + for _, f20iter := range resp.ReplicationGroup.NodeGroups { + f20elem := &svcapitypes.NodeGroup{} + if f20iter.NodeGroupId != nil { + f20elem.NodeGroupID = f20iter.NodeGroupId + } + if f20iter.NodeGroupMembers != nil { + f20elemf1 := []*svcapitypes.NodeGroupMember{} + for _, f20elemf1iter := range f20iter.NodeGroupMembers { + f20elemf1elem := &svcapitypes.NodeGroupMember{} + if f20elemf1iter.CacheClusterId != nil { + f20elemf1elem.CacheClusterID = f20elemf1iter.CacheClusterId } - if f17elemf1iter.CacheNodeId != nil { - f17elemf1elem.CacheNodeID = f17elemf1iter.CacheNodeId + if f20elemf1iter.CacheNodeId != nil { + f20elemf1elem.CacheNodeID = f20elemf1iter.CacheNodeId } - if f17elemf1iter.CurrentRole != nil { - f17elemf1elem.CurrentRole = f17elemf1iter.CurrentRole + if f20elemf1iter.CurrentRole != nil { + f20elemf1elem.CurrentRole = f20elemf1iter.CurrentRole } - if f17elemf1iter.PreferredAvailabilityZone != nil { - f17elemf1elem.PreferredAvailabilityZone = f17elemf1iter.PreferredAvailabilityZone + if f20elemf1iter.PreferredAvailabilityZone != nil { + f20elemf1elem.PreferredAvailabilityZone = f20elemf1iter.PreferredAvailabilityZone } - if f17elemf1iter.PreferredOutpostArn != nil { - f17elemf1elem.PreferredOutpostARN = f17elemf1iter.PreferredOutpostArn + if 
f20elemf1iter.PreferredOutpostArn != nil { + f20elemf1elem.PreferredOutpostARN = f20elemf1iter.PreferredOutpostArn } - if f17elemf1iter.ReadEndpoint != nil { - f17elemf1elemf5 := &svcapitypes.Endpoint{} - if f17elemf1iter.ReadEndpoint.Address != nil { - f17elemf1elemf5.Address = f17elemf1iter.ReadEndpoint.Address + if f20elemf1iter.ReadEndpoint != nil { + f20elemf1elemf5 := &svcapitypes.Endpoint{} + if f20elemf1iter.ReadEndpoint.Address != nil { + f20elemf1elemf5.Address = f20elemf1iter.ReadEndpoint.Address } - if f17elemf1iter.ReadEndpoint.Port != nil { - f17elemf1elemf5.Port = f17elemf1iter.ReadEndpoint.Port + if f20elemf1iter.ReadEndpoint.Port != nil { + portCopy := int64(*f20elemf1iter.ReadEndpoint.Port) + f20elemf1elemf5.Port = &portCopy } - f17elemf1elem.ReadEndpoint = f17elemf1elemf5 + f20elemf1elem.ReadEndpoint = f20elemf1elemf5 } - f17elemf1 = append(f17elemf1, f17elemf1elem) + f20elemf1 = append(f20elemf1, f20elemf1elem) } - f17elem.NodeGroupMembers = f17elemf1 + f20elem.NodeGroupMembers = f20elemf1 } - if f17iter.PrimaryEndpoint != nil { - f17elemf2 := &svcapitypes.Endpoint{} - if f17iter.PrimaryEndpoint.Address != nil { - f17elemf2.Address = f17iter.PrimaryEndpoint.Address + if f20iter.PrimaryEndpoint != nil { + f20elemf2 := &svcapitypes.Endpoint{} + if f20iter.PrimaryEndpoint.Address != nil { + f20elemf2.Address = f20iter.PrimaryEndpoint.Address } - if f17iter.PrimaryEndpoint.Port != nil { - f17elemf2.Port = f17iter.PrimaryEndpoint.Port + if f20iter.PrimaryEndpoint.Port != nil { + portCopy := int64(*f20iter.PrimaryEndpoint.Port) + f20elemf2.Port = &portCopy } - f17elem.PrimaryEndpoint = f17elemf2 + f20elem.PrimaryEndpoint = f20elemf2 } - if f17iter.ReaderEndpoint != nil { - f17elemf3 := &svcapitypes.Endpoint{} - if f17iter.ReaderEndpoint.Address != nil { - f17elemf3.Address = f17iter.ReaderEndpoint.Address + if f20iter.ReaderEndpoint != nil { + f20elemf3 := &svcapitypes.Endpoint{} + if f20iter.ReaderEndpoint.Address != nil { + f20elemf3.Address = f20iter.ReaderEndpoint.Address } - if f17iter.ReaderEndpoint.Port != nil { - f17elemf3.Port = f17iter.ReaderEndpoint.Port + if f20iter.ReaderEndpoint.Port != nil { + portCopy := int64(*f20iter.ReaderEndpoint.Port) + f20elemf3.Port = &portCopy } - f17elem.ReaderEndpoint = f17elemf3 + f20elem.ReaderEndpoint = f20elemf3 } - if f17iter.Slots != nil { - f17elem.Slots = f17iter.Slots + if f20iter.Slots != nil { + f20elem.Slots = f20iter.Slots } - if f17iter.Status != nil { - f17elem.Status = f17iter.Status + if f20iter.Status != nil { + f20elem.Status = f20iter.Status } - f17 = append(f17, f17elem) + f20 = append(f20, f20elem) } - ko.Status.NodeGroups = f17 + ko.Status.NodeGroups = f20 } else { ko.Status.NodeGroups = nil } if resp.ReplicationGroup.PendingModifiedValues != nil { - f18 := &svcapitypes.ReplicationGroupPendingModifiedValues{} - if resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus != nil { - f18.AuthTokenStatus = resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus + f21 := &svcapitypes.ReplicationGroupPendingModifiedValues{} + if resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus != "" { + f21.AuthTokenStatus = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus)) } - if resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus != nil { - f18.AutomaticFailoverStatus = resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus + if resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus != "" { + f21.AutomaticFailoverStatus = 
aws.String(string(resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus)) } if resp.ReplicationGroup.PendingModifiedValues.LogDeliveryConfigurations != nil { - f18f2 := []*svcapitypes.PendingLogDeliveryConfiguration{} - for _, f18f2iter := range resp.ReplicationGroup.PendingModifiedValues.LogDeliveryConfigurations { - f18f2elem := &svcapitypes.PendingLogDeliveryConfiguration{} - if f18f2iter.DestinationDetails != nil { - f18f2elemf0 := &svcapitypes.DestinationDetails{} - if f18f2iter.DestinationDetails.CloudWatchLogsDetails != nil { - f18f2elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} - if f18f2iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f18f2elemf0f0.LogGroup = f18f2iter.DestinationDetails.CloudWatchLogsDetails.LogGroup + f21f2 := []*svcapitypes.PendingLogDeliveryConfiguration{} + for _, f21f2iter := range resp.ReplicationGroup.PendingModifiedValues.LogDeliveryConfigurations { + f21f2elem := &svcapitypes.PendingLogDeliveryConfiguration{} + if f21f2iter.DestinationDetails != nil { + f21f2elemf0 := &svcapitypes.DestinationDetails{} + if f21f2iter.DestinationDetails.CloudWatchLogsDetails != nil { + f21f2elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} + if f21f2iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f21f2elemf0f0.LogGroup = f21f2iter.DestinationDetails.CloudWatchLogsDetails.LogGroup } - f18f2elemf0.CloudWatchLogsDetails = f18f2elemf0f0 + f21f2elemf0.CloudWatchLogsDetails = f21f2elemf0f0 } - if f18f2iter.DestinationDetails.KinesisFirehoseDetails != nil { - f18f2elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} - if f18f2iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - f18f2elemf0f1.DeliveryStream = f18f2iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream + if f21f2iter.DestinationDetails.KinesisFirehoseDetails != nil { + f21f2elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} + if f21f2iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f21f2elemf0f1.DeliveryStream = f21f2iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream } - f18f2elemf0.KinesisFirehoseDetails = f18f2elemf0f1 + f21f2elemf0.KinesisFirehoseDetails = f21f2elemf0f1 } - f18f2elem.DestinationDetails = f18f2elemf0 + f21f2elem.DestinationDetails = f21f2elemf0 } - if f18f2iter.DestinationType != nil { - f18f2elem.DestinationType = f18f2iter.DestinationType + if f21f2iter.DestinationType != "" { + f21f2elem.DestinationType = aws.String(string(f21f2iter.DestinationType)) } - if f18f2iter.LogFormat != nil { - f18f2elem.LogFormat = f18f2iter.LogFormat + if f21f2iter.LogFormat != "" { + f21f2elem.LogFormat = aws.String(string(f21f2iter.LogFormat)) } - if f18f2iter.LogType != nil { - f18f2elem.LogType = f18f2iter.LogType + if f21f2iter.LogType != "" { + f21f2elem.LogType = aws.String(string(f21f2iter.LogType)) } - f18f2 = append(f18f2, f18f2elem) + f21f2 = append(f21f2, f21f2elem) } - f18.LogDeliveryConfigurations = f18f2 + f21.LogDeliveryConfigurations = f21f2 } if resp.ReplicationGroup.PendingModifiedValues.PrimaryClusterId != nil { - f18.PrimaryClusterID = resp.ReplicationGroup.PendingModifiedValues.PrimaryClusterId + f21.PrimaryClusterID = resp.ReplicationGroup.PendingModifiedValues.PrimaryClusterId } if resp.ReplicationGroup.PendingModifiedValues.Resharding != nil { - f18f4 := &svcapitypes.ReshardingStatus{} + f21f4 := &svcapitypes.ReshardingStatus{} if resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration != nil { - f18f4f0 := 
&svcapitypes.SlotMigration{} + f21f4f0 := &svcapitypes.SlotMigration{} if resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage != nil { - f18f4f0.ProgressPercentage = resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage + f21f4f0.ProgressPercentage = resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage } - f18f4.SlotMigration = f18f4f0 + f21f4.SlotMigration = f21f4f0 } - f18.Resharding = f18f4 + f21.Resharding = f21f4 } if resp.ReplicationGroup.PendingModifiedValues.UserGroups != nil { - f18f5 := &svcapitypes.UserGroupsUpdateStatus{} + f21f5 := &svcapitypes.UserGroupsUpdateStatus{} if resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToAdd != nil { - f18f5f0 := []*string{} - for _, f18f5f0iter := range resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToAdd { - var f18f5f0elem string - f18f5f0elem = *f18f5f0iter - f18f5f0 = append(f18f5f0, &f18f5f0elem) - } - f18f5.UserGroupIDsToAdd = f18f5f0 + f21f5.UserGroupIDsToAdd = aws.StringSlice(resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToAdd) } if resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToRemove != nil { - f18f5f1 := []*string{} - for _, f18f5f1iter := range resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToRemove { - var f18f5f1elem string - f18f5f1elem = *f18f5f1iter - f18f5f1 = append(f18f5f1, &f18f5f1elem) - } - f18f5.UserGroupIDsToRemove = f18f5f1 + f21f5.UserGroupIDsToRemove = aws.StringSlice(resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToRemove) } - f18.UserGroups = f18f5 + f21.UserGroups = f21f5 } - ko.Status.PendingModifiedValues = f18 + ko.Status.PendingModifiedValues = f21 } else { ko.Status.PendingModifiedValues = nil } @@ -848,7 +860,8 @@ func (rm *resourceManager) sdkCreate( ko.Spec.ReplicationGroupID = nil } if resp.ReplicationGroup.SnapshotRetentionLimit != nil { - ko.Spec.SnapshotRetentionLimit = resp.ReplicationGroup.SnapshotRetentionLimit + snapshotRetentionLimitCopy := int64(*resp.ReplicationGroup.SnapshotRetentionLimit) + ko.Spec.SnapshotRetentionLimit = &snapshotRetentionLimitCopy } else { ko.Spec.SnapshotRetentionLimit = nil } @@ -873,13 +886,7 @@ func (rm *resourceManager) sdkCreate( ko.Spec.TransitEncryptionEnabled = nil } if resp.ReplicationGroup.UserGroupIds != nil { - f26 := []*string{} - for _, f26iter := range resp.ReplicationGroup.UserGroupIds { - var f26elem string - f26elem = *f26iter - f26 = append(f26, &f26elem) - } - ko.Spec.UserGroupIDs = f26 + ko.Spec.UserGroupIDs = aws.StringSlice(resp.ReplicationGroup.UserGroupIds) } else { ko.Spec.UserGroupIDs = nil } @@ -902,7 +909,7 @@ func (rm *resourceManager) newCreateRequestPayload( res := &svcsdk.CreateReplicationGroupInput{} if r.ko.Spec.AtRestEncryptionEnabled != nil { - res.SetAtRestEncryptionEnabled(*r.ko.Spec.AtRestEncryptionEnabled) + res.AtRestEncryptionEnabled = r.ko.Spec.AtRestEncryptionEnabled } if r.ko.Spec.AuthToken != nil { tmpSecret, err := rm.rr.SecretValueFromReference(ctx, r.ko.Spec.AuthToken) @@ -910,209 +917,198 @@ func (rm *resourceManager) newCreateRequestPayload( return nil, ackrequeue.Needed(err) } if tmpSecret != "" { - res.SetAuthToken(tmpSecret) + res.AuthToken = aws.String(tmpSecret) } } if r.ko.Spec.AutomaticFailoverEnabled != nil { - res.SetAutomaticFailoverEnabled(*r.ko.Spec.AutomaticFailoverEnabled) + res.AutomaticFailoverEnabled = r.ko.Spec.AutomaticFailoverEnabled } if r.ko.Spec.CacheNodeType != nil { - 
res.SetCacheNodeType(*r.ko.Spec.CacheNodeType) + res.CacheNodeType = r.ko.Spec.CacheNodeType } if r.ko.Spec.CacheParameterGroupName != nil { - res.SetCacheParameterGroupName(*r.ko.Spec.CacheParameterGroupName) + res.CacheParameterGroupName = r.ko.Spec.CacheParameterGroupName } if r.ko.Spec.CacheSecurityGroupNames != nil { - f5 := []*string{} - for _, f5iter := range r.ko.Spec.CacheSecurityGroupNames { - var f5elem string - f5elem = *f5iter - f5 = append(f5, &f5elem) - } - res.SetCacheSecurityGroupNames(f5) + res.CacheSecurityGroupNames = aws.ToStringSlice(r.ko.Spec.CacheSecurityGroupNames) } if r.ko.Spec.CacheSubnetGroupName != nil { - res.SetCacheSubnetGroupName(*r.ko.Spec.CacheSubnetGroupName) + res.CacheSubnetGroupName = r.ko.Spec.CacheSubnetGroupName } if r.ko.Spec.DataTieringEnabled != nil { - res.SetDataTieringEnabled(*r.ko.Spec.DataTieringEnabled) + res.DataTieringEnabled = r.ko.Spec.DataTieringEnabled } if r.ko.Spec.Engine != nil { - res.SetEngine(*r.ko.Spec.Engine) + res.Engine = r.ko.Spec.Engine } if r.ko.Spec.EngineVersion != nil { - res.SetEngineVersion(*r.ko.Spec.EngineVersion) + res.EngineVersion = r.ko.Spec.EngineVersion + } + if r.ko.Spec.IPDiscovery != nil { + res.IpDiscovery = svcsdktypes.IpDiscovery(*r.ko.Spec.IPDiscovery) } if r.ko.Spec.KMSKeyID != nil { - res.SetKmsKeyId(*r.ko.Spec.KMSKeyID) + res.KmsKeyId = r.ko.Spec.KMSKeyID } if r.ko.Spec.LogDeliveryConfigurations != nil { - f11 := []*svcsdk.LogDeliveryConfigurationRequest{} - for _, f11iter := range r.ko.Spec.LogDeliveryConfigurations { - f11elem := &svcsdk.LogDeliveryConfigurationRequest{} - if f11iter.DestinationDetails != nil { - f11elemf0 := &svcsdk.DestinationDetails{} - if f11iter.DestinationDetails.CloudWatchLogsDetails != nil { - f11elemf0f0 := &svcsdk.CloudWatchLogsDestinationDetails{} - if f11iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f11elemf0f0.SetLogGroup(*f11iter.DestinationDetails.CloudWatchLogsDetails.LogGroup) + f12 := []svcsdktypes.LogDeliveryConfigurationRequest{} + for _, f12iter := range r.ko.Spec.LogDeliveryConfigurations { + f12elem := &svcsdktypes.LogDeliveryConfigurationRequest{} + if f12iter.DestinationDetails != nil { + f12elemf0 := &svcsdktypes.DestinationDetails{} + if f12iter.DestinationDetails.CloudWatchLogsDetails != nil { + f12elemf0f0 := &svcsdktypes.CloudWatchLogsDestinationDetails{} + if f12iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f12elemf0f0.LogGroup = f12iter.DestinationDetails.CloudWatchLogsDetails.LogGroup } - f11elemf0.SetCloudWatchLogsDetails(f11elemf0f0) + f12elemf0.CloudWatchLogsDetails = f12elemf0f0 } - if f11iter.DestinationDetails.KinesisFirehoseDetails != nil { - f11elemf0f1 := &svcsdk.KinesisFirehoseDestinationDetails{} - if f11iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - f11elemf0f1.SetDeliveryStream(*f11iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream) + if f12iter.DestinationDetails.KinesisFirehoseDetails != nil { + f12elemf0f1 := &svcsdktypes.KinesisFirehoseDestinationDetails{} + if f12iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f12elemf0f1.DeliveryStream = f12iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream } - f11elemf0.SetKinesisFirehoseDetails(f11elemf0f1) + f12elemf0.KinesisFirehoseDetails = f12elemf0f1 } - f11elem.SetDestinationDetails(f11elemf0) + f12elem.DestinationDetails = f12elemf0 } - if f11iter.DestinationType != nil { - f11elem.SetDestinationType(*f11iter.DestinationType) + if f12iter.DestinationType != nil { + 
f12elem.DestinationType = svcsdktypes.DestinationType(*f12iter.DestinationType) } - if f11iter.Enabled != nil { - f11elem.SetEnabled(*f11iter.Enabled) + if f12iter.Enabled != nil { + f12elem.Enabled = f12iter.Enabled } - if f11iter.LogFormat != nil { - f11elem.SetLogFormat(*f11iter.LogFormat) + if f12iter.LogFormat != nil { + f12elem.LogFormat = svcsdktypes.LogFormat(*f12iter.LogFormat) } - if f11iter.LogType != nil { - f11elem.SetLogType(*f11iter.LogType) + if f12iter.LogType != nil { + f12elem.LogType = svcsdktypes.LogType(*f12iter.LogType) } - f11 = append(f11, f11elem) + f12 = append(f12, *f12elem) } - res.SetLogDeliveryConfigurations(f11) + res.LogDeliveryConfigurations = f12 } if r.ko.Spec.MultiAZEnabled != nil { - res.SetMultiAZEnabled(*r.ko.Spec.MultiAZEnabled) + res.MultiAZEnabled = r.ko.Spec.MultiAZEnabled + } + if r.ko.Spec.NetworkType != nil { + res.NetworkType = svcsdktypes.NetworkType(*r.ko.Spec.NetworkType) } if r.ko.Spec.NodeGroupConfiguration != nil { - f13 := []*svcsdk.NodeGroupConfiguration{} - for _, f13iter := range r.ko.Spec.NodeGroupConfiguration { - f13elem := &svcsdk.NodeGroupConfiguration{} - if f13iter.NodeGroupID != nil { - f13elem.SetNodeGroupId(*f13iter.NodeGroupID) - } - if f13iter.PrimaryAvailabilityZone != nil { - f13elem.SetPrimaryAvailabilityZone(*f13iter.PrimaryAvailabilityZone) - } - if f13iter.PrimaryOutpostARN != nil { - f13elem.SetPrimaryOutpostArn(*f13iter.PrimaryOutpostARN) - } - if f13iter.ReplicaAvailabilityZones != nil { - f13elemf3 := []*string{} - for _, f13elemf3iter := range f13iter.ReplicaAvailabilityZones { - var f13elemf3elem string - f13elemf3elem = *f13elemf3iter - f13elemf3 = append(f13elemf3, &f13elemf3elem) - } - f13elem.SetReplicaAvailabilityZones(f13elemf3) + f15 := []svcsdktypes.NodeGroupConfiguration{} + for _, f15iter := range r.ko.Spec.NodeGroupConfiguration { + f15elem := &svcsdktypes.NodeGroupConfiguration{} + if f15iter.NodeGroupID != nil { + f15elem.NodeGroupId = f15iter.NodeGroupID } - if f13iter.ReplicaCount != nil { - f13elem.SetReplicaCount(*f13iter.ReplicaCount) + if f15iter.PrimaryAvailabilityZone != nil { + f15elem.PrimaryAvailabilityZone = f15iter.PrimaryAvailabilityZone } - if f13iter.ReplicaOutpostARNs != nil { - f13elemf5 := []*string{} - for _, f13elemf5iter := range f13iter.ReplicaOutpostARNs { - var f13elemf5elem string - f13elemf5elem = *f13elemf5iter - f13elemf5 = append(f13elemf5, &f13elemf5elem) + if f15iter.PrimaryOutpostARN != nil { + f15elem.PrimaryOutpostArn = f15iter.PrimaryOutpostARN + } + if f15iter.ReplicaAvailabilityZones != nil { + f15elem.ReplicaAvailabilityZones = aws.ToStringSlice(f15iter.ReplicaAvailabilityZones) + } + if f15iter.ReplicaCount != nil { + replicaCountCopy0 := *f15iter.ReplicaCount + if replicaCountCopy0 > math.MaxInt32 || replicaCountCopy0 < math.MinInt32 { + return nil, fmt.Errorf("error: field ReplicaCount is of type int32") } - f13elem.SetReplicaOutpostArns(f13elemf5) + replicaCountCopy := int32(replicaCountCopy0) + f15elem.ReplicaCount = &replicaCountCopy + } + if f15iter.ReplicaOutpostARNs != nil { + f15elem.ReplicaOutpostArns = aws.ToStringSlice(f15iter.ReplicaOutpostARNs) } - if f13iter.Slots != nil { - f13elem.SetSlots(*f13iter.Slots) + if f15iter.Slots != nil { + f15elem.Slots = f15iter.Slots } - f13 = append(f13, f13elem) + f15 = append(f15, *f15elem) } - res.SetNodeGroupConfiguration(f13) + res.NodeGroupConfiguration = f15 } if r.ko.Spec.NotificationTopicARN != nil { - res.SetNotificationTopicArn(*r.ko.Spec.NotificationTopicARN) + res.NotificationTopicArn = 
r.ko.Spec.NotificationTopicARN } if r.ko.Spec.NumNodeGroups != nil { - res.SetNumNodeGroups(*r.ko.Spec.NumNodeGroups) + numNodeGroupsCopy0 := *r.ko.Spec.NumNodeGroups + if numNodeGroupsCopy0 > math.MaxInt32 || numNodeGroupsCopy0 < math.MinInt32 { + return nil, fmt.Errorf("error: field NumNodeGroups is of type int32") + } + numNodeGroupsCopy := int32(numNodeGroupsCopy0) + res.NumNodeGroups = &numNodeGroupsCopy } if r.ko.Spec.Port != nil { - res.SetPort(*r.ko.Spec.Port) + portCopy0 := *r.ko.Spec.Port + if portCopy0 > math.MaxInt32 || portCopy0 < math.MinInt32 { + return nil, fmt.Errorf("error: field Port is of type int32") + } + portCopy := int32(portCopy0) + res.Port = &portCopy } if r.ko.Spec.PreferredCacheClusterAZs != nil { - f17 := []*string{} - for _, f17iter := range r.ko.Spec.PreferredCacheClusterAZs { - var f17elem string - f17elem = *f17iter - f17 = append(f17, &f17elem) - } - res.SetPreferredCacheClusterAZs(f17) + res.PreferredCacheClusterAZs = aws.ToStringSlice(r.ko.Spec.PreferredCacheClusterAZs) } if r.ko.Spec.PreferredMaintenanceWindow != nil { - res.SetPreferredMaintenanceWindow(*r.ko.Spec.PreferredMaintenanceWindow) + res.PreferredMaintenanceWindow = r.ko.Spec.PreferredMaintenanceWindow } if r.ko.Spec.PrimaryClusterID != nil { - res.SetPrimaryClusterId(*r.ko.Spec.PrimaryClusterID) + res.PrimaryClusterId = r.ko.Spec.PrimaryClusterID } if r.ko.Spec.ReplicasPerNodeGroup != nil { - res.SetReplicasPerNodeGroup(*r.ko.Spec.ReplicasPerNodeGroup) + replicasPerNodeGroupCopy0 := *r.ko.Spec.ReplicasPerNodeGroup + if replicasPerNodeGroupCopy0 > math.MaxInt32 || replicasPerNodeGroupCopy0 < math.MinInt32 { + return nil, fmt.Errorf("error: field ReplicasPerNodeGroup is of type int32") + } + replicasPerNodeGroupCopy := int32(replicasPerNodeGroupCopy0) + res.ReplicasPerNodeGroup = &replicasPerNodeGroupCopy } if r.ko.Spec.Description != nil { - res.SetReplicationGroupDescription(*r.ko.Spec.Description) + res.ReplicationGroupDescription = r.ko.Spec.Description } if r.ko.Spec.ReplicationGroupID != nil { - res.SetReplicationGroupId(*r.ko.Spec.ReplicationGroupID) + res.ReplicationGroupId = r.ko.Spec.ReplicationGroupID } if r.ko.Spec.SecurityGroupIDs != nil { - f23 := []*string{} - for _, f23iter := range r.ko.Spec.SecurityGroupIDs { - var f23elem string - f23elem = *f23iter - f23 = append(f23, &f23elem) - } - res.SetSecurityGroupIds(f23) + res.SecurityGroupIds = aws.ToStringSlice(r.ko.Spec.SecurityGroupIDs) } if r.ko.Spec.SnapshotARNs != nil { - f24 := []*string{} - for _, f24iter := range r.ko.Spec.SnapshotARNs { - var f24elem string - f24elem = *f24iter - f24 = append(f24, &f24elem) - } - res.SetSnapshotArns(f24) + res.SnapshotArns = aws.ToStringSlice(r.ko.Spec.SnapshotARNs) } if r.ko.Spec.SnapshotName != nil { - res.SetSnapshotName(*r.ko.Spec.SnapshotName) + res.SnapshotName = r.ko.Spec.SnapshotName } if r.ko.Spec.SnapshotRetentionLimit != nil { - res.SetSnapshotRetentionLimit(*r.ko.Spec.SnapshotRetentionLimit) + snapshotRetentionLimitCopy0 := *r.ko.Spec.SnapshotRetentionLimit + if snapshotRetentionLimitCopy0 > math.MaxInt32 || snapshotRetentionLimitCopy0 < math.MinInt32 { + return nil, fmt.Errorf("error: field SnapshotRetentionLimit is of type int32") + } + snapshotRetentionLimitCopy := int32(snapshotRetentionLimitCopy0) + res.SnapshotRetentionLimit = &snapshotRetentionLimitCopy } if r.ko.Spec.SnapshotWindow != nil { - res.SetSnapshotWindow(*r.ko.Spec.SnapshotWindow) + res.SnapshotWindow = r.ko.Spec.SnapshotWindow } if r.ko.Spec.Tags != nil { - f28 := []*svcsdk.Tag{} - for _, f28iter := range 
r.ko.Spec.Tags { - f28elem := &svcsdk.Tag{} - if f28iter.Key != nil { - f28elem.SetKey(*f28iter.Key) + f30 := []svcsdktypes.Tag{} + for _, f30iter := range r.ko.Spec.Tags { + f30elem := &svcsdktypes.Tag{} + if f30iter.Key != nil { + f30elem.Key = f30iter.Key } - if f28iter.Value != nil { - f28elem.SetValue(*f28iter.Value) + if f30iter.Value != nil { + f30elem.Value = f30iter.Value } - f28 = append(f28, f28elem) + f30 = append(f30, *f30elem) } - res.SetTags(f28) + res.Tags = f30 } if r.ko.Spec.TransitEncryptionEnabled != nil { - res.SetTransitEncryptionEnabled(*r.ko.Spec.TransitEncryptionEnabled) + res.TransitEncryptionEnabled = r.ko.Spec.TransitEncryptionEnabled } if r.ko.Spec.UserGroupIDs != nil { - f30 := []*string{} - for _, f30iter := range r.ko.Spec.UserGroupIDs { - var f30elem string - f30elem = *f30iter - f30 = append(f30, &f30elem) - } - res.SetUserGroupIds(f30) + res.UserGroupIds = aws.ToStringSlice(r.ko.Spec.UserGroupIDs) } return res, nil @@ -1131,16 +1127,25 @@ func (rm *resourceManager) sdkUpdate( defer func() { exit(err) }() + if delta.DifferentAt("Spec.Tags") { + if err = rm.syncTags(ctx, desired, latest); err != nil { + return nil, err + } + } + updated, err = rm.CustomModifyReplicationGroup(ctx, desired, latest, delta) if updated != nil || err != nil { return updated, err } - input, err := rm.newUpdateRequestPayload(ctx, desired) + input, err := rm.newUpdateRequestPayload(ctx, desired, delta) if err != nil { return nil, err } if !delta.DifferentAt("Spec.LogDeliveryConfigurations") { - input.SetLogDeliveryConfigurations(nil) + input.LogDeliveryConfigurations = nil + } + if !delta.DifferentAt("Spec.TransitEncryptionEnabled") { + input.TransitEncryptionEnabled = nil } if delta.DifferentAt("UserGroupIDs") { for _, diff := range delta.Differences { @@ -1150,7 +1155,7 @@ func (rm *resourceManager) sdkUpdate( // User groups to add { - var userGroupsToAdd []*string + var userGroupsToAdd []string for _, requiredUserGroup := range requiredUserGroups { found := false @@ -1162,16 +1167,18 @@ func (rm *resourceManager) sdkUpdate( } if !found { - userGroupsToAdd = append(userGroupsToAdd, requiredUserGroup) + if requiredUserGroup != nil { + userGroupsToAdd = append(userGroupsToAdd, *requiredUserGroup) + } } } - input.SetUserGroupIdsToAdd(userGroupsToAdd) + input.UserGroupIdsToAdd = userGroupsToAdd } // User groups to remove { - var userGroupsToRemove []*string + var userGroupsToRemove []string for _, existingUserGroup := range existingUserGroups { found := false @@ -1183,11 +1190,13 @@ func (rm *resourceManager) sdkUpdate( } if !found { - userGroupsToRemove = append(userGroupsToRemove, existingUserGroup) + if existingUserGroup != nil { + userGroupsToRemove = append(userGroupsToRemove, *existingUserGroup) + } } } - input.SetUserGroupIdsToRemove(userGroupsToRemove) + input.UserGroupIdsToRemove = userGroupsToRemove } } } @@ -1195,7 +1204,7 @@ func (rm *resourceManager) sdkUpdate( var resp *svcsdk.ModifyReplicationGroupOutput _ = resp - resp, err = rm.sdkapi.ModifyReplicationGroupWithContext(ctx, input) + resp, err = rm.sdkapi.ModifyReplicationGroup(ctx, input) rm.metrics.RecordAPICall("UPDATE", "ModifyReplicationGroup", err) if err != nil { return nil, err @@ -1231,8 +1240,8 @@ func (rm *resourceManager) sdkUpdate( } else { ko.Status.AutoMinorVersionUpgrade = nil } - if resp.ReplicationGroup.AutomaticFailover != nil { - ko.Status.AutomaticFailover = resp.ReplicationGroup.AutomaticFailover + if resp.ReplicationGroup.AutomaticFailover != "" { + ko.Status.AutomaticFailover = 
aws.String(string(resp.ReplicationGroup.AutomaticFailover)) } else { ko.Status.AutomaticFailover = nil } @@ -1252,14 +1261,15 @@ func (rm *resourceManager) sdkUpdate( f8.Address = resp.ReplicationGroup.ConfigurationEndpoint.Address } if resp.ReplicationGroup.ConfigurationEndpoint.Port != nil { - f8.Port = resp.ReplicationGroup.ConfigurationEndpoint.Port + portCopy := int64(*resp.ReplicationGroup.ConfigurationEndpoint.Port) + f8.Port = &portCopy } ko.Status.ConfigurationEndpoint = f8 } else { ko.Status.ConfigurationEndpoint = nil } - if resp.ReplicationGroup.DataTiering != nil { - ko.Status.DataTiering = resp.ReplicationGroup.DataTiering + if resp.ReplicationGroup.DataTiering != "" { + ko.Status.DataTiering = aws.String(string(resp.ReplicationGroup.DataTiering)) } else { ko.Status.DataTiering = nil } @@ -1268,239 +1278,233 @@ func (rm *resourceManager) sdkUpdate( } else { ko.Spec.Description = nil } + if resp.ReplicationGroup.Engine != nil { + ko.Spec.Engine = resp.ReplicationGroup.Engine + } else { + ko.Spec.Engine = nil + } if resp.ReplicationGroup.GlobalReplicationGroupInfo != nil { - f11 := &svcapitypes.GlobalReplicationGroupInfo{} + f12 := &svcapitypes.GlobalReplicationGroupInfo{} if resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupId != nil { - f11.GlobalReplicationGroupID = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupId + f12.GlobalReplicationGroupID = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupId } if resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole != nil { - f11.GlobalReplicationGroupMemberRole = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole + f12.GlobalReplicationGroupMemberRole = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole } - ko.Status.GlobalReplicationGroupInfo = f11 + ko.Status.GlobalReplicationGroupInfo = f12 } else { ko.Status.GlobalReplicationGroupInfo = nil } + if resp.ReplicationGroup.IpDiscovery != "" { + ko.Spec.IPDiscovery = aws.String(string(resp.ReplicationGroup.IpDiscovery)) + } else { + ko.Spec.IPDiscovery = nil + } if resp.ReplicationGroup.KmsKeyId != nil { ko.Spec.KMSKeyID = resp.ReplicationGroup.KmsKeyId } else { ko.Spec.KMSKeyID = nil } if resp.ReplicationGroup.LogDeliveryConfigurations != nil { - f13 := []*svcapitypes.LogDeliveryConfigurationRequest{} - for _, f13iter := range resp.ReplicationGroup.LogDeliveryConfigurations { - f13elem := &svcapitypes.LogDeliveryConfigurationRequest{} - if f13iter.DestinationDetails != nil { - f13elemf0 := &svcapitypes.DestinationDetails{} - if f13iter.DestinationDetails.CloudWatchLogsDetails != nil { - f13elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} - if f13iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f13elemf0f0.LogGroup = f13iter.DestinationDetails.CloudWatchLogsDetails.LogGroup + f15 := []*svcapitypes.LogDeliveryConfigurationRequest{} + for _, f15iter := range resp.ReplicationGroup.LogDeliveryConfigurations { + f15elem := &svcapitypes.LogDeliveryConfigurationRequest{} + if f15iter.DestinationDetails != nil { + f15elemf0 := &svcapitypes.DestinationDetails{} + if f15iter.DestinationDetails.CloudWatchLogsDetails != nil { + f15elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} + if f15iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f15elemf0f0.LogGroup = f15iter.DestinationDetails.CloudWatchLogsDetails.LogGroup } - f13elemf0.CloudWatchLogsDetails = f13elemf0f0 + 
f15elemf0.CloudWatchLogsDetails = f15elemf0f0 } - if f13iter.DestinationDetails.KinesisFirehoseDetails != nil { - f13elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} - if f13iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - f13elemf0f1.DeliveryStream = f13iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream + if f15iter.DestinationDetails.KinesisFirehoseDetails != nil { + f15elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} + if f15iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f15elemf0f1.DeliveryStream = f15iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream } - f13elemf0.KinesisFirehoseDetails = f13elemf0f1 + f15elemf0.KinesisFirehoseDetails = f15elemf0f1 } - f13elem.DestinationDetails = f13elemf0 + f15elem.DestinationDetails = f15elemf0 } - if f13iter.DestinationType != nil { - f13elem.DestinationType = f13iter.DestinationType + if f15iter.DestinationType != "" { + f15elem.DestinationType = aws.String(string(f15iter.DestinationType)) } - if f13iter.LogFormat != nil { - f13elem.LogFormat = f13iter.LogFormat + if f15iter.LogFormat != "" { + f15elem.LogFormat = aws.String(string(f15iter.LogFormat)) } - if f13iter.LogType != nil { - f13elem.LogType = f13iter.LogType + if f15iter.LogType != "" { + f15elem.LogType = aws.String(string(f15iter.LogType)) } - f13 = append(f13, f13elem) + f15 = append(f15, f15elem) } - ko.Spec.LogDeliveryConfigurations = f13 + ko.Spec.LogDeliveryConfigurations = f15 } else { ko.Spec.LogDeliveryConfigurations = nil } if resp.ReplicationGroup.MemberClusters != nil { - f14 := []*string{} - for _, f14iter := range resp.ReplicationGroup.MemberClusters { - var f14elem string - f14elem = *f14iter - f14 = append(f14, &f14elem) - } - ko.Status.MemberClusters = f14 + ko.Status.MemberClusters = aws.StringSlice(resp.ReplicationGroup.MemberClusters) } else { ko.Status.MemberClusters = nil } if resp.ReplicationGroup.MemberClustersOutpostArns != nil { - f15 := []*string{} - for _, f15iter := range resp.ReplicationGroup.MemberClustersOutpostArns { - var f15elem string - f15elem = *f15iter - f15 = append(f15, &f15elem) - } - ko.Status.MemberClustersOutpostARNs = f15 + ko.Status.MemberClustersOutpostARNs = aws.StringSlice(resp.ReplicationGroup.MemberClustersOutpostArns) } else { ko.Status.MemberClustersOutpostARNs = nil } - if resp.ReplicationGroup.MultiAZ != nil { - ko.Status.MultiAZ = resp.ReplicationGroup.MultiAZ + if resp.ReplicationGroup.MultiAZ != "" { + ko.Status.MultiAZ = aws.String(string(resp.ReplicationGroup.MultiAZ)) } else { ko.Status.MultiAZ = nil } + if resp.ReplicationGroup.NetworkType != "" { + ko.Spec.NetworkType = aws.String(string(resp.ReplicationGroup.NetworkType)) + } else { + ko.Spec.NetworkType = nil + } if resp.ReplicationGroup.NodeGroups != nil { - f17 := []*svcapitypes.NodeGroup{} - for _, f17iter := range resp.ReplicationGroup.NodeGroups { - f17elem := &svcapitypes.NodeGroup{} - if f17iter.NodeGroupId != nil { - f17elem.NodeGroupID = f17iter.NodeGroupId - } - if f17iter.NodeGroupMembers != nil { - f17elemf1 := []*svcapitypes.NodeGroupMember{} - for _, f17elemf1iter := range f17iter.NodeGroupMembers { - f17elemf1elem := &svcapitypes.NodeGroupMember{} - if f17elemf1iter.CacheClusterId != nil { - f17elemf1elem.CacheClusterID = f17elemf1iter.CacheClusterId + f20 := []*svcapitypes.NodeGroup{} + for _, f20iter := range resp.ReplicationGroup.NodeGroups { + f20elem := &svcapitypes.NodeGroup{} + if f20iter.NodeGroupId != nil { + f20elem.NodeGroupID = f20iter.NodeGroupId + 
} + if f20iter.NodeGroupMembers != nil { + f20elemf1 := []*svcapitypes.NodeGroupMember{} + for _, f20elemf1iter := range f20iter.NodeGroupMembers { + f20elemf1elem := &svcapitypes.NodeGroupMember{} + if f20elemf1iter.CacheClusterId != nil { + f20elemf1elem.CacheClusterID = f20elemf1iter.CacheClusterId } - if f17elemf1iter.CacheNodeId != nil { - f17elemf1elem.CacheNodeID = f17elemf1iter.CacheNodeId + if f20elemf1iter.CacheNodeId != nil { + f20elemf1elem.CacheNodeID = f20elemf1iter.CacheNodeId } - if f17elemf1iter.CurrentRole != nil { - f17elemf1elem.CurrentRole = f17elemf1iter.CurrentRole + if f20elemf1iter.CurrentRole != nil { + f20elemf1elem.CurrentRole = f20elemf1iter.CurrentRole } - if f17elemf1iter.PreferredAvailabilityZone != nil { - f17elemf1elem.PreferredAvailabilityZone = f17elemf1iter.PreferredAvailabilityZone + if f20elemf1iter.PreferredAvailabilityZone != nil { + f20elemf1elem.PreferredAvailabilityZone = f20elemf1iter.PreferredAvailabilityZone } - if f17elemf1iter.PreferredOutpostArn != nil { - f17elemf1elem.PreferredOutpostARN = f17elemf1iter.PreferredOutpostArn + if f20elemf1iter.PreferredOutpostArn != nil { + f20elemf1elem.PreferredOutpostARN = f20elemf1iter.PreferredOutpostArn } - if f17elemf1iter.ReadEndpoint != nil { - f17elemf1elemf5 := &svcapitypes.Endpoint{} - if f17elemf1iter.ReadEndpoint.Address != nil { - f17elemf1elemf5.Address = f17elemf1iter.ReadEndpoint.Address + if f20elemf1iter.ReadEndpoint != nil { + f20elemf1elemf5 := &svcapitypes.Endpoint{} + if f20elemf1iter.ReadEndpoint.Address != nil { + f20elemf1elemf5.Address = f20elemf1iter.ReadEndpoint.Address } - if f17elemf1iter.ReadEndpoint.Port != nil { - f17elemf1elemf5.Port = f17elemf1iter.ReadEndpoint.Port + if f20elemf1iter.ReadEndpoint.Port != nil { + portCopy := int64(*f20elemf1iter.ReadEndpoint.Port) + f20elemf1elemf5.Port = &portCopy } - f17elemf1elem.ReadEndpoint = f17elemf1elemf5 + f20elemf1elem.ReadEndpoint = f20elemf1elemf5 } - f17elemf1 = append(f17elemf1, f17elemf1elem) + f20elemf1 = append(f20elemf1, f20elemf1elem) } - f17elem.NodeGroupMembers = f17elemf1 + f20elem.NodeGroupMembers = f20elemf1 } - if f17iter.PrimaryEndpoint != nil { - f17elemf2 := &svcapitypes.Endpoint{} - if f17iter.PrimaryEndpoint.Address != nil { - f17elemf2.Address = f17iter.PrimaryEndpoint.Address + if f20iter.PrimaryEndpoint != nil { + f20elemf2 := &svcapitypes.Endpoint{} + if f20iter.PrimaryEndpoint.Address != nil { + f20elemf2.Address = f20iter.PrimaryEndpoint.Address } - if f17iter.PrimaryEndpoint.Port != nil { - f17elemf2.Port = f17iter.PrimaryEndpoint.Port + if f20iter.PrimaryEndpoint.Port != nil { + portCopy := int64(*f20iter.PrimaryEndpoint.Port) + f20elemf2.Port = &portCopy } - f17elem.PrimaryEndpoint = f17elemf2 + f20elem.PrimaryEndpoint = f20elemf2 } - if f17iter.ReaderEndpoint != nil { - f17elemf3 := &svcapitypes.Endpoint{} - if f17iter.ReaderEndpoint.Address != nil { - f17elemf3.Address = f17iter.ReaderEndpoint.Address + if f20iter.ReaderEndpoint != nil { + f20elemf3 := &svcapitypes.Endpoint{} + if f20iter.ReaderEndpoint.Address != nil { + f20elemf3.Address = f20iter.ReaderEndpoint.Address } - if f17iter.ReaderEndpoint.Port != nil { - f17elemf3.Port = f17iter.ReaderEndpoint.Port + if f20iter.ReaderEndpoint.Port != nil { + portCopy := int64(*f20iter.ReaderEndpoint.Port) + f20elemf3.Port = &portCopy } - f17elem.ReaderEndpoint = f17elemf3 + f20elem.ReaderEndpoint = f20elemf3 } - if f17iter.Slots != nil { - f17elem.Slots = f17iter.Slots + if f20iter.Slots != nil { + f20elem.Slots = f20iter.Slots } - if f17iter.Status 
!= nil { - f17elem.Status = f17iter.Status + if f20iter.Status != nil { + f20elem.Status = f20iter.Status } - f17 = append(f17, f17elem) + f20 = append(f20, f20elem) } - ko.Status.NodeGroups = f17 + ko.Status.NodeGroups = f20 } else { ko.Status.NodeGroups = nil } if resp.ReplicationGroup.PendingModifiedValues != nil { - f18 := &svcapitypes.ReplicationGroupPendingModifiedValues{} - if resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus != nil { - f18.AuthTokenStatus = resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus + f21 := &svcapitypes.ReplicationGroupPendingModifiedValues{} + if resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus != "" { + f21.AuthTokenStatus = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus)) } - if resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus != nil { - f18.AutomaticFailoverStatus = resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus + if resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus != "" { + f21.AutomaticFailoverStatus = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus)) } if resp.ReplicationGroup.PendingModifiedValues.LogDeliveryConfigurations != nil { - f18f2 := []*svcapitypes.PendingLogDeliveryConfiguration{} - for _, f18f2iter := range resp.ReplicationGroup.PendingModifiedValues.LogDeliveryConfigurations { - f18f2elem := &svcapitypes.PendingLogDeliveryConfiguration{} - if f18f2iter.DestinationDetails != nil { - f18f2elemf0 := &svcapitypes.DestinationDetails{} - if f18f2iter.DestinationDetails.CloudWatchLogsDetails != nil { - f18f2elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} - if f18f2iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f18f2elemf0f0.LogGroup = f18f2iter.DestinationDetails.CloudWatchLogsDetails.LogGroup + f21f2 := []*svcapitypes.PendingLogDeliveryConfiguration{} + for _, f21f2iter := range resp.ReplicationGroup.PendingModifiedValues.LogDeliveryConfigurations { + f21f2elem := &svcapitypes.PendingLogDeliveryConfiguration{} + if f21f2iter.DestinationDetails != nil { + f21f2elemf0 := &svcapitypes.DestinationDetails{} + if f21f2iter.DestinationDetails.CloudWatchLogsDetails != nil { + f21f2elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} + if f21f2iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f21f2elemf0f0.LogGroup = f21f2iter.DestinationDetails.CloudWatchLogsDetails.LogGroup } - f18f2elemf0.CloudWatchLogsDetails = f18f2elemf0f0 + f21f2elemf0.CloudWatchLogsDetails = f21f2elemf0f0 } - if f18f2iter.DestinationDetails.KinesisFirehoseDetails != nil { - f18f2elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} - if f18f2iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - f18f2elemf0f1.DeliveryStream = f18f2iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream + if f21f2iter.DestinationDetails.KinesisFirehoseDetails != nil { + f21f2elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} + if f21f2iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f21f2elemf0f1.DeliveryStream = f21f2iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream } - f18f2elemf0.KinesisFirehoseDetails = f18f2elemf0f1 + f21f2elemf0.KinesisFirehoseDetails = f21f2elemf0f1 } - f18f2elem.DestinationDetails = f18f2elemf0 + f21f2elem.DestinationDetails = f21f2elemf0 } - if f18f2iter.DestinationType != nil { - f18f2elem.DestinationType = f18f2iter.DestinationType + if f21f2iter.DestinationType != "" { + 
f21f2elem.DestinationType = aws.String(string(f21f2iter.DestinationType)) } - if f18f2iter.LogFormat != nil { - f18f2elem.LogFormat = f18f2iter.LogFormat + if f21f2iter.LogFormat != "" { + f21f2elem.LogFormat = aws.String(string(f21f2iter.LogFormat)) } - if f18f2iter.LogType != nil { - f18f2elem.LogType = f18f2iter.LogType + if f21f2iter.LogType != "" { + f21f2elem.LogType = aws.String(string(f21f2iter.LogType)) } - f18f2 = append(f18f2, f18f2elem) + f21f2 = append(f21f2, f21f2elem) } - f18.LogDeliveryConfigurations = f18f2 + f21.LogDeliveryConfigurations = f21f2 } if resp.ReplicationGroup.PendingModifiedValues.PrimaryClusterId != nil { - f18.PrimaryClusterID = resp.ReplicationGroup.PendingModifiedValues.PrimaryClusterId + f21.PrimaryClusterID = resp.ReplicationGroup.PendingModifiedValues.PrimaryClusterId } if resp.ReplicationGroup.PendingModifiedValues.Resharding != nil { - f18f4 := &svcapitypes.ReshardingStatus{} + f21f4 := &svcapitypes.ReshardingStatus{} if resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration != nil { - f18f4f0 := &svcapitypes.SlotMigration{} + f21f4f0 := &svcapitypes.SlotMigration{} if resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage != nil { - f18f4f0.ProgressPercentage = resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage + f21f4f0.ProgressPercentage = resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage } - f18f4.SlotMigration = f18f4f0 + f21f4.SlotMigration = f21f4f0 } - f18.Resharding = f18f4 + f21.Resharding = f21f4 } if resp.ReplicationGroup.PendingModifiedValues.UserGroups != nil { - f18f5 := &svcapitypes.UserGroupsUpdateStatus{} + f21f5 := &svcapitypes.UserGroupsUpdateStatus{} if resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToAdd != nil { - f18f5f0 := []*string{} - for _, f18f5f0iter := range resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToAdd { - var f18f5f0elem string - f18f5f0elem = *f18f5f0iter - f18f5f0 = append(f18f5f0, &f18f5f0elem) - } - f18f5.UserGroupIDsToAdd = f18f5f0 + f21f5.UserGroupIDsToAdd = aws.StringSlice(resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToAdd) } if resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToRemove != nil { - f18f5f1 := []*string{} - for _, f18f5f1iter := range resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToRemove { - var f18f5f1elem string - f18f5f1elem = *f18f5f1iter - f18f5f1 = append(f18f5f1, &f18f5f1elem) - } - f18f5.UserGroupIDsToRemove = f18f5f1 + f21f5.UserGroupIDsToRemove = aws.StringSlice(resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToRemove) } - f18.UserGroups = f18f5 + f21.UserGroups = f21f5 } - ko.Status.PendingModifiedValues = f18 + ko.Status.PendingModifiedValues = f21 } else { ko.Status.PendingModifiedValues = nil } @@ -1515,7 +1519,8 @@ func (rm *resourceManager) sdkUpdate( ko.Spec.ReplicationGroupID = nil } if resp.ReplicationGroup.SnapshotRetentionLimit != nil { - ko.Spec.SnapshotRetentionLimit = resp.ReplicationGroup.SnapshotRetentionLimit + snapshotRetentionLimitCopy := int64(*resp.ReplicationGroup.SnapshotRetentionLimit) + ko.Spec.SnapshotRetentionLimit = &snapshotRetentionLimitCopy } else { ko.Spec.SnapshotRetentionLimit = nil } @@ -1540,13 +1545,7 @@ func (rm *resourceManager) sdkUpdate( ko.Spec.TransitEncryptionEnabled = nil } if resp.ReplicationGroup.UserGroupIds != nil { - f26 := []*string{} - for _, f26iter := range 
resp.ReplicationGroup.UserGroupIds { - var f26elem string - f26elem = *f26iter - f26 = append(f26, &f26elem) - } - ko.Spec.UserGroupIDs = f26 + ko.Spec.UserGroupIDs = aws.StringSlice(resp.ReplicationGroup.UserGroupIds) } else { ko.Spec.UserGroupIDs = nil } @@ -1565,104 +1564,113 @@ func (rm *resourceManager) sdkUpdate( func (rm *resourceManager) newUpdateRequestPayload( ctx context.Context, r *resource, + delta *ackcompare.Delta, ) (*svcsdk.ModifyReplicationGroupInput, error) { res := &svcsdk.ModifyReplicationGroupInput{} - res.SetApplyImmediately(true) + res.ApplyImmediately = aws.Bool(true) if r.ko.Spec.AuthToken != nil { tmpSecret, err := rm.rr.SecretValueFromReference(ctx, r.ko.Spec.AuthToken) if err != nil { return nil, ackrequeue.Needed(err) } if tmpSecret != "" { - res.SetAuthToken(tmpSecret) + res.AuthToken = aws.String(tmpSecret) } } if r.ko.Status.AutoMinorVersionUpgrade != nil { - res.SetAutoMinorVersionUpgrade(*r.ko.Status.AutoMinorVersionUpgrade) + res.AutoMinorVersionUpgrade = r.ko.Status.AutoMinorVersionUpgrade } if r.ko.Spec.AutomaticFailoverEnabled != nil { - res.SetAutomaticFailoverEnabled(*r.ko.Spec.AutomaticFailoverEnabled) + res.AutomaticFailoverEnabled = r.ko.Spec.AutomaticFailoverEnabled } if r.ko.Spec.CacheNodeType != nil { - res.SetCacheNodeType(*r.ko.Spec.CacheNodeType) + res.CacheNodeType = r.ko.Spec.CacheNodeType } if r.ko.Spec.CacheParameterGroupName != nil { - res.SetCacheParameterGroupName(*r.ko.Spec.CacheParameterGroupName) + res.CacheParameterGroupName = r.ko.Spec.CacheParameterGroupName } if r.ko.Spec.CacheSecurityGroupNames != nil { - f7 := []*string{} - for _, f7iter := range r.ko.Spec.CacheSecurityGroupNames { - var f7elem string - f7elem = *f7iter - f7 = append(f7, &f7elem) - } - res.SetCacheSecurityGroupNames(f7) + res.CacheSecurityGroupNames = aws.ToStringSlice(r.ko.Spec.CacheSecurityGroupNames) + } + if r.ko.Spec.Engine != nil { + res.Engine = r.ko.Spec.Engine + } + if r.ko.Spec.IPDiscovery != nil { + res.IpDiscovery = svcsdktypes.IpDiscovery(*r.ko.Spec.IPDiscovery) } if r.ko.Spec.LogDeliveryConfigurations != nil { - f8 := []*svcsdk.LogDeliveryConfigurationRequest{} - for _, f8iter := range r.ko.Spec.LogDeliveryConfigurations { - f8elem := &svcsdk.LogDeliveryConfigurationRequest{} - if f8iter.DestinationDetails != nil { - f8elemf0 := &svcsdk.DestinationDetails{} - if f8iter.DestinationDetails.CloudWatchLogsDetails != nil { - f8elemf0f0 := &svcsdk.CloudWatchLogsDestinationDetails{} - if f8iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f8elemf0f0.SetLogGroup(*f8iter.DestinationDetails.CloudWatchLogsDetails.LogGroup) + f11 := []svcsdktypes.LogDeliveryConfigurationRequest{} + for _, f11iter := range r.ko.Spec.LogDeliveryConfigurations { + f11elem := &svcsdktypes.LogDeliveryConfigurationRequest{} + if f11iter.DestinationDetails != nil { + f11elemf0 := &svcsdktypes.DestinationDetails{} + if f11iter.DestinationDetails.CloudWatchLogsDetails != nil { + f11elemf0f0 := &svcsdktypes.CloudWatchLogsDestinationDetails{} + if f11iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f11elemf0f0.LogGroup = f11iter.DestinationDetails.CloudWatchLogsDetails.LogGroup } - f8elemf0.SetCloudWatchLogsDetails(f8elemf0f0) + f11elemf0.CloudWatchLogsDetails = f11elemf0f0 } - if f8iter.DestinationDetails.KinesisFirehoseDetails != nil { - f8elemf0f1 := &svcsdk.KinesisFirehoseDestinationDetails{} - if f8iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - 
f8elemf0f1.SetDeliveryStream(*f8iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream) + if f11iter.DestinationDetails.KinesisFirehoseDetails != nil { + f11elemf0f1 := &svcsdktypes.KinesisFirehoseDestinationDetails{} + if f11iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f11elemf0f1.DeliveryStream = f11iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream } - f8elemf0.SetKinesisFirehoseDetails(f8elemf0f1) + f11elemf0.KinesisFirehoseDetails = f11elemf0f1 } - f8elem.SetDestinationDetails(f8elemf0) + f11elem.DestinationDetails = f11elemf0 } - if f8iter.DestinationType != nil { - f8elem.SetDestinationType(*f8iter.DestinationType) + if f11iter.DestinationType != nil { + f11elem.DestinationType = svcsdktypes.DestinationType(*f11iter.DestinationType) } - if f8iter.Enabled != nil { - f8elem.SetEnabled(*f8iter.Enabled) + if f11iter.Enabled != nil { + f11elem.Enabled = f11iter.Enabled } - if f8iter.LogFormat != nil { - f8elem.SetLogFormat(*f8iter.LogFormat) + if f11iter.LogFormat != nil { + f11elem.LogFormat = svcsdktypes.LogFormat(*f11iter.LogFormat) } - if f8iter.LogType != nil { - f8elem.SetLogType(*f8iter.LogType) + if f11iter.LogType != nil { + f11elem.LogType = svcsdktypes.LogType(*f11iter.LogType) } - f8 = append(f8, f8elem) + f11 = append(f11, *f11elem) } - res.SetLogDeliveryConfigurations(f8) + res.LogDeliveryConfigurations = f11 } if r.ko.Spec.MultiAZEnabled != nil { - res.SetMultiAZEnabled(*r.ko.Spec.MultiAZEnabled) + res.MultiAZEnabled = r.ko.Spec.MultiAZEnabled } if r.ko.Spec.NotificationTopicARN != nil { - res.SetNotificationTopicArn(*r.ko.Spec.NotificationTopicARN) + res.NotificationTopicArn = r.ko.Spec.NotificationTopicARN } if r.ko.Spec.PreferredMaintenanceWindow != nil { - res.SetPreferredMaintenanceWindow(*r.ko.Spec.PreferredMaintenanceWindow) + res.PreferredMaintenanceWindow = r.ko.Spec.PreferredMaintenanceWindow } if r.ko.Spec.PrimaryClusterID != nil { - res.SetPrimaryClusterId(*r.ko.Spec.PrimaryClusterID) + res.PrimaryClusterId = r.ko.Spec.PrimaryClusterID } if r.ko.Spec.Description != nil { - res.SetReplicationGroupDescription(*r.ko.Spec.Description) + res.ReplicationGroupDescription = r.ko.Spec.Description } if r.ko.Spec.ReplicationGroupID != nil { - res.SetReplicationGroupId(*r.ko.Spec.ReplicationGroupID) + res.ReplicationGroupId = r.ko.Spec.ReplicationGroupID } if r.ko.Spec.SnapshotRetentionLimit != nil { - res.SetSnapshotRetentionLimit(*r.ko.Spec.SnapshotRetentionLimit) + snapshotRetentionLimitCopy0 := *r.ko.Spec.SnapshotRetentionLimit + if snapshotRetentionLimitCopy0 > math.MaxInt32 || snapshotRetentionLimitCopy0 < math.MinInt32 { + return nil, fmt.Errorf("error: field SnapshotRetentionLimit is of type int32") + } + snapshotRetentionLimitCopy := int32(snapshotRetentionLimitCopy0) + res.SnapshotRetentionLimit = &snapshotRetentionLimitCopy } if r.ko.Spec.SnapshotWindow != nil { - res.SetSnapshotWindow(*r.ko.Spec.SnapshotWindow) + res.SnapshotWindow = r.ko.Spec.SnapshotWindow } if r.ko.Status.SnapshottingClusterID != nil { - res.SetSnapshottingClusterId(*r.ko.Status.SnapshottingClusterID) + res.SnapshottingClusterId = r.ko.Status.SnapshottingClusterID + } + if r.ko.Spec.TransitEncryptionEnabled != nil { + res.TransitEncryptionEnabled = r.ko.Spec.TransitEncryptionEnabled } return res, nil @@ -1717,11 +1725,11 @@ func (rm *resourceManager) sdkDelete( } var resp *svcsdk.DeleteReplicationGroupOutput _ = resp - resp, err = rm.sdkapi.DeleteReplicationGroupWithContext(ctx, input) + resp, err = rm.sdkapi.DeleteReplicationGroup(ctx, input) 
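// A minimal sketch of the aws-sdk-go-v2 call pattern these hunks migrate to:
// the *WithContext suffix and the generated Set* mutators are gone, and request
// inputs are built by assigning pointer fields (and typed enum values) directly.
// Only rm.sdkapi and svcsdk are taken from this file; the literal values below
// are placeholders, not part of the generated code.
//
//	input := &svcsdk.DeleteReplicationGroupInput{
//		ReplicationGroupId:   aws.String("example-rg"),
//		RetainPrimaryCluster: aws.Bool(false),
//	}
//	resp, err := rm.sdkapi.DeleteReplicationGroup(ctx, input)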
rm.metrics.RecordAPICall("DELETE", "DeleteReplicationGroup", err) // delete call successful if err == nil { - rp, _ := rm.setReplicationGroupOutput(r, resp.ReplicationGroup) + rp, _ := rm.setReplicationGroupOutput(ctx, r, resp.ReplicationGroup) // Setting resource synced condition to false will trigger a requeue of // the resource. ackcondition.SetSynced( @@ -1749,7 +1757,7 @@ func (rm *resourceManager) newDeleteRequestPayload( res := &svcsdk.DeleteReplicationGroupInput{} if r.ko.Spec.ReplicationGroupID != nil { - res.SetReplicationGroupId(*r.ko.Spec.ReplicationGroupID) + res.ReplicationGroupId = r.ko.Spec.ReplicationGroupID } return res, nil @@ -1857,11 +1865,12 @@ func (rm *resourceManager) terminalAWSError(err error) bool { if err == nil { return false } - awsErr, ok := ackerr.AWSError(err) - if !ok { + + var terminalErr smithy.APIError + if !errors.As(err, &terminalErr) { return false } - switch awsErr.Code() { + switch terminalErr.ErrorCode() { case "InvalidParameter", "InvalidParameterValue", "InvalidParameterCombination", @@ -1883,8 +1892,9 @@ func (rm *resourceManager) terminalAWSError(err error) bool { // This method copies the data from given ReplicationGroup by populating it // into copy of supplied resource and returns that. func (rm *resourceManager) setReplicationGroupOutput( + ctx context.Context, r *resource, - obj *svcsdk.ReplicationGroup, + obj *svcsdktypes.ReplicationGroup, ) (*resource, error) { if obj == nil || r == nil || @@ -1923,8 +1933,8 @@ func (rm *resourceManager) setReplicationGroupOutput( } else { ko.Status.AutoMinorVersionUpgrade = nil } - if resp.ReplicationGroup.AutomaticFailover != nil { - ko.Status.AutomaticFailover = resp.ReplicationGroup.AutomaticFailover + if resp.ReplicationGroup.AutomaticFailover != "" { + ko.Status.AutomaticFailover = aws.String(string(resp.ReplicationGroup.AutomaticFailover)) } else { ko.Status.AutomaticFailover = nil } @@ -1944,14 +1954,15 @@ func (rm *resourceManager) setReplicationGroupOutput( f8.Address = resp.ReplicationGroup.ConfigurationEndpoint.Address } if resp.ReplicationGroup.ConfigurationEndpoint.Port != nil { - f8.Port = resp.ReplicationGroup.ConfigurationEndpoint.Port + portCopy := int64(*resp.ReplicationGroup.ConfigurationEndpoint.Port) + f8.Port = &portCopy } ko.Status.ConfigurationEndpoint = f8 } else { ko.Status.ConfigurationEndpoint = nil } - if resp.ReplicationGroup.DataTiering != nil { - ko.Status.DataTiering = resp.ReplicationGroup.DataTiering + if resp.ReplicationGroup.DataTiering != "" { + ko.Status.DataTiering = aws.String(string(resp.ReplicationGroup.DataTiering)) } else { ko.Status.DataTiering = nil } @@ -1960,239 +1971,233 @@ func (rm *resourceManager) setReplicationGroupOutput( } else { ko.Spec.Description = nil } + if resp.ReplicationGroup.Engine != nil { + ko.Spec.Engine = resp.ReplicationGroup.Engine + } else { + ko.Spec.Engine = nil + } if resp.ReplicationGroup.GlobalReplicationGroupInfo != nil { - f11 := &svcapitypes.GlobalReplicationGroupInfo{} + f12 := &svcapitypes.GlobalReplicationGroupInfo{} if resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupId != nil { - f11.GlobalReplicationGroupID = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupId + f12.GlobalReplicationGroupID = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupId } if resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole != nil { - f11.GlobalReplicationGroupMemberRole = 
resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole + f12.GlobalReplicationGroupMemberRole = resp.ReplicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupMemberRole } - ko.Status.GlobalReplicationGroupInfo = f11 + ko.Status.GlobalReplicationGroupInfo = f12 } else { ko.Status.GlobalReplicationGroupInfo = nil } + if resp.ReplicationGroup.IpDiscovery != "" { + ko.Spec.IPDiscovery = aws.String(string(resp.ReplicationGroup.IpDiscovery)) + } else { + ko.Spec.IPDiscovery = nil + } if resp.ReplicationGroup.KmsKeyId != nil { ko.Spec.KMSKeyID = resp.ReplicationGroup.KmsKeyId } else { ko.Spec.KMSKeyID = nil } if resp.ReplicationGroup.LogDeliveryConfigurations != nil { - f13 := []*svcapitypes.LogDeliveryConfigurationRequest{} - for _, f13iter := range resp.ReplicationGroup.LogDeliveryConfigurations { - f13elem := &svcapitypes.LogDeliveryConfigurationRequest{} - if f13iter.DestinationDetails != nil { - f13elemf0 := &svcapitypes.DestinationDetails{} - if f13iter.DestinationDetails.CloudWatchLogsDetails != nil { - f13elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} - if f13iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f13elemf0f0.LogGroup = f13iter.DestinationDetails.CloudWatchLogsDetails.LogGroup + f15 := []*svcapitypes.LogDeliveryConfigurationRequest{} + for _, f15iter := range resp.ReplicationGroup.LogDeliveryConfigurations { + f15elem := &svcapitypes.LogDeliveryConfigurationRequest{} + if f15iter.DestinationDetails != nil { + f15elemf0 := &svcapitypes.DestinationDetails{} + if f15iter.DestinationDetails.CloudWatchLogsDetails != nil { + f15elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} + if f15iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f15elemf0f0.LogGroup = f15iter.DestinationDetails.CloudWatchLogsDetails.LogGroup } - f13elemf0.CloudWatchLogsDetails = f13elemf0f0 + f15elemf0.CloudWatchLogsDetails = f15elemf0f0 } - if f13iter.DestinationDetails.KinesisFirehoseDetails != nil { - f13elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} - if f13iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - f13elemf0f1.DeliveryStream = f13iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream + if f15iter.DestinationDetails.KinesisFirehoseDetails != nil { + f15elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} + if f15iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f15elemf0f1.DeliveryStream = f15iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream } - f13elemf0.KinesisFirehoseDetails = f13elemf0f1 + f15elemf0.KinesisFirehoseDetails = f15elemf0f1 } - f13elem.DestinationDetails = f13elemf0 + f15elem.DestinationDetails = f15elemf0 } - if f13iter.DestinationType != nil { - f13elem.DestinationType = f13iter.DestinationType + if f15iter.DestinationType != "" { + f15elem.DestinationType = aws.String(string(f15iter.DestinationType)) } - if f13iter.LogFormat != nil { - f13elem.LogFormat = f13iter.LogFormat + if f15iter.LogFormat != "" { + f15elem.LogFormat = aws.String(string(f15iter.LogFormat)) } - if f13iter.LogType != nil { - f13elem.LogType = f13iter.LogType + if f15iter.LogType != "" { + f15elem.LogType = aws.String(string(f15iter.LogType)) } - f13 = append(f13, f13elem) + f15 = append(f15, f15elem) } - ko.Spec.LogDeliveryConfigurations = f13 + ko.Spec.LogDeliveryConfigurations = f15 } else { ko.Spec.LogDeliveryConfigurations = nil } if resp.ReplicationGroup.MemberClusters != nil { - f14 := []*string{} - for _, f14iter := range 
resp.ReplicationGroup.MemberClusters { - var f14elem string - f14elem = *f14iter - f14 = append(f14, &f14elem) - } - ko.Status.MemberClusters = f14 + ko.Status.MemberClusters = aws.StringSlice(resp.ReplicationGroup.MemberClusters) } else { ko.Status.MemberClusters = nil } if resp.ReplicationGroup.MemberClustersOutpostArns != nil { - f15 := []*string{} - for _, f15iter := range resp.ReplicationGroup.MemberClustersOutpostArns { - var f15elem string - f15elem = *f15iter - f15 = append(f15, &f15elem) - } - ko.Status.MemberClustersOutpostARNs = f15 + ko.Status.MemberClustersOutpostARNs = aws.StringSlice(resp.ReplicationGroup.MemberClustersOutpostArns) } else { ko.Status.MemberClustersOutpostARNs = nil } - if resp.ReplicationGroup.MultiAZ != nil { - ko.Status.MultiAZ = resp.ReplicationGroup.MultiAZ + if resp.ReplicationGroup.MultiAZ != "" { + ko.Status.MultiAZ = aws.String(string(resp.ReplicationGroup.MultiAZ)) } else { ko.Status.MultiAZ = nil } + if resp.ReplicationGroup.NetworkType != "" { + ko.Spec.NetworkType = aws.String(string(resp.ReplicationGroup.NetworkType)) + } else { + ko.Spec.NetworkType = nil + } if resp.ReplicationGroup.NodeGroups != nil { - f17 := []*svcapitypes.NodeGroup{} - for _, f17iter := range resp.ReplicationGroup.NodeGroups { - f17elem := &svcapitypes.NodeGroup{} - if f17iter.NodeGroupId != nil { - f17elem.NodeGroupID = f17iter.NodeGroupId - } - if f17iter.NodeGroupMembers != nil { - f17elemf1 := []*svcapitypes.NodeGroupMember{} - for _, f17elemf1iter := range f17iter.NodeGroupMembers { - f17elemf1elem := &svcapitypes.NodeGroupMember{} - if f17elemf1iter.CacheClusterId != nil { - f17elemf1elem.CacheClusterID = f17elemf1iter.CacheClusterId + f20 := []*svcapitypes.NodeGroup{} + for _, f20iter := range resp.ReplicationGroup.NodeGroups { + f20elem := &svcapitypes.NodeGroup{} + if f20iter.NodeGroupId != nil { + f20elem.NodeGroupID = f20iter.NodeGroupId + } + if f20iter.NodeGroupMembers != nil { + f20elemf1 := []*svcapitypes.NodeGroupMember{} + for _, f20elemf1iter := range f20iter.NodeGroupMembers { + f20elemf1elem := &svcapitypes.NodeGroupMember{} + if f20elemf1iter.CacheClusterId != nil { + f20elemf1elem.CacheClusterID = f20elemf1iter.CacheClusterId } - if f17elemf1iter.CacheNodeId != nil { - f17elemf1elem.CacheNodeID = f17elemf1iter.CacheNodeId + if f20elemf1iter.CacheNodeId != nil { + f20elemf1elem.CacheNodeID = f20elemf1iter.CacheNodeId } - if f17elemf1iter.CurrentRole != nil { - f17elemf1elem.CurrentRole = f17elemf1iter.CurrentRole + if f20elemf1iter.CurrentRole != nil { + f20elemf1elem.CurrentRole = f20elemf1iter.CurrentRole } - if f17elemf1iter.PreferredAvailabilityZone != nil { - f17elemf1elem.PreferredAvailabilityZone = f17elemf1iter.PreferredAvailabilityZone + if f20elemf1iter.PreferredAvailabilityZone != nil { + f20elemf1elem.PreferredAvailabilityZone = f20elemf1iter.PreferredAvailabilityZone } - if f17elemf1iter.PreferredOutpostArn != nil { - f17elemf1elem.PreferredOutpostARN = f17elemf1iter.PreferredOutpostArn + if f20elemf1iter.PreferredOutpostArn != nil { + f20elemf1elem.PreferredOutpostARN = f20elemf1iter.PreferredOutpostArn } - if f17elemf1iter.ReadEndpoint != nil { - f17elemf1elemf5 := &svcapitypes.Endpoint{} - if f17elemf1iter.ReadEndpoint.Address != nil { - f17elemf1elemf5.Address = f17elemf1iter.ReadEndpoint.Address + if f20elemf1iter.ReadEndpoint != nil { + f20elemf1elemf5 := &svcapitypes.Endpoint{} + if f20elemf1iter.ReadEndpoint.Address != nil { + f20elemf1elemf5.Address = f20elemf1iter.ReadEndpoint.Address } - if f17elemf1iter.ReadEndpoint.Port != nil 
{ - f17elemf1elemf5.Port = f17elemf1iter.ReadEndpoint.Port + if f20elemf1iter.ReadEndpoint.Port != nil { + portCopy := int64(*f20elemf1iter.ReadEndpoint.Port) + f20elemf1elemf5.Port = &portCopy } - f17elemf1elem.ReadEndpoint = f17elemf1elemf5 + f20elemf1elem.ReadEndpoint = f20elemf1elemf5 } - f17elemf1 = append(f17elemf1, f17elemf1elem) + f20elemf1 = append(f20elemf1, f20elemf1elem) } - f17elem.NodeGroupMembers = f17elemf1 + f20elem.NodeGroupMembers = f20elemf1 } - if f17iter.PrimaryEndpoint != nil { - f17elemf2 := &svcapitypes.Endpoint{} - if f17iter.PrimaryEndpoint.Address != nil { - f17elemf2.Address = f17iter.PrimaryEndpoint.Address + if f20iter.PrimaryEndpoint != nil { + f20elemf2 := &svcapitypes.Endpoint{} + if f20iter.PrimaryEndpoint.Address != nil { + f20elemf2.Address = f20iter.PrimaryEndpoint.Address } - if f17iter.PrimaryEndpoint.Port != nil { - f17elemf2.Port = f17iter.PrimaryEndpoint.Port + if f20iter.PrimaryEndpoint.Port != nil { + portCopy := int64(*f20iter.PrimaryEndpoint.Port) + f20elemf2.Port = &portCopy } - f17elem.PrimaryEndpoint = f17elemf2 + f20elem.PrimaryEndpoint = f20elemf2 } - if f17iter.ReaderEndpoint != nil { - f17elemf3 := &svcapitypes.Endpoint{} - if f17iter.ReaderEndpoint.Address != nil { - f17elemf3.Address = f17iter.ReaderEndpoint.Address + if f20iter.ReaderEndpoint != nil { + f20elemf3 := &svcapitypes.Endpoint{} + if f20iter.ReaderEndpoint.Address != nil { + f20elemf3.Address = f20iter.ReaderEndpoint.Address } - if f17iter.ReaderEndpoint.Port != nil { - f17elemf3.Port = f17iter.ReaderEndpoint.Port + if f20iter.ReaderEndpoint.Port != nil { + portCopy := int64(*f20iter.ReaderEndpoint.Port) + f20elemf3.Port = &portCopy } - f17elem.ReaderEndpoint = f17elemf3 + f20elem.ReaderEndpoint = f20elemf3 } - if f17iter.Slots != nil { - f17elem.Slots = f17iter.Slots + if f20iter.Slots != nil { + f20elem.Slots = f20iter.Slots } - if f17iter.Status != nil { - f17elem.Status = f17iter.Status + if f20iter.Status != nil { + f20elem.Status = f20iter.Status } - f17 = append(f17, f17elem) + f20 = append(f20, f20elem) } - ko.Status.NodeGroups = f17 + ko.Status.NodeGroups = f20 } else { ko.Status.NodeGroups = nil } if resp.ReplicationGroup.PendingModifiedValues != nil { - f18 := &svcapitypes.ReplicationGroupPendingModifiedValues{} - if resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus != nil { - f18.AuthTokenStatus = resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus + f21 := &svcapitypes.ReplicationGroupPendingModifiedValues{} + if resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus != "" { + f21.AuthTokenStatus = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.AuthTokenStatus)) } - if resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus != nil { - f18.AutomaticFailoverStatus = resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus + if resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus != "" { + f21.AutomaticFailoverStatus = aws.String(string(resp.ReplicationGroup.PendingModifiedValues.AutomaticFailoverStatus)) } if resp.ReplicationGroup.PendingModifiedValues.LogDeliveryConfigurations != nil { - f18f2 := []*svcapitypes.PendingLogDeliveryConfiguration{} - for _, f18f2iter := range resp.ReplicationGroup.PendingModifiedValues.LogDeliveryConfigurations { - f18f2elem := &svcapitypes.PendingLogDeliveryConfiguration{} - if f18f2iter.DestinationDetails != nil { - f18f2elemf0 := &svcapitypes.DestinationDetails{} - if f18f2iter.DestinationDetails.CloudWatchLogsDetails != nil { - f18f2elemf0f0 := 
&svcapitypes.CloudWatchLogsDestinationDetails{} - if f18f2iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { - f18f2elemf0f0.LogGroup = f18f2iter.DestinationDetails.CloudWatchLogsDetails.LogGroup + f21f2 := []*svcapitypes.PendingLogDeliveryConfiguration{} + for _, f21f2iter := range resp.ReplicationGroup.PendingModifiedValues.LogDeliveryConfigurations { + f21f2elem := &svcapitypes.PendingLogDeliveryConfiguration{} + if f21f2iter.DestinationDetails != nil { + f21f2elemf0 := &svcapitypes.DestinationDetails{} + if f21f2iter.DestinationDetails.CloudWatchLogsDetails != nil { + f21f2elemf0f0 := &svcapitypes.CloudWatchLogsDestinationDetails{} + if f21f2iter.DestinationDetails.CloudWatchLogsDetails.LogGroup != nil { + f21f2elemf0f0.LogGroup = f21f2iter.DestinationDetails.CloudWatchLogsDetails.LogGroup } - f18f2elemf0.CloudWatchLogsDetails = f18f2elemf0f0 + f21f2elemf0.CloudWatchLogsDetails = f21f2elemf0f0 } - if f18f2iter.DestinationDetails.KinesisFirehoseDetails != nil { - f18f2elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} - if f18f2iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { - f18f2elemf0f1.DeliveryStream = f18f2iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream + if f21f2iter.DestinationDetails.KinesisFirehoseDetails != nil { + f21f2elemf0f1 := &svcapitypes.KinesisFirehoseDestinationDetails{} + if f21f2iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream != nil { + f21f2elemf0f1.DeliveryStream = f21f2iter.DestinationDetails.KinesisFirehoseDetails.DeliveryStream } - f18f2elemf0.KinesisFirehoseDetails = f18f2elemf0f1 + f21f2elemf0.KinesisFirehoseDetails = f21f2elemf0f1 } - f18f2elem.DestinationDetails = f18f2elemf0 + f21f2elem.DestinationDetails = f21f2elemf0 } - if f18f2iter.DestinationType != nil { - f18f2elem.DestinationType = f18f2iter.DestinationType + if f21f2iter.DestinationType != "" { + f21f2elem.DestinationType = aws.String(string(f21f2iter.DestinationType)) } - if f18f2iter.LogFormat != nil { - f18f2elem.LogFormat = f18f2iter.LogFormat + if f21f2iter.LogFormat != "" { + f21f2elem.LogFormat = aws.String(string(f21f2iter.LogFormat)) } - if f18f2iter.LogType != nil { - f18f2elem.LogType = f18f2iter.LogType + if f21f2iter.LogType != "" { + f21f2elem.LogType = aws.String(string(f21f2iter.LogType)) } - f18f2 = append(f18f2, f18f2elem) + f21f2 = append(f21f2, f21f2elem) } - f18.LogDeliveryConfigurations = f18f2 + f21.LogDeliveryConfigurations = f21f2 } if resp.ReplicationGroup.PendingModifiedValues.PrimaryClusterId != nil { - f18.PrimaryClusterID = resp.ReplicationGroup.PendingModifiedValues.PrimaryClusterId + f21.PrimaryClusterID = resp.ReplicationGroup.PendingModifiedValues.PrimaryClusterId } if resp.ReplicationGroup.PendingModifiedValues.Resharding != nil { - f18f4 := &svcapitypes.ReshardingStatus{} + f21f4 := &svcapitypes.ReshardingStatus{} if resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration != nil { - f18f4f0 := &svcapitypes.SlotMigration{} + f21f4f0 := &svcapitypes.SlotMigration{} if resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage != nil { - f18f4f0.ProgressPercentage = resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage + f21f4f0.ProgressPercentage = resp.ReplicationGroup.PendingModifiedValues.Resharding.SlotMigration.ProgressPercentage } - f18f4.SlotMigration = f18f4f0 + f21f4.SlotMigration = f21f4f0 } - f18.Resharding = f18f4 + f21.Resharding = f21f4 } if resp.ReplicationGroup.PendingModifiedValues.UserGroups != nil 
{ - f18f5 := &svcapitypes.UserGroupsUpdateStatus{} + f21f5 := &svcapitypes.UserGroupsUpdateStatus{} if resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToAdd != nil { - f18f5f0 := []*string{} - for _, f18f5f0iter := range resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToAdd { - var f18f5f0elem string - f18f5f0elem = *f18f5f0iter - f18f5f0 = append(f18f5f0, &f18f5f0elem) - } - f18f5.UserGroupIDsToAdd = f18f5f0 + f21f5.UserGroupIDsToAdd = aws.StringSlice(resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToAdd) } if resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToRemove != nil { - f18f5f1 := []*string{} - for _, f18f5f1iter := range resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToRemove { - var f18f5f1elem string - f18f5f1elem = *f18f5f1iter - f18f5f1 = append(f18f5f1, &f18f5f1elem) - } - f18f5.UserGroupIDsToRemove = f18f5f1 + f21f5.UserGroupIDsToRemove = aws.StringSlice(resp.ReplicationGroup.PendingModifiedValues.UserGroups.UserGroupIdsToRemove) } - f18.UserGroups = f18f5 + f21.UserGroups = f21f5 } - ko.Status.PendingModifiedValues = f18 + ko.Status.PendingModifiedValues = f21 } else { ko.Status.PendingModifiedValues = nil } @@ -2207,7 +2212,8 @@ func (rm *resourceManager) setReplicationGroupOutput( ko.Spec.ReplicationGroupID = nil } if resp.ReplicationGroup.SnapshotRetentionLimit != nil { - ko.Spec.SnapshotRetentionLimit = resp.ReplicationGroup.SnapshotRetentionLimit + snapshotRetentionLimitCopy := int64(*resp.ReplicationGroup.SnapshotRetentionLimit) + ko.Spec.SnapshotRetentionLimit = &snapshotRetentionLimitCopy } else { ko.Spec.SnapshotRetentionLimit = nil } @@ -2232,18 +2238,12 @@ func (rm *resourceManager) setReplicationGroupOutput( ko.Spec.TransitEncryptionEnabled = nil } if resp.ReplicationGroup.UserGroupIds != nil { - f26 := []*string{} - for _, f26iter := range resp.ReplicationGroup.UserGroupIds { - var f26elem string - f26elem = *f26iter - f26 = append(f26, &f26elem) - } - ko.Spec.UserGroupIDs = f26 + ko.Spec.UserGroupIDs = aws.StringSlice(resp.ReplicationGroup.UserGroupIds) } else { ko.Spec.UserGroupIDs = nil } rm.setStatusDefaults(ko) - rm.customSetOutput(obj, ko) // custom set output from obj + rm.customSetOutput(ctx, *obj, ko) // custom set output from obj return &resource{ko}, nil } diff --git a/pkg/resource/replication_group/tags.go b/pkg/resource/replication_group/tags.go index 428d1b8d..90cd6d9a 100644 --- a/pkg/resource/replication_group/tags.go +++ b/pkg/resource/replication_group/tags.go @@ -16,48 +16,104 @@ package replication_group import ( + "slices" + "strings" + acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" ) var ( - _ = svcapitypes.ReplicationGroup{} - _ = acktags.NewTags() + _ = svcapitypes.ReplicationGroup{} + _ = acktags.NewTags() + ACKSystemTags = []string{"services.k8s.aws/namespace", "services.k8s.aws/controller-version"} ) -// ToACKTags converts the tags parameter into 'acktags.Tags' shape. +// convertToOrderedACKTags converts the tags parameter into 'acktags.Tags' shape. // This method helps in creating the hub(acktags.Tags) for merging -// default controller tags with existing resource tags. -func ToACKTags(tags []*svcapitypes.Tag) acktags.Tags { +// default controller tags with existing resource tags. 
It also returns a slice +// of keys maintaining the original key Order when the tags are a list +func convertToOrderedACKTags(tags []*svcapitypes.Tag) (acktags.Tags, []string) { result := acktags.NewTags() - if tags == nil || len(tags) == 0 { - return result - } + keyOrder := []string{} + if len(tags) == 0 { + return result, keyOrder + } for _, t := range tags { if t.Key != nil { - if t.Value == nil { - result[*t.Key] = "" - } else { + keyOrder = append(keyOrder, *t.Key) + if t.Value != nil { result[*t.Key] = *t.Value + } else { + result[*t.Key] = "" } } } - return result + return result, keyOrder } -// FromACKTags converts the tags parameter into []*svcapitypes.Tag shape. +// fromACKTags converts the tags parameter into []*svcapitypes.Tag shape. // This method helps in setting the tags back inside AWSResource after merging -// default controller tags with existing resource tags. -func FromACKTags(tags acktags.Tags) []*svcapitypes.Tag { +// default controller tags with existing resource tags. When a list, +// it maintains the order from original +func fromACKTags(tags acktags.Tags, keyOrder []string) []*svcapitypes.Tag { result := []*svcapitypes.Tag{} + + for _, k := range keyOrder { + v, ok := tags[k] + if ok { + tag := svcapitypes.Tag{Key: &k, Value: &v} + result = append(result, &tag) + delete(tags, k) + } + } for k, v := range tags { - kCopy := k - vCopy := v - tag := svcapitypes.Tag{Key: &kCopy, Value: &vCopy} + tag := svcapitypes.Tag{Key: &k, Value: &v} result = append(result, &tag) } + return result } + +// ignoreSystemTags ignores tags that have keys that start with "aws:" +// and ACKSystemTags, to avoid patching them to the resourceSpec. +// Eg. resources created with cloudformation have tags that cannot be +// removed by an ACK controller +func ignoreSystemTags(tags acktags.Tags) { + for k := range tags { + if strings.HasPrefix(k, "aws:") || + slices.Contains(ACKSystemTags, k) { + delete(tags, k) + } + } +} + +// syncAWSTags ensures AWS-managed tags (prefixed with "aws:") from the latest resource state +// are preserved in the desired state. This prevents the controller from attempting to +// modify AWS-managed tags, which would result in an error. +// +// AWS-managed tags are automatically added by AWS services (e.g., CloudFormation, Service Catalog) +// and cannot be modified or deleted through normal tag operations. 
Common examples include: +// - aws:cloudformation:stack-name +// - aws:servicecatalog:productArn +// +// Parameters: +// - a: The target Tags map to be updated (typically desired state) +// - b: The source Tags map containing AWS-managed tags (typically latest state) +// +// Example: +// +// latest := Tags{"aws:cloudformation:stack-name": "my-stack", "environment": "prod"} +// desired := Tags{"environment": "dev"} +// SyncAWSTags(desired, latest) +// desired now contains {"aws:cloudformation:stack-name": "my-stack", "environment": "dev"} +func syncAWSTags(a acktags.Tags, b acktags.Tags) { + for k := range b { + if strings.HasPrefix(k, "aws:") { + a[k] = b[k] + } + } +} diff --git a/pkg/resource/replication_group/testdata/DecreaseReplicaCountOutput.json b/pkg/resource/replication_group/testdata/DecreaseReplicaCountOutput.json deleted file mode 100644 index b6cbea8b..00000000 --- a/pkg/resource/replication_group/testdata/DecreaseReplicaCountOutput.json +++ /dev/null @@ -1,77 +0,0 @@ -{ - "ReplicationGroup": { - "ReplicationGroupId": "my-cluster", - "Description": "mock_replication_group_description", - "Status": "modifying", - "PendingModifiedValues": {}, - "MemberClusters": [ - "myrepliace", - "my-cluster-001", - "my-cluster-002", - "my-cluster-003" - ], - "NodeGroups": [ - { - "NodeGroupId": "0001", - "Status": "modifying", - "PrimaryEndpoint": { - "Address": "my-cluster.xxxxx.ng.0001.usw2.cache.amazonaws.com", - "Port": 6379 - }, - "ReaderEndpoint": { - "Address": "my-cluster-ro.xxxxx.ng.0001.usw2.cache.amazonaws.com", - "Port": 6379 - }, - "NodeGroupMembers": [ - { - "CacheClusterId": "myrepliace", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Address": "myrepliace.xxxxx.0001.usw2.cache.amazonaws.com", - "Port": 6379 - }, - "PreferredAvailabilityZone": "us-west-2a", - "CurrentRole": "replica" - }, - { - "CacheClusterId": "my-cluster-001", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Address": "my-cluster-001.xxxxx.0001.usw2.cache.amazonaws.com", - "Port": 6379 - }, - "PreferredAvailabilityZone": "us-west-2a", - "CurrentRole": "primary" - }, - { - "CacheClusterId": "my-cluster-002", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Address": "my-cluster-002.xxxxx.0001.usw2.cache.amazonaws.com", - "Port": 6379 - }, - "PreferredAvailabilityZone": "us-west-2a", - "CurrentRole": "replica" - }, - { - "CacheClusterId": "my-cluster-003", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Address": "my-cluster-003.xxxxx.0001.usw2.cache.amazonaws.com", - "Port": 6379 - }, - "PreferredAvailabilityZone": "us-west-2a", - "CurrentRole": "replica" - } - ] - } - ], - "AutomaticFailover": "disabled", - "SnapshotRetentionLimit": 0, - "SnapshotWindow": "07:30-08:30", - "ClusterEnabled": false, - "CacheNodeType": "cache.r5.xlarge", - "TransitEncryptionEnabled": false, - "AtRestEncryptionEnabled": false - } -} diff --git a/pkg/resource/replication_group/testdata/DescribeReplicationGroupsOutput.json b/pkg/resource/replication_group/testdata/DescribeReplicationGroupsOutput.json deleted file mode 100644 index 4367a0ce..00000000 --- a/pkg/resource/replication_group/testdata/DescribeReplicationGroupsOutput.json +++ /dev/null @@ -1,80 +0,0 @@ -{ - "ReplicationGroups": [ - { - "ReplicationGroupId": "my-cluster", - "Description": "mycluster", - "Status": "available", - "PendingModifiedValues": {}, - "MemberClusters": [ - "pat-cluster-001", - "pat-cluster-002", - "pat-cluster-003", - "pat-cluster-004" - ], - "NodeGroups": [ - { - "NodeGroupId": "0001", - "Status": "available", - "PrimaryEndpoint": { - "Address": 
"my-cluster.xxxxih.ng.0001.usw2.cache.amazonaws.com", - "Port": 6379 - }, - "ReaderEndpoint": { - "Address": "my-cluster-ro.xxxxih.ng.0001.usw2.cache.amazonaws.com", - "Port": 6379 - }, - "NodeGroupMembers": [ - { - "CacheClusterId": "my-cluster-001", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Address": "pat-cluster-001.xxxih.0001.usw2.cache.amazonaws.com", - "Port": 6379 - }, - "PreferredAvailabilityZone": "us-west-2a", - "CurrentRole": "primary" - }, - { - "CacheClusterId": "my-cluster-002", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Address": "pat-cluster-002.xxxxih.0001.usw2.cache.amazonaws.com", - "Port": 6379 - }, - "PreferredAvailabilityZone": "us-west-2a", - "CurrentRole": "replica" - }, - { - "CacheClusterId": "my-cluster-003", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Address": "pat-cluster-003.xxxxih.0001.usw2.cache.amazonaws.com", - "Port": 6379 - }, - "PreferredAvailabilityZone": "us-west-2a", - "CurrentRole": "replica" - }, - { - "CacheClusterId": "my-cluster-004", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Address": "pat-cluster-004.xxxih.0001.usw2.cache.amazonaws.com", - "Port": 6379 - }, - "PreferredAvailabilityZone": "us-west-2a", - "CurrentRole": "replica" - } - ] - } - ], - "AutomaticFailover": "disabled", - "SnapshotRetentionLimit": 0, - "SnapshotWindow": "07:30-08:30", - "ClusterEnabled": false, - "CacheNodeType": "cache.r5.xlarge", - "AuthTokenEnabled": false, - "TransitEncryptionEnabled": false, - "AtRestEncryptionEnabled": false - } - ] -} diff --git a/pkg/resource/replication_group/testdata/allowed_node_types/read_many/rg_cmd_allowed_node_types.json b/pkg/resource/replication_group/testdata/allowed_node_types/read_many/rg_cmd_allowed_node_types.json deleted file mode 100644 index 00c54c51..00000000 --- a/pkg/resource/replication_group/testdata/allowed_node_types/read_many/rg_cmd_allowed_node_types.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "ScaleUpModifications": [ - "cache.m3.2xlarge", - "cache.m3.large", - "cache.m3.medium", - "cache.m3.xlarge", - "cache.m4.10xlarge", - "cache.m4.2xlarge", - "cache.m4.4xlarge", - "cache.m4.large", - "cache.m4.xlarge", - "cache.m5.12xlarge", - "cache.m5.24xlarge", - "cache.m5.2xlarge", - "cache.m5.4xlarge", - "cache.m5.large", - "cache.m5.xlarge", - "cache.m6g.large", - "cache.r3.2xlarge", - "cache.r3.4xlarge", - "cache.r3.8xlarge", - "cache.r3.large", - "cache.r3.xlarge", - "cache.r4.16xlarge", - "cache.r4.2xlarge", - "cache.r4.4xlarge", - "cache.r4.8xlarge", - "cache.r4.large", - "cache.r4.xlarge", - "cache.r5.12xlarge", - "cache.r5.24xlarge", - "cache.r5.2xlarge", - "cache.r5.4xlarge", - "cache.r5.large", - "cache.r5.xlarge", - "cache.r6g.2xlarge", - "cache.r6g.4xlarge", - "cache.r6g.8xlarge", - "cache.r6g.large", - "cache.r6g.xlarge", - "cache.t2.medium", - "cache.t2.micro", - "cache.t2.small", - "cache.t3.medium", - "cache.t3.small" - ] -} \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/cache_clusters/read_many/rg_cmd_primary_cache_node.json b/pkg/resource/replication_group/testdata/cache_clusters/read_many/rg_cmd_primary_cache_node.json deleted file mode 100644 index a2e77b85..00000000 --- a/pkg/resource/replication_group/testdata/cache_clusters/read_many/rg_cmd_primary_cache_node.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "CacheClusters": [ - { - "CacheClusterId": "rg-cmd-001", - "ReplicationGroupId": "rg-cmd", - "CacheClusterStatus": "available", - "SnapshotRetentionLimit": 0, - "ClientDownloadLandingPage": 
"https://console.aws.amazon.com/elasticache/home#client-download:", - "CacheNodeType": "cache.t3.micro", - "TransitEncryptionEnabled": false, - "Engine": "redis", - "CacheSecurityGroups": [], - "NumCacheNodes": 1, - "AutoMinorVersionUpgrade": true, - "PendingModifiedValues": {}, - "PreferredMaintenanceWindow": "wed:08:00-wed:09:00", - "CacheSubnetGroupName": "default", - "AuthTokenEnabled": false, - "AtRestEncryptionEnabled": false, - "EngineVersion": "5.0.0", - "CacheClusterCreateTime": "2021-04-13T19:07:04.983Z", - "PreferredAvailabilityZone": "us-east-1b", - "SnapshotWindow": "06:30-07:30", - "ARN": "arn:aws:elasticache:us-east-1:012345678910:cluster:rg-cmd-001", - "CacheParameterGroup": { - "CacheNodeIdsToReboot": [], - "CacheParameterGroupName": "default.redis5.0", - "ParameterApplyStatus": "in-sync" - } - } - ] -} \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/events/read_many/rg_cmd_events.json b/pkg/resource/replication_group/testdata/events/read_many/rg_cmd_events.json deleted file mode 100644 index 67dbfbb5..00000000 --- a/pkg/resource/replication_group/testdata/events/read_many/rg_cmd_events.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "Events": [ - { - "Date": "2021-03-30T20:12:00Z", - "Message": "Replication group rg-cmd created", - "SourceIdentifier": "rg-cmd", - "SourceType": "replication-group" - } - ] -} \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_create.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_create.yaml deleted file mode 100644 index d193e0bc..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_create.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.micro - engine: redis - numNodeGroups: 1 - replicasPerNodeGroup: 1 - description: cluster-mode disabled RG - replicationGroupID: rg-cmd \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_engine_version_upgrade.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_engine_version_upgrade.yaml deleted file mode 100644 index e06a78e8..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_engine_version_upgrade.yaml +++ /dev/null @@ -1,99 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.micro - engine: redis - engineVersion: 5.0.6 # new config has been applied; this is the new engine version but no action has been taken yet - numNodeGroups: 1 - replicasPerNodeGroup: 1 - description: cluster-mode disabled RG - replicationGroupID: rg-cmd - atRestEncryptionEnabled: false - snapshotRetentionLimit: 0 - snapshotWindow: "06:30-07:30" - transitEncryptionEnabled: false -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd - ownerAccountID: "" - region: "" - allowedScaleUpModifications: - - cache.m3.2xlarge - - cache.m3.large - - cache.m3.medium - - cache.m3.xlarge - - cache.m4.10xlarge - - cache.m4.2xlarge - - cache.m4.4xlarge - - cache.m4.large - - cache.m4.xlarge - - cache.m5.12xlarge - - cache.m5.24xlarge - - cache.m5.2xlarge - - cache.m5.4xlarge - - cache.m5.large - - cache.m5.xlarge - - cache.r3.2xlarge - - cache.r3.4xlarge - - cache.r3.8xlarge - - cache.r3.large - - 
cache.r3.xlarge - - cache.r4.16xlarge - - cache.r4.2xlarge - - cache.r4.4xlarge - - cache.r4.8xlarge - - cache.r4.large - - cache.r4.xlarge - - cache.r5.12xlarge - - cache.r5.24xlarge - - cache.r5.2xlarge - - cache.r5.4xlarge - - cache.r5.large - - cache.r5.xlarge - - cache.t2.medium - - cache.t2.micro - - cache.t2.small - - cache.t3.medium - - cache.t3.small - authTokenEnabled: false - automaticFailover: disabled - clusterEnabled: false - conditions: - - status: "True" - type: ACK.ResourceSynced - description: cluster-mode disabled RG - events: - - date: "2021-04-13T19:07:05Z" - message: Replication group rg-cmd created - globalReplicationGroupInfo: {} - memberClusters: - - rg-cmd-001 - - rg-cmd-002 - multiAZ: disabled - nodeGroups: - - nodeGroupID: "0001" - nodeGroupMembers: - - cacheClusterID: rg-cmd-001 - cacheNodeID: "0001" - currentRole: primary - preferredAvailabilityZone: us-east-1b - readEndpoint: - address: rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - - cacheClusterID: rg-cmd-002 - cacheNodeID: "0001" - currentRole: replica - preferredAvailabilityZone: us-east-1d - readEndpoint: - address: rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - primaryEndpoint: - address: rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - readerEndpoint: - address: rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - status: available - pendingModifiedValues: {} - status: available \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_engine_version_upgrade_latest.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_engine_version_upgrade_latest.yaml deleted file mode 100644 index c5d98929..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_engine_version_upgrade_latest.yaml +++ /dev/null @@ -1,99 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.micro - engine: redis - engineVersion: 5.0.0 # this should still be the old engine version - numNodeGroups: 1 - replicasPerNodeGroup: 1 - description: cluster-mode disabled RG - replicationGroupID: rg-cmd - atRestEncryptionEnabled: false - snapshotRetentionLimit: 0 - snapshotWindow: "06:30-07:30" - transitEncryptionEnabled: false -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd - ownerAccountID: "" - region: "" - allowedScaleUpModifications: - - cache.m3.2xlarge - - cache.m3.large - - cache.m3.medium - - cache.m3.xlarge - - cache.m4.10xlarge - - cache.m4.2xlarge - - cache.m4.4xlarge - - cache.m4.large - - cache.m4.xlarge - - cache.m5.12xlarge - - cache.m5.24xlarge - - cache.m5.2xlarge - - cache.m5.4xlarge - - cache.m5.large - - cache.m5.xlarge - - cache.r3.2xlarge - - cache.r3.4xlarge - - cache.r3.8xlarge - - cache.r3.large - - cache.r3.xlarge - - cache.r4.16xlarge - - cache.r4.2xlarge - - cache.r4.4xlarge - - cache.r4.8xlarge - - cache.r4.large - - cache.r4.xlarge - - cache.r5.12xlarge - - cache.r5.24xlarge - - cache.r5.2xlarge - - cache.r5.4xlarge - - cache.r5.large - - cache.r5.xlarge - - cache.t2.medium - - cache.t2.micro - - cache.t2.small - - cache.t3.medium - - cache.t3.small - authTokenEnabled: false - automaticFailover: disabled - clusterEnabled: false - conditions: - - status: "True" - type: ACK.ResourceSynced - description: cluster-mode disabled RG - events: - - date: "2021-04-13T19:07:05Z" - message: Replication group rg-cmd 
created - globalReplicationGroupInfo: {} - memberClusters: - - rg-cmd-001 - - rg-cmd-002 - multiAZ: disabled - nodeGroups: - - nodeGroupID: "0001" - nodeGroupMembers: - - cacheClusterID: rg-cmd-001 - cacheNodeID: "0001" - currentRole: primary - preferredAvailabilityZone: us-east-1b - readEndpoint: - address: rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - - cacheClusterID: rg-cmd-002 - cacheNodeID: "0001" - currentRole: replica - preferredAvailabilityZone: us-east-1d - readEndpoint: - address: rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - primaryEndpoint: - address: rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - readerEndpoint: - address: rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - status: available - pendingModifiedValues: {} - status: available \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_increase_replica.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_increase_replica.yaml deleted file mode 100644 index c44f9dca..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_increase_replica.yaml +++ /dev/null @@ -1,104 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.micro - engine: redis - numNodeGroups: 1 - replicasPerNodeGroup: 2 # mismatch between this and member clusters because new config was just applied - description: cluster-mode disabled RG - replicationGroupID: rg-cmd - atRestEncryptionEnabled: false - snapshotRetentionLimit: 0 - snapshotWindow: "10:00-11:00" - transitEncryptionEnabled: false -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd - ownerAccountID: "" - region: "" - allowedScaleUpModifications: - - cache.m3.2xlarge - - cache.m3.large - - cache.m3.medium - - cache.m3.xlarge - - cache.m4.10xlarge - - cache.m4.2xlarge - - cache.m4.4xlarge - - cache.m4.large - - cache.m4.xlarge - - cache.m5.12xlarge - - cache.m5.24xlarge - - cache.m5.2xlarge - - cache.m5.4xlarge - - cache.m5.large - - cache.m5.xlarge - - cache.m6g.large - - cache.r3.2xlarge - - cache.r3.4xlarge - - cache.r3.8xlarge - - cache.r3.large - - cache.r3.xlarge - - cache.r4.16xlarge - - cache.r4.2xlarge - - cache.r4.4xlarge - - cache.r4.8xlarge - - cache.r4.large - - cache.r4.xlarge - - cache.r5.12xlarge - - cache.r5.24xlarge - - cache.r5.2xlarge - - cache.r5.4xlarge - - cache.r5.large - - cache.r5.xlarge - - cache.r6g.2xlarge - - cache.r6g.4xlarge - - cache.r6g.8xlarge - - cache.r6g.large - - cache.r6g.xlarge - - cache.t2.medium - - cache.t2.micro - - cache.t2.small - - cache.t3.medium - - cache.t3.small - authTokenEnabled: false - automaticFailover: disabled - clusterEnabled: false - conditions: - - status: "True" - type: ACK.ResourceSynced - description: cluster-mode disabled RG - events: - - date: "2021-03-30T20:12:00Z" - message: Replication group rg-cmd created - globalReplicationGroupInfo: {} - memberClusters: - - rg-cmd-001 - - rg-cmd-002 - multiAZ: disabled - nodeGroups: - - nodeGroupID: "0001" - nodeGroupMembers: - - cacheClusterID: rg-cmd-001 - cacheNodeID: "0001" - currentRole: primary - preferredAvailabilityZone: us-east-1b - readEndpoint: - address: rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - - cacheClusterID: rg-cmd-002 - cacheNodeID: "0001" - currentRole: replica - preferredAvailabilityZone: us-east-1d - readEndpoint: - address: 
rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - primaryEndpoint: - address: rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - readerEndpoint: - address: rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - status: available - pendingModifiedValues: {} - status: available \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_increase_replica_latest.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_increase_replica_latest.yaml deleted file mode 100644 index be4111d4..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_increase_replica_latest.yaml +++ /dev/null @@ -1,104 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.micro - engine: redis - numNodeGroups: 1 - replicasPerNodeGroup: 1 # as this is the latest state, replicasPerNodeGroup should be consistent with memberClusters/nodeGroups - description: cluster-mode disabled RG - replicationGroupID: rg-cmd - atRestEncryptionEnabled: false - snapshotRetentionLimit: 0 - snapshotWindow: "10:00-11:00" - transitEncryptionEnabled: false -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd - ownerAccountID: "" - region: "" - allowedScaleUpModifications: - - cache.m3.2xlarge - - cache.m3.large - - cache.m3.medium - - cache.m3.xlarge - - cache.m4.10xlarge - - cache.m4.2xlarge - - cache.m4.4xlarge - - cache.m4.large - - cache.m4.xlarge - - cache.m5.12xlarge - - cache.m5.24xlarge - - cache.m5.2xlarge - - cache.m5.4xlarge - - cache.m5.large - - cache.m5.xlarge - - cache.m6g.large - - cache.r3.2xlarge - - cache.r3.4xlarge - - cache.r3.8xlarge - - cache.r3.large - - cache.r3.xlarge - - cache.r4.16xlarge - - cache.r4.2xlarge - - cache.r4.4xlarge - - cache.r4.8xlarge - - cache.r4.large - - cache.r4.xlarge - - cache.r5.12xlarge - - cache.r5.24xlarge - - cache.r5.2xlarge - - cache.r5.4xlarge - - cache.r5.large - - cache.r5.xlarge - - cache.r6g.2xlarge - - cache.r6g.4xlarge - - cache.r6g.8xlarge - - cache.r6g.large - - cache.r6g.xlarge - - cache.t2.medium - - cache.t2.micro - - cache.t2.small - - cache.t3.medium - - cache.t3.small - authTokenEnabled: false - automaticFailover: disabled - clusterEnabled: false - conditions: - - status: "True" - type: ACK.ResourceSynced - description: cluster-mode disabled RG - events: - - date: "2021-03-30T20:12:00Z" - message: Replication group rg-cmd created - globalReplicationGroupInfo: {} - memberClusters: - - rg-cmd-001 - - rg-cmd-002 - multiAZ: disabled - nodeGroups: - - nodeGroupID: "0001" - nodeGroupMembers: - - cacheClusterID: rg-cmd-001 - cacheNodeID: "0001" - currentRole: primary - preferredAvailabilityZone: us-east-1b - readEndpoint: - address: rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - - cacheClusterID: rg-cmd-002 - cacheNodeID: "0001" - currentRole: replica - preferredAvailabilityZone: us-east-1d - readEndpoint: - address: rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - primaryEndpoint: - address: rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - readerEndpoint: - address: rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - status: available - pendingModifiedValues: {} - status: available \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_scale_up_desired.yaml 
b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_scale_up_desired.yaml deleted file mode 100644 index 42689549..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_scale_up_desired.yaml +++ /dev/null @@ -1,91 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.small - engine: redis - numNodeGroups: 1 - replicasPerNodeGroup: 1 - description: cluster-mode disabled RG - replicationGroupID: rg-cmd -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd - ownerAccountID: "" - region: "" - allowedScaleUpModifications: - - cache.m3.large - - cache.m4.10xlarge - - cache.m4.2xlarge - - cache.m4.4xlarge - - cache.m4.large - - cache.m4.xlarge - - cache.m5.12xlarge - - cache.m5.24xlarge - - cache.m5.2xlarge - - cache.m5.4xlarge - - cache.m5.large - - cache.m5.xlarge - - cache.m6g.large - - cache.r4.2xlarge - - cache.r4.4xlarge - - cache.r4.8xlarge - - cache.r4.large - - cache.r4.xlarge - - cache.r5.12xlarge - - cache.r5.24xlarge - - cache.r5.2xlarge - - cache.r5.4xlarge - - cache.r5.large - - cache.r5.xlarge - - cache.r6g.2xlarge - - cache.r6g.4xlarge - - cache.r6g.8xlarge - - cache.r6g.large - - cache.r6g.xlarge - - cache.t2.medium - - cache.t2.micro - - cache.t2.small - - cache.t3.medium - - cache.t3.small - authTokenEnabled: false - automaticFailover: disabled - clusterEnabled: false - conditions: - - status: "True" - type: ACK.ResourceSynced - description: cluster-mode disabled RG - events: - - date: "2021-04-12T23:28:40Z" - message: Replication group rg-cmd created - globalReplicationGroupInfo: {} - memberClusters: - - rg-cmd-001 - - rg-cmd-002 - multiAZ: disabled - nodeGroups: - - nodeGroupID: "0001" - nodeGroupMembers: - - cacheClusterID: rg-cmd-001 - cacheNodeID: "0001" - currentRole: primary - preferredAvailabilityZone: us-east-1d - readEndpoint: - address: rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - - cacheClusterID: rg-cmd-002 - cacheNodeID: "0001" - currentRole: replica - preferredAvailabilityZone: us-east-1c - readEndpoint: - address: rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - primaryEndpoint: - address: rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - readerEndpoint: - address: rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - status: available - pendingModifiedValues: {} - status: available \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_scale_up_latest.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_scale_up_latest.yaml deleted file mode 100644 index 0b810ca8..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_before_scale_up_latest.yaml +++ /dev/null @@ -1,91 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.micro - engine: redis - numNodeGroups: 1 - replicasPerNodeGroup: 1 - description: cluster-mode disabled RG - replicationGroupID: rg-cmd -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd - ownerAccountID: "" - region: "" - allowedScaleUpModifications: - - cache.m3.large - - cache.m4.10xlarge - - cache.m4.2xlarge - - cache.m4.4xlarge - - cache.m4.large - - cache.m4.xlarge - - cache.m5.12xlarge - - cache.m5.24xlarge - - cache.m5.2xlarge - - 
cache.m5.4xlarge - - cache.m5.large - - cache.m5.xlarge - - cache.m6g.large - - cache.r4.2xlarge - - cache.r4.4xlarge - - cache.r4.8xlarge - - cache.r4.large - - cache.r4.xlarge - - cache.r5.12xlarge - - cache.r5.24xlarge - - cache.r5.2xlarge - - cache.r5.4xlarge - - cache.r5.large - - cache.r5.xlarge - - cache.r6g.2xlarge - - cache.r6g.4xlarge - - cache.r6g.8xlarge - - cache.r6g.large - - cache.r6g.xlarge - - cache.t2.medium - - cache.t2.micro - - cache.t2.small - - cache.t3.medium - - cache.t3.small - authTokenEnabled: false - automaticFailover: disabled - clusterEnabled: false - conditions: - - status: "True" - type: ACK.ResourceSynced - description: cluster-mode disabled RG - events: - - date: "2021-04-12T23:28:40Z" - message: Replication group rg-cmd created - globalReplicationGroupInfo: {} - memberClusters: - - rg-cmd-001 - - rg-cmd-002 - multiAZ: disabled - nodeGroups: - - nodeGroupID: "0001" - nodeGroupMembers: - - cacheClusterID: rg-cmd-001 - cacheNodeID: "0001" - currentRole: primary - preferredAvailabilityZone: us-east-1d - readEndpoint: - address: rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - - cacheClusterID: rg-cmd-002 - cacheNodeID: "0001" - currentRole: replica - preferredAvailabilityZone: us-east-1c - readEndpoint: - address: rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - primaryEndpoint: - address: rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - readerEndpoint: - address: rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - status: available - pendingModifiedValues: {} - status: available \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_create_completed.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_create_completed.yaml deleted file mode 100644 index 1f24bdc7..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_create_completed.yaml +++ /dev/null @@ -1,105 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - atRestEncryptionEnabled: false - cacheNodeType: cache.t3.micro - engine: redis - numNodeGroups: 1 - preferredMaintenanceWindow: "wed:08:00-wed:09:00" - replicasPerNodeGroup: 1 - description: cluster-mode disabled RG - replicationGroupID: rg-cmd - snapshotRetentionLimit: 0 - snapshotWindow: "10:00-11:00" - transitEncryptionEnabled: false -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd - ownerAccountID: "" - region: "" - allowedScaleUpModifications: - - cache.m3.2xlarge - - cache.m3.large - - cache.m3.medium - - cache.m3.xlarge - - cache.m4.10xlarge - - cache.m4.2xlarge - - cache.m4.4xlarge - - cache.m4.large - - cache.m4.xlarge - - cache.m5.12xlarge - - cache.m5.24xlarge - - cache.m5.2xlarge - - cache.m5.4xlarge - - cache.m5.large - - cache.m5.xlarge - - cache.m6g.large - - cache.r3.2xlarge - - cache.r3.4xlarge - - cache.r3.8xlarge - - cache.r3.large - - cache.r3.xlarge - - cache.r4.16xlarge - - cache.r4.2xlarge - - cache.r4.4xlarge - - cache.r4.8xlarge - - cache.r4.large - - cache.r4.xlarge - - cache.r5.12xlarge - - cache.r5.24xlarge - - cache.r5.2xlarge - - cache.r5.4xlarge - - cache.r5.large - - cache.r5.xlarge - - cache.r6g.2xlarge - - cache.r6g.4xlarge - - cache.r6g.8xlarge - - cache.r6g.large - - cache.r6g.xlarge - - cache.t2.medium - - cache.t2.micro - - cache.t2.small - - cache.t3.medium - - cache.t3.small - authTokenEnabled: false - automaticFailover: disabled - 
clusterEnabled: false - conditions: - - status: "True" - type: ACK.ResourceSynced - description: cluster-mode disabled RG - events: - - date: "2021-03-30T20:12:00Z" - message: Replication group rg-cmd created - globalReplicationGroupInfo: {} - memberClusters: - - rg-cmd-001 - - rg-cmd-002 - multiAZ: disabled - nodeGroups: - - nodeGroupID: "0001" - nodeGroupMembers: - - cacheClusterID: rg-cmd-001 - cacheNodeID: "0001" - currentRole: primary - preferredAvailabilityZone: us-east-1b - readEndpoint: - address: rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - - cacheClusterID: rg-cmd-002 - cacheNodeID: "0001" - currentRole: replica - preferredAvailabilityZone: us-east-1d - readEndpoint: - address: rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - primaryEndpoint: - address: rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - readerEndpoint: - address: rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - status: available - pendingModifiedValues: {} - status: available \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_create_completed_latest.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_create_completed_latest.yaml deleted file mode 100644 index dcec1277..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_create_completed_latest.yaml +++ /dev/null @@ -1,106 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - atRestEncryptionEnabled: false - cacheNodeType: cache.t3.micro - cacheParameterGroupName: "default.redis5.0" - engine: redis - numNodeGroups: 1 - preferredMaintenanceWindow: "wed:08:00-wed:09:00" - replicasPerNodeGroup: 1 - description: cluster-mode disabled RG - replicationGroupID: rg-cmd - snapshotRetentionLimit: 0 - snapshotWindow: "10:00-11:00" - transitEncryptionEnabled: false -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd - ownerAccountID: "" - region: "" - allowedScaleUpModifications: - - cache.m3.2xlarge - - cache.m3.large - - cache.m3.medium - - cache.m3.xlarge - - cache.m4.10xlarge - - cache.m4.2xlarge - - cache.m4.4xlarge - - cache.m4.large - - cache.m4.xlarge - - cache.m5.12xlarge - - cache.m5.24xlarge - - cache.m5.2xlarge - - cache.m5.4xlarge - - cache.m5.large - - cache.m5.xlarge - - cache.m6g.large - - cache.r3.2xlarge - - cache.r3.4xlarge - - cache.r3.8xlarge - - cache.r3.large - - cache.r3.xlarge - - cache.r4.16xlarge - - cache.r4.2xlarge - - cache.r4.4xlarge - - cache.r4.8xlarge - - cache.r4.large - - cache.r4.xlarge - - cache.r5.12xlarge - - cache.r5.24xlarge - - cache.r5.2xlarge - - cache.r5.4xlarge - - cache.r5.large - - cache.r5.xlarge - - cache.r6g.2xlarge - - cache.r6g.4xlarge - - cache.r6g.8xlarge - - cache.r6g.large - - cache.r6g.xlarge - - cache.t2.medium - - cache.t2.micro - - cache.t2.small - - cache.t3.medium - - cache.t3.small - authTokenEnabled: false - automaticFailover: disabled - clusterEnabled: false - conditions: - - status: "True" - type: ACK.ResourceSynced - description: cluster-mode disabled RG - events: - - date: "2021-03-30T20:12:00Z" - message: Replication group rg-cmd created - globalReplicationGroupInfo: {} - memberClusters: - - rg-cmd-001 - - rg-cmd-002 - multiAZ: disabled - nodeGroups: - - nodeGroupID: "0001" - nodeGroupMembers: - - cacheClusterID: rg-cmd-001 - cacheNodeID: "0001" - currentRole: primary - preferredAvailabilityZone: us-east-1b - readEndpoint: - 
address: rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - - cacheClusterID: rg-cmd-002 - cacheNodeID: "0001" - currentRole: replica - preferredAvailabilityZone: us-east-1d - readEndpoint: - address: rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - primaryEndpoint: - address: rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - readerEndpoint: - address: rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - status: available - pendingModifiedValues: {} - status: available \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_create_initiated.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_create_initiated.yaml deleted file mode 100644 index bab3f00a..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_create_initiated.yaml +++ /dev/null @@ -1,32 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.micro - engine: redis - numNodeGroups: 1 - replicasPerNodeGroup: 1 - description: cluster-mode disabled RG - replicationGroupID: rg-cmd - atRestEncryptionEnabled: false - SnapshotRetentionLimit: 0 - snapshotWindow: "09:00-10:00" - TransitEncryptionEnabled: false -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd - ownerAccountID: "" # for tests the ownerAccountID of the resource manager is empty (see implementations of TestRunnerDelegate's ResourceManager function) - region: "" - automaticFailover: disabled - clusterEnabled: false - conditions: - - status: "False" - type: ACK.ResourceSynced - description: cluster-mode disabled RG - globalReplicationGroupInfo: {} - memberClusters: - - rg-cmd-001 - - rg-cmd-002 - multiAZ: disabled - pendingModifiedValues: {} - status: creating \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_delete_initiated.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_delete_initiated.yaml deleted file mode 100644 index 3619a1e9..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_delete_initiated.yaml +++ /dev/null @@ -1,104 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - atRestEncryptionEnabled: false - cacheNodeType: cache.t3.micro - engine: redis - numNodeGroups: 1 - replicasPerNodeGroup: 1 - description: cluster-mode disabled RG - replicationGroupID: rg-cmd - snapshotRetentionLimit: 0 - snapshotWindow: "10:00-11:00" - transitEncryptionEnabled: false -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd - ownerAccountID: "" - region: "" - allowedScaleUpModifications: - - cache.m3.2xlarge - - cache.m3.large - - cache.m3.medium - - cache.m3.xlarge - - cache.m4.10xlarge - - cache.m4.2xlarge - - cache.m4.4xlarge - - cache.m4.large - - cache.m4.xlarge - - cache.m5.12xlarge - - cache.m5.24xlarge - - cache.m5.2xlarge - - cache.m5.4xlarge - - cache.m5.large - - cache.m5.xlarge - - cache.m6g.large - - cache.r3.2xlarge - - cache.r3.4xlarge - - cache.r3.8xlarge - - cache.r3.large - - cache.r3.xlarge - - cache.r4.16xlarge - - cache.r4.2xlarge - - cache.r4.4xlarge - - cache.r4.8xlarge - - cache.r4.large - - cache.r4.xlarge - - cache.r5.12xlarge - - cache.r5.24xlarge - - cache.r5.2xlarge - - cache.r5.4xlarge - - cache.r5.large - - cache.r5.xlarge - - 
cache.r6g.2xlarge - - cache.r6g.4xlarge - - cache.r6g.8xlarge - - cache.r6g.large - - cache.r6g.xlarge - - cache.t2.medium - - cache.t2.micro - - cache.t2.small - - cache.t3.medium - - cache.t3.small - authTokenEnabled: false - automaticFailover: disabled - clusterEnabled: false - conditions: - - status: "True" - type: ACK.ResourceSynced - description: cluster-mode disabled RG - events: - - date: "2021-03-30T20:12:00Z" - message: Replication group rg-cmd created - globalReplicationGroupInfo: {} - memberClusters: - - rg-cmd-001 - - rg-cmd-002 - multiAZ: disabled - nodeGroups: - - nodeGroupID: "0001" - nodeGroupMembers: - - cacheClusterID: rg-cmd-001 - cacheNodeID: "0001" - currentRole: primary - preferredAvailabilityZone: us-east-1b - readEndpoint: - address: rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - - cacheClusterID: rg-cmd-002 - cacheNodeID: "0001" - currentRole: replica - preferredAvailabilityZone: us-east-1d - readEndpoint: - address: rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - primaryEndpoint: - address: rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - readerEndpoint: - address: rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - status: available - pendingModifiedValues: {} - status: deleting \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_engine_upgrade_initiated.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_engine_upgrade_initiated.yaml deleted file mode 100644 index f01ca8e3..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_engine_upgrade_initiated.yaml +++ /dev/null @@ -1,61 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.micro - engine: redis - engineVersion: 5.0.6 # resource has 5.0.0 but custom modify code copies desired EV to latest and doesn't overwrite it - numNodeGroups: 1 - replicasPerNodeGroup: 1 - description: cluster-mode disabled RG - replicationGroupID: rg-cmd - atRestEncryptionEnabled: false - snapshotRetentionLimit: 0 - snapshotWindow: "06:30-07:30" - transitEncryptionEnabled: false -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd - ownerAccountID: "" - region: "" - authTokenEnabled: false - automaticFailover: disabled - clusterEnabled: false - conditions: - - status: "False" - type: ACK.ResourceSynced - description: cluster-mode disabled RG - events: - - date: "2021-04-13T19:07:05Z" - message: Replication group rg-cmd created - globalReplicationGroupInfo: {} - memberClusters: - - rg-cmd-001 - - rg-cmd-002 - multiAZ: disabled - nodeGroups: - - nodeGroupID: "0001" - nodeGroupMembers: - - cacheClusterID: rg-cmd-001 - cacheNodeID: "0001" - currentRole: primary - preferredAvailabilityZone: us-east-1b - readEndpoint: - address: rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - - cacheClusterID: rg-cmd-002 - cacheNodeID: "0001" - currentRole: replica - preferredAvailabilityZone: us-east-1d - readEndpoint: - address: rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - primaryEndpoint: - address: rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - readerEndpoint: - address: rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - status: modifying - pendingModifiedValues: {} - status: modifying \ No newline at end of file diff --git 
a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_increase_replica_initiated.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_increase_replica_initiated.yaml deleted file mode 100644 index f1fbb7f7..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_increase_replica_initiated.yaml +++ /dev/null @@ -1,61 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.micro - engine: redis - numNodeGroups: 1 - replicasPerNodeGroup: 2 # new replica hasn't been added, but 2 comes from desired and isn't overwritten in custom modify code - description: cluster-mode disabled RG - replicationGroupID: rg-cmd - atRestEncryptionEnabled: false - snapshotRetentionLimit: 0 - snapshotWindow: "10:00-11:00" - transitEncryptionEnabled: false -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd - ownerAccountID: "" - region: "" - authTokenEnabled: false - automaticFailover: disabled - clusterEnabled: false - conditions: - - status: "False" - type: ACK.ResourceSynced - description: cluster-mode disabled RG - events: - - date: "2021-03-30T20:12:00Z" - message: Replication group rg-cmd created - globalReplicationGroupInfo: {} - memberClusters: - - rg-cmd-001 - - rg-cmd-002 - - rg-cmd-003 - multiAZ: disabled - nodeGroups: - - nodeGroupID: "0001" - nodeGroupMembers: - - cacheClusterID: rg-cmd-001 - cacheNodeID: "0001" - currentRole: primary - preferredAvailabilityZone: us-east-1b - readEndpoint: - address: rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - - cacheClusterID: rg-cmd-002 - cacheNodeID: "0001" - currentRole: replica - preferredAvailabilityZone: us-east-1d - readEndpoint: - address: rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - primaryEndpoint: - address: rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - readerEndpoint: - address: rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - status: modifying - pendingModifiedValues: {} - status: modifying \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_invalid_before_create.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_invalid_before_create.yaml deleted file mode 100644 index 49a06ada..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_invalid_before_create.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.micro - engine: redis - numNodeGroups: 1 - replicasPerNodeGroup: 7 - description: cluster-mode disabled RG - replicationGroupID: rg-cmd \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_invalid_create_attempted.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_invalid_create_attempted.yaml deleted file mode 100644 index faa4b7c5..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_invalid_create_attempted.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.micro - engine: redis - numNodeGroups: 1 - replicasPerNodeGroup: 7 - description: cluster-mode disabled RG - replicationGroupID: rg-cmd -status: - ackResourceMetadata: - ownerAccountID: "" - region: 
"" - conditions: - - message: "InvalidParameterValue: The number of replicas per node group must be within 0 and 5.\n\tstatus code: 0, request id: " - status: "True" - type: ACK.Terminal \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_scale_up_initiated.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_scale_up_initiated.yaml deleted file mode 100644 index 978ec98f..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cmd_scale_up_initiated.yaml +++ /dev/null @@ -1,59 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.small - engine: redis - numNodeGroups: 1 - replicasPerNodeGroup: 1 - description: cluster-mode disabled RG - replicationGroupID: rg-cmd - atRestEncryptionEnabled: false - SnapshotRetentionLimit: 0 - snapshotWindow: "09:00-10:00" - TransitEncryptionEnabled: false -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd - ownerAccountID: "" - region: "" - automaticFailover: disabled - clusterEnabled: false - conditions: - - status: "False" - type: ACK.ResourceSynced - description: cluster-mode disabled RG - events: - - date: "2021-04-12T23:28:40Z" - message: Replication group rg-cmd created - globalReplicationGroupInfo: {} - memberClusters: - - rg-cmd-001 - - rg-cmd-002 - multiAZ: disabled - nodeGroups: - - nodeGroupID: "0001" - nodeGroupMembers: - - cacheClusterID: rg-cmd-001 - cacheNodeID: "0001" - currentRole: primary - preferredAvailabilityZone: us-east-1d - readEndpoint: - address: rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - - cacheClusterID: rg-cmd-002 - cacheNodeID: "0001" - currentRole: replica - preferredAvailabilityZone: us-east-1c - readEndpoint: - address: rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com - port: 6379 - primaryEndpoint: - address: rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - readerEndpoint: - address: rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com - port: 6379 - status: modifying - pendingModifiedValues: {} - status: modifying \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cme_before_create.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cme_before_create.yaml deleted file mode 100644 index 5e8a60e1..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cme_before_create.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.micro - engine: redis - nodeGroupConfiguration: - - nodeGroupID: "1111" - primaryAvailabilityZone: us-east-1a - replicaAvailabilityZones: - - us-east-1b - replicaCount: 1 - slots: 0-5999 - - nodeGroupID: "2222" - primaryAvailabilityZone: us-east-1c - replicaAvailabilityZones: - - us-east-1d - - us-east-1a - - us-east-1b - replicaCount: 3 - slots: 6000-16383 - numNodeGroups: 2 - description: cluster-mode enabled RG - replicationGroupID: rg-cme \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cme_create_initiated.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cme_create_initiated.yaml deleted file mode 100644 index d8b43b93..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cme_create_initiated.yaml +++ /dev/null @@ -1,50 +0,0 @@ 
-apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.micro - engine: redis - nodeGroupConfiguration: - - nodeGroupID: "1111" - primaryAvailabilityZone: us-east-1a - replicaAvailabilityZones: - - us-east-1b - replicaCount: 1 - slots: 0-5999 - - nodeGroupID: "2222" - primaryAvailabilityZone: us-east-1c - replicaAvailabilityZones: - - us-east-1d - - us-east-1a - - us-east-1b - replicaCount: 3 - slots: 6000-16383 - numNodeGroups: 2 - description: cluster-mode enabled RG - replicationGroupID: rg-cme - atRestEncryptionEnabled: false - SnapshotRetentionLimit: 0 - snapshotWindow: "08:00-09:00" - TransitEncryptionEnabled: false -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cme - ownerAccountID: "" - region: "" - automaticFailover: enabled - clusterEnabled: true - conditions: - - status: "False" - type: ACK.ResourceSynced - description: cluster-mode enabled RG - globalReplicationGroupInfo: {} - memberClusters: - - rg-cme-1111-001 - - rg-cme-1111-002 - - rg-cme-2222-001 - - rg-cme-2222-002 - - rg-cme-2222-003 - - rg-cme-2222-004 - multiAZ: disabled - pendingModifiedValues: {} - status: creating \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cme_invalid_scale_out_attempted.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cme_invalid_scale_out_attempted.yaml deleted file mode 100644 index 11ce7a73..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cme_invalid_scale_out_attempted.yaml +++ /dev/null @@ -1,118 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.micro - cacheSubnetGroupName: default - engine: redis - nodeGroupConfiguration: - - nodeGroupID: "1111" - primaryAvailabilityZone: us-east-1a - replicaAvailabilityZones: - - us-east-1b - replicaCount: 1 - slots: 0-5999 - - nodeGroupID: "2222" - primaryAvailabilityZone: us-east-1c - replicaAvailabilityZones: - - us-east-1d - - us-east-1a - - us-east-1b - replicaCount: 3 - slots: 6000-16383 - numNodeGroups: 3 # this is the mismatch; 3 shards indicated but only 2 are specified in above nodeGroupConfiguration - description: cluster-mode enabled RG - replicationGroupID: rg-cme -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cme - ownerAccountID: "" - region: "" - allowedScaleUpModifications: - - cache.m3.large - - cache.m4.10xlarge - - cache.m4.2xlarge - - cache.m4.4xlarge - - cache.m4.large - - cache.m4.xlarge - - cache.m5.24xlarge - - cache.m5.2xlarge - - cache.m5.4xlarge - - cache.m5.large - - cache.m5.xlarge - - cache.m6g.large - - cache.r4.2xlarge - - cache.r4.4xlarge - - cache.r4.8xlarge - - cache.r4.large - - cache.r4.xlarge - - cache.r5.12xlarge - - cache.r5.24xlarge - - cache.r5.2xlarge - - cache.r5.4xlarge - - cache.r5.large - - cache.r5.xlarge - - cache.r6g.2xlarge - - cache.r6g.4xlarge - - cache.r6g.8xlarge - - cache.r6g.large - - cache.r6g.xlarge - - cache.t2.medium - - cache.t2.micro - - cache.t2.small - - cache.t3.medium - - cache.t3.small - authTokenEnabled: false - automaticFailover: enabled - clusterEnabled: true - conditions: - - status: "True" #TODO: should synced condition be set false when terminal condition is true? 
- type: ACK.ResourceSynced - - message: "InvalidParameterValue: Configuration for all the node groups should be provided.\n\tstatus code: 0, request id: " - status: "True" - type: ACK.Terminal - configurationEndpoint: - address: rg-cme.xxxxxx.clustercfg.use1.cache.amazonaws.com - port: 6379 - description: cluster-mode enabled RG - events: - - date: "2021-04-14T19:36:01Z" - message: Replication group rg-cme created - globalReplicationGroupInfo: {} - memberClusters: - - rg-cme-1111-001 - - rg-cme-1111-002 - - rg-cme-2222-001 - - rg-cme-2222-002 - - rg-cme-2222-003 - - rg-cme-2222-004 - multiAZ: disabled - nodeGroups: - - nodeGroupID: "1111" - nodeGroupMembers: - - cacheClusterID: rg-cme-1111-001 - cacheNodeID: "0001" - preferredAvailabilityZone: us-east-1a - - cacheClusterID: rg-cme-1111-002 - cacheNodeID: "0001" - preferredAvailabilityZone: us-east-1b - slots: 0-5999 - status: available - - nodeGroupID: "2222" - nodeGroupMembers: - - cacheClusterID: rg-cme-2222-001 - cacheNodeID: "0001" - preferredAvailabilityZone: us-east-1c - - cacheClusterID: rg-cme-2222-002 - cacheNodeID: "0001" - preferredAvailabilityZone: us-east-1d - - cacheClusterID: rg-cme-2222-003 - cacheNodeID: "0001" - preferredAvailabilityZone: us-east-1a - - cacheClusterID: rg-cme-2222-004 - cacheNodeID: "0001" - preferredAvailabilityZone: us-east-1b - slots: 6000-16383 - status: available - pendingModifiedValues: {} - status: available \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cme_shard_mismatch.yaml b/pkg/resource/replication_group/testdata/replication_group/cr/rg_cme_shard_mismatch.yaml deleted file mode 100644 index ba840422..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/cr/rg_cme_shard_mismatch.yaml +++ /dev/null @@ -1,115 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -# omitted metadata -spec: - cacheNodeType: cache.t3.micro - cacheSubnetGroupName: default - engine: redis - nodeGroupConfiguration: - - nodeGroupID: "1111" - primaryAvailabilityZone: us-east-1a - replicaAvailabilityZones: - - us-east-1b - replicaCount: 1 - slots: 0-5999 - - nodeGroupID: "2222" - primaryAvailabilityZone: us-east-1c - replicaAvailabilityZones: - - us-east-1d - - us-east-1a - - us-east-1b - replicaCount: 3 - slots: 6000-16383 - numNodeGroups: 3 # this is the mismatch; 3 shards indicated but only 2 are specified in above nodeGroupConfiguration - description: cluster-mode enabled RG - replicationGroupID: rg-cme -status: - ackResourceMetadata: - arn: arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cme - ownerAccountID: "" - region: "" - allowedScaleUpModifications: - - cache.m3.large - - cache.m4.10xlarge - - cache.m4.2xlarge - - cache.m4.4xlarge - - cache.m4.large - - cache.m4.xlarge - - cache.m5.24xlarge - - cache.m5.2xlarge - - cache.m5.4xlarge - - cache.m5.large - - cache.m5.xlarge - - cache.m6g.large - - cache.r4.2xlarge - - cache.r4.4xlarge - - cache.r4.8xlarge - - cache.r4.large - - cache.r4.xlarge - - cache.r5.12xlarge - - cache.r5.24xlarge - - cache.r5.2xlarge - - cache.r5.4xlarge - - cache.r5.large - - cache.r5.xlarge - - cache.r6g.2xlarge - - cache.r6g.4xlarge - - cache.r6g.8xlarge - - cache.r6g.large - - cache.r6g.xlarge - - cache.t2.medium - - cache.t2.micro - - cache.t2.small - - cache.t3.medium - - cache.t3.small - authTokenEnabled: false - automaticFailover: enabled - clusterEnabled: true - conditions: - - status: "True" - type: ACK.ResourceSynced - configurationEndpoint: - address: 
rg-cme.xxxxxx.clustercfg.use1.cache.amazonaws.com - port: 6379 - description: cluster-mode enabled RG - events: - - date: "2021-04-14T19:36:01Z" - message: Replication group rg-cme created - globalReplicationGroupInfo: {} - memberClusters: - - rg-cme-1111-001 - - rg-cme-1111-002 - - rg-cme-2222-001 - - rg-cme-2222-002 - - rg-cme-2222-003 - - rg-cme-2222-004 - multiAZ: disabled - nodeGroups: - - nodeGroupID: "1111" - nodeGroupMembers: - - cacheClusterID: rg-cme-1111-001 - cacheNodeID: "0001" - preferredAvailabilityZone: us-east-1a - - cacheClusterID: rg-cme-1111-002 - cacheNodeID: "0001" - preferredAvailabilityZone: us-east-1b - slots: 0-5999 - status: available - - nodeGroupID: "2222" - nodeGroupMembers: - - cacheClusterID: rg-cme-2222-001 - cacheNodeID: "0001" - preferredAvailabilityZone: us-east-1c - - cacheClusterID: rg-cme-2222-002 - cacheNodeID: "0001" - preferredAvailabilityZone: us-east-1d - - cacheClusterID: rg-cme-2222-003 - cacheNodeID: "0001" - preferredAvailabilityZone: us-east-1a - - cacheClusterID: rg-cme-2222-004 - cacheNodeID: "0001" - preferredAvailabilityZone: us-east-1b - slots: 6000-16383 - status: available - pendingModifiedValues: {} - status: available \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/create/rg_cmd_creating.json b/pkg/resource/replication_group/testdata/replication_group/create/rg_cmd_creating.json deleted file mode 100644 index f10c3ad2..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/create/rg_cmd_creating.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "ReplicationGroup": { - "Status": "creating", - "MultiAZ": "disabled", - "Description": "cluster-mode disabled RG", - "AtRestEncryptionEnabled": false, - "ClusterEnabled": false, - "ReplicationGroupId": "rg-cmd", - "GlobalReplicationGroupInfo": {}, - "SnapshotRetentionLimit": 0, - "AutomaticFailover": "disabled", - "TransitEncryptionEnabled": false, - "SnapshotWindow": "09:00-10:00", - "MemberClusters": [ - "rg-cmd-001", - "rg-cmd-002" - ], - "CacheNodeType": "cache.t3.micro", - "ARN": "arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd", - "PendingModifiedValues": {} - } -} \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/create/rg_cme_creating.json b/pkg/resource/replication_group/testdata/replication_group/create/rg_cme_creating.json deleted file mode 100644 index f311d4b2..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/create/rg_cme_creating.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "ReplicationGroup": { - "Status": "creating", - "MultiAZ": "disabled", - "Description": "cluster-mode enabled RG", - "AtRestEncryptionEnabled": false, - "ClusterEnabled": true, - "ReplicationGroupId": "rg-cme", - "GlobalReplicationGroupInfo": {}, - "SnapshotRetentionLimit": 0, - "AutomaticFailover": "enabled", - "TransitEncryptionEnabled": false, - "SnapshotWindow": "08:00-09:00", - "MemberClusters": [ - "rg-cme-1111-001", - "rg-cme-1111-002", - "rg-cme-2222-001", - "rg-cme-2222-002", - "rg-cme-2222-003", - "rg-cme-2222-004" - ], - "CacheNodeType": "cache.t3.micro", - "ARN": "arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cme", - "PendingModifiedValues": {} - } -} \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/delete/rg_cmd_delete_initiated.json b/pkg/resource/replication_group/testdata/replication_group/delete/rg_cmd_delete_initiated.json deleted file mode 100644 index e34ffa8e..00000000 --- 
a/pkg/resource/replication_group/testdata/replication_group/delete/rg_cmd_delete_initiated.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "ReplicationGroup": { - "Status": "deleting", - "MultiAZ": "disabled", - "Description": "cluster-mode disabled RG", - "AtRestEncryptionEnabled": false, - "ReplicationGroupId": "rg-cmd", - "GlobalReplicationGroupInfo": {}, - "SnapshotRetentionLimit": 0, - "AutomaticFailover": "disabled", - "TransitEncryptionEnabled": false, - "SnapshotWindow": "10:00-11:00", - "ARN": "arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd", - "PendingModifiedValues": {} - } -} \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/read_one/rg_cmd_create_completed.json b/pkg/resource/replication_group/testdata/replication_group/read_one/rg_cmd_create_completed.json deleted file mode 100644 index 2c2a695f..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/read_one/rg_cmd_create_completed.json +++ /dev/null @@ -1,61 +0,0 @@ -{ - "ReplicationGroups": [ - { - "Status": "available", - "MultiAZ": "disabled", - "Description": "cluster-mode disabled RG", - "NodeGroups": [ - { - "Status": "available", - "NodeGroupMembers": [ - { - "CurrentRole": "primary", - "PreferredAvailabilityZone": "us-east-1b", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Port": 6379, - "Address": "rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com" - }, - "CacheClusterId": "rg-cmd-001" - }, - { - "CurrentRole": "replica", - "PreferredAvailabilityZone": "us-east-1d", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Port": 6379, - "Address": "rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com" - }, - "CacheClusterId": "rg-cmd-002" - } - ], - "ReaderEndpoint": { - "Port": 6379, - "Address": "rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com" - }, - "NodeGroupId": "0001", - "PrimaryEndpoint": { - "Port": 6379, - "Address": "rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com" - } - } - ], - "AuthTokenEnabled": false, - "AtRestEncryptionEnabled": false, - "ClusterEnabled": false, - "ReplicationGroupId": "rg-cmd", - "GlobalReplicationGroupInfo": {}, - "SnapshotRetentionLimit": 0, - "AutomaticFailover": "disabled", - "TransitEncryptionEnabled": false, - "SnapshotWindow": "10:00-11:00", - "MemberClusters": [ - "rg-cmd-001", - "rg-cmd-002" - ], - "CacheNodeType": "cache.t3.micro", - "ARN": "arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd", - "PendingModifiedValues": {} - } - ] -} \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/read_one/rg_cmd_delete_initiated.json b/pkg/resource/replication_group/testdata/replication_group/read_one/rg_cmd_delete_initiated.json deleted file mode 100644 index ab423000..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/read_one/rg_cmd_delete_initiated.json +++ /dev/null @@ -1,61 +0,0 @@ -{ - "ReplicationGroups": [ - { - "Status": "deleting", - "MultiAZ": "disabled", - "Description": "cluster-mode disabled RG", - "NodeGroups": [ - { - "Status": "available", - "NodeGroupMembers": [ - { - "CurrentRole": "primary", - "PreferredAvailabilityZone": "us-east-1b", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Port": 6379, - "Address": "rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com" - }, - "CacheClusterId": "rg-cmd-001" - }, - { - "CurrentRole": "replica", - "PreferredAvailabilityZone": "us-east-1d", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Port": 6379, - "Address": "rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com" - }, - 
"CacheClusterId": "rg-cmd-002" - } - ], - "ReaderEndpoint": { - "Port": 6379, - "Address": "rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com" - }, - "NodeGroupId": "0001", - "PrimaryEndpoint": { - "Port": 6379, - "Address": "rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com" - } - } - ], - "AuthTokenEnabled": false, - "AtRestEncryptionEnabled": false, - "ClusterEnabled": false, - "ReplicationGroupId": "rg-cmd", - "GlobalReplicationGroupInfo": {}, - "SnapshotRetentionLimit": 0, - "AutomaticFailover": "disabled", - "TransitEncryptionEnabled": false, - "SnapshotWindow": "10:00-11:00", - "MemberClusters": [ - "rg-cmd-001", - "rg-cmd-002" - ], - "CacheNodeType": "cache.t3.micro", - "ARN": "arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd", - "PendingModifiedValues": {} - } - ] -} \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/update/rg_cmd_engine_upgrade_initiated.json b/pkg/resource/replication_group/testdata/replication_group/update/rg_cmd_engine_upgrade_initiated.json deleted file mode 100644 index 9a02cd6a..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/update/rg_cmd_engine_upgrade_initiated.json +++ /dev/null @@ -1,59 +0,0 @@ -{ - "ReplicationGroup": { - "Status": "modifying", - "MultiAZ": "disabled", - "Description": "cluster-mode disabled RG", - "NodeGroups": [ - { - "Status": "modifying", - "NodeGroupMembers": [ - { - "CurrentRole": "primary", - "PreferredAvailabilityZone": "us-east-1b", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Port": 6379, - "Address": "rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com" - }, - "CacheClusterId": "rg-cmd-001" - }, - { - "CurrentRole": "replica", - "PreferredAvailabilityZone": "us-east-1d", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Port": 6379, - "Address": "rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com" - }, - "CacheClusterId": "rg-cmd-002" - } - ], - "ReaderEndpoint": { - "Port": 6379, - "Address": "rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com" - }, - "NodeGroupId": "0001", - "PrimaryEndpoint": { - "Port": 6379, - "Address": "rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com" - } - } - ], - "AtRestEncryptionEnabled": false, - "AuthTokenEnabled": false, - "ClusterEnabled": false, - "ReplicationGroupId": "rg-cmd", - "GlobalReplicationGroupInfo": {}, - "SnapshotRetentionLimit": 0, - "AutomaticFailover": "disabled", - "TransitEncryptionEnabled": false, - "SnapshotWindow": "06:30-07:30", - "MemberClusters": [ - "rg-cmd-001", - "rg-cmd-002" - ], - "CacheNodeType": "cache.t3.micro", - "ARN": "arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd", - "PendingModifiedValues": {} - } -} \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/update/rg_cmd_increase_replica_initiated.json b/pkg/resource/replication_group/testdata/replication_group/update/rg_cmd_increase_replica_initiated.json deleted file mode 100644 index 51606a39..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/update/rg_cmd_increase_replica_initiated.json +++ /dev/null @@ -1,60 +0,0 @@ -{ - "ReplicationGroup": { - "Status": "modifying", - "MultiAZ": "disabled", - "Description": "cluster-mode disabled RG", - "NodeGroups": [ - { - "Status": "modifying", - "NodeGroupMembers": [ - { - "CurrentRole": "primary", - "PreferredAvailabilityZone": "us-east-1b", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Port": 6379, - "Address": "rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com" - }, - "CacheClusterId": "rg-cmd-001" 
- }, - { - "CurrentRole": "replica", - "PreferredAvailabilityZone": "us-east-1d", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Port": 6379, - "Address": "rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com" - }, - "CacheClusterId": "rg-cmd-002" - } - ], - "ReaderEndpoint": { - "Port": 6379, - "Address": "rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com" - }, - "NodeGroupId": "0001", - "PrimaryEndpoint": { - "Port": 6379, - "Address": "rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com" - } - } - ], - "AtRestEncryptionEnabled": false, - "AuthTokenEnabled": false, - "ClusterEnabled": false, - "ReplicationGroupId": "rg-cmd", - "GlobalReplicationGroupInfo": {}, - "SnapshotRetentionLimit": 0, - "AutomaticFailover": "disabled", - "TransitEncryptionEnabled": false, - "SnapshotWindow": "10:00-11:00", - "MemberClusters": [ - "rg-cmd-001", - "rg-cmd-002", - "rg-cmd-003" - ], - "CacheNodeType": "cache.t3.micro", - "ARN": "arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd", - "PendingModifiedValues": {} - } -} \ No newline at end of file diff --git a/pkg/resource/replication_group/testdata/replication_group/update/rg_cmd_scale_up_initiated.json b/pkg/resource/replication_group/testdata/replication_group/update/rg_cmd_scale_up_initiated.json deleted file mode 100644 index 7aace9d7..00000000 --- a/pkg/resource/replication_group/testdata/replication_group/update/rg_cmd_scale_up_initiated.json +++ /dev/null @@ -1,58 +0,0 @@ -{ - "ReplicationGroup": { - "Status": "modifying", - "MultiAZ": "disabled", - "Description": "cluster-mode disabled RG", - "NodeGroups": [ - { - "Status": "modifying", - "NodeGroupMembers": [ - { - "CurrentRole": "primary", - "PreferredAvailabilityZone": "us-east-1d", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Port": 6379, - "Address": "rg-cmd-001.xxxxxx.0001.use1.cache.amazonaws.com" - }, - "CacheClusterId": "rg-cmd-001" - }, - { - "CurrentRole": "replica", - "PreferredAvailabilityZone": "us-east-1c", - "CacheNodeId": "0001", - "ReadEndpoint": { - "Port": 6379, - "Address": "rg-cmd-002.xxxxxx.0001.use1.cache.amazonaws.com" - }, - "CacheClusterId": "rg-cmd-002" - } - ], - "ReaderEndpoint": { - "Port": 6379, - "Address": "rg-cmd-ro.xxxxxx.ng.0001.use1.cache.amazonaws.com" - }, - "NodeGroupId": "0001", - "PrimaryEndpoint": { - "Port": 6379, - "Address": "rg-cmd.xxxxxx.ng.0001.use1.cache.amazonaws.com" - } - } - ], - "AtRestEncryptionEnabled": false, - "ClusterEnabled": false, - "ReplicationGroupId": "rg-cmd", - "GlobalReplicationGroupInfo": {}, - "SnapshotRetentionLimit": 0, - "AutomaticFailover": "disabled", - "TransitEncryptionEnabled": false, - "SnapshotWindow": "09:00-10:00", - "MemberClusters": [ - "rg-cmd-001", - "rg-cmd-002" - ], - "CacheNodeType": "cache.t3.small", - "ARN": "arn:aws:elasticache:us-east-1:012345678910:replicationgroup:rg-cmd", - "PendingModifiedValues": {} - } -} diff --git a/pkg/resource/replication_group/testdata/test_suite.yaml b/pkg/resource/replication_group/testdata/test_suite.yaml deleted file mode 100644 index d723b45d..00000000 --- a/pkg/resource/replication_group/testdata/test_suite.yaml +++ /dev/null @@ -1,160 +0,0 @@ -tests: - - name: "Cluster mode disabled replication group" - description: "Cluster mode disabled replication group CRUD tests" - scenarios: - - name: "ReadOne=DNE" - description: "Given that the resource doesn't exist, expect an error" - given: - desired_state: "replication_group/cr/rg_cmd_before_create.yaml" - svc_api: - - operation: DescribeReplicationGroupsWithContext - error: - code: ReplicationGroupNotFoundFault - 
message: "ReplicationGroup rg-cmd not found" - invoke: ReadOne # Unit under test. Possible values: Create | ReadOne | Update | Delete - expect: # no explicit latest_state expectation; returned resource may be non-nil - error: resource not found - - name: "Create=InvalidInput" - description: "Given one of the parameters is invalid, ko.Status shows a terminal condition" - given: - desired_state: "replication_group/cr/rg_cmd_invalid_before_create.yaml" - svc_api: - - operation: CreateReplicationGroupWithContext - error: - code: InvalidParameterValue - message: "The number of replicas per node group must be within 0 and 5." - invoke: Create - expect: - latest_state: "replication_group/cr/rg_cmd_invalid_create_attempted.yaml" - error: resource is in terminal condition - - name: "Create" - description: "Create a new replication group; ensure ko.Status shows that this create has been initiated" - given: - desired_state: "replication_group/cr/rg_cmd_before_create.yaml" - svc_api: - - operation: CreateReplicationGroupWithContext - output_fixture: "replication_group/create/rg_cmd_creating.json" - invoke: Create - expect: - latest_state: "replication_group/cr/rg_cmd_create_initiated.yaml" - error: nil - - name: "ReadOne=NewlyCreated" - description: "Given that the creation of this RG completes, ko.Status reflects that the RG is ready (e.g. ResourceSynced condition True)" - given: - desired_state: "replication_group/cr/rg_cmd_create_initiated.yaml" # RG is creating, but creating has not yet finished - svc_api: - - operation: DescribeReplicationGroupsWithContext - output_fixture: "replication_group/read_one/rg_cmd_create_completed.json" - - operation: ListAllowedNodeTypeModifications - output_fixture: "allowed_node_types/read_many/rg_cmd_allowed_node_types.json" - - operation: DescribeEventsWithContext - output_fixture: "events/read_many/rg_cmd_events.json" - - operation: DescribeCacheClustersWithContext - output_fixture: "cache_clusters/read_many/rg_cmd_primary_cache_node.json" - invoke: ReadOne - expect: - latest_state: "replication_group/cr/rg_cmd_create_completed_latest.yaml" - error: nil - - name: "ReadOne=NoDiff" - description: "Given desired state matches with server side resource data, ko.Status remain unchanged (resource is stable)" - given: # fixture - desired_state: "replication_group/cr/rg_cmd_create_completed.yaml" - svc_api: - - operation: DescribeReplicationGroupsWithContext - output_fixture: "replication_group/read_one/rg_cmd_create_completed.json" - - operation: ListAllowedNodeTypeModifications - output_fixture: "allowed_node_types/read_many/rg_cmd_allowed_node_types.json" - - operation: DescribeEventsWithContext - output_fixture: "events/read_many/rg_cmd_events.json" - - operation: DescribeCacheClustersWithContext - output_fixture: "cache_clusters/read_many/rg_cmd_primary_cache_node.json" - invoke: ReadOne - expect: - latest_state: "replication_group/cr/rg_cmd_create_completed_latest.yaml" - error: nil - - name: "Update=IncreaseReplicaCount" - description: "Ensure a replica is added once a new config is provided" - given: - desired_state: "replication_group/cr/rg_cmd_before_increase_replica.yaml" - latest_state: "replication_group/cr/rg_cmd_before_increase_replica_latest.yaml" - svc_api: - - operation: IncreaseReplicaCountWithContext - output_fixture: "replication_group/update/rg_cmd_increase_replica_initiated.json" - invoke: Update - expect: - latest_state: "replication_group/cr/rg_cmd_increase_replica_initiated.yaml" - error: nil - - name: "Update=ScaleUp" - description: "Scale up 
replication group to larger instance type" - given: - desired_state: "replication_group/cr/rg_cmd_before_scale_up_desired.yaml" - latest_state: "replication_group/cr/rg_cmd_before_scale_up_latest.yaml" - svc_api: - - operation: ModifyReplicationGroupWithContext - output_fixture: "replication_group/update/rg_cmd_scale_up_initiated.json" - invoke: Update - expect: - latest_state: "replication_group/cr/rg_cmd_scale_up_initiated.yaml" - error: nil - - name: "Update=UpgradeEngine" - description: "Upgrade Redis engine version from 5.0.0 to a newer version" - given: - desired_state: "replication_group/cr/rg_cmd_before_engine_version_upgrade.yaml" - latest_state: "replication_group/cr/rg_cmd_before_engine_version_upgrade_latest.yaml" - svc_api: - - operation: ModifyReplicationGroupWithContext - output_fixture: "replication_group/update/rg_cmd_engine_upgrade_initiated.json" - - operation: DescribeCacheClustersWithContext - output_fixture: "cache_clusters/read_many/rg_cmd_primary_cache_node.json" - invoke: Update - expect: - latest_state: "replication_group/cr/rg_cmd_engine_upgrade_initiated.yaml" - error: nil - - name: "DeleteInitiated" - description: "Delete cluster mode-disabled RG. RG moves from available to deleting state." - given: - desired_state: "replication_group/cr/rg_cmd_create_completed.yaml" - svc_api: - - operation: DeleteReplicationGroupWithContext - output_fixture: "replication_group/delete/rg_cmd_delete_initiated.json" - - operation: DescribeReplicationGroupsWithContext - output_fixture: "replication_group/read_one/rg_cmd_delete_initiated.json" - invoke: Delete - expect: - error: "Delete is in progress." - - name: "Deleting" - description: "Delete cluster mode-disabled RG. Retry scenario, RG is in deleting state." - given: - desired_state: "replication_group/cr/rg_cmd_delete_initiated.yaml" - svc_api: - invoke: Delete - expect: - error: "Delete is in progress." - - name: Cluster mode enabled replication group - description: Cluster mode enabled replication group CRUD tests - scenarios: - - name: "Create=CustomShardConfig" - description: Create CME RG with custom node group configuration - given: - desired_state: "replication_group/cr/rg_cme_before_create.yaml" - svc_api: - - operation: CreateReplicationGroupWithContext - output_fixture: "replication_group/create/rg_cme_creating.json" - invoke: Create - expect: - latest_state: "replication_group/cr/rg_cme_create_initiated.yaml" - error: nil - - name: "Update=ShardConfigMismatch" - description: Increasing NumNodeGroups without changing NodeGroupConfiguration should result in a terminal condition - given: - desired_state: "replication_group/cr/rg_cme_shard_mismatch.yaml" - latest_state: "replication_group/cr/rg_cme_shard_mismatch.yaml" - svc_api: - - operation: ModifyReplicationGroupShardConfigurationWithContext - error: - code: InvalidParameterValue - message: Configuration for all the node groups should be provided. - invoke: Update - expect: - latest_state: "replication_group/cr/rg_cme_invalid_scale_out_attempted.yaml" - error: resource is in terminal condition diff --git a/pkg/resource/serverless_cache/delta.go b/pkg/resource/serverless_cache/delta.go new file mode 100644 index 00000000..194402f6 --- /dev/null +++ b/pkg/resource/serverless_cache/delta.go @@ -0,0 +1,183 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. 
A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package serverless_cache + +import ( + "bytes" + "reflect" + + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" +) + +// Hack to avoid import errors during build... +var ( + _ = &bytes.Buffer{} + _ = &reflect.Method{} + _ = &acktags.Tags{} +) + +// newResourceDelta returns a new `ackcompare.Delta` used to compare two +// resources +func newResourceDelta( + a *resource, + b *resource, +) *ackcompare.Delta { + delta := ackcompare.NewDelta() + if (a == nil && b != nil) || + (a != nil && b == nil) { + delta.Add("", a, b) + return delta + } + + if ackcompare.HasNilDifference(a.ko.Spec.CacheUsageLimits, b.ko.Spec.CacheUsageLimits) { + delta.Add("Spec.CacheUsageLimits", a.ko.Spec.CacheUsageLimits, b.ko.Spec.CacheUsageLimits) + } else if a.ko.Spec.CacheUsageLimits != nil && b.ko.Spec.CacheUsageLimits != nil { + if ackcompare.HasNilDifference(a.ko.Spec.CacheUsageLimits.DataStorage, b.ko.Spec.CacheUsageLimits.DataStorage) { + delta.Add("Spec.CacheUsageLimits.DataStorage", a.ko.Spec.CacheUsageLimits.DataStorage, b.ko.Spec.CacheUsageLimits.DataStorage) + } else if a.ko.Spec.CacheUsageLimits.DataStorage != nil && b.ko.Spec.CacheUsageLimits.DataStorage != nil { + if ackcompare.HasNilDifference(a.ko.Spec.CacheUsageLimits.DataStorage.Maximum, b.ko.Spec.CacheUsageLimits.DataStorage.Maximum) { + delta.Add("Spec.CacheUsageLimits.DataStorage.Maximum", a.ko.Spec.CacheUsageLimits.DataStorage.Maximum, b.ko.Spec.CacheUsageLimits.DataStorage.Maximum) + } else if a.ko.Spec.CacheUsageLimits.DataStorage.Maximum != nil && b.ko.Spec.CacheUsageLimits.DataStorage.Maximum != nil { + if *a.ko.Spec.CacheUsageLimits.DataStorage.Maximum != *b.ko.Spec.CacheUsageLimits.DataStorage.Maximum { + delta.Add("Spec.CacheUsageLimits.DataStorage.Maximum", a.ko.Spec.CacheUsageLimits.DataStorage.Maximum, b.ko.Spec.CacheUsageLimits.DataStorage.Maximum) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.CacheUsageLimits.DataStorage.Minimum, b.ko.Spec.CacheUsageLimits.DataStorage.Minimum) { + delta.Add("Spec.CacheUsageLimits.DataStorage.Minimum", a.ko.Spec.CacheUsageLimits.DataStorage.Minimum, b.ko.Spec.CacheUsageLimits.DataStorage.Minimum) + } else if a.ko.Spec.CacheUsageLimits.DataStorage.Minimum != nil && b.ko.Spec.CacheUsageLimits.DataStorage.Minimum != nil { + if *a.ko.Spec.CacheUsageLimits.DataStorage.Minimum != *b.ko.Spec.CacheUsageLimits.DataStorage.Minimum { + delta.Add("Spec.CacheUsageLimits.DataStorage.Minimum", a.ko.Spec.CacheUsageLimits.DataStorage.Minimum, b.ko.Spec.CacheUsageLimits.DataStorage.Minimum) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.CacheUsageLimits.DataStorage.Unit, b.ko.Spec.CacheUsageLimits.DataStorage.Unit) { + delta.Add("Spec.CacheUsageLimits.DataStorage.Unit", a.ko.Spec.CacheUsageLimits.DataStorage.Unit, b.ko.Spec.CacheUsageLimits.DataStorage.Unit) + } else if a.ko.Spec.CacheUsageLimits.DataStorage.Unit != nil && b.ko.Spec.CacheUsageLimits.DataStorage.Unit != nil { + if *a.ko.Spec.CacheUsageLimits.DataStorage.Unit != *b.ko.Spec.CacheUsageLimits.DataStorage.Unit { + 
delta.Add("Spec.CacheUsageLimits.DataStorage.Unit", a.ko.Spec.CacheUsageLimits.DataStorage.Unit, b.ko.Spec.CacheUsageLimits.DataStorage.Unit) + } + } + } + if ackcompare.HasNilDifference(a.ko.Spec.CacheUsageLimits.ECPUPerSecond, b.ko.Spec.CacheUsageLimits.ECPUPerSecond) { + delta.Add("Spec.CacheUsageLimits.ECPUPerSecond", a.ko.Spec.CacheUsageLimits.ECPUPerSecond, b.ko.Spec.CacheUsageLimits.ECPUPerSecond) + } else if a.ko.Spec.CacheUsageLimits.ECPUPerSecond != nil && b.ko.Spec.CacheUsageLimits.ECPUPerSecond != nil { + if ackcompare.HasNilDifference(a.ko.Spec.CacheUsageLimits.ECPUPerSecond.Maximum, b.ko.Spec.CacheUsageLimits.ECPUPerSecond.Maximum) { + delta.Add("Spec.CacheUsageLimits.ECPUPerSecond.Maximum", a.ko.Spec.CacheUsageLimits.ECPUPerSecond.Maximum, b.ko.Spec.CacheUsageLimits.ECPUPerSecond.Maximum) + } else if a.ko.Spec.CacheUsageLimits.ECPUPerSecond.Maximum != nil && b.ko.Spec.CacheUsageLimits.ECPUPerSecond.Maximum != nil { + if *a.ko.Spec.CacheUsageLimits.ECPUPerSecond.Maximum != *b.ko.Spec.CacheUsageLimits.ECPUPerSecond.Maximum { + delta.Add("Spec.CacheUsageLimits.ECPUPerSecond.Maximum", a.ko.Spec.CacheUsageLimits.ECPUPerSecond.Maximum, b.ko.Spec.CacheUsageLimits.ECPUPerSecond.Maximum) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.CacheUsageLimits.ECPUPerSecond.Minimum, b.ko.Spec.CacheUsageLimits.ECPUPerSecond.Minimum) { + delta.Add("Spec.CacheUsageLimits.ECPUPerSecond.Minimum", a.ko.Spec.CacheUsageLimits.ECPUPerSecond.Minimum, b.ko.Spec.CacheUsageLimits.ECPUPerSecond.Minimum) + } else if a.ko.Spec.CacheUsageLimits.ECPUPerSecond.Minimum != nil && b.ko.Spec.CacheUsageLimits.ECPUPerSecond.Minimum != nil { + if *a.ko.Spec.CacheUsageLimits.ECPUPerSecond.Minimum != *b.ko.Spec.CacheUsageLimits.ECPUPerSecond.Minimum { + delta.Add("Spec.CacheUsageLimits.ECPUPerSecond.Minimum", a.ko.Spec.CacheUsageLimits.ECPUPerSecond.Minimum, b.ko.Spec.CacheUsageLimits.ECPUPerSecond.Minimum) + } + } + } + } + if ackcompare.HasNilDifference(a.ko.Spec.DailySnapshotTime, b.ko.Spec.DailySnapshotTime) { + delta.Add("Spec.DailySnapshotTime", a.ko.Spec.DailySnapshotTime, b.ko.Spec.DailySnapshotTime) + } else if a.ko.Spec.DailySnapshotTime != nil && b.ko.Spec.DailySnapshotTime != nil { + if *a.ko.Spec.DailySnapshotTime != *b.ko.Spec.DailySnapshotTime { + delta.Add("Spec.DailySnapshotTime", a.ko.Spec.DailySnapshotTime, b.ko.Spec.DailySnapshotTime) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.Description, b.ko.Spec.Description) { + delta.Add("Spec.Description", a.ko.Spec.Description, b.ko.Spec.Description) + } else if a.ko.Spec.Description != nil && b.ko.Spec.Description != nil { + if *a.ko.Spec.Description != *b.ko.Spec.Description { + delta.Add("Spec.Description", a.ko.Spec.Description, b.ko.Spec.Description) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.Engine, b.ko.Spec.Engine) { + delta.Add("Spec.Engine", a.ko.Spec.Engine, b.ko.Spec.Engine) + } else if a.ko.Spec.Engine != nil && b.ko.Spec.Engine != nil { + if *a.ko.Spec.Engine != *b.ko.Spec.Engine { + delta.Add("Spec.Engine", a.ko.Spec.Engine, b.ko.Spec.Engine) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.KMSKeyID, b.ko.Spec.KMSKeyID) { + delta.Add("Spec.KMSKeyID", a.ko.Spec.KMSKeyID, b.ko.Spec.KMSKeyID) + } else if a.ko.Spec.KMSKeyID != nil && b.ko.Spec.KMSKeyID != nil { + if *a.ko.Spec.KMSKeyID != *b.ko.Spec.KMSKeyID { + delta.Add("Spec.KMSKeyID", a.ko.Spec.KMSKeyID, b.ko.Spec.KMSKeyID) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.MajorEngineVersion, b.ko.Spec.MajorEngineVersion) { + delta.Add("Spec.MajorEngineVersion", 
a.ko.Spec.MajorEngineVersion, b.ko.Spec.MajorEngineVersion) + } else if a.ko.Spec.MajorEngineVersion != nil && b.ko.Spec.MajorEngineVersion != nil { + if *a.ko.Spec.MajorEngineVersion != *b.ko.Spec.MajorEngineVersion { + delta.Add("Spec.MajorEngineVersion", a.ko.Spec.MajorEngineVersion, b.ko.Spec.MajorEngineVersion) + } + } + if len(a.ko.Spec.SecurityGroupIDs) != len(b.ko.Spec.SecurityGroupIDs) { + delta.Add("Spec.SecurityGroupIDs", a.ko.Spec.SecurityGroupIDs, b.ko.Spec.SecurityGroupIDs) + } else if len(a.ko.Spec.SecurityGroupIDs) > 0 { + if !ackcompare.SliceStringPEqual(a.ko.Spec.SecurityGroupIDs, b.ko.Spec.SecurityGroupIDs) { + delta.Add("Spec.SecurityGroupIDs", a.ko.Spec.SecurityGroupIDs, b.ko.Spec.SecurityGroupIDs) + } + } + if !reflect.DeepEqual(a.ko.Spec.SecurityGroupRefs, b.ko.Spec.SecurityGroupRefs) { + delta.Add("Spec.SecurityGroupRefs", a.ko.Spec.SecurityGroupRefs, b.ko.Spec.SecurityGroupRefs) + } + if ackcompare.HasNilDifference(a.ko.Spec.ServerlessCacheName, b.ko.Spec.ServerlessCacheName) { + delta.Add("Spec.ServerlessCacheName", a.ko.Spec.ServerlessCacheName, b.ko.Spec.ServerlessCacheName) + } else if a.ko.Spec.ServerlessCacheName != nil && b.ko.Spec.ServerlessCacheName != nil { + if *a.ko.Spec.ServerlessCacheName != *b.ko.Spec.ServerlessCacheName { + delta.Add("Spec.ServerlessCacheName", a.ko.Spec.ServerlessCacheName, b.ko.Spec.ServerlessCacheName) + } + } + if len(a.ko.Spec.SnapshotARNsToRestore) != len(b.ko.Spec.SnapshotARNsToRestore) { + delta.Add("Spec.SnapshotARNsToRestore", a.ko.Spec.SnapshotARNsToRestore, b.ko.Spec.SnapshotARNsToRestore) + } else if len(a.ko.Spec.SnapshotARNsToRestore) > 0 { + if !ackcompare.SliceStringPEqual(a.ko.Spec.SnapshotARNsToRestore, b.ko.Spec.SnapshotARNsToRestore) { + delta.Add("Spec.SnapshotARNsToRestore", a.ko.Spec.SnapshotARNsToRestore, b.ko.Spec.SnapshotARNsToRestore) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.SnapshotRetentionLimit, b.ko.Spec.SnapshotRetentionLimit) { + delta.Add("Spec.SnapshotRetentionLimit", a.ko.Spec.SnapshotRetentionLimit, b.ko.Spec.SnapshotRetentionLimit) + } else if a.ko.Spec.SnapshotRetentionLimit != nil && b.ko.Spec.SnapshotRetentionLimit != nil { + if *a.ko.Spec.SnapshotRetentionLimit != *b.ko.Spec.SnapshotRetentionLimit { + delta.Add("Spec.SnapshotRetentionLimit", a.ko.Spec.SnapshotRetentionLimit, b.ko.Spec.SnapshotRetentionLimit) + } + } + if len(a.ko.Spec.SubnetIDs) != len(b.ko.Spec.SubnetIDs) { + delta.Add("Spec.SubnetIDs", a.ko.Spec.SubnetIDs, b.ko.Spec.SubnetIDs) + } else if len(a.ko.Spec.SubnetIDs) > 0 { + if !ackcompare.SliceStringPEqual(a.ko.Spec.SubnetIDs, b.ko.Spec.SubnetIDs) { + delta.Add("Spec.SubnetIDs", a.ko.Spec.SubnetIDs, b.ko.Spec.SubnetIDs) + } + } + if !reflect.DeepEqual(a.ko.Spec.SubnetRefs, b.ko.Spec.SubnetRefs) { + delta.Add("Spec.SubnetRefs", a.ko.Spec.SubnetRefs, b.ko.Spec.SubnetRefs) + } + desiredACKTags, _ := convertToOrderedACKTags(a.ko.Spec.Tags) + latestACKTags, _ := convertToOrderedACKTags(b.ko.Spec.Tags) + if !ackcompare.MapStringStringEqual(desiredACKTags, latestACKTags) { + delta.Add("Spec.Tags", a.ko.Spec.Tags, b.ko.Spec.Tags) + } + if ackcompare.HasNilDifference(a.ko.Spec.UserGroupID, b.ko.Spec.UserGroupID) { + delta.Add("Spec.UserGroupID", a.ko.Spec.UserGroupID, b.ko.Spec.UserGroupID) + } else if a.ko.Spec.UserGroupID != nil && b.ko.Spec.UserGroupID != nil { + if *a.ko.Spec.UserGroupID != *b.ko.Spec.UserGroupID { + delta.Add("Spec.UserGroupID", a.ko.Spec.UserGroupID, b.ko.Spec.UserGroupID) + } + } + + return delta +} diff --git 
a/pkg/resource/serverless_cache/descriptor.go b/pkg/resource/serverless_cache/descriptor.go new file mode 100644 index 00000000..e3bb6daf --- /dev/null +++ b/pkg/resource/serverless_cache/descriptor.go @@ -0,0 +1,155 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package serverless_cache + +import ( + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + rtclient "sigs.k8s.io/controller-runtime/pkg/client" + k8sctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" +) + +const ( + FinalizerString = "finalizers.elasticache.services.k8s.aws/ServerlessCache" +) + +var ( + GroupVersionResource = svcapitypes.GroupVersion.WithResource("serverlesscaches") + GroupKind = metav1.GroupKind{ + Group: "elasticache.services.k8s.aws", + Kind: "ServerlessCache", + } +) + +// resourceDescriptor implements the +// `aws-service-operator-k8s/pkg/types.AWSResourceDescriptor` interface +type resourceDescriptor struct { +} + +// GroupVersionKind returns a Kubernetes schema.GroupVersionKind struct that +// describes the API Group, Version and Kind of CRs described by the descriptor +func (d *resourceDescriptor) GroupVersionKind() schema.GroupVersionKind { + return svcapitypes.GroupVersion.WithKind(GroupKind.Kind) +} + +// EmptyRuntimeObject returns an empty object prototype that may be used in +// apimachinery and k8s client operations +func (d *resourceDescriptor) EmptyRuntimeObject() rtclient.Object { + return &svcapitypes.ServerlessCache{} +} + +// ResourceFromRuntimeObject returns an AWSResource that has been initialized +// with the supplied runtime.Object +func (d *resourceDescriptor) ResourceFromRuntimeObject( + obj rtclient.Object, +) acktypes.AWSResource { + return &resource{ + ko: obj.(*svcapitypes.ServerlessCache), + } +} + +// Delta returns an `ackcompare.Delta` object containing the difference between +// one `AWSResource` and another. +func (d *resourceDescriptor) Delta(a, b acktypes.AWSResource) *ackcompare.Delta { + return newResourceDelta(a.(*resource), b.(*resource)) +} + +// IsManaged returns true if the supplied AWSResource is under the management +// of an ACK service controller. What this means in practice is that the +// underlying custom resource (CR) in the AWSResource has had a +// resource-specific finalizer associated with it. +func (d *resourceDescriptor) IsManaged( + res acktypes.AWSResource, +) bool { + obj := res.RuntimeObject() + if obj == nil { + // Should not happen. 
If it does, there is a bug in the code + panic("nil RuntimeMetaObject in AWSResource") + } + // Remove use of custom code once + // https://github.com/kubernetes-sigs/controller-runtime/issues/994 is + // fixed. This should be able to be: + // + // return k8sctrlutil.ContainsFinalizer(obj, FinalizerString) + return containsFinalizer(obj, FinalizerString) +} + +// Remove once https://github.com/kubernetes-sigs/controller-runtime/issues/994 +// is fixed. +func containsFinalizer(obj rtclient.Object, finalizer string) bool { + f := obj.GetFinalizers() + for _, e := range f { + if e == finalizer { + return true + } + } + return false +} + +// MarkManaged places the supplied resource under the management of ACK. What +// this typically means is that the resource manager will decorate the +// underlying custom resource (CR) with a finalizer that indicates ACK is +// managing the resource and the underlying CR may not be deleted until ACK is +// finished cleaning up any backend AWS service resources associated with the +// CR. +func (d *resourceDescriptor) MarkManaged( + res acktypes.AWSResource, +) { + obj := res.RuntimeObject() + if obj == nil { + // Should not happen. If it does, there is a bug in the code + panic("nil RuntimeMetaObject in AWSResource") + } + k8sctrlutil.AddFinalizer(obj, FinalizerString) +} + +// MarkUnmanaged removes the supplied resource from management by ACK. What +// this typically means is that the resource manager will remove a finalizer +// underlying custom resource (CR) that indicates ACK is managing the resource. +// This will allow the Kubernetes API server to delete the underlying CR. +func (d *resourceDescriptor) MarkUnmanaged( + res acktypes.AWSResource, +) { + obj := res.RuntimeObject() + if obj == nil { + // Should not happen. If it does, there is a bug in the code + panic("nil RuntimeMetaObject in AWSResource") + } + k8sctrlutil.RemoveFinalizer(obj, FinalizerString) +} + +// MarkAdopted places descriptors on the custom resource that indicate the +// resource was not created from within ACK. +func (d *resourceDescriptor) MarkAdopted( + res acktypes.AWSResource, +) { + obj := res.RuntimeObject() + if obj == nil { + // Should not happen. If it does, there is a bug in the code + panic("nil RuntimeObject in AWSResource") + } + curr := obj.GetAnnotations() + if curr == nil { + curr = make(map[string]string) + } + curr[ackv1alpha1.AnnotationAdopted] = "true" + obj.SetAnnotations(curr) +} diff --git a/pkg/resource/serverless_cache/hooks.go b/pkg/resource/serverless_cache/hooks.go new file mode 100644 index 00000000..7b9de395 --- /dev/null +++ b/pkg/resource/serverless_cache/hooks.go @@ -0,0 +1,311 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
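The hand-written hooks.go below classifies the serverless cache status (creating, deleting, modifying) and, while the cache is busy, returns a requeue error built with ackrequeue.NeededAfter so the reconciler retries later instead of issuing another ModifyServerlessCache call. A minimal, self-contained sketch of that requeue pattern; the helper name and status strings here are illustrative stand-ins, not part of this change:

package main

import (
	"errors"
	"fmt"
	"time"

	ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue"
)

// errCacheBusy stands in for the typed "cache is busy" errors defined in hooks.go.
var errCacheBusy = errors.New("serverless cache is busy")

// requeueIfBusy mirrors the pattern used in hooks.go: when the resource is in a
// transitional state, return a requeue error so the reconciler retries later
// instead of calling ModifyServerlessCache again.
func requeueIfBusy(status string) error {
	switch status {
	case "creating", "deleting", "modifying":
		return ackrequeue.NeededAfter(errCacheBusy, 5*time.Second)
	default:
		return nil
	}
}

func main() {
	fmt.Println(requeueIfBusy("modifying")) // requeue error: reconcile is retried after the delay
	fmt.Println(requeueIfBusy("available")) // <nil>: safe to proceed with updates
}

The value returned by ackrequeue.NeededAfter carries both the underlying error and the delay, which the ACK runtime uses to schedule the next reconcile attempt.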
+ +package serverless_cache + +import ( + "context" + "fmt" + "time" + + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" + "github.com/aws-controllers-k8s/elasticache-controller/pkg/util" + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + ackcondition "github.com/aws-controllers-k8s/runtime/pkg/condition" + ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" + ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + svcsdktypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" + corev1 "k8s.io/api/core/v1" +) + +const ( + ServerlessCacheStatusCreating = "creating" + ServerlessCacheStatusDeleting = "deleting" + ServerlessCacheStatusModifying = "modifying" +) + +var ( + ErrServerlessCacheDeleting = fmt.Errorf( + "serverless cache in '%v' state, cannot be modified or deleted", + ServerlessCacheStatusDeleting, + ) + ErrServerlessCacheCreating = fmt.Errorf( + "serverless cache in '%v' state, cannot be modified or deleted", + ServerlessCacheStatusCreating, + ) + ErrServerlessCacheModifying = fmt.Errorf( + "serverless cache in '%v' state, cannot be further modified", + ServerlessCacheStatusModifying, + ) +) + +var ( + requeueWaitWhileDeleting = ackrequeue.NeededAfter( + ErrServerlessCacheDeleting, + 5*time.Second, + ) + requeueWaitWhileCreating = ackrequeue.NeededAfter( + ErrServerlessCacheCreating, + 5*time.Second, + ) + requeueWaitWhileModifying = ackrequeue.NeededAfter( + ErrServerlessCacheModifying, + 10*time.Second, + ) +) + +// modifyServerlessCache is a central function that creates the input object and makes the API call +// with consistent metrics recording +func (rm *resourceManager) modifyServerlessCache( + ctx context.Context, + serverlessCacheName *string, + configFunc func(*svcsdk.ModifyServerlessCacheInput), +) error { + input := &svcsdk.ModifyServerlessCacheInput{ + ServerlessCacheName: serverlessCacheName, + } + + if configFunc != nil { + configFunc(input) + } + + _, err := rm.sdkapi.ModifyServerlessCache(ctx, input) + rm.metrics.RecordAPICall("UPDATE", "ModifyServerlessCache", err) + return err +} + +func isServerlessCacheCreating(r *resource) bool { + if r.ko.Status.Status == nil { + return false + } + return *r.ko.Status.Status == ServerlessCacheStatusCreating +} + +func isServerlessCacheDeleting(r *resource) bool { + if r.ko.Status.Status == nil { + return false + } + return *r.ko.Status.Status == ServerlessCacheStatusDeleting +} + +func isServerlessCacheModifying(r *resource) bool { + if r.ko.Status.Status == nil { + return false + } + return *r.ko.Status.Status == ServerlessCacheStatusModifying +} + +// customUpdateServerlessCache handles updates in a phased approach similar to DynamoDB table updates +func (rm *resourceManager) customUpdateServerlessCache( + ctx context.Context, + desired *resource, + latest *resource, + delta *ackcompare.Delta, +) (updated *resource, err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.customUpdateServerlessCache") + defer func() { exit(err) }() + + if isServerlessCacheDeleting(latest) { + msg := "serverless cache is currently being deleted" + ackcondition.SetSynced(desired, corev1.ConditionFalse, &msg, nil) + return desired, requeueWaitWhileDeleting + } + if isServerlessCacheCreating(latest) { + msg := "serverless cache is currently being created" + ackcondition.SetSynced(desired, corev1.ConditionFalse, &msg, nil) + return desired, 
requeueWaitWhileCreating + } + if isServerlessCacheModifying(latest) { + msg := "serverless cache is currently being modified" + ackcondition.SetSynced(desired, corev1.ConditionFalse, &msg, nil) + return desired, requeueWaitWhileModifying + } + + // Merge in the information we read from the API call above to the copy of + // the original Kubernetes object we passed to the function + ko := desired.ko.DeepCopy() + rm.setStatusDefaults(ko) + + switch { + case delta.DifferentAt("Spec.Description"): + if err := rm.syncDescription(ctx, desired); err != nil { + return nil, fmt.Errorf("cannot update serverless cache: %v", err) + } + + case delta.DifferentAt("Spec.Tags"): + if err := rm.syncTags(ctx, desired, latest); err != nil { + return &resource{ko}, err + } + + case delta.DifferentAt("Spec.DailySnapshotTime"): + if err := rm.syncDailySnapshotTime(ctx, desired); err != nil { + return nil, fmt.Errorf("cannot update serverless cache: %v", err) + } + + case delta.DifferentAt("Spec.SnapshotRetentionLimit"): + if err := rm.syncSnapshotRetentionLimit(ctx, desired); err != nil { + return nil, fmt.Errorf("cannot update serverless cache: %v", err) + } + + case delta.DifferentAt("Spec.SecurityGroupIDs"): + if err := rm.syncSecurityGroupIDs(ctx, desired); err != nil { + return nil, fmt.Errorf("cannot update serverless cache: %v", err) + } + + case delta.DifferentAt("Spec.UserGroupID"): + if err := rm.syncUserGroupID(ctx, desired); err != nil { + return nil, fmt.Errorf("cannot update serverless cache: %v", err) + } + + case delta.DifferentAt("Spec.CacheUsageLimits"): + if err := rm.syncCacheUsageLimits(ctx, desired); err != nil { + return nil, fmt.Errorf("cannot update serverless cache: %v", err) + } + + case delta.DifferentAt("Spec.Engine") || delta.DifferentAt("Spec.MajorEngineVersion"): + if err := rm.syncEngineAndVersion(ctx, desired); err != nil { + return nil, fmt.Errorf("cannot update serverless cache: %v", err) + } + } + + return &resource{ko}, requeueWaitWhileModifying +} + +// syncDescription handles updating only the description field +func (rm *resourceManager) syncDescription( + ctx context.Context, + desired *resource, +) error { + return rm.modifyServerlessCache(ctx, desired.ko.Spec.ServerlessCacheName, func(input *svcsdk.ModifyServerlessCacheInput) { + input.Description = desired.ko.Spec.Description + }) +} + +// syncDailySnapshotTime handles updating only the daily snapshot time field +func (rm *resourceManager) syncDailySnapshotTime( + ctx context.Context, + desired *resource, +) error { + return rm.modifyServerlessCache(ctx, desired.ko.Spec.ServerlessCacheName, func(input *svcsdk.ModifyServerlessCacheInput) { + input.DailySnapshotTime = desired.ko.Spec.DailySnapshotTime + }) +} + +// syncSnapshotRetentionLimit handles updating only the snapshot retention limit field +func (rm *resourceManager) syncSnapshotRetentionLimit( + ctx context.Context, + desired *resource, +) error { + return rm.modifyServerlessCache(ctx, desired.ko.Spec.ServerlessCacheName, func(input *svcsdk.ModifyServerlessCacheInput) { + if desired.ko.Spec.SnapshotRetentionLimit != nil { + snapshotRetentionLimitCopy := int32(*desired.ko.Spec.SnapshotRetentionLimit) + input.SnapshotRetentionLimit = &snapshotRetentionLimitCopy + } + }) +} + +// syncSecurityGroupIDs handles updating only the security group IDs field +func (rm *resourceManager) syncSecurityGroupIDs( + ctx context.Context, + desired *resource, +) error { + securityGroupIDs := aws.ToStringSlice(desired.ko.Spec.SecurityGroupIDs) + // AWS ElastiCache 
ModifyServerlessCache doesn't support unsetting SecurityGroupIds + err := rm.modifyServerlessCache(ctx, desired.ko.Spec.ServerlessCacheName, func(input *svcsdk.ModifyServerlessCacheInput) { + input.SecurityGroupIds = securityGroupIDs + }) + return err +} + +// syncUserGroupID handles updating only the user group ID field +func (rm *resourceManager) syncUserGroupID( + ctx context.Context, + desired *resource, +) error { + return rm.modifyServerlessCache(ctx, desired.ko.Spec.ServerlessCacheName, func(input *svcsdk.ModifyServerlessCacheInput) { + input.UserGroupId = desired.ko.Spec.UserGroupID + }) +} + +// syncCacheUsageLimits handles updating the cache usage limits which may have restrictions +func (rm *resourceManager) syncCacheUsageLimits( + ctx context.Context, + desired *resource, +) error { + return rm.modifyServerlessCache(ctx, desired.ko.Spec.ServerlessCacheName, func(input *svcsdk.ModifyServerlessCacheInput) { + if desired.ko.Spec.CacheUsageLimits != nil { + f0 := &svcsdktypes.CacheUsageLimits{} + if desired.ko.Spec.CacheUsageLimits.DataStorage != nil { + f0f0 := &svcsdktypes.DataStorage{} + if desired.ko.Spec.CacheUsageLimits.DataStorage.Maximum != nil { + maximumCopy0 := *desired.ko.Spec.CacheUsageLimits.DataStorage.Maximum + maximumCopy := int32(maximumCopy0) + f0f0.Maximum = &maximumCopy + } + if desired.ko.Spec.CacheUsageLimits.DataStorage.Minimum != nil { + minimumCopy0 := *desired.ko.Spec.CacheUsageLimits.DataStorage.Minimum + minimumCopy := int32(minimumCopy0) + f0f0.Minimum = &minimumCopy + } + if desired.ko.Spec.CacheUsageLimits.DataStorage.Unit != nil { + f0f0.Unit = svcsdktypes.DataStorageUnit(*desired.ko.Spec.CacheUsageLimits.DataStorage.Unit) + } + f0.DataStorage = f0f0 + } + if desired.ko.Spec.CacheUsageLimits.ECPUPerSecond != nil { + f0f1 := &svcsdktypes.ECPUPerSecond{} + if desired.ko.Spec.CacheUsageLimits.ECPUPerSecond.Maximum != nil { + maximumCopy0 := *desired.ko.Spec.CacheUsageLimits.ECPUPerSecond.Maximum + maximumCopy := int32(maximumCopy0) + f0f1.Maximum = &maximumCopy + } + if desired.ko.Spec.CacheUsageLimits.ECPUPerSecond.Minimum != nil { + minimumCopy0 := *desired.ko.Spec.CacheUsageLimits.ECPUPerSecond.Minimum + minimumCopy := int32(minimumCopy0) + f0f1.Minimum = &minimumCopy + } + f0.ECPUPerSecond = f0f1 + } + input.CacheUsageLimits = f0 + } + }) +} + +// syncEngineAndVersion handles the special case of engine and version changes +func (rm *resourceManager) syncEngineAndVersion( + ctx context.Context, + desired *resource, +) error { + return rm.modifyServerlessCache(ctx, desired.ko.Spec.ServerlessCacheName, func(input *svcsdk.ModifyServerlessCacheInput) { + input.Engine = desired.ko.Spec.Engine + input.MajorEngineVersion = desired.ko.Spec.MajorEngineVersion + }) +} + +func (rm *resourceManager) syncTags( + ctx context.Context, + desired *resource, + latest *resource, +) (err error) { + return util.SyncTags(ctx, desired.ko.Spec.Tags, latest.ko.Spec.Tags, latest.ko.Status.ACKResourceMetadata, convertToOrderedACKTags, rm.sdkapi, rm.metrics) +} + +func (rm *resourceManager) getTags( + ctx context.Context, + resourceARN string, +) ([]*svcapitypes.Tag, error) { + return util.GetTags(ctx, rm.sdkapi, rm.metrics, resourceARN) +} diff --git a/pkg/resource/serverless_cache/identifiers.go b/pkg/resource/serverless_cache/identifiers.go new file mode 100644 index 00000000..d9511d4a --- /dev/null +++ b/pkg/resource/serverless_cache/identifiers.go @@ -0,0 +1,55 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package serverless_cache + +import ( + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" +) + +// resourceIdentifiers implements the +// `aws-service-operator-k8s/pkg/types.AWSResourceIdentifiers` interface +type resourceIdentifiers struct { + meta *ackv1alpha1.ResourceMetadata +} + +// ARN returns the AWS Resource Name for the backend AWS resource. If nil, +// this means the resource has not yet been created in the backend AWS +// service. +func (ri *resourceIdentifiers) ARN() *ackv1alpha1.AWSResourceName { + if ri.meta != nil { + return ri.meta.ARN + } + return nil +} + +// OwnerAccountID returns the AWS account identifier in which the +// backend AWS resource resides, or nil if this information is not known +// for the resource +func (ri *resourceIdentifiers) OwnerAccountID() *ackv1alpha1.AWSAccountID { + if ri.meta != nil { + return ri.meta.OwnerAccountID + } + return nil +} + +// Region returns the AWS region in which the resource exists, or +// nil if this information is not known. +func (ri *resourceIdentifiers) Region() *ackv1alpha1.AWSRegion { + if ri.meta != nil { + return ri.meta.Region + } + return nil +} diff --git a/pkg/resource/serverless_cache/manager.go b/pkg/resource/serverless_cache/manager.go new file mode 100644 index 00000000..d02e7436 --- /dev/null +++ b/pkg/resource/serverless_cache/manager.go @@ -0,0 +1,412 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. 
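The regenerated manager.go below reflects the move to aws-sdk-go-v2: the resource manager stores an aws.Config and constructs its ElastiCache client with svcsdk.NewFromConfig instead of a v1 session. For orientation, a small standalone sketch of the same v2 client construction plus a serverless cache describe call; the region and cache name are placeholders:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/elasticache"
)

func main() {
	ctx := context.Background()

	// Load credentials/region the standard v2 way; the controller instead receives
	// an equivalent aws.Config from the ACK runtime at startup.
	cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion("us-west-2"))
	if err != nil {
		log.Fatal(err)
	}

	client := elasticache.NewFromConfig(cfg)

	// Describe the serverless cache by name.
	out, err := client.DescribeServerlessCaches(ctx, &elasticache.DescribeServerlessCachesInput{
		ServerlessCacheName: aws.String("example-cache"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("found %d serverless caches", len(out.ServerlessCaches))
}

Inside the controller only the NewFromConfig call has a direct counterpart (in newResourceManager below); the surrounding setup is handled by the ACK runtime.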
+ +package serverless_cache + +import ( + "context" + "fmt" + "time" + + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + ackcondition "github.com/aws-controllers-k8s/runtime/pkg/condition" + ackcfg "github.com/aws-controllers-k8s/runtime/pkg/config" + ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" + ackmetrics "github.com/aws-controllers-k8s/runtime/pkg/metrics" + ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" + ackrt "github.com/aws-controllers-k8s/runtime/pkg/runtime" + ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" + acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" + acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" + ackutil "github.com/aws-controllers-k8s/runtime/pkg/util" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" +) + +var ( + _ = ackutil.InStrings + _ = acktags.NewTags() + _ = ackrt.MissingImageTagValue + _ = svcapitypes.ServerlessCache{} +) + +// +kubebuilder:rbac:groups=elasticache.services.k8s.aws,resources=serverlesscaches,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=elasticache.services.k8s.aws,resources=serverlesscaches/status,verbs=get;update;patch + +var lateInitializeFieldNames = []string{} + +// resourceManager is responsible for providing a consistent way to perform +// CRUD operations in a backend AWS service API for ServerlessCache custom resources. +type resourceManager struct { + // cfg is a copy of the ackcfg.Config object passed on start of the service + // controller + cfg ackcfg.Config + // clientcfg is a copy of the client configuration passed on start of the + // service controller + clientcfg aws.Config + // log refers to the logr.Logger object handling logging for the service + // controller + log logr.Logger + // metrics contains a collection of Prometheus metric objects that the + // service controller and its reconcilers track + metrics *ackmetrics.Metrics + // rr is the Reconciler which can be used for various utility + // functions such as querying for Secret values given a SecretReference + rr acktypes.Reconciler + // awsAccountID is the AWS account identifier that contains the resources + // managed by this resource manager + awsAccountID ackv1alpha1.AWSAccountID + // awsRegion is the AWS Region that this resource manager targets + awsRegion ackv1alpha1.AWSRegion + // sdkapi is a pointer to the AWS service API client exposed by the + // aws-sdk-go-v2/service/elasticache package. + sdkapi *svcsdk.Client +} + +// concreteResource returns a pointer to a resource from the supplied +// generic AWSResource interface +func (rm *resourceManager) concreteResource( + res acktypes.AWSResource, +) *resource { + // cast the generic interface into a pointer type specific to the concrete + // implementing resource type managed by this resource manager + return res.(*resource) +} + +// ReadOne returns the currently-observed state of the supplied AWSResource in +// the backend AWS service API. +func (rm *resourceManager) ReadOne( + ctx context.Context, + res acktypes.AWSResource, +) (acktypes.AWSResource, error) { + r := rm.concreteResource(res) + if r.ko == nil { + // Should never happen... if it does, it's buggy code.
+ panic("resource manager's ReadOne() method received resource with nil CR object") + } + observed, err := rm.sdkFind(ctx, r) + mirrorAWSTags(r, observed) + if err != nil { + if observed != nil { + return rm.onError(observed, err) + } + return rm.onError(r, err) + } + return rm.onSuccess(observed) +} + +// Create attempts to create the supplied AWSResource in the backend AWS +// service API, returning an AWSResource representing the newly-created +// resource +func (rm *resourceManager) Create( + ctx context.Context, + res acktypes.AWSResource, +) (acktypes.AWSResource, error) { + r := rm.concreteResource(res) + if r.ko == nil { + // Should never happen... if it does, it's buggy code. + panic("resource manager's Create() method received resource with nil CR object") + } + created, err := rm.sdkCreate(ctx, r) + if err != nil { + if created != nil { + return rm.onError(created, err) + } + return rm.onError(r, err) + } + return rm.onSuccess(created) +} + +// Update attempts to mutate the supplied desired AWSResource in the backend AWS +// service API, returning an AWSResource representing the newly-mutated +// resource. +// Note: specialized logic implementers can check to see how the latest +// observed resource differs from the supplied desired state. The +// higher-level reconciler determines whether or not the desired differs +// from the latest observed and decides whether to call the resource +// manager's Update method. +func (rm *resourceManager) Update( + ctx context.Context, + resDesired acktypes.AWSResource, + resLatest acktypes.AWSResource, + delta *ackcompare.Delta, +) (acktypes.AWSResource, error) { + desired := rm.concreteResource(resDesired) + latest := rm.concreteResource(resLatest) + if desired.ko == nil || latest.ko == nil { + // Should never happen... if it does, it's buggy code. + panic("resource manager's Update() method received resource with nil CR object") + } + updated, err := rm.sdkUpdate(ctx, desired, latest, delta) + if err != nil { + if updated != nil { + return rm.onError(updated, err) + } + return rm.onError(latest, err) + } + return rm.onSuccess(updated) +} + +// Delete attempts to destroy the supplied AWSResource in the backend AWS +// service API, returning an AWSResource representing the +// resource being deleted (if delete is asynchronous and takes time) +func (rm *resourceManager) Delete( + ctx context.Context, + res acktypes.AWSResource, +) (acktypes.AWSResource, error) { + r := rm.concreteResource(res) + if r.ko == nil { + // Should never happen... if it does, it's buggy code. + panic("resource manager's Delete() method received resource with nil CR object") + } + observed, err := rm.sdkDelete(ctx, r) + if err != nil { + if observed != nil { + return rm.onError(observed, err) + } + return rm.onError(r, err) + } + + return rm.onSuccess(observed) +} + +// ARNFromName returns an AWS Resource Name from a given string name. This +// is useful for constructing ARNs for APIs that require ARNs in their +// GetAttributes operations but all we have (for new CRs at least) is a +// name for the resource +func (rm *resourceManager) ARNFromName(name string) string { + return fmt.Sprintf( + "arn:aws:elasticache:%s:%s:%s", + rm.awsRegion, + rm.awsAccountID, + name, + ) +} + +// LateInitialize returns an acktypes.AWSResource after setting the late initialized +// fields from the readOne call. This method will initialize the optional fields +// which were not provided by the k8s user but were defaulted by the AWS service.
+// If there are no such fields to be initialized, the returned object is similar to +// object passed in the parameter. +func (rm *resourceManager) LateInitialize( + ctx context.Context, + latest acktypes.AWSResource, +) (acktypes.AWSResource, error) { + rlog := ackrtlog.FromContext(ctx) + // If there are no fields to late initialize, do nothing + if len(lateInitializeFieldNames) == 0 { + rlog.Debug("no late initialization required.") + return latest, nil + } + latestCopy := latest.DeepCopy() + lateInitConditionReason := "" + lateInitConditionMessage := "" + observed, err := rm.ReadOne(ctx, latestCopy) + if err != nil { + lateInitConditionMessage = "Unable to complete Read operation required for late initialization" + lateInitConditionReason = "Late Initialization Failure" + ackcondition.SetLateInitialized(latestCopy, corev1.ConditionFalse, &lateInitConditionMessage, &lateInitConditionReason) + ackcondition.SetSynced(latestCopy, corev1.ConditionFalse, nil, nil) + return latestCopy, err + } + lateInitializedRes := rm.lateInitializeFromReadOneOutput(observed, latestCopy) + incompleteInitialization := rm.incompleteLateInitialization(lateInitializedRes) + if incompleteInitialization { + // Add the condition with LateInitialized=False + lateInitConditionMessage = "Late initialization did not complete, requeuing with delay of 5 seconds" + lateInitConditionReason = "Delayed Late Initialization" + ackcondition.SetLateInitialized(lateInitializedRes, corev1.ConditionFalse, &lateInitConditionMessage, &lateInitConditionReason) + ackcondition.SetSynced(lateInitializedRes, corev1.ConditionFalse, nil, nil) + return lateInitializedRes, ackrequeue.NeededAfter(nil, time.Duration(5)*time.Second) + } + // Set LateInitialized condition to True + lateInitConditionMessage = "Late initialization successful" + lateInitConditionReason = "Late initialization successful" + ackcondition.SetLateInitialized(lateInitializedRes, corev1.ConditionTrue, &lateInitConditionMessage, &lateInitConditionReason) + return lateInitializedRes, nil +} + +// incompleteLateInitialization return true if there are fields which were supposed to be +// late initialized but are not. If all the fields are late initialized, false is returned +func (rm *resourceManager) incompleteLateInitialization( + res acktypes.AWSResource, +) bool { + return false +} + +// lateInitializeFromReadOneOutput late initializes the 'latest' resource from the 'observed' +// resource and returns 'latest' resource +func (rm *resourceManager) lateInitializeFromReadOneOutput( + observed acktypes.AWSResource, + latest acktypes.AWSResource, +) acktypes.AWSResource { + return latest +} + +// IsSynced returns true if the resource is synced. +func (rm *resourceManager) IsSynced(ctx context.Context, res acktypes.AWSResource) (bool, error) { + r := rm.concreteResource(res) + if r.ko == nil { + // Should never happen... if it does, it's buggy code. + panic("resource manager's IsSynced() method received resource with nil CR object") + } + + if r.ko.Status.Status == nil { + return false, nil + } + statusCandidates := []string{"available", "create_failed"} + if !ackutil.InStrings(*r.ko.Status.Status, statusCandidates) { + return false, nil + } + + return true, nil +} + +// EnsureTags ensures that tags are present inside the AWSResource. +// If the AWSResource does not have any existing resource tags, the 'tags' +// field is initialized and the controller tags are added. 
+// If the AWSResource has existing resource tags, then controller tags are +// added to the existing resource tags without overriding them. +// If the AWSResource does not support tags, only then the controller tags +// will not be added to the AWSResource. +func (rm *resourceManager) EnsureTags( + ctx context.Context, + res acktypes.AWSResource, + md acktypes.ServiceControllerMetadata, +) error { + r := rm.concreteResource(res) + if r.ko == nil { + // Should never happen... if it does, it's buggy code. + panic("resource manager's EnsureTags method received resource with nil CR object") + } + defaultTags := ackrt.GetDefaultTags(&rm.cfg, r.ko, md) + var existingTags []*svcapitypes.Tag + existingTags = r.ko.Spec.Tags + resourceTags, keyOrder := convertToOrderedACKTags(existingTags) + tags := acktags.Merge(resourceTags, defaultTags) + r.ko.Spec.Tags = fromACKTags(tags, keyOrder) + return nil +} + +// FilterSystemTags ignores tags that have keys that start with "aws:". +// This is needed to ensure the controller does not attempt to remove +// tags set by AWS. This function needs to be called after each Read +// operation. +// E.g. resources created with CloudFormation have tags that cannot be +// removed by an ACK controller. +func (rm *resourceManager) FilterSystemTags(res acktypes.AWSResource) { + r := rm.concreteResource(res) + if r == nil || r.ko == nil { + return + } + var existingTags []*svcapitypes.Tag + existingTags = r.ko.Spec.Tags + resourceTags, tagKeyOrder := convertToOrderedACKTags(existingTags) + ignoreSystemTags(resourceTags) + r.ko.Spec.Tags = fromACKTags(resourceTags, tagKeyOrder) +} + +// mirrorAWSTags ensures that AWS tags are included in the desired resource +// if they are present in the latest resource. This will ensure that the +// aws tags are not present in a diff. The logic of the controller will +// ensure these tags aren't patched to the resource in the cluster, and +// will only be present to make sure we don't try to remove these tags. +// +// Although there are a lot of similarities between this function and +// EnsureTags, they are very much different. +// While EnsureTags tries to make sure the resource contains the controller +// tags, mirrorAWSTags tries to make sure tags injected by AWS are mirrored +// from the latest resource to the desired resource.
+func mirrorAWSTags(a *resource, b *resource) { + if a == nil || a.ko == nil || b == nil || b.ko == nil { + return + } + var existingLatestTags []*svcapitypes.Tag + var existingDesiredTags []*svcapitypes.Tag + existingDesiredTags = a.ko.Spec.Tags + existingLatestTags = b.ko.Spec.Tags + desiredTags, desiredTagKeyOrder := convertToOrderedACKTags(existingDesiredTags) + latestTags, _ := convertToOrderedACKTags(existingLatestTags) + syncAWSTags(desiredTags, latestTags) + a.ko.Spec.Tags = fromACKTags(desiredTags, desiredTagKeyOrder) +} + +// newResourceManager returns a new struct implementing +// acktypes.AWSResourceManager +// This is for AWS-SDK-GO-V2 - Created newResourceManager With AWS sdk-Go-ClientV2 +func newResourceManager( + cfg ackcfg.Config, + clientcfg aws.Config, + log logr.Logger, + metrics *ackmetrics.Metrics, + rr acktypes.Reconciler, + id ackv1alpha1.AWSAccountID, + region ackv1alpha1.AWSRegion, +) (*resourceManager, error) { + return &resourceManager{ + cfg: cfg, + clientcfg: clientcfg, + log: log, + metrics: metrics, + rr: rr, + awsAccountID: id, + awsRegion: region, + sdkapi: svcsdk.NewFromConfig(clientcfg), + }, nil +} + +// onError updates resource conditions and returns updated resource +// it returns nil if no condition is updated. +func (rm *resourceManager) onError( + r *resource, + err error, +) (acktypes.AWSResource, error) { + if r == nil { + return nil, err + } + r1, updated := rm.updateConditions(r, false, err) + if !updated { + return r, err + } + for _, condition := range r1.Conditions() { + if condition.Type == ackv1alpha1.ConditionTypeTerminal && + condition.Status == corev1.ConditionTrue { + // resource is in Terminal condition + // return Terminal error + return r1, ackerr.Terminal + } + } + return r1, err +} + +// onSuccess updates resource conditions and returns updated resource +// it returns the supplied resource if no condition is updated. +func (rm *resourceManager) onSuccess( + r *resource, +) (acktypes.AWSResource, error) { + if r == nil { + return nil, nil + } + r1, updated := rm.updateConditions(r, true, nil) + if !updated { + return r, nil + } + return r1, nil +} diff --git a/pkg/resource/serverless_cache/manager_factory.go b/pkg/resource/serverless_cache/manager_factory.go new file mode 100644 index 00000000..237f5e28 --- /dev/null +++ b/pkg/resource/serverless_cache/manager_factory.go @@ -0,0 +1,100 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. 
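manager_factory.go below memoizes one resourceManager per account/region/role ARN behind a sync.RWMutex: a read-locked cache lookup on the fast path and a write lock when a manager has to be built. A generic sketch of that get-or-create pattern with a hypothetical helper type (not the factory code itself), including the usual re-check after upgrading to the write lock:

package main

import (
	"fmt"
	"sync"
)

// managerCache memoizes expensive-to-build values by key, taking a read lock
// on the fast path and a write lock only when a value must be created.
type managerCache[T any] struct {
	mu    sync.RWMutex
	items map[string]T
	build func(key string) T
}

func (c *managerCache[T]) get(key string) T {
	c.mu.RLock()
	v, ok := c.items[key]
	c.mu.RUnlock()
	if ok {
		return v
	}

	c.mu.Lock()
	defer c.mu.Unlock()
	// Re-check under the write lock so racing callers share one instance.
	if v, ok := c.items[key]; ok {
		return v
	}
	v = c.build(key)
	c.items[key] = v
	return v
}

func main() {
	cache := &managerCache[string]{
		items: map[string]string{},
		build: func(key string) string { return "manager-for-" + key },
	}
	fmt.Println(cache.get("123456789012/us-west-2/role-a"))
	fmt.Println(cache.get("123456789012/us-west-2/role-a")) // served from the cache
}

The sketch re-checks under the write lock to avoid building two managers for the same key; the generated factory below skips that re-check and tolerates an occasional duplicate build, which is harmless because the last one stored wins.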
+ +package serverless_cache + +import ( + "fmt" + "sync" + + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcfg "github.com/aws-controllers-k8s/runtime/pkg/config" + ackmetrics "github.com/aws-controllers-k8s/runtime/pkg/metrics" + acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/go-logr/logr" + + svcresource "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource" +) + +// resourceManagerFactory produces resourceManager objects. It implements the +// `types.AWSResourceManagerFactory` interface. +type resourceManagerFactory struct { + sync.RWMutex + // rmCache contains resource managers for a particular AWS account ID + rmCache map[string]*resourceManager +} + +// ResourcePrototype returns an AWSResource that resource managers produced by +// this factory will handle +func (f *resourceManagerFactory) ResourceDescriptor() acktypes.AWSResourceDescriptor { + return &resourceDescriptor{} +} + +// ManagerFor returns a resource manager object that can manage resources for a +// supplied AWS account +func (f *resourceManagerFactory) ManagerFor( + cfg ackcfg.Config, + clientcfg aws.Config, + log logr.Logger, + metrics *ackmetrics.Metrics, + rr acktypes.Reconciler, + id ackv1alpha1.AWSAccountID, + region ackv1alpha1.AWSRegion, + roleARN ackv1alpha1.AWSResourceName, +) (acktypes.AWSResourceManager, error) { + // We use the account ID, region, and role ARN to uniquely identify a + // resource manager. This helps us to avoid creating multiple resource + // managers for the same account/region/roleARN combination. + rmId := fmt.Sprintf("%s/%s/%s", id, region, roleARN) + f.RLock() + rm, found := f.rmCache[rmId] + f.RUnlock() + + if found { + return rm, nil + } + + f.Lock() + defer f.Unlock() + + rm, err := newResourceManager(cfg, clientcfg, log, metrics, rr, id, region) + if err != nil { + return nil, err + } + f.rmCache[rmId] = rm + return rm, nil +} + +// IsAdoptable returns true if the resource is able to be adopted +func (f *resourceManagerFactory) IsAdoptable() bool { + return true +} + +// RequeueOnSuccessSeconds returns true if the resource should be requeued after specified seconds +// Default is false which means resource will not be requeued after success. +func (f *resourceManagerFactory) RequeueOnSuccessSeconds() int { + return 0 +} + +func newResourceManagerFactory() *resourceManagerFactory { + return &resourceManagerFactory{ + rmCache: map[string]*resourceManager{}, + } +} + +func init() { + svcresource.RegisterManagerFactory(newResourceManagerFactory()) +} diff --git a/pkg/resource/serverless_cache/references.go b/pkg/resource/serverless_cache/references.go new file mode 100644 index 00000000..357ec68e --- /dev/null +++ b/pkg/resource/serverless_cache/references.go @@ -0,0 +1,277 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. 
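references.go below lets a ServerlessCache name its security groups and subnets by Kubernetes reference (SecurityGroupRefs, SubnetRefs) instead of by raw ID, rejects specs that set both forms for the same field, and only resolves a reference once the target resource reports ACK.ResourceSynced=True. A stripped-down sketch of those two checks; the types and helper names are simplified stand-ins, not the ACK runtime types:

package main

import (
	"errors"
	"fmt"
)

// condition is a simplified stand-in for the ACK runtime condition type.
type condition struct {
	Type   string
	Status string
}

// validateRefs mirrors validateReferenceFields: a field may be given either as
// concrete IDs or as references, never both.
func validateRefs(ids, refs []string) error {
	if len(ids) > 0 && len(refs) > 0 {
		return errors.New("SecurityGroupIDs and SecurityGroupRefs cannot both be set")
	}
	return nil
}

// isSynced mirrors the gate in getReferencedResourceState_*: a referenced
// resource is only usable once its ResourceSynced condition is True.
func isSynced(conds []condition) bool {
	for _, c := range conds {
		if c.Type == "ACK.ResourceSynced" && c.Status == "True" {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(validateRefs([]string{"sg-123"}, []string{"my-sg"})) // error: both forms set
	fmt.Println(isSynced([]condition{{Type: "ACK.ResourceSynced", Status: "True"}}))
}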
+ +package serverless_cache + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + ec2apitypes "github.com/aws-controllers-k8s/ec2-controller/apis/v1alpha1" + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" + acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" + + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" +) + +// +kubebuilder:rbac:groups=ec2.services.k8s.aws,resources=securitygroups,verbs=get;list +// +kubebuilder:rbac:groups=ec2.services.k8s.aws,resources=securitygroups/status,verbs=get;list + +// +kubebuilder:rbac:groups=ec2.services.k8s.aws,resources=subnets,verbs=get;list +// +kubebuilder:rbac:groups=ec2.services.k8s.aws,resources=subnets/status,verbs=get;list + +// ClearResolvedReferences removes any reference values that were made +// concrete in the spec. It returns a copy of the input AWSResource which +// contains the original *Ref values, but none of their respective concrete +// values. +func (rm *resourceManager) ClearResolvedReferences(res acktypes.AWSResource) acktypes.AWSResource { + ko := rm.concreteResource(res).ko.DeepCopy() + + if len(ko.Spec.SecurityGroupRefs) > 0 { + ko.Spec.SecurityGroupIDs = nil + } + + if len(ko.Spec.SubnetRefs) > 0 { + ko.Spec.SubnetIDs = nil + } + + return &resource{ko} +} + +// ResolveReferences finds if there are any Reference field(s) present +// inside AWSResource passed in the parameter and attempts to resolve those +// reference field(s) into their respective target field(s). It returns a +// copy of the input AWSResource with resolved reference(s), a boolean which +// is set to true if the resource contains any references (regardless of if +// they are resolved successfully) and an error if the passed AWSResource's +// reference field(s) could not be resolved. +func (rm *resourceManager) ResolveReferences( + ctx context.Context, + apiReader client.Reader, + res acktypes.AWSResource, +) (acktypes.AWSResource, bool, error) { + ko := rm.concreteResource(res).ko + + resourceHasReferences := false + err := validateReferenceFields(ko) + if fieldHasReferences, err := rm.resolveReferenceForSecurityGroupIDs(ctx, apiReader, ko); err != nil { + return &resource{ko}, (resourceHasReferences || fieldHasReferences), err + } else { + resourceHasReferences = resourceHasReferences || fieldHasReferences + } + + if fieldHasReferences, err := rm.resolveReferenceForSubnetIDs(ctx, apiReader, ko); err != nil { + return &resource{ko}, (resourceHasReferences || fieldHasReferences), err + } else { + resourceHasReferences = resourceHasReferences || fieldHasReferences + } + + return &resource{ko}, resourceHasReferences, err +} + +// validateReferenceFields validates the reference field and corresponding +// identifier field. +func validateReferenceFields(ko *svcapitypes.ServerlessCache) error { + + if len(ko.Spec.SecurityGroupRefs) > 0 && len(ko.Spec.SecurityGroupIDs) > 0 { + return ackerr.ResourceReferenceAndIDNotSupportedFor("SecurityGroupIDs", "SecurityGroupRefs") + } + + if len(ko.Spec.SubnetRefs) > 0 && len(ko.Spec.SubnetIDs) > 0 { + return ackerr.ResourceReferenceAndIDNotSupportedFor("SubnetIDs", "SubnetRefs") + } + return nil +} + +// resolveReferenceForSecurityGroupIDs reads the resource referenced +// from SecurityGroupRefs field and sets the SecurityGroupIDs +// from referenced resource. 
Returns a boolean indicating whether a reference +// contains references, or an error +func (rm *resourceManager) resolveReferenceForSecurityGroupIDs( + ctx context.Context, + apiReader client.Reader, + ko *svcapitypes.ServerlessCache, +) (hasReferences bool, err error) { + for _, f0iter := range ko.Spec.SecurityGroupRefs { + if f0iter != nil && f0iter.From != nil { + hasReferences = true + arr := f0iter.From + if arr.Name == nil || *arr.Name == "" { + return hasReferences, fmt.Errorf("provided resource reference is nil or empty: SecurityGroupRefs") + } + namespace := ko.ObjectMeta.GetNamespace() + if arr.Namespace != nil && *arr.Namespace != "" { + namespace = *arr.Namespace + } + obj := &ec2apitypes.SecurityGroup{} + if err := getReferencedResourceState_SecurityGroup(ctx, apiReader, obj, *arr.Name, namespace); err != nil { + return hasReferences, err + } + if ko.Spec.SecurityGroupIDs == nil { + ko.Spec.SecurityGroupIDs = make([]*string, 0, 1) + } + ko.Spec.SecurityGroupIDs = append(ko.Spec.SecurityGroupIDs, (*string)(obj.Status.ID)) + } + } + + return hasReferences, nil +} + +// getReferencedResourceState_SecurityGroup looks up whether a referenced resource +// exists and is in a ACK.ResourceSynced=True state. If the referenced resource does exist and is +// in a Synced state, returns nil, otherwise returns `ackerr.ResourceReferenceTerminalFor` or +// `ResourceReferenceNotSyncedFor` depending on if the resource is in a Terminal state. +func getReferencedResourceState_SecurityGroup( + ctx context.Context, + apiReader client.Reader, + obj *ec2apitypes.SecurityGroup, + name string, // the Kubernetes name of the referenced resource + namespace string, // the Kubernetes namespace of the referenced resource +) error { + namespacedName := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + err := apiReader.Get(ctx, namespacedName, obj) + if err != nil { + return err + } + var refResourceTerminal bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeTerminal && + cond.Status == corev1.ConditionTrue { + return ackerr.ResourceReferenceTerminalFor( + "SecurityGroup", + namespace, name) + } + } + if refResourceTerminal { + return ackerr.ResourceReferenceTerminalFor( + "SecurityGroup", + namespace, name) + } + var refResourceSynced bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && + cond.Status == corev1.ConditionTrue { + refResourceSynced = true + } + } + if !refResourceSynced { + return ackerr.ResourceReferenceNotSyncedFor( + "SecurityGroup", + namespace, name) + } + if obj.Status.ID == nil { + return ackerr.ResourceReferenceMissingTargetFieldFor( + "SecurityGroup", + namespace, name, + "Status.ID") + } + return nil +} + +// resolveReferenceForSubnetIDs reads the resource referenced +// from SubnetRefs field and sets the SubnetIDs +// from referenced resource. 
Returns a boolean indicating whether a reference +// contains references, or an error +func (rm *resourceManager) resolveReferenceForSubnetIDs( + ctx context.Context, + apiReader client.Reader, + ko *svcapitypes.ServerlessCache, +) (hasReferences bool, err error) { + for _, f0iter := range ko.Spec.SubnetRefs { + if f0iter != nil && f0iter.From != nil { + hasReferences = true + arr := f0iter.From + if arr.Name == nil || *arr.Name == "" { + return hasReferences, fmt.Errorf("provided resource reference is nil or empty: SubnetRefs") + } + namespace := ko.ObjectMeta.GetNamespace() + if arr.Namespace != nil && *arr.Namespace != "" { + namespace = *arr.Namespace + } + obj := &ec2apitypes.Subnet{} + if err := getReferencedResourceState_Subnet(ctx, apiReader, obj, *arr.Name, namespace); err != nil { + return hasReferences, err + } + if ko.Spec.SubnetIDs == nil { + ko.Spec.SubnetIDs = make([]*string, 0, 1) + } + ko.Spec.SubnetIDs = append(ko.Spec.SubnetIDs, (*string)(obj.Status.SubnetID)) + } + } + + return hasReferences, nil +} + +// getReferencedResourceState_Subnet looks up whether a referenced resource +// exists and is in a ACK.ResourceSynced=True state. If the referenced resource does exist and is +// in a Synced state, returns nil, otherwise returns `ackerr.ResourceReferenceTerminalFor` or +// `ResourceReferenceNotSyncedFor` depending on if the resource is in a Terminal state. +func getReferencedResourceState_Subnet( + ctx context.Context, + apiReader client.Reader, + obj *ec2apitypes.Subnet, + name string, // the Kubernetes name of the referenced resource + namespace string, // the Kubernetes namespace of the referenced resource +) error { + namespacedName := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + err := apiReader.Get(ctx, namespacedName, obj) + if err != nil { + return err + } + var refResourceTerminal bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeTerminal && + cond.Status == corev1.ConditionTrue { + return ackerr.ResourceReferenceTerminalFor( + "Subnet", + namespace, name) + } + } + if refResourceTerminal { + return ackerr.ResourceReferenceTerminalFor( + "Subnet", + namespace, name) + } + var refResourceSynced bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && + cond.Status == corev1.ConditionTrue { + refResourceSynced = true + } + } + if !refResourceSynced { + return ackerr.ResourceReferenceNotSyncedFor( + "Subnet", + namespace, name) + } + if obj.Status.SubnetID == nil { + return ackerr.ResourceReferenceMissingTargetFieldFor( + "Subnet", + namespace, name, + "Status.SubnetID") + } + return nil +} diff --git a/pkg/resource/serverless_cache/resource.go b/pkg/resource/serverless_cache/resource.go new file mode 100644 index 00000000..13cfe9ad --- /dev/null +++ b/pkg/resource/serverless_cache/resource.go @@ -0,0 +1,113 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. 
+ +package serverless_cache + +import ( + "fmt" + + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackerrors "github.com/aws-controllers-k8s/runtime/pkg/errors" + acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + rtclient "sigs.k8s.io/controller-runtime/pkg/client" + + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" +) + +// Hack to avoid import errors during build... +var ( + _ = &ackerrors.MissingNameIdentifier +) + +// resource implements the `aws-controller-k8s/runtime/pkg/types.AWSResource` +// interface +type resource struct { + // The Kubernetes-native CR representing the resource + ko *svcapitypes.ServerlessCache +} + +// Identifiers returns an AWSResourceIdentifiers object containing various +// identifying information, including the AWS account ID that owns the +// resource, the resource's AWS Resource Name (ARN) +func (r *resource) Identifiers() acktypes.AWSResourceIdentifiers { + return &resourceIdentifiers{r.ko.Status.ACKResourceMetadata} +} + +// IsBeingDeleted returns true if the Kubernetes resource has a non-zero +// deletion timestamp +func (r *resource) IsBeingDeleted() bool { + return !r.ko.DeletionTimestamp.IsZero() +} + +// RuntimeObject returns the Kubernetes apimachinery/runtime representation of +// the AWSResource +func (r *resource) RuntimeObject() rtclient.Object { + return r.ko +} + +// MetaObject returns the Kubernetes apimachinery/apis/meta/v1.Object +// representation of the AWSResource +func (r *resource) MetaObject() metav1.Object { + return r.ko.GetObjectMeta() +} + +// Conditions returns the ACK Conditions collection for the AWSResource +func (r *resource) Conditions() []*ackv1alpha1.Condition { + return r.ko.Status.Conditions +} + +// ReplaceConditions sets the Conditions status field for the resource +func (r *resource) ReplaceConditions(conditions []*ackv1alpha1.Condition) { + r.ko.Status.Conditions = conditions +} + +// SetObjectMeta sets the ObjectMeta field for the resource +func (r *resource) SetObjectMeta(meta metav1.ObjectMeta) { + r.ko.ObjectMeta = meta +} + +// SetStatus will set the Status field for the resource +func (r *resource) SetStatus(desired acktypes.AWSResource) { + r.ko.Status = desired.(*resource).ko.Status +} + +// SetIdentifiers sets the Spec or Status field that is referenced as the unique +// resource identifier +func (r *resource) SetIdentifiers(identifier *ackv1alpha1.AWSIdentifiers) error { + if identifier.NameOrID == "" { + return ackerrors.MissingNameIdentifier + } + r.ko.Spec.ServerlessCacheName = &identifier.NameOrID + + return nil +} + +// PopulateResourceFromAnnotation populates the fields passed from adoption annotation +func (r *resource) PopulateResourceFromAnnotation(fields map[string]string) error { + f2, ok := fields["serverlessCacheName"] + if !ok { + return ackerrors.NewTerminalError(fmt.Errorf("required field missing: serverlessCacheName")) + } + r.ko.Spec.ServerlessCacheName = &f2 + + return nil +} + +// DeepCopy will return a copy of the resource +func (r *resource) DeepCopy() acktypes.AWSResource { + koCopy := r.ko.DeepCopy() + return &resource{koCopy} +} diff --git a/pkg/resource/serverless_cache/sdk.go b/pkg/resource/serverless_cache/sdk.go new file mode 100644 index 00000000..d05f84b9 --- /dev/null +++ b/pkg/resource/serverless_cache/sdk.go @@ -0,0 +1,712 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package serverless_cache + +import ( + "context" + "errors" + "fmt" + "math" + "reflect" + "strings" + + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + ackcondition "github.com/aws-controllers-k8s/runtime/pkg/condition" + ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" + ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" + ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + svcsdktypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" + smithy "github.com/aws/smithy-go" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" +) + +// Hack to avoid import errors during build... +var ( + _ = &metav1.Time{} + _ = strings.ToLower("") + _ = &svcsdk.Client{} + _ = &svcapitypes.ServerlessCache{} + _ = ackv1alpha1.AWSAccountID("") + _ = &ackerr.NotFound + _ = &ackcondition.NotManagedMessage + _ = &reflect.Value{} + _ = fmt.Sprintf("") + _ = &ackrequeue.NoRequeue{} + _ = &aws.Config{} +) + +// sdkFind returns SDK-specific information about a supplied resource +func (rm *resourceManager) sdkFind( + ctx context.Context, + r *resource, +) (latest *resource, err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.sdkFind") + defer func() { + exit(err) + }() + // If any required fields in the input shape are missing, AWS resource is + // not created yet. Return NotFound here to indicate to callers that the + // resource isn't yet created. 
+ if rm.requiredFieldsMissingFromReadManyInput(r) { + return nil, ackerr.NotFound + } + + input, err := rm.newListRequestPayload(r) + if err != nil { + return nil, err + } + var resp *svcsdk.DescribeServerlessCachesOutput + resp, err = rm.sdkapi.DescribeServerlessCaches(ctx, input) + rm.metrics.RecordAPICall("READ_MANY", "DescribeServerlessCaches", err) + if err != nil { + var awsErr smithy.APIError + if errors.As(err, &awsErr) && awsErr.ErrorCode() == "ServerlessCacheNotFoundFault" { + return nil, ackerr.NotFound + } + return nil, err + } + + // Merge in the information we read from the API call above to the copy of + // the original Kubernetes object we passed to the function + ko := r.ko.DeepCopy() + + found := false + for _, elem := range resp.ServerlessCaches { + if elem.ARN != nil { + if ko.Status.ACKResourceMetadata == nil { + ko.Status.ACKResourceMetadata = &ackv1alpha1.ResourceMetadata{} + } + tmpARN := ackv1alpha1.AWSResourceName(*elem.ARN) + ko.Status.ACKResourceMetadata.ARN = &tmpARN + } + if elem.CacheUsageLimits != nil { + f1 := &svcapitypes.CacheUsageLimits{} + if elem.CacheUsageLimits.DataStorage != nil { + f1f0 := &svcapitypes.DataStorage{} + if elem.CacheUsageLimits.DataStorage.Maximum != nil { + maximumCopy := int64(*elem.CacheUsageLimits.DataStorage.Maximum) + f1f0.Maximum = &maximumCopy + } + if elem.CacheUsageLimits.DataStorage.Minimum != nil { + minimumCopy := int64(*elem.CacheUsageLimits.DataStorage.Minimum) + f1f0.Minimum = &minimumCopy + } + if elem.CacheUsageLimits.DataStorage.Unit != "" { + f1f0.Unit = aws.String(string(elem.CacheUsageLimits.DataStorage.Unit)) + } + f1.DataStorage = f1f0 + } + if elem.CacheUsageLimits.ECPUPerSecond != nil { + f1f1 := &svcapitypes.ECPUPerSecond{} + if elem.CacheUsageLimits.ECPUPerSecond.Maximum != nil { + maximumCopy := int64(*elem.CacheUsageLimits.ECPUPerSecond.Maximum) + f1f1.Maximum = &maximumCopy + } + if elem.CacheUsageLimits.ECPUPerSecond.Minimum != nil { + minimumCopy := int64(*elem.CacheUsageLimits.ECPUPerSecond.Minimum) + f1f1.Minimum = &minimumCopy + } + f1.ECPUPerSecond = f1f1 + } + ko.Spec.CacheUsageLimits = f1 + } else { + ko.Spec.CacheUsageLimits = nil + } + if elem.CreateTime != nil { + ko.Status.CreateTime = &metav1.Time{*elem.CreateTime} + } else { + ko.Status.CreateTime = nil + } + if elem.DailySnapshotTime != nil { + ko.Spec.DailySnapshotTime = elem.DailySnapshotTime + } else { + ko.Spec.DailySnapshotTime = nil + } + if elem.Description != nil { + ko.Spec.Description = elem.Description + } else { + ko.Spec.Description = nil + } + if elem.Endpoint != nil { + f5 := &svcapitypes.Endpoint{} + if elem.Endpoint.Address != nil { + f5.Address = elem.Endpoint.Address + } + if elem.Endpoint.Port != nil { + portCopy := int64(*elem.Endpoint.Port) + f5.Port = &portCopy + } + ko.Status.Endpoint = f5 + } else { + ko.Status.Endpoint = nil + } + if elem.Engine != nil { + ko.Spec.Engine = elem.Engine + } else { + ko.Spec.Engine = nil + } + if elem.FullEngineVersion != nil { + ko.Status.FullEngineVersion = elem.FullEngineVersion + } else { + ko.Status.FullEngineVersion = nil + } + if elem.KmsKeyId != nil { + ko.Spec.KMSKeyID = elem.KmsKeyId + } else { + ko.Spec.KMSKeyID = nil + } + if elem.MajorEngineVersion != nil { + ko.Spec.MajorEngineVersion = elem.MajorEngineVersion + } else { + ko.Spec.MajorEngineVersion = nil + } + if elem.ReaderEndpoint != nil { + f10 := &svcapitypes.Endpoint{} + if elem.ReaderEndpoint.Address != nil { + f10.Address = elem.ReaderEndpoint.Address + } + if elem.ReaderEndpoint.Port != nil { + portCopy := 
int64(*elem.ReaderEndpoint.Port) + f10.Port = &portCopy + } + ko.Status.ReaderEndpoint = f10 + } else { + ko.Status.ReaderEndpoint = nil + } + if elem.SecurityGroupIds != nil { + ko.Spec.SecurityGroupIDs = aws.StringSlice(elem.SecurityGroupIds) + } else { + ko.Spec.SecurityGroupIDs = nil + } + if elem.ServerlessCacheName != nil { + ko.Spec.ServerlessCacheName = elem.ServerlessCacheName + } else { + ko.Spec.ServerlessCacheName = nil + } + if elem.SnapshotRetentionLimit != nil { + snapshotRetentionLimitCopy := int64(*elem.SnapshotRetentionLimit) + ko.Spec.SnapshotRetentionLimit = &snapshotRetentionLimitCopy + } else { + ko.Spec.SnapshotRetentionLimit = nil + } + if elem.Status != nil { + ko.Status.Status = elem.Status + } else { + ko.Status.Status = nil + } + if elem.SubnetIds != nil { + ko.Spec.SubnetIDs = aws.StringSlice(elem.SubnetIds) + } else { + ko.Spec.SubnetIDs = nil + } + if elem.UserGroupId != nil { + ko.Spec.UserGroupID = elem.UserGroupId + } else { + ko.Spec.UserGroupID = nil + } + found = true + break + } + if !found { + return nil, ackerr.NotFound + } + + rm.setStatusDefaults(ko) + // Get the ARN from the resource metadata + if ko.Status.ACKResourceMetadata != nil && ko.Status.ACKResourceMetadata.ARN != nil { + // Retrieve the tags for the resource + resourceARN := string(*ko.Status.ACKResourceMetadata.ARN) + tags, err := rm.getTags(ctx, resourceARN) + if err == nil { + ko.Spec.Tags = tags + } + } + return &resource{ko}, nil +} + +// requiredFieldsMissingFromReadManyInput returns true if there are any fields +// for the ReadMany Input shape that are required but not present in the +// resource's Spec or Status +func (rm *resourceManager) requiredFieldsMissingFromReadManyInput( + r *resource, +) bool { + return r.ko.Spec.ServerlessCacheName == nil + +} + +// newListRequestPayload returns SDK-specific struct for the HTTP request +// payload of the List API call for the resource +func (rm *resourceManager) newListRequestPayload( + r *resource, +) (*svcsdk.DescribeServerlessCachesInput, error) { + res := &svcsdk.DescribeServerlessCachesInput{} + + if r.ko.Spec.ServerlessCacheName != nil { + res.ServerlessCacheName = r.ko.Spec.ServerlessCacheName + } + + return res, nil +} + +// sdkCreate creates the supplied resource in the backend AWS service API and +// returns a copy of the resource with resource fields (in both Spec and +// Status) filled in with values from the CREATE API operation's Output shape. 
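+// Note: Spec.Tags are included in the CreateServerlessCache request payload
+// built by newCreateRequestPayload below, so a separate tagging call is not
+// expected to be needed at create time.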
+func (rm *resourceManager) sdkCreate( + ctx context.Context, + desired *resource, +) (created *resource, err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.sdkCreate") + defer func() { + exit(err) + }() + input, err := rm.newCreateRequestPayload(ctx, desired) + if err != nil { + return nil, err + } + + var resp *svcsdk.CreateServerlessCacheOutput + _ = resp + resp, err = rm.sdkapi.CreateServerlessCache(ctx, input) + rm.metrics.RecordAPICall("CREATE", "CreateServerlessCache", err) + if err != nil { + return nil, err + } + // Merge in the information we read from the API call above to the copy of + // the original Kubernetes object we passed to the function + ko := desired.ko.DeepCopy() + + if ko.Status.ACKResourceMetadata == nil { + ko.Status.ACKResourceMetadata = &ackv1alpha1.ResourceMetadata{} + } + if resp.ServerlessCache.ARN != nil { + arn := ackv1alpha1.AWSResourceName(*resp.ServerlessCache.ARN) + ko.Status.ACKResourceMetadata.ARN = &arn + } + if resp.ServerlessCache.CacheUsageLimits != nil { + f1 := &svcapitypes.CacheUsageLimits{} + if resp.ServerlessCache.CacheUsageLimits.DataStorage != nil { + f1f0 := &svcapitypes.DataStorage{} + if resp.ServerlessCache.CacheUsageLimits.DataStorage.Maximum != nil { + maximumCopy := int64(*resp.ServerlessCache.CacheUsageLimits.DataStorage.Maximum) + f1f0.Maximum = &maximumCopy + } + if resp.ServerlessCache.CacheUsageLimits.DataStorage.Minimum != nil { + minimumCopy := int64(*resp.ServerlessCache.CacheUsageLimits.DataStorage.Minimum) + f1f0.Minimum = &minimumCopy + } + if resp.ServerlessCache.CacheUsageLimits.DataStorage.Unit != "" { + f1f0.Unit = aws.String(string(resp.ServerlessCache.CacheUsageLimits.DataStorage.Unit)) + } + f1.DataStorage = f1f0 + } + if resp.ServerlessCache.CacheUsageLimits.ECPUPerSecond != nil { + f1f1 := &svcapitypes.ECPUPerSecond{} + if resp.ServerlessCache.CacheUsageLimits.ECPUPerSecond.Maximum != nil { + maximumCopy := int64(*resp.ServerlessCache.CacheUsageLimits.ECPUPerSecond.Maximum) + f1f1.Maximum = &maximumCopy + } + if resp.ServerlessCache.CacheUsageLimits.ECPUPerSecond.Minimum != nil { + minimumCopy := int64(*resp.ServerlessCache.CacheUsageLimits.ECPUPerSecond.Minimum) + f1f1.Minimum = &minimumCopy + } + f1.ECPUPerSecond = f1f1 + } + ko.Spec.CacheUsageLimits = f1 + } else { + ko.Spec.CacheUsageLimits = nil + } + if resp.ServerlessCache.CreateTime != nil { + ko.Status.CreateTime = &metav1.Time{*resp.ServerlessCache.CreateTime} + } else { + ko.Status.CreateTime = nil + } + if resp.ServerlessCache.DailySnapshotTime != nil { + ko.Spec.DailySnapshotTime = resp.ServerlessCache.DailySnapshotTime + } else { + ko.Spec.DailySnapshotTime = nil + } + if resp.ServerlessCache.Description != nil { + ko.Spec.Description = resp.ServerlessCache.Description + } else { + ko.Spec.Description = nil + } + if resp.ServerlessCache.Endpoint != nil { + f5 := &svcapitypes.Endpoint{} + if resp.ServerlessCache.Endpoint.Address != nil { + f5.Address = resp.ServerlessCache.Endpoint.Address + } + if resp.ServerlessCache.Endpoint.Port != nil { + portCopy := int64(*resp.ServerlessCache.Endpoint.Port) + f5.Port = &portCopy + } + ko.Status.Endpoint = f5 + } else { + ko.Status.Endpoint = nil + } + if resp.ServerlessCache.Engine != nil { + ko.Spec.Engine = resp.ServerlessCache.Engine + } else { + ko.Spec.Engine = nil + } + if resp.ServerlessCache.FullEngineVersion != nil { + ko.Status.FullEngineVersion = resp.ServerlessCache.FullEngineVersion + } else { + ko.Status.FullEngineVersion = nil + } + if resp.ServerlessCache.KmsKeyId != nil 
{ + ko.Spec.KMSKeyID = resp.ServerlessCache.KmsKeyId + } else { + ko.Spec.KMSKeyID = nil + } + if resp.ServerlessCache.MajorEngineVersion != nil { + ko.Spec.MajorEngineVersion = resp.ServerlessCache.MajorEngineVersion + } else { + ko.Spec.MajorEngineVersion = nil + } + if resp.ServerlessCache.ReaderEndpoint != nil { + f10 := &svcapitypes.Endpoint{} + if resp.ServerlessCache.ReaderEndpoint.Address != nil { + f10.Address = resp.ServerlessCache.ReaderEndpoint.Address + } + if resp.ServerlessCache.ReaderEndpoint.Port != nil { + portCopy := int64(*resp.ServerlessCache.ReaderEndpoint.Port) + f10.Port = &portCopy + } + ko.Status.ReaderEndpoint = f10 + } else { + ko.Status.ReaderEndpoint = nil + } + if resp.ServerlessCache.SecurityGroupIds != nil { + ko.Spec.SecurityGroupIDs = aws.StringSlice(resp.ServerlessCache.SecurityGroupIds) + } else { + ko.Spec.SecurityGroupIDs = nil + } + if resp.ServerlessCache.ServerlessCacheName != nil { + ko.Spec.ServerlessCacheName = resp.ServerlessCache.ServerlessCacheName + } else { + ko.Spec.ServerlessCacheName = nil + } + if resp.ServerlessCache.SnapshotRetentionLimit != nil { + snapshotRetentionLimitCopy := int64(*resp.ServerlessCache.SnapshotRetentionLimit) + ko.Spec.SnapshotRetentionLimit = &snapshotRetentionLimitCopy + } else { + ko.Spec.SnapshotRetentionLimit = nil + } + if resp.ServerlessCache.Status != nil { + ko.Status.Status = resp.ServerlessCache.Status + } else { + ko.Status.Status = nil + } + if resp.ServerlessCache.SubnetIds != nil { + ko.Spec.SubnetIDs = aws.StringSlice(resp.ServerlessCache.SubnetIds) + } else { + ko.Spec.SubnetIDs = nil + } + if resp.ServerlessCache.UserGroupId != nil { + ko.Spec.UserGroupID = resp.ServerlessCache.UserGroupId + } else { + ko.Spec.UserGroupID = nil + } + + rm.setStatusDefaults(ko) + return &resource{ko}, nil +} + +// newCreateRequestPayload returns an SDK-specific struct for the HTTP request +// payload of the Create API call for the resource +func (rm *resourceManager) newCreateRequestPayload( + ctx context.Context, + r *resource, +) (*svcsdk.CreateServerlessCacheInput, error) { + res := &svcsdk.CreateServerlessCacheInput{} + + if r.ko.Spec.CacheUsageLimits != nil { + f0 := &svcsdktypes.CacheUsageLimits{} + if r.ko.Spec.CacheUsageLimits.DataStorage != nil { + f0f0 := &svcsdktypes.DataStorage{} + if r.ko.Spec.CacheUsageLimits.DataStorage.Maximum != nil { + maximumCopy0 := *r.ko.Spec.CacheUsageLimits.DataStorage.Maximum + if maximumCopy0 > math.MaxInt32 || maximumCopy0 < math.MinInt32 { + return nil, fmt.Errorf("error: field Maximum is of type int32") + } + maximumCopy := int32(maximumCopy0) + f0f0.Maximum = &maximumCopy + } + if r.ko.Spec.CacheUsageLimits.DataStorage.Minimum != nil { + minimumCopy0 := *r.ko.Spec.CacheUsageLimits.DataStorage.Minimum + if minimumCopy0 > math.MaxInt32 || minimumCopy0 < math.MinInt32 { + return nil, fmt.Errorf("error: field Minimum is of type int32") + } + minimumCopy := int32(minimumCopy0) + f0f0.Minimum = &minimumCopy + } + if r.ko.Spec.CacheUsageLimits.DataStorage.Unit != nil { + f0f0.Unit = svcsdktypes.DataStorageUnit(*r.ko.Spec.CacheUsageLimits.DataStorage.Unit) + } + f0.DataStorage = f0f0 + } + if r.ko.Spec.CacheUsageLimits.ECPUPerSecond != nil { + f0f1 := &svcsdktypes.ECPUPerSecond{} + if r.ko.Spec.CacheUsageLimits.ECPUPerSecond.Maximum != nil { + maximumCopy0 := *r.ko.Spec.CacheUsageLimits.ECPUPerSecond.Maximum + if maximumCopy0 > math.MaxInt32 || maximumCopy0 < math.MinInt32 { + return nil, fmt.Errorf("error: field Maximum is of type int32") + } + maximumCopy := 
int32(maximumCopy0) + f0f1.Maximum = &maximumCopy + } + if r.ko.Spec.CacheUsageLimits.ECPUPerSecond.Minimum != nil { + minimumCopy0 := *r.ko.Spec.CacheUsageLimits.ECPUPerSecond.Minimum + if minimumCopy0 > math.MaxInt32 || minimumCopy0 < math.MinInt32 { + return nil, fmt.Errorf("error: field Minimum is of type int32") + } + minimumCopy := int32(minimumCopy0) + f0f1.Minimum = &minimumCopy + } + f0.ECPUPerSecond = f0f1 + } + res.CacheUsageLimits = f0 + } + if r.ko.Spec.DailySnapshotTime != nil { + res.DailySnapshotTime = r.ko.Spec.DailySnapshotTime + } + if r.ko.Spec.Description != nil { + res.Description = r.ko.Spec.Description + } + if r.ko.Spec.Engine != nil { + res.Engine = r.ko.Spec.Engine + } + if r.ko.Spec.KMSKeyID != nil { + res.KmsKeyId = r.ko.Spec.KMSKeyID + } + if r.ko.Spec.MajorEngineVersion != nil { + res.MajorEngineVersion = r.ko.Spec.MajorEngineVersion + } + if r.ko.Spec.SecurityGroupIDs != nil { + res.SecurityGroupIds = aws.ToStringSlice(r.ko.Spec.SecurityGroupIDs) + } + if r.ko.Spec.ServerlessCacheName != nil { + res.ServerlessCacheName = r.ko.Spec.ServerlessCacheName + } + if r.ko.Spec.SnapshotARNsToRestore != nil { + res.SnapshotArnsToRestore = aws.ToStringSlice(r.ko.Spec.SnapshotARNsToRestore) + } + if r.ko.Spec.SnapshotRetentionLimit != nil { + snapshotRetentionLimitCopy0 := *r.ko.Spec.SnapshotRetentionLimit + if snapshotRetentionLimitCopy0 > math.MaxInt32 || snapshotRetentionLimitCopy0 < math.MinInt32 { + return nil, fmt.Errorf("error: field SnapshotRetentionLimit is of type int32") + } + snapshotRetentionLimitCopy := int32(snapshotRetentionLimitCopy0) + res.SnapshotRetentionLimit = &snapshotRetentionLimitCopy + } + if r.ko.Spec.SubnetIDs != nil { + res.SubnetIds = aws.ToStringSlice(r.ko.Spec.SubnetIDs) + } + if r.ko.Spec.Tags != nil { + f11 := []svcsdktypes.Tag{} + for _, f11iter := range r.ko.Spec.Tags { + f11elem := &svcsdktypes.Tag{} + if f11iter.Key != nil { + f11elem.Key = f11iter.Key + } + if f11iter.Value != nil { + f11elem.Value = f11iter.Value + } + f11 = append(f11, *f11elem) + } + res.Tags = f11 + } + if r.ko.Spec.UserGroupID != nil { + res.UserGroupId = r.ko.Spec.UserGroupID + } + + return res, nil +} + +// sdkUpdate patches the supplied resource in the backend AWS service API and +// returns a new resource with updated fields. 
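+// Note: all field modifications are delegated to customUpdateServerlessCache,
+// which is assumed to be implemented in this package's hand-written hook code.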
+func (rm *resourceManager) sdkUpdate( + ctx context.Context, + desired *resource, + latest *resource, + delta *ackcompare.Delta, +) (*resource, error) { + return rm.customUpdateServerlessCache(ctx, desired, latest, delta) +} + +// sdkDelete deletes the supplied resource in the backend AWS service API +func (rm *resourceManager) sdkDelete( + ctx context.Context, + r *resource, +) (latest *resource, err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.sdkDelete") + defer func() { + exit(err) + }() + input, err := rm.newDeleteRequestPayload(r) + if err != nil { + return nil, err + } + var resp *svcsdk.DeleteServerlessCacheOutput + _ = resp + resp, err = rm.sdkapi.DeleteServerlessCache(ctx, input) + rm.metrics.RecordAPICall("DELETE", "DeleteServerlessCache", err) + return nil, err +} + +// newDeleteRequestPayload returns an SDK-specific struct for the HTTP request +// payload of the Delete API call for the resource +func (rm *resourceManager) newDeleteRequestPayload( + r *resource, +) (*svcsdk.DeleteServerlessCacheInput, error) { + res := &svcsdk.DeleteServerlessCacheInput{} + + if r.ko.Spec.ServerlessCacheName != nil { + res.ServerlessCacheName = r.ko.Spec.ServerlessCacheName + } + + return res, nil +} + +// setStatusDefaults sets default properties into supplied custom resource +func (rm *resourceManager) setStatusDefaults( + ko *svcapitypes.ServerlessCache, +) { + if ko.Status.ACKResourceMetadata == nil { + ko.Status.ACKResourceMetadata = &ackv1alpha1.ResourceMetadata{} + } + if ko.Status.ACKResourceMetadata.Region == nil { + ko.Status.ACKResourceMetadata.Region = &rm.awsRegion + } + if ko.Status.ACKResourceMetadata.OwnerAccountID == nil { + ko.Status.ACKResourceMetadata.OwnerAccountID = &rm.awsAccountID + } + if ko.Status.Conditions == nil { + ko.Status.Conditions = []*ackv1alpha1.Condition{} + } +} + +// updateConditions returns updated resource, true; if conditions were updated +// else it returns nil, false +func (rm *resourceManager) updateConditions( + r *resource, + onSuccess bool, + err error, +) (*resource, bool) { + ko := r.ko.DeepCopy() + rm.setStatusDefaults(ko) + + // Terminal condition + var terminalCondition *ackv1alpha1.Condition = nil + var recoverableCondition *ackv1alpha1.Condition = nil + var syncCondition *ackv1alpha1.Condition = nil + for _, condition := range ko.Status.Conditions { + if condition.Type == ackv1alpha1.ConditionTypeTerminal { + terminalCondition = condition + } + if condition.Type == ackv1alpha1.ConditionTypeRecoverable { + recoverableCondition = condition + } + if condition.Type == ackv1alpha1.ConditionTypeResourceSynced { + syncCondition = condition + } + } + var termError *ackerr.TerminalError + if rm.terminalAWSError(err) || err == ackerr.SecretTypeNotSupported || err == ackerr.SecretNotFound || errors.As(err, &termError) { + if terminalCondition == nil { + terminalCondition = &ackv1alpha1.Condition{ + Type: ackv1alpha1.ConditionTypeTerminal, + } + ko.Status.Conditions = append(ko.Status.Conditions, terminalCondition) + } + var errorMessage = "" + if err == ackerr.SecretTypeNotSupported || err == ackerr.SecretNotFound || errors.As(err, &termError) { + errorMessage = err.Error() + } else { + awsErr, _ := ackerr.AWSError(err) + errorMessage = awsErr.Error() + } + terminalCondition.Status = corev1.ConditionTrue + terminalCondition.Message = &errorMessage + } else { + // Clear the terminal condition if no longer present + if terminalCondition != nil { + terminalCondition.Status = corev1.ConditionFalse + terminalCondition.Message = 
nil + } + // Handling Recoverable Conditions + if err != nil { + if recoverableCondition == nil { + // Add a new Condition containing a non-terminal error + recoverableCondition = &ackv1alpha1.Condition{ + Type: ackv1alpha1.ConditionTypeRecoverable, + } + ko.Status.Conditions = append(ko.Status.Conditions, recoverableCondition) + } + recoverableCondition.Status = corev1.ConditionTrue + awsErr, _ := ackerr.AWSError(err) + errorMessage := err.Error() + if awsErr != nil { + errorMessage = awsErr.Error() + } + recoverableCondition.Message = &errorMessage + } else if recoverableCondition != nil { + recoverableCondition.Status = corev1.ConditionFalse + recoverableCondition.Message = nil + } + } + // Required to avoid the "declared but not used" error in the default case + _ = syncCondition + if terminalCondition != nil || recoverableCondition != nil || syncCondition != nil { + return &resource{ko}, true // updated + } + return nil, false // not updated +} + +// terminalAWSError returns awserr, true; if the supplied error is an aws Error type +// and if the exception indicates that it is a Terminal exception +// 'Terminal' exception are specified in generator configuration +func (rm *resourceManager) terminalAWSError(err error) bool { + if err == nil { + return false + } + + var terminalErr smithy.APIError + if !errors.As(err, &terminalErr) { + return false + } + switch terminalErr.ErrorCode() { + case "ServerlessCacheAlreadyExistsFault", + "ServerlessCacheQuotaForCustomerExceededFault", + "InvalidParameterValue", + "InvalidParameterCombination", + "InvalidVPCNetworkStateFault", + "TagQuotaPerResourceExceeded", + "InvalidKMSKeyFault": + return true + default: + return false + } +} diff --git a/pkg/resource/serverless_cache/tags.go b/pkg/resource/serverless_cache/tags.go new file mode 100644 index 00000000..fd0dd64a --- /dev/null +++ b/pkg/resource/serverless_cache/tags.go @@ -0,0 +1,119 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package serverless_cache + +import ( + "slices" + "strings" + + acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" + + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" +) + +var ( + _ = svcapitypes.ServerlessCache{} + _ = acktags.NewTags() + ACKSystemTags = []string{"services.k8s.aws/namespace", "services.k8s.aws/controller-version"} +) + +// convertToOrderedACKTags converts the tags parameter into 'acktags.Tags' shape. +// This method helps in creating the hub(acktags.Tags) for merging +// default controller tags with existing resource tags. 
It also returns a slice +// of keys maintaining the original key Order when the tags are a list +func convertToOrderedACKTags(tags []*svcapitypes.Tag) (acktags.Tags, []string) { + result := acktags.NewTags() + keyOrder := []string{} + + if len(tags) == 0 { + return result, keyOrder + } + for _, t := range tags { + if t.Key != nil { + keyOrder = append(keyOrder, *t.Key) + if t.Value != nil { + result[*t.Key] = *t.Value + } else { + result[*t.Key] = "" + } + } + } + + return result, keyOrder +} + +// fromACKTags converts the tags parameter into []*svcapitypes.Tag shape. +// This method helps in setting the tags back inside AWSResource after merging +// default controller tags with existing resource tags. When a list, +// it maintains the order from original +func fromACKTags(tags acktags.Tags, keyOrder []string) []*svcapitypes.Tag { + result := []*svcapitypes.Tag{} + + for _, k := range keyOrder { + v, ok := tags[k] + if ok { + tag := svcapitypes.Tag{Key: &k, Value: &v} + result = append(result, &tag) + delete(tags, k) + } + } + for k, v := range tags { + tag := svcapitypes.Tag{Key: &k, Value: &v} + result = append(result, &tag) + } + + return result +} + +// ignoreSystemTags ignores tags that have keys that start with "aws:" +// and ACKSystemTags, to avoid patching them to the resourceSpec. +// Eg. resources created with cloudformation have tags that cannot be +// removed by an ACK controller +func ignoreSystemTags(tags acktags.Tags) { + for k := range tags { + if strings.HasPrefix(k, "aws:") || + slices.Contains(ACKSystemTags, k) { + delete(tags, k) + } + } +} + +// syncAWSTags ensures AWS-managed tags (prefixed with "aws:") from the latest resource state +// are preserved in the desired state. This prevents the controller from attempting to +// modify AWS-managed tags, which would result in an error. +// +// AWS-managed tags are automatically added by AWS services (e.g., CloudFormation, Service Catalog) +// and cannot be modified or deleted through normal tag operations. Common examples include: +// - aws:cloudformation:stack-name +// - aws:servicecatalog:productArn +// +// Parameters: +// - a: The target Tags map to be updated (typically desired state) +// - b: The source Tags map containing AWS-managed tags (typically latest state) +// +// Example: +// +// latest := Tags{"aws:cloudformation:stack-name": "my-stack", "environment": "prod"} +// desired := Tags{"environment": "dev"} +// SyncAWSTags(desired, latest) +// desired now contains {"aws:cloudformation:stack-name": "my-stack", "environment": "dev"} +func syncAWSTags(a acktags.Tags, b acktags.Tags) { + for k := range b { + if strings.HasPrefix(k, "aws:") { + a[k] = b[k] + } + } +} diff --git a/pkg/resource/serverless_cache_snapshot/delta.go b/pkg/resource/serverless_cache_snapshot/delta.go new file mode 100644 index 00000000..8f9f7ac4 --- /dev/null +++ b/pkg/resource/serverless_cache_snapshot/delta.go @@ -0,0 +1,80 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. 
DO NOT EDIT. + +package serverless_cache_snapshot + +import ( + "bytes" + "reflect" + + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" +) + +// Hack to avoid import errors during build... +var ( + _ = &bytes.Buffer{} + _ = &reflect.Method{} + _ = &acktags.Tags{} +) + +// newResourceDelta returns a new `ackcompare.Delta` used to compare two +// resources +func newResourceDelta( + a *resource, + b *resource, +) *ackcompare.Delta { + delta := ackcompare.NewDelta() + if (a == nil && b != nil) || + (a != nil && b == nil) { + delta.Add("", a, b) + return delta + } + + if ackcompare.HasNilDifference(a.ko.Spec.KMSKeyID, b.ko.Spec.KMSKeyID) { + delta.Add("Spec.KMSKeyID", a.ko.Spec.KMSKeyID, b.ko.Spec.KMSKeyID) + } else if a.ko.Spec.KMSKeyID != nil && b.ko.Spec.KMSKeyID != nil { + if *a.ko.Spec.KMSKeyID != *b.ko.Spec.KMSKeyID { + delta.Add("Spec.KMSKeyID", a.ko.Spec.KMSKeyID, b.ko.Spec.KMSKeyID) + } + } + if !reflect.DeepEqual(a.ko.Spec.KMSKeyRef, b.ko.Spec.KMSKeyRef) { + delta.Add("Spec.KMSKeyRef", a.ko.Spec.KMSKeyRef, b.ko.Spec.KMSKeyRef) + } + if ackcompare.HasNilDifference(a.ko.Spec.ServerlessCacheName, b.ko.Spec.ServerlessCacheName) { + delta.Add("Spec.ServerlessCacheName", a.ko.Spec.ServerlessCacheName, b.ko.Spec.ServerlessCacheName) + } else if a.ko.Spec.ServerlessCacheName != nil && b.ko.Spec.ServerlessCacheName != nil { + if *a.ko.Spec.ServerlessCacheName != *b.ko.Spec.ServerlessCacheName { + delta.Add("Spec.ServerlessCacheName", a.ko.Spec.ServerlessCacheName, b.ko.Spec.ServerlessCacheName) + } + } + if !reflect.DeepEqual(a.ko.Spec.ServerlessCacheRef, b.ko.Spec.ServerlessCacheRef) { + delta.Add("Spec.ServerlessCacheRef", a.ko.Spec.ServerlessCacheRef, b.ko.Spec.ServerlessCacheRef) + } + if ackcompare.HasNilDifference(a.ko.Spec.ServerlessCacheSnapshotName, b.ko.Spec.ServerlessCacheSnapshotName) { + delta.Add("Spec.ServerlessCacheSnapshotName", a.ko.Spec.ServerlessCacheSnapshotName, b.ko.Spec.ServerlessCacheSnapshotName) + } else if a.ko.Spec.ServerlessCacheSnapshotName != nil && b.ko.Spec.ServerlessCacheSnapshotName != nil { + if *a.ko.Spec.ServerlessCacheSnapshotName != *b.ko.Spec.ServerlessCacheSnapshotName { + delta.Add("Spec.ServerlessCacheSnapshotName", a.ko.Spec.ServerlessCacheSnapshotName, b.ko.Spec.ServerlessCacheSnapshotName) + } + } + desiredACKTags, _ := convertToOrderedACKTags(a.ko.Spec.Tags) + latestACKTags, _ := convertToOrderedACKTags(b.ko.Spec.Tags) + if !ackcompare.MapStringStringEqual(desiredACKTags, latestACKTags) { + delta.Add("Spec.Tags", a.ko.Spec.Tags, b.ko.Spec.Tags) + } + + return delta +} diff --git a/pkg/resource/serverless_cache_snapshot/descriptor.go b/pkg/resource/serverless_cache_snapshot/descriptor.go new file mode 100644 index 00000000..12f31b08 --- /dev/null +++ b/pkg/resource/serverless_cache_snapshot/descriptor.go @@ -0,0 +1,155 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. 
+ +package serverless_cache_snapshot + +import ( + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + rtclient "sigs.k8s.io/controller-runtime/pkg/client" + k8sctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" +) + +const ( + FinalizerString = "finalizers.elasticache.services.k8s.aws/ServerlessCacheSnapshot" +) + +var ( + GroupVersionResource = svcapitypes.GroupVersion.WithResource("serverlesscachesnapshots") + GroupKind = metav1.GroupKind{ + Group: "elasticache.services.k8s.aws", + Kind: "ServerlessCacheSnapshot", + } +) + +// resourceDescriptor implements the +// `aws-service-operator-k8s/pkg/types.AWSResourceDescriptor` interface +type resourceDescriptor struct { +} + +// GroupVersionKind returns a Kubernetes schema.GroupVersionKind struct that +// describes the API Group, Version and Kind of CRs described by the descriptor +func (d *resourceDescriptor) GroupVersionKind() schema.GroupVersionKind { + return svcapitypes.GroupVersion.WithKind(GroupKind.Kind) +} + +// EmptyRuntimeObject returns an empty object prototype that may be used in +// apimachinery and k8s client operations +func (d *resourceDescriptor) EmptyRuntimeObject() rtclient.Object { + return &svcapitypes.ServerlessCacheSnapshot{} +} + +// ResourceFromRuntimeObject returns an AWSResource that has been initialized +// with the supplied runtime.Object +func (d *resourceDescriptor) ResourceFromRuntimeObject( + obj rtclient.Object, +) acktypes.AWSResource { + return &resource{ + ko: obj.(*svcapitypes.ServerlessCacheSnapshot), + } +} + +// Delta returns an `ackcompare.Delta` object containing the difference between +// one `AWSResource` and another. +func (d *resourceDescriptor) Delta(a, b acktypes.AWSResource) *ackcompare.Delta { + return newResourceDelta(a.(*resource), b.(*resource)) +} + +// IsManaged returns true if the supplied AWSResource is under the management +// of an ACK service controller. What this means in practice is that the +// underlying custom resource (CR) in the AWSResource has had a +// resource-specific finalizer associated with it. +func (d *resourceDescriptor) IsManaged( + res acktypes.AWSResource, +) bool { + obj := res.RuntimeObject() + if obj == nil { + // Should not happen. If it does, there is a bug in the code + panic("nil RuntimeMetaObject in AWSResource") + } + // Remove use of custom code once + // https://github.com/kubernetes-sigs/controller-runtime/issues/994 is + // fixed. This should be able to be: + // + // return k8sctrlutil.ContainsFinalizer(obj, FinalizerString) + return containsFinalizer(obj, FinalizerString) +} + +// Remove once https://github.com/kubernetes-sigs/controller-runtime/issues/994 +// is fixed. +func containsFinalizer(obj rtclient.Object, finalizer string) bool { + f := obj.GetFinalizers() + for _, e := range f { + if e == finalizer { + return true + } + } + return false +} + +// MarkManaged places the supplied resource under the management of ACK. 
What +// this typically means is that the resource manager will decorate the +// underlying custom resource (CR) with a finalizer that indicates ACK is +// managing the resource and the underlying CR may not be deleted until ACK is +// finished cleaning up any backend AWS service resources associated with the +// CR. +func (d *resourceDescriptor) MarkManaged( + res acktypes.AWSResource, +) { + obj := res.RuntimeObject() + if obj == nil { + // Should not happen. If it does, there is a bug in the code + panic("nil RuntimeMetaObject in AWSResource") + } + k8sctrlutil.AddFinalizer(obj, FinalizerString) +} + +// MarkUnmanaged removes the supplied resource from management by ACK. What +// this typically means is that the resource manager will remove a finalizer +// underlying custom resource (CR) that indicates ACK is managing the resource. +// This will allow the Kubernetes API server to delete the underlying CR. +func (d *resourceDescriptor) MarkUnmanaged( + res acktypes.AWSResource, +) { + obj := res.RuntimeObject() + if obj == nil { + // Should not happen. If it does, there is a bug in the code + panic("nil RuntimeMetaObject in AWSResource") + } + k8sctrlutil.RemoveFinalizer(obj, FinalizerString) +} + +// MarkAdopted places descriptors on the custom resource that indicate the +// resource was not created from within ACK. +func (d *resourceDescriptor) MarkAdopted( + res acktypes.AWSResource, +) { + obj := res.RuntimeObject() + if obj == nil { + // Should not happen. If it does, there is a bug in the code + panic("nil RuntimeObject in AWSResource") + } + curr := obj.GetAnnotations() + if curr == nil { + curr = make(map[string]string) + } + curr[ackv1alpha1.AnnotationAdopted] = "true" + obj.SetAnnotations(curr) +} diff --git a/pkg/resource/serverless_cache_snapshot/hooks.go b/pkg/resource/serverless_cache_snapshot/hooks.go new file mode 100644 index 00000000..1e5a3978 --- /dev/null +++ b/pkg/resource/serverless_cache_snapshot/hooks.go @@ -0,0 +1,106 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package serverless_cache_snapshot + +import ( + "context" + "fmt" + "time" + + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" + "github.com/aws-controllers-k8s/elasticache-controller/pkg/util" + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" + ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" +) + +const ( + ServerlessCacheSnapshotStatusAvailable = "available" +) + +var requeueWaitUntilCanModify = 10 * time.Second + +// customUpdateServerlessCacheSnapshot handles updates for serverless cache snapshots. +// Since immutable fields are enforced by generator configuration, this method +// only needs to handle tag updates. 
+func (rm *resourceManager) customUpdateServerlessCacheSnapshot( + ctx context.Context, + desired *resource, + latest *resource, + delta *ackcompare.Delta, +) (updated *resource, err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.customUpdateServerlessCacheSnapshot") + defer func() { exit(err) }() + + // Check if the snapshot is in a state that allows updates + if !isServerlessCacheSnapshotAvailable(latest) { + return desired, ackrequeue.NeededAfter( + fmt.Errorf("snapshot not in active state"), + requeueWaitUntilCanModify, + ) + } + + ko := desired.ko.DeepCopy() + rm.setStatusDefaults(ko) + + // Handle tag updates + if delta.DifferentAt("Spec.Tags") { + if err := rm.syncTags(ctx, desired, latest); err != nil { + return &resource{ko}, err + } + return &resource{ko}, nil + } + + return latest, nil +} + +// isServerlessCacheSnapshotAvailable returns true if the snapshot is in a state +// that allows modifications (currently only tag updates) +func isServerlessCacheSnapshotAvailable(r *resource) bool { + if r.ko.Status.Status == nil { + return false + } + status := *r.ko.Status.Status + return status == ServerlessCacheSnapshotStatusAvailable +} + +// getTags retrieves the tags for a given ServerlessCacheSnapshot +func (rm *resourceManager) getTags( + ctx context.Context, + resourceARN string, +) ([]*svcapitypes.Tag, error) { + return util.GetTags(ctx, rm.sdkapi, rm.metrics, resourceARN) +} + +// syncTags synchronizes the tags between the resource spec and the AWS resource +func (rm *resourceManager) syncTags( + ctx context.Context, + desired *resource, + latest *resource, +) error { + if latest.ko.Status.ACKResourceMetadata == nil || latest.ko.Status.ACKResourceMetadata.ARN == nil { + return nil + } + + return util.SyncTags( + ctx, + desired.ko.Spec.Tags, + latest.ko.Spec.Tags, + latest.ko.Status.ACKResourceMetadata, + convertToOrderedACKTags, + rm.sdkapi, + rm.metrics, + ) +} diff --git a/pkg/resource/serverless_cache_snapshot/identifiers.go b/pkg/resource/serverless_cache_snapshot/identifiers.go new file mode 100644 index 00000000..8053ffc6 --- /dev/null +++ b/pkg/resource/serverless_cache_snapshot/identifiers.go @@ -0,0 +1,55 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package serverless_cache_snapshot + +import ( + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" +) + +// resourceIdentifiers implements the +// `aws-service-operator-k8s/pkg/types.AWSResourceIdentifiers` interface +type resourceIdentifiers struct { + meta *ackv1alpha1.ResourceMetadata +} + +// ARN returns the AWS Resource Name for the backend AWS resource. If nil, +// this means the resource has not yet been created in the backend AWS +// service. 
+func (ri *resourceIdentifiers) ARN() *ackv1alpha1.AWSResourceName { + if ri.meta != nil { + return ri.meta.ARN + } + return nil +} + +// OwnerAccountID returns the AWS account identifier in which the +// backend AWS resource resides, or nil if this information is not known +// for the resource +func (ri *resourceIdentifiers) OwnerAccountID() *ackv1alpha1.AWSAccountID { + if ri.meta != nil { + return ri.meta.OwnerAccountID + } + return nil +} + +// Region returns the AWS region in which the resource exists, or +// nil if this information is not known. +func (ri *resourceIdentifiers) Region() *ackv1alpha1.AWSRegion { + if ri.meta != nil { + return ri.meta.Region + } + return nil +} diff --git a/pkg/resource/serverless_cache_snapshot/manager.go b/pkg/resource/serverless_cache_snapshot/manager.go new file mode 100644 index 00000000..52b65288 --- /dev/null +++ b/pkg/resource/serverless_cache_snapshot/manager.go @@ -0,0 +1,412 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package serverless_cache_snapshot + +import ( + "context" + "fmt" + "time" + + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + ackcondition "github.com/aws-controllers-k8s/runtime/pkg/condition" + ackcfg "github.com/aws-controllers-k8s/runtime/pkg/config" + ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" + ackmetrics "github.com/aws-controllers-k8s/runtime/pkg/metrics" + ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" + ackrt "github.com/aws-controllers-k8s/runtime/pkg/runtime" + ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" + acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" + acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" + ackutil "github.com/aws-controllers-k8s/runtime/pkg/util" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" +) + +var ( + _ = ackutil.InStrings + _ = acktags.NewTags() + _ = ackrt.MissingImageTagValue + _ = svcapitypes.ServerlessCacheSnapshot{} +) + +// +kubebuilder:rbac:groups=elasticache.services.k8s.aws,resources=serverlesscachesnapshots,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=elasticache.services.k8s.aws,resources=serverlesscachesnapshots/status,verbs=get;update;patch + +var lateInitializeFieldNames = []string{} + +// resourceManager is responsible for providing a consistent way to perform +// CRUD operations in a backend AWS service API for Book custom resources. 
+type resourceManager struct { + // cfg is a copy of the ackcfg.Config object passed on start of the service + // controller + cfg ackcfg.Config + // clientcfg is a copy of the client configuration passed on start of the + // service controller + clientcfg aws.Config + // log refers to the logr.Logger object handling logging for the service + // controller + log logr.Logger + // metrics contains a collection of Prometheus metric objects that the + // service controller and its reconcilers track + metrics *ackmetrics.Metrics + // rr is the Reconciler which can be used for various utility + // functions such as querying for Secret values given a SecretReference + rr acktypes.Reconciler + // awsAccountID is the AWS account identifier that contains the resources + // managed by this resource manager + awsAccountID ackv1alpha1.AWSAccountID + // The AWS Region that this resource manager targets + awsRegion ackv1alpha1.AWSRegion + // sdk is a pointer to the AWS service API client exposed by the + // aws-sdk-go-v2/services/{alias} package. + sdkapi *svcsdk.Client +} + +// concreteResource returns a pointer to a resource from the supplied +// generic AWSResource interface +func (rm *resourceManager) concreteResource( + res acktypes.AWSResource, +) *resource { + // cast the generic interface into a pointer type specific to the concrete + // implementing resource type managed by this resource manager + return res.(*resource) +} + +// ReadOne returns the currently-observed state of the supplied AWSResource in +// the backend AWS service API. +func (rm *resourceManager) ReadOne( + ctx context.Context, + res acktypes.AWSResource, +) (acktypes.AWSResource, error) { + r := rm.concreteResource(res) + if r.ko == nil { + // Should never happen... if it does, it's buggy code. + panic("resource manager's ReadOne() method received resource with nil CR object") + } + observed, err := rm.sdkFind(ctx, r) + mirrorAWSTags(r, observed) + if err != nil { + if observed != nil { + return rm.onError(observed, err) + } + return rm.onError(r, err) + } + return rm.onSuccess(observed) +} + +// Create attempts to create the supplied AWSResource in the backend AWS +// service API, returning an AWSResource representing the newly-created +// resource +func (rm *resourceManager) Create( + ctx context.Context, + res acktypes.AWSResource, +) (acktypes.AWSResource, error) { + r := rm.concreteResource(res) + if r.ko == nil { + // Should never happen... if it does, it's buggy code. + panic("resource manager's Create() method received resource with nil CR object") + } + created, err := rm.sdkCreate(ctx, r) + if err != nil { + if created != nil { + return rm.onError(created, err) + } + return rm.onError(r, err) + } + return rm.onSuccess(created) +} + +// Update attempts to mutate the supplied desired AWSResource in the backend AWS +// service API, returning an AWSResource representing the newly-mutated +// resource. +// Note for specialized logic implementers can check to see how the latest +// observed resource differs from the supplied desired state. 
The +// higher-level reonciler determines whether or not the desired differs +// from the latest observed and decides whether to call the resource +// manager's Update method +func (rm *resourceManager) Update( + ctx context.Context, + resDesired acktypes.AWSResource, + resLatest acktypes.AWSResource, + delta *ackcompare.Delta, +) (acktypes.AWSResource, error) { + desired := rm.concreteResource(resDesired) + latest := rm.concreteResource(resLatest) + if desired.ko == nil || latest.ko == nil { + // Should never happen... if it does, it's buggy code. + panic("resource manager's Update() method received resource with nil CR object") + } + updated, err := rm.sdkUpdate(ctx, desired, latest, delta) + if err != nil { + if updated != nil { + return rm.onError(updated, err) + } + return rm.onError(latest, err) + } + return rm.onSuccess(updated) +} + +// Delete attempts to destroy the supplied AWSResource in the backend AWS +// service API, returning an AWSResource representing the +// resource being deleted (if delete is asynchronous and takes time) +func (rm *resourceManager) Delete( + ctx context.Context, + res acktypes.AWSResource, +) (acktypes.AWSResource, error) { + r := rm.concreteResource(res) + if r.ko == nil { + // Should never happen... if it does, it's buggy code. + panic("resource manager's Update() method received resource with nil CR object") + } + observed, err := rm.sdkDelete(ctx, r) + if err != nil { + if observed != nil { + return rm.onError(observed, err) + } + return rm.onError(r, err) + } + + return rm.onSuccess(observed) +} + +// ARNFromName returns an AWS Resource Name from a given string name. This +// is useful for constructing ARNs for APIs that require ARNs in their +// GetAttributes operations but all we have (for new CRs at least) is a +// name for the resource +func (rm *resourceManager) ARNFromName(name string) string { + return fmt.Sprintf( + "arn:aws:elasticache:%s:%s:%s", + rm.awsRegion, + rm.awsAccountID, + name, + ) +} + +// LateInitialize returns an acktypes.AWSResource after setting the late initialized +// fields from the readOne call. This method will initialize the optional fields +// which were not provided by the k8s user but were defaulted by the AWS service. +// If there are no such fields to be initialized, the returned object is similar to +// object passed in the parameter. 
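
ARNFromName above simply joins region, account ID and name; a runnable sketch of the string it yields (all values are placeholders, and real ElastiCache ARNs also carry a resource-type segment such as "serverlesscachesnapshot:", which this helper does not add):

package main

import "fmt"

func main() {
	region, account, name := "us-west-2", "111122223333", "my-snapshot"
	fmt.Printf("arn:aws:elasticache:%s:%s:%s\n", region, account, name)
	// Output: arn:aws:elasticache:us-west-2:111122223333:my-snapshot
}
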
+func (rm *resourceManager) LateInitialize( + ctx context.Context, + latest acktypes.AWSResource, +) (acktypes.AWSResource, error) { + rlog := ackrtlog.FromContext(ctx) + // If there are no fields to late initialize, do nothing + if len(lateInitializeFieldNames) == 0 { + rlog.Debug("no late initialization required.") + return latest, nil + } + latestCopy := latest.DeepCopy() + lateInitConditionReason := "" + lateInitConditionMessage := "" + observed, err := rm.ReadOne(ctx, latestCopy) + if err != nil { + lateInitConditionMessage = "Unable to complete Read operation required for late initialization" + lateInitConditionReason = "Late Initialization Failure" + ackcondition.SetLateInitialized(latestCopy, corev1.ConditionFalse, &lateInitConditionMessage, &lateInitConditionReason) + ackcondition.SetSynced(latestCopy, corev1.ConditionFalse, nil, nil) + return latestCopy, err + } + lateInitializedRes := rm.lateInitializeFromReadOneOutput(observed, latestCopy) + incompleteInitialization := rm.incompleteLateInitialization(lateInitializedRes) + if incompleteInitialization { + // Add the condition with LateInitialized=False + lateInitConditionMessage = "Late initialization did not complete, requeuing with delay of 5 seconds" + lateInitConditionReason = "Delayed Late Initialization" + ackcondition.SetLateInitialized(lateInitializedRes, corev1.ConditionFalse, &lateInitConditionMessage, &lateInitConditionReason) + ackcondition.SetSynced(lateInitializedRes, corev1.ConditionFalse, nil, nil) + return lateInitializedRes, ackrequeue.NeededAfter(nil, time.Duration(5)*time.Second) + } + // Set LateInitialized condition to True + lateInitConditionMessage = "Late initialization successful" + lateInitConditionReason = "Late initialization successful" + ackcondition.SetLateInitialized(lateInitializedRes, corev1.ConditionTrue, &lateInitConditionMessage, &lateInitConditionReason) + return lateInitializedRes, nil +} + +// incompleteLateInitialization return true if there are fields which were supposed to be +// late initialized but are not. If all the fields are late initialized, false is returned +func (rm *resourceManager) incompleteLateInitialization( + res acktypes.AWSResource, +) bool { + return false +} + +// lateInitializeFromReadOneOutput late initializes the 'latest' resource from the 'observed' +// resource and returns 'latest' resource +func (rm *resourceManager) lateInitializeFromReadOneOutput( + observed acktypes.AWSResource, + latest acktypes.AWSResource, +) acktypes.AWSResource { + return latest +} + +// IsSynced returns true if the resource is synced. +func (rm *resourceManager) IsSynced(ctx context.Context, res acktypes.AWSResource) (bool, error) { + r := rm.concreteResource(res) + if r.ko == nil { + // Should never happen... if it does, it's buggy code. + panic("resource manager's IsSynced() method received resource with nil CR object") + } + + if r.ko.Status.Status == nil { + return false, nil + } + statusCandidates := []string{"available", "create_failed"} + if !ackutil.InStrings(*r.ko.Status.Status, statusCandidates) { + return false, nil + } + + return true, nil +} + +// EnsureTags ensures that tags are present inside the AWSResource. +// If the AWSResource does not have any existing resource tags, the 'tags' +// field is initialized and the controller tags are added. +// If the AWSResource has existing resource tags, then controller tags are +// added to the existing resource tags without overriding them. 
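
IsSynced above treats only "available" and "create_failed" as settled states; a small same-package sketch (the status strings come from the candidate list in the method, everything else is a placeholder):

func ExampleIsSyncedStatuses() {
	rm := &resourceManager{} // IsSynced only inspects the resource, so a zero value suffices here
	for _, s := range []string{"creating", "available", "create_failed"} {
		status := s
		r := &resource{ko: &svcapitypes.ServerlessCacheSnapshot{}}
		r.ko.Status.Status = &status
		synced, _ := rm.IsSynced(context.Background(), r)
		fmt.Printf("%s synced=%v\n", s, synced) // "creating" is false, the other two are true
	}
}
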
+// If the AWSResource does not support tags, only then the controller tags +// will not be added to the AWSResource. +func (rm *resourceManager) EnsureTags( + ctx context.Context, + res acktypes.AWSResource, + md acktypes.ServiceControllerMetadata, +) error { + r := rm.concreteResource(res) + if r.ko == nil { + // Should never happen... if it does, it's buggy code. + panic("resource manager's EnsureTags method received resource with nil CR object") + } + defaultTags := ackrt.GetDefaultTags(&rm.cfg, r.ko, md) + var existingTags []*svcapitypes.Tag + existingTags = r.ko.Spec.Tags + resourceTags, keyOrder := convertToOrderedACKTags(existingTags) + tags := acktags.Merge(resourceTags, defaultTags) + r.ko.Spec.Tags = fromACKTags(tags, keyOrder) + return nil +} + +// FilterAWSTags ignores tags that have keys that start with "aws:" +// is needed to ensure the controller does not attempt to remove +// tags set by AWS. This function needs to be called after each Read +// operation. +// Eg. resources created with cloudformation have tags that cannot be +// removed by an ACK controller +func (rm *resourceManager) FilterSystemTags(res acktypes.AWSResource) { + r := rm.concreteResource(res) + if r == nil || r.ko == nil { + return + } + var existingTags []*svcapitypes.Tag + existingTags = r.ko.Spec.Tags + resourceTags, tagKeyOrder := convertToOrderedACKTags(existingTags) + ignoreSystemTags(resourceTags) + r.ko.Spec.Tags = fromACKTags(resourceTags, tagKeyOrder) +} + +// mirrorAWSTags ensures that AWS tags are included in the desired resource +// if they are present in the latest resource. This will ensure that the +// aws tags are not present in a diff. The logic of the controller will +// ensure these tags aren't patched to the resource in the cluster, and +// will only be present to make sure we don't try to remove these tags. +// +// Although there are a lot of similarities between this function and +// EnsureTags, they are very much different. +// While EnsureTags tries to make sure the resource contains the controller +// tags, mirrowAWSTags tries to make sure tags injected by AWS are mirrored +// from the latest resoruce to the desired resource. +func mirrorAWSTags(a *resource, b *resource) { + if a == nil || a.ko == nil || b == nil || b.ko == nil { + return + } + var existingLatestTags []*svcapitypes.Tag + var existingDesiredTags []*svcapitypes.Tag + existingDesiredTags = a.ko.Spec.Tags + existingLatestTags = b.ko.Spec.Tags + desiredTags, desiredTagKeyOrder := convertToOrderedACKTags(existingDesiredTags) + latestTags, _ := convertToOrderedACKTags(existingLatestTags) + syncAWSTags(desiredTags, latestTags) + a.ko.Spec.Tags = fromACKTags(desiredTags, desiredTagKeyOrder) +} + +// newResourceManager returns a new struct implementing +// acktypes.AWSResourceManager +// This is for AWS-SDK-GO-V2 - Created newResourceManager With AWS sdk-Go-ClientV2 +func newResourceManager( + cfg ackcfg.Config, + clientcfg aws.Config, + log logr.Logger, + metrics *ackmetrics.Metrics, + rr acktypes.Reconciler, + id ackv1alpha1.AWSAccountID, + region ackv1alpha1.AWSRegion, +) (*resourceManager, error) { + return &resourceManager{ + cfg: cfg, + clientcfg: clientcfg, + log: log, + metrics: metrics, + rr: rr, + awsAccountID: id, + awsRegion: region, + sdkapi: svcsdk.NewFromConfig(clientcfg), + }, nil +} + +// onError updates resource conditions and returns updated resource +// it returns nil if no condition is updated. 
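
EnsureTags above folds the controller's default tags into whatever the user already set, using acktags.Merge. A hedged sketch of that merge, assuming acktags.Tags behaves like the map it is indexed as elsewhere in this diff; the key names are illustrative:

func ExampleMergeDefaultTags() {
	userTags := acktags.NewTags()
	userTags["environment"] = "dev"

	defaultTags := acktags.NewTags()
	defaultTags["services.k8s.aws/namespace"] = "default"

	merged := acktags.Merge(userTags, defaultTags)
	fmt.Println(len(merged)) // 2: the user tag is kept and the controller tag is added
}
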
+func (rm *resourceManager) onError( + r *resource, + err error, +) (acktypes.AWSResource, error) { + if r == nil { + return nil, err + } + r1, updated := rm.updateConditions(r, false, err) + if !updated { + return r, err + } + for _, condition := range r1.Conditions() { + if condition.Type == ackv1alpha1.ConditionTypeTerminal && + condition.Status == corev1.ConditionTrue { + // resource is in Terminal condition + // return Terminal error + return r1, ackerr.Terminal + } + } + return r1, err +} + +// onSuccess updates resource conditions and returns updated resource +// it returns the supplied resource if no condition is updated. +func (rm *resourceManager) onSuccess( + r *resource, +) (acktypes.AWSResource, error) { + if r == nil { + return nil, nil + } + r1, updated := rm.updateConditions(r, true, nil) + if !updated { + return r, nil + } + return r1, nil +} diff --git a/pkg/resource/serverless_cache_snapshot/manager_factory.go b/pkg/resource/serverless_cache_snapshot/manager_factory.go new file mode 100644 index 00000000..94ee48ca --- /dev/null +++ b/pkg/resource/serverless_cache_snapshot/manager_factory.go @@ -0,0 +1,100 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package serverless_cache_snapshot + +import ( + "fmt" + "sync" + + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcfg "github.com/aws-controllers-k8s/runtime/pkg/config" + ackmetrics "github.com/aws-controllers-k8s/runtime/pkg/metrics" + acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/go-logr/logr" + + svcresource "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource" +) + +// resourceManagerFactory produces resourceManager objects. It implements the +// `types.AWSResourceManagerFactory` interface. +type resourceManagerFactory struct { + sync.RWMutex + // rmCache contains resource managers for a particular AWS account ID + rmCache map[string]*resourceManager +} + +// ResourcePrototype returns an AWSResource that resource managers produced by +// this factory will handle +func (f *resourceManagerFactory) ResourceDescriptor() acktypes.AWSResourceDescriptor { + return &resourceDescriptor{} +} + +// ManagerFor returns a resource manager object that can manage resources for a +// supplied AWS account +func (f *resourceManagerFactory) ManagerFor( + cfg ackcfg.Config, + clientcfg aws.Config, + log logr.Logger, + metrics *ackmetrics.Metrics, + rr acktypes.Reconciler, + id ackv1alpha1.AWSAccountID, + region ackv1alpha1.AWSRegion, + roleARN ackv1alpha1.AWSResourceName, +) (acktypes.AWSResourceManager, error) { + // We use the account ID, region, and role ARN to uniquely identify a + // resource manager. This helps us to avoid creating multiple resource + // managers for the same account/region/roleARN combination. 
+ rmId := fmt.Sprintf("%s/%s/%s", id, region, roleARN) + f.RLock() + rm, found := f.rmCache[rmId] + f.RUnlock() + + if found { + return rm, nil + } + + f.Lock() + defer f.Unlock() + + rm, err := newResourceManager(cfg, clientcfg, log, metrics, rr, id, region) + if err != nil { + return nil, err + } + f.rmCache[rmId] = rm + return rm, nil +} + +// IsAdoptable returns true if the resource is able to be adopted +func (f *resourceManagerFactory) IsAdoptable() bool { + return true +} + +// RequeueOnSuccessSeconds returns true if the resource should be requeued after specified seconds +// Default is false which means resource will not be requeued after success. +func (f *resourceManagerFactory) RequeueOnSuccessSeconds() int { + return 0 +} + +func newResourceManagerFactory() *resourceManagerFactory { + return &resourceManagerFactory{ + rmCache: map[string]*resourceManager{}, + } +} + +func init() { + svcresource.RegisterManagerFactory(newResourceManagerFactory()) +} diff --git a/pkg/resource/serverless_cache_snapshot/references.go b/pkg/resource/serverless_cache_snapshot/references.go new file mode 100644 index 00000000..e1443676 --- /dev/null +++ b/pkg/resource/serverless_cache_snapshot/references.go @@ -0,0 +1,267 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package serverless_cache_snapshot + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + kmsapitypes "github.com/aws-controllers-k8s/kms-controller/apis/v1alpha1" + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" + acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" + + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" +) + +// +kubebuilder:rbac:groups=kms.services.k8s.aws,resources=keys,verbs=get;list +// +kubebuilder:rbac:groups=kms.services.k8s.aws,resources=keys/status,verbs=get;list + +// ClearResolvedReferences removes any reference values that were made +// concrete in the spec. It returns a copy of the input AWSResource which +// contains the original *Ref values, but none of their respective concrete +// values. +func (rm *resourceManager) ClearResolvedReferences(res acktypes.AWSResource) acktypes.AWSResource { + ko := rm.concreteResource(res).ko.DeepCopy() + + if ko.Spec.KMSKeyRef != nil { + ko.Spec.KMSKeyID = nil + } + + if ko.Spec.ServerlessCacheRef != nil { + ko.Spec.ServerlessCacheName = nil + } + + return &resource{ko} +} + +// ResolveReferences finds if there are any Reference field(s) present +// inside AWSResource passed in the parameter and attempts to resolve those +// reference field(s) into their respective target field(s). 
It returns a +// copy of the input AWSResource with resolved reference(s), a boolean which +// is set to true if the resource contains any references (regardless of if +// they are resolved successfully) and an error if the passed AWSResource's +// reference field(s) could not be resolved. +func (rm *resourceManager) ResolveReferences( + ctx context.Context, + apiReader client.Reader, + res acktypes.AWSResource, +) (acktypes.AWSResource, bool, error) { + ko := rm.concreteResource(res).ko + + resourceHasReferences := false + err := validateReferenceFields(ko) + if fieldHasReferences, err := rm.resolveReferenceForKMSKeyID(ctx, apiReader, ko); err != nil { + return &resource{ko}, (resourceHasReferences || fieldHasReferences), err + } else { + resourceHasReferences = resourceHasReferences || fieldHasReferences + } + + if fieldHasReferences, err := rm.resolveReferenceForServerlessCacheName(ctx, apiReader, ko); err != nil { + return &resource{ko}, (resourceHasReferences || fieldHasReferences), err + } else { + resourceHasReferences = resourceHasReferences || fieldHasReferences + } + + return &resource{ko}, resourceHasReferences, err +} + +// validateReferenceFields validates the reference field and corresponding +// identifier field. +func validateReferenceFields(ko *svcapitypes.ServerlessCacheSnapshot) error { + + if ko.Spec.KMSKeyRef != nil && ko.Spec.KMSKeyID != nil { + return ackerr.ResourceReferenceAndIDNotSupportedFor("KMSKeyID", "KMSKeyRef") + } + + if ko.Spec.ServerlessCacheRef != nil && ko.Spec.ServerlessCacheName != nil { + return ackerr.ResourceReferenceAndIDNotSupportedFor("ServerlessCacheName", "ServerlessCacheRef") + } + if ko.Spec.ServerlessCacheRef == nil && ko.Spec.ServerlessCacheName == nil { + return ackerr.ResourceReferenceOrIDRequiredFor("ServerlessCacheName", "ServerlessCacheRef") + } + return nil +} + +// resolveReferenceForKMSKeyID reads the resource referenced +// from KMSKeyRef field and sets the KMSKeyID +// from referenced resource. Returns a boolean indicating whether a reference +// contains references, or an error +func (rm *resourceManager) resolveReferenceForKMSKeyID( + ctx context.Context, + apiReader client.Reader, + ko *svcapitypes.ServerlessCacheSnapshot, +) (hasReferences bool, err error) { + if ko.Spec.KMSKeyRef != nil && ko.Spec.KMSKeyRef.From != nil { + hasReferences = true + arr := ko.Spec.KMSKeyRef.From + if arr.Name == nil || *arr.Name == "" { + return hasReferences, fmt.Errorf("provided resource reference is nil or empty: KMSKeyRef") + } + namespace := ko.ObjectMeta.GetNamespace() + if arr.Namespace != nil && *arr.Namespace != "" { + namespace = *arr.Namespace + } + obj := &kmsapitypes.Key{} + if err := getReferencedResourceState_Key(ctx, apiReader, obj, *arr.Name, namespace); err != nil { + return hasReferences, err + } + ko.Spec.KMSKeyID = (*string)(obj.Status.ACKResourceMetadata.ARN) + } + + return hasReferences, nil +} + +// getReferencedResourceState_Key looks up whether a referenced resource +// exists and is in a ACK.ResourceSynced=True state. If the referenced resource does exist and is +// in a Synced state, returns nil, otherwise returns `ackerr.ResourceReferenceTerminalFor` or +// `ResourceReferenceNotSyncedFor` depending on if the resource is in a Terminal state. 
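
validateReferenceFields above enforces that a spec carries either the concrete identifier or the reference, never both. A sketch of the reference form, with the wrapper type names (AWSResourceReferenceWrapper, AWSResourceReference) assumed from the ACK runtime and all values placeholders:

func buildSnapshotSpecWithRef() svcapitypes.ServerlessCacheSnapshotSpec {
	return svcapitypes.ServerlessCacheSnapshotSpec{
		ServerlessCacheSnapshotName: aws.String("my-snapshot"),
		// Resolved into Spec.ServerlessCacheName by ResolveReferences; setting
		// ServerlessCacheName here as well would trip
		// ackerr.ResourceReferenceAndIDNotSupportedFor.
		ServerlessCacheRef: &ackv1alpha1.AWSResourceReferenceWrapper{
			From: &ackv1alpha1.AWSResourceReference{Name: aws.String("my-serverless-cache")},
		},
	}
}
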
+func getReferencedResourceState_Key( + ctx context.Context, + apiReader client.Reader, + obj *kmsapitypes.Key, + name string, // the Kubernetes name of the referenced resource + namespace string, // the Kubernetes namespace of the referenced resource +) error { + namespacedName := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + err := apiReader.Get(ctx, namespacedName, obj) + if err != nil { + return err + } + var refResourceTerminal bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeTerminal && + cond.Status == corev1.ConditionTrue { + return ackerr.ResourceReferenceTerminalFor( + "Key", + namespace, name) + } + } + if refResourceTerminal { + return ackerr.ResourceReferenceTerminalFor( + "Key", + namespace, name) + } + var refResourceSynced bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && + cond.Status == corev1.ConditionTrue { + refResourceSynced = true + } + } + if !refResourceSynced { + return ackerr.ResourceReferenceNotSyncedFor( + "Key", + namespace, name) + } + if obj.Status.ACKResourceMetadata == nil || obj.Status.ACKResourceMetadata.ARN == nil { + return ackerr.ResourceReferenceMissingTargetFieldFor( + "Key", + namespace, name, + "Status.ACKResourceMetadata.ARN") + } + return nil +} + +// resolveReferenceForServerlessCacheName reads the resource referenced +// from ServerlessCacheRef field and sets the ServerlessCacheName +// from referenced resource. Returns a boolean indicating whether a reference +// contains references, or an error +func (rm *resourceManager) resolveReferenceForServerlessCacheName( + ctx context.Context, + apiReader client.Reader, + ko *svcapitypes.ServerlessCacheSnapshot, +) (hasReferences bool, err error) { + if ko.Spec.ServerlessCacheRef != nil && ko.Spec.ServerlessCacheRef.From != nil { + hasReferences = true + arr := ko.Spec.ServerlessCacheRef.From + if arr.Name == nil || *arr.Name == "" { + return hasReferences, fmt.Errorf("provided resource reference is nil or empty: ServerlessCacheRef") + } + namespace := ko.ObjectMeta.GetNamespace() + if arr.Namespace != nil && *arr.Namespace != "" { + namespace = *arr.Namespace + } + obj := &svcapitypes.ServerlessCache{} + if err := getReferencedResourceState_ServerlessCache(ctx, apiReader, obj, *arr.Name, namespace); err != nil { + return hasReferences, err + } + ko.Spec.ServerlessCacheName = (*string)(obj.Spec.ServerlessCacheName) + } + + return hasReferences, nil +} + +// getReferencedResourceState_ServerlessCache looks up whether a referenced resource +// exists and is in a ACK.ResourceSynced=True state. If the referenced resource does exist and is +// in a Synced state, returns nil, otherwise returns `ackerr.ResourceReferenceTerminalFor` or +// `ResourceReferenceNotSyncedFor` depending on if the resource is in a Terminal state. 
+func getReferencedResourceState_ServerlessCache( + ctx context.Context, + apiReader client.Reader, + obj *svcapitypes.ServerlessCache, + name string, // the Kubernetes name of the referenced resource + namespace string, // the Kubernetes namespace of the referenced resource +) error { + namespacedName := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + err := apiReader.Get(ctx, namespacedName, obj) + if err != nil { + return err + } + var refResourceTerminal bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeTerminal && + cond.Status == corev1.ConditionTrue { + return ackerr.ResourceReferenceTerminalFor( + "ServerlessCache", + namespace, name) + } + } + if refResourceTerminal { + return ackerr.ResourceReferenceTerminalFor( + "ServerlessCache", + namespace, name) + } + var refResourceSynced bool + for _, cond := range obj.Status.Conditions { + if cond.Type == ackv1alpha1.ConditionTypeResourceSynced && + cond.Status == corev1.ConditionTrue { + refResourceSynced = true + } + } + if !refResourceSynced { + return ackerr.ResourceReferenceNotSyncedFor( + "ServerlessCache", + namespace, name) + } + if obj.Spec.ServerlessCacheName == nil { + return ackerr.ResourceReferenceMissingTargetFieldFor( + "ServerlessCache", + namespace, name, + "Spec.ServerlessCacheName") + } + return nil +} diff --git a/pkg/resource/serverless_cache_snapshot/resource.go b/pkg/resource/serverless_cache_snapshot/resource.go new file mode 100644 index 00000000..8aef1184 --- /dev/null +++ b/pkg/resource/serverless_cache_snapshot/resource.go @@ -0,0 +1,132 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package serverless_cache_snapshot + +import ( + "fmt" + + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackerrors "github.com/aws-controllers-k8s/runtime/pkg/errors" + acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" + "github.com/aws/aws-sdk-go-v2/aws" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + rtclient "sigs.k8s.io/controller-runtime/pkg/client" + + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" +) + +// Hack to avoid import errors during build... 
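
For adopted resources, the controller populates the CR from a plain string map. A same-package sketch of the field map that PopulateResourceFromAnnotation (defined in resource.go below) expects, with placeholder values:

func ExampleAdoptionFields() {
	fields := map[string]string{
		"serverlessCacheSnapshotName": "my-snapshot", // required; omitting it yields a terminal error
		"serverlessCacheName":         "my-serverless-cache",
	}
	r := &resource{ko: &svcapitypes.ServerlessCacheSnapshot{}}
	if err := r.PopulateResourceFromAnnotation(fields); err != nil {
		fmt.Println("adoption failed:", err)
		return
	}
	fmt.Println(*r.ko.Spec.ServerlessCacheSnapshotName) // my-snapshot
}
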
+var ( + _ = &ackerrors.MissingNameIdentifier +) + +// resource implements the `aws-controller-k8s/runtime/pkg/types.AWSResource` +// interface +type resource struct { + // The Kubernetes-native CR representing the resource + ko *svcapitypes.ServerlessCacheSnapshot +} + +// Identifiers returns an AWSResourceIdentifiers object containing various +// identifying information, including the AWS account ID that owns the +// resource, the resource's AWS Resource Name (ARN) +func (r *resource) Identifiers() acktypes.AWSResourceIdentifiers { + return &resourceIdentifiers{r.ko.Status.ACKResourceMetadata} +} + +// IsBeingDeleted returns true if the Kubernetes resource has a non-zero +// deletion timestamp +func (r *resource) IsBeingDeleted() bool { + return !r.ko.DeletionTimestamp.IsZero() +} + +// RuntimeObject returns the Kubernetes apimachinery/runtime representation of +// the AWSResource +func (r *resource) RuntimeObject() rtclient.Object { + return r.ko +} + +// MetaObject returns the Kubernetes apimachinery/apis/meta/v1.Object +// representation of the AWSResource +func (r *resource) MetaObject() metav1.Object { + return r.ko.GetObjectMeta() +} + +// Conditions returns the ACK Conditions collection for the AWSResource +func (r *resource) Conditions() []*ackv1alpha1.Condition { + return r.ko.Status.Conditions +} + +// ReplaceConditions sets the Conditions status field for the resource +func (r *resource) ReplaceConditions(conditions []*ackv1alpha1.Condition) { + r.ko.Status.Conditions = conditions +} + +// SetObjectMeta sets the ObjectMeta field for the resource +func (r *resource) SetObjectMeta(meta metav1.ObjectMeta) { + r.ko.ObjectMeta = meta +} + +// SetStatus will set the Status field for the resource +func (r *resource) SetStatus(desired acktypes.AWSResource) { + r.ko.Status = desired.(*resource).ko.Status +} + +// SetIdentifiers sets the Spec or Status field that is referenced as the unique +// resource identifier +func (r *resource) SetIdentifiers(identifier *ackv1alpha1.AWSIdentifiers) error { + if identifier.NameOrID == "" { + return ackerrors.MissingNameIdentifier + } + r.ko.Spec.ServerlessCacheSnapshotName = &identifier.NameOrID + + f2, f2ok := identifier.AdditionalKeys["serverlessCacheName"] + if f2ok { + r.ko.Spec.ServerlessCacheName = aws.String(f2) + } + f4, f4ok := identifier.AdditionalKeys["snapshotType"] + if f4ok { + r.ko.Status.SnapshotType = aws.String(f4) + } + + return nil +} + +// PopulateResourceFromAnnotation populates the fields passed from adoption annotation +func (r *resource) PopulateResourceFromAnnotation(fields map[string]string) error { + f3, ok := fields["serverlessCacheSnapshotName"] + if !ok { + return ackerrors.NewTerminalError(fmt.Errorf("required field missing: serverlessCacheSnapshotName")) + } + r.ko.Spec.ServerlessCacheSnapshotName = &f3 + + f2, f2ok := fields["serverlessCacheName"] + if f2ok { + r.ko.Spec.ServerlessCacheName = aws.String(f2) + } + f4, f4ok := fields["snapshotType"] + if f4ok { + r.ko.Status.SnapshotType = aws.String(f4) + } + + return nil +} + +// DeepCopy will return a copy of the resource +func (r *resource) DeepCopy() acktypes.AWSResource { + koCopy := r.ko.DeepCopy() + return &resource{koCopy} +} diff --git a/pkg/resource/serverless_cache_snapshot/sdk.go b/pkg/resource/serverless_cache_snapshot/sdk.go new file mode 100644 index 00000000..57cb676a --- /dev/null +++ b/pkg/resource/serverless_cache_snapshot/sdk.go @@ -0,0 +1,492 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package serverless_cache_snapshot + +import ( + "context" + "errors" + "fmt" + "reflect" + "strings" + + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + ackcondition "github.com/aws-controllers-k8s/runtime/pkg/condition" + ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" + ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" + ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + svcsdktypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" + smithy "github.com/aws/smithy-go" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" +) + +// Hack to avoid import errors during build... +var ( + _ = &metav1.Time{} + _ = strings.ToLower("") + _ = &svcsdk.Client{} + _ = &svcapitypes.ServerlessCacheSnapshot{} + _ = ackv1alpha1.AWSAccountID("") + _ = &ackerr.NotFound + _ = &ackcondition.NotManagedMessage + _ = &reflect.Value{} + _ = fmt.Sprintf("") + _ = &ackrequeue.NoRequeue{} + _ = &aws.Config{} +) + +// sdkFind returns SDK-specific information about a supplied resource +func (rm *resourceManager) sdkFind( + ctx context.Context, + r *resource, +) (latest *resource, err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.sdkFind") + defer func() { + exit(err) + }() + // If any required fields in the input shape are missing, AWS resource is + // not created yet. Return NotFound here to indicate to callers that the + // resource isn't yet created. 
+ if rm.requiredFieldsMissingFromReadManyInput(r) { + return nil, ackerr.NotFound + } + + input, err := rm.newListRequestPayload(r) + if err != nil { + return nil, err + } + var resp *svcsdk.DescribeServerlessCacheSnapshotsOutput + resp, err = rm.sdkapi.DescribeServerlessCacheSnapshots(ctx, input) + rm.metrics.RecordAPICall("READ_MANY", "DescribeServerlessCacheSnapshots", err) + if err != nil { + var awsErr smithy.APIError + if errors.As(err, &awsErr) && awsErr.ErrorCode() == "ServerlessCacheSnapshotNotFoundFault" { + return nil, ackerr.NotFound + } + return nil, err + } + + // Merge in the information we read from the API call above to the copy of + // the original Kubernetes object we passed to the function + ko := r.ko.DeepCopy() + + found := false + for _, elem := range resp.ServerlessCacheSnapshots { + if elem.ARN != nil { + if ko.Status.ACKResourceMetadata == nil { + ko.Status.ACKResourceMetadata = &ackv1alpha1.ResourceMetadata{} + } + tmpARN := ackv1alpha1.AWSResourceName(*elem.ARN) + ko.Status.ACKResourceMetadata.ARN = &tmpARN + } + if elem.BytesUsedForCache != nil { + ko.Status.BytesUsedForCache = elem.BytesUsedForCache + } else { + ko.Status.BytesUsedForCache = nil + } + if elem.CreateTime != nil { + ko.Status.CreateTime = &metav1.Time{*elem.CreateTime} + } else { + ko.Status.CreateTime = nil + } + if elem.ExpiryTime != nil { + ko.Status.ExpiryTime = &metav1.Time{*elem.ExpiryTime} + } else { + ko.Status.ExpiryTime = nil + } + if elem.KmsKeyId != nil { + ko.Spec.KMSKeyID = elem.KmsKeyId + } else { + ko.Spec.KMSKeyID = nil + } + if elem.ServerlessCacheConfiguration != nil { + f5 := &svcapitypes.ServerlessCacheConfiguration{} + if elem.ServerlessCacheConfiguration.Engine != nil { + f5.Engine = elem.ServerlessCacheConfiguration.Engine + } + if elem.ServerlessCacheConfiguration.MajorEngineVersion != nil { + f5.MajorEngineVersion = elem.ServerlessCacheConfiguration.MajorEngineVersion + } + if elem.ServerlessCacheConfiguration.ServerlessCacheName != nil { + f5.ServerlessCacheName = elem.ServerlessCacheConfiguration.ServerlessCacheName + } + ko.Status.ServerlessCacheConfiguration = f5 + } else { + ko.Status.ServerlessCacheConfiguration = nil + } + if elem.ServerlessCacheSnapshotName != nil { + ko.Spec.ServerlessCacheSnapshotName = elem.ServerlessCacheSnapshotName + } else { + ko.Spec.ServerlessCacheSnapshotName = nil + } + if elem.SnapshotType != nil { + ko.Status.SnapshotType = elem.SnapshotType + } else { + ko.Status.SnapshotType = nil + } + if elem.Status != nil { + ko.Status.Status = elem.Status + } else { + ko.Status.Status = nil + } + found = true + break + } + if !found { + return nil, ackerr.NotFound + } + + rm.setStatusDefaults(ko) + // Only fetch tags if the snapshot is available + // ListTagsForResource fails when snapshot is still creating + if ko.Status.ACKResourceMetadata != nil && ko.Status.ACKResourceMetadata.ARN != nil && + isServerlessCacheSnapshotAvailable(&resource{ko}) { + resourceARN := (*string)(ko.Status.ACKResourceMetadata.ARN) + tags, err := rm.getTags(ctx, *resourceARN) + if err != nil { + return nil, err + } + ko.Spec.Tags = tags + } + return &resource{ko}, nil +} + +// requiredFieldsMissingFromReadManyInput returns true if there are any fields +// for the ReadMany Input shape that are required but not present in the +// resource's Spec or Status +func (rm *resourceManager) requiredFieldsMissingFromReadManyInput( + r *resource, +) bool { + return r.ko.Spec.ServerlessCacheSnapshotName == nil + +} + +// newListRequestPayload returns SDK-specific struct for the 
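
sdkFind above is a thin wrapper over the aws-sdk-go-v2 ElastiCache client. A minimal standalone sketch of the same describe call outside the controller; credentials come from the default config chain and the snapshot name is a placeholder:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/elasticache"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := elasticache.NewFromConfig(cfg)
	out, err := client.DescribeServerlessCacheSnapshots(ctx, &elasticache.DescribeServerlessCacheSnapshotsInput{
		ServerlessCacheSnapshotName: aws.String("my-snapshot"),
	})
	if err != nil {
		log.Fatal(err) // a missing snapshot surfaces as ServerlessCacheSnapshotNotFoundFault
	}
	for _, s := range out.ServerlessCacheSnapshots {
		fmt.Println(aws.ToString(s.ServerlessCacheSnapshotName), aws.ToString(s.Status))
	}
}
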
HTTP request +// payload of the List API call for the resource +func (rm *resourceManager) newListRequestPayload( + r *resource, +) (*svcsdk.DescribeServerlessCacheSnapshotsInput, error) { + res := &svcsdk.DescribeServerlessCacheSnapshotsInput{} + + if r.ko.Spec.ServerlessCacheName != nil { + res.ServerlessCacheName = r.ko.Spec.ServerlessCacheName + } + if r.ko.Spec.ServerlessCacheSnapshotName != nil { + res.ServerlessCacheSnapshotName = r.ko.Spec.ServerlessCacheSnapshotName + } + if r.ko.Status.SnapshotType != nil { + res.SnapshotType = r.ko.Status.SnapshotType + } + + return res, nil +} + +// sdkCreate creates the supplied resource in the backend AWS service API and +// returns a copy of the resource with resource fields (in both Spec and +// Status) filled in with values from the CREATE API operation's Output shape. +func (rm *resourceManager) sdkCreate( + ctx context.Context, + desired *resource, +) (created *resource, err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.sdkCreate") + defer func() { + exit(err) + }() + input, err := rm.newCreateRequestPayload(ctx, desired) + if err != nil { + return nil, err + } + + var resp *svcsdk.CreateServerlessCacheSnapshotOutput + _ = resp + resp, err = rm.sdkapi.CreateServerlessCacheSnapshot(ctx, input) + rm.metrics.RecordAPICall("CREATE", "CreateServerlessCacheSnapshot", err) + if err != nil { + return nil, err + } + // Merge in the information we read from the API call above to the copy of + // the original Kubernetes object we passed to the function + ko := desired.ko.DeepCopy() + + if ko.Status.ACKResourceMetadata == nil { + ko.Status.ACKResourceMetadata = &ackv1alpha1.ResourceMetadata{} + } + if resp.ServerlessCacheSnapshot.ARN != nil { + arn := ackv1alpha1.AWSResourceName(*resp.ServerlessCacheSnapshot.ARN) + ko.Status.ACKResourceMetadata.ARN = &arn + } + if resp.ServerlessCacheSnapshot.BytesUsedForCache != nil { + ko.Status.BytesUsedForCache = resp.ServerlessCacheSnapshot.BytesUsedForCache + } else { + ko.Status.BytesUsedForCache = nil + } + if resp.ServerlessCacheSnapshot.CreateTime != nil { + ko.Status.CreateTime = &metav1.Time{*resp.ServerlessCacheSnapshot.CreateTime} + } else { + ko.Status.CreateTime = nil + } + if resp.ServerlessCacheSnapshot.ExpiryTime != nil { + ko.Status.ExpiryTime = &metav1.Time{*resp.ServerlessCacheSnapshot.ExpiryTime} + } else { + ko.Status.ExpiryTime = nil + } + if resp.ServerlessCacheSnapshot.KmsKeyId != nil { + ko.Spec.KMSKeyID = resp.ServerlessCacheSnapshot.KmsKeyId + } else { + ko.Spec.KMSKeyID = nil + } + if resp.ServerlessCacheSnapshot.ServerlessCacheConfiguration != nil { + f5 := &svcapitypes.ServerlessCacheConfiguration{} + if resp.ServerlessCacheSnapshot.ServerlessCacheConfiguration.Engine != nil { + f5.Engine = resp.ServerlessCacheSnapshot.ServerlessCacheConfiguration.Engine + } + if resp.ServerlessCacheSnapshot.ServerlessCacheConfiguration.MajorEngineVersion != nil { + f5.MajorEngineVersion = resp.ServerlessCacheSnapshot.ServerlessCacheConfiguration.MajorEngineVersion + } + if resp.ServerlessCacheSnapshot.ServerlessCacheConfiguration.ServerlessCacheName != nil { + f5.ServerlessCacheName = resp.ServerlessCacheSnapshot.ServerlessCacheConfiguration.ServerlessCacheName + } + ko.Status.ServerlessCacheConfiguration = f5 + } else { + ko.Status.ServerlessCacheConfiguration = nil + } + if resp.ServerlessCacheSnapshot.ServerlessCacheSnapshotName != nil { + ko.Spec.ServerlessCacheSnapshotName = resp.ServerlessCacheSnapshot.ServerlessCacheSnapshotName + } else { + 
ko.Spec.ServerlessCacheSnapshotName = nil + } + if resp.ServerlessCacheSnapshot.SnapshotType != nil { + ko.Status.SnapshotType = resp.ServerlessCacheSnapshot.SnapshotType + } else { + ko.Status.SnapshotType = nil + } + if resp.ServerlessCacheSnapshot.Status != nil { + ko.Status.Status = resp.ServerlessCacheSnapshot.Status + } else { + ko.Status.Status = nil + } + + rm.setStatusDefaults(ko) + // If tags are specified, mark the resource as needing a sync + if ko.Spec.Tags != nil { + ackcondition.SetSynced(&resource{ko}, corev1.ConditionFalse, nil, nil) + } + return &resource{ko}, nil +} + +// newCreateRequestPayload returns an SDK-specific struct for the HTTP request +// payload of the Create API call for the resource +func (rm *resourceManager) newCreateRequestPayload( + ctx context.Context, + r *resource, +) (*svcsdk.CreateServerlessCacheSnapshotInput, error) { + res := &svcsdk.CreateServerlessCacheSnapshotInput{} + + if r.ko.Spec.KMSKeyID != nil { + res.KmsKeyId = r.ko.Spec.KMSKeyID + } + if r.ko.Spec.ServerlessCacheName != nil { + res.ServerlessCacheName = r.ko.Spec.ServerlessCacheName + } + if r.ko.Spec.ServerlessCacheSnapshotName != nil { + res.ServerlessCacheSnapshotName = r.ko.Spec.ServerlessCacheSnapshotName + } + if r.ko.Spec.Tags != nil { + f3 := []svcsdktypes.Tag{} + for _, f3iter := range r.ko.Spec.Tags { + f3elem := &svcsdktypes.Tag{} + if f3iter.Key != nil { + f3elem.Key = f3iter.Key + } + if f3iter.Value != nil { + f3elem.Value = f3iter.Value + } + f3 = append(f3, *f3elem) + } + res.Tags = f3 + } + + return res, nil +} + +// sdkUpdate patches the supplied resource in the backend AWS service API and +// returns a new resource with updated fields. +func (rm *resourceManager) sdkUpdate( + ctx context.Context, + desired *resource, + latest *resource, + delta *ackcompare.Delta, +) (*resource, error) { + return rm.customUpdateServerlessCacheSnapshot(ctx, desired, latest, delta) +} + +// sdkDelete deletes the supplied resource in the backend AWS service API +func (rm *resourceManager) sdkDelete( + ctx context.Context, + r *resource, +) (latest *resource, err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.sdkDelete") + defer func() { + exit(err) + }() + input, err := rm.newDeleteRequestPayload(r) + if err != nil { + return nil, err + } + var resp *svcsdk.DeleteServerlessCacheSnapshotOutput + _ = resp + resp, err = rm.sdkapi.DeleteServerlessCacheSnapshot(ctx, input) + rm.metrics.RecordAPICall("DELETE", "DeleteServerlessCacheSnapshot", err) + return nil, err +} + +// newDeleteRequestPayload returns an SDK-specific struct for the HTTP request +// payload of the Delete API call for the resource +func (rm *resourceManager) newDeleteRequestPayload( + r *resource, +) (*svcsdk.DeleteServerlessCacheSnapshotInput, error) { + res := &svcsdk.DeleteServerlessCacheSnapshotInput{} + + if r.ko.Spec.ServerlessCacheSnapshotName != nil { + res.ServerlessCacheSnapshotName = r.ko.Spec.ServerlessCacheSnapshotName + } + + return res, nil +} + +// setStatusDefaults sets default properties into supplied custom resource +func (rm *resourceManager) setStatusDefaults( + ko *svcapitypes.ServerlessCacheSnapshot, +) { + if ko.Status.ACKResourceMetadata == nil { + ko.Status.ACKResourceMetadata = &ackv1alpha1.ResourceMetadata{} + } + if ko.Status.ACKResourceMetadata.Region == nil { + ko.Status.ACKResourceMetadata.Region = &rm.awsRegion + } + if ko.Status.ACKResourceMetadata.OwnerAccountID == nil { + ko.Status.ACKResourceMetadata.OwnerAccountID = &rm.awsAccountID + } + if ko.Status.Conditions 
== nil { + ko.Status.Conditions = []*ackv1alpha1.Condition{} + } +} + +// updateConditions returns updated resource, true; if conditions were updated +// else it returns nil, false +func (rm *resourceManager) updateConditions( + r *resource, + onSuccess bool, + err error, +) (*resource, bool) { + ko := r.ko.DeepCopy() + rm.setStatusDefaults(ko) + + // Terminal condition + var terminalCondition *ackv1alpha1.Condition = nil + var recoverableCondition *ackv1alpha1.Condition = nil + var syncCondition *ackv1alpha1.Condition = nil + for _, condition := range ko.Status.Conditions { + if condition.Type == ackv1alpha1.ConditionTypeTerminal { + terminalCondition = condition + } + if condition.Type == ackv1alpha1.ConditionTypeRecoverable { + recoverableCondition = condition + } + if condition.Type == ackv1alpha1.ConditionTypeResourceSynced { + syncCondition = condition + } + } + var termError *ackerr.TerminalError + if rm.terminalAWSError(err) || err == ackerr.SecretTypeNotSupported || err == ackerr.SecretNotFound || errors.As(err, &termError) { + if terminalCondition == nil { + terminalCondition = &ackv1alpha1.Condition{ + Type: ackv1alpha1.ConditionTypeTerminal, + } + ko.Status.Conditions = append(ko.Status.Conditions, terminalCondition) + } + var errorMessage = "" + if err == ackerr.SecretTypeNotSupported || err == ackerr.SecretNotFound || errors.As(err, &termError) { + errorMessage = err.Error() + } else { + awsErr, _ := ackerr.AWSError(err) + errorMessage = awsErr.Error() + } + terminalCondition.Status = corev1.ConditionTrue + terminalCondition.Message = &errorMessage + } else { + // Clear the terminal condition if no longer present + if terminalCondition != nil { + terminalCondition.Status = corev1.ConditionFalse + terminalCondition.Message = nil + } + // Handling Recoverable Conditions + if err != nil { + if recoverableCondition == nil { + // Add a new Condition containing a non-terminal error + recoverableCondition = &ackv1alpha1.Condition{ + Type: ackv1alpha1.ConditionTypeRecoverable, + } + ko.Status.Conditions = append(ko.Status.Conditions, recoverableCondition) + } + recoverableCondition.Status = corev1.ConditionTrue + awsErr, _ := ackerr.AWSError(err) + errorMessage := err.Error() + if awsErr != nil { + errorMessage = awsErr.Error() + } + recoverableCondition.Message = &errorMessage + } else if recoverableCondition != nil { + recoverableCondition.Status = corev1.ConditionFalse + recoverableCondition.Message = nil + } + } + // Required to avoid the "declared but not used" error in the default case + _ = syncCondition + if terminalCondition != nil || recoverableCondition != nil || syncCondition != nil { + return &resource{ko}, true // updated + } + return nil, false // not updated +} + +// terminalAWSError returns awserr, true; if the supplied error is an aws Error type +// and if the exception indicates that it is a Terminal exception +// 'Terminal' exception are specified in generator configuration +func (rm *resourceManager) terminalAWSError(err error) bool { + if err == nil { + return false + } + + var terminalErr smithy.APIError + if !errors.As(err, &terminalErr) { + return false + } + switch terminalErr.ErrorCode() { + case "ServerlessCacheSnapshotAlreadyExistsFault", + "InvalidParameterValueException": + return true + default: + return false + } +} diff --git a/pkg/resource/serverless_cache_snapshot/tags.go b/pkg/resource/serverless_cache_snapshot/tags.go new file mode 100644 index 00000000..0e7c0162 --- /dev/null +++ b/pkg/resource/serverless_cache_snapshot/tags.go @@ -0,0 +1,119 @@ 
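
terminalAWSError above matches on smithy error codes to decide whether reconciliation should stop retrying. A same-package sketch that simulates the decision with smithy-go's generic error type (assumed to be smithy.GenericAPIError); the codes are taken from the switch in terminalAWSError and from sdkFind:

func ExampleTerminalFaults() {
	rm := &resourceManager{}
	for _, code := range []string{"InvalidParameterValueException", "ServerlessCacheSnapshotNotFoundFault"} {
		err := &smithy.GenericAPIError{Code: code, Message: "simulated"}
		fmt.Printf("%s terminal=%v\n", code, rm.terminalAWSError(err))
	}
	// InvalidParameterValueException is terminal; the not-found fault is not,
	// so it is handled in sdkFind as ackerr.NotFound rather than a terminal condition.
}
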
+// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package serverless_cache_snapshot + +import ( + "slices" + "strings" + + acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" + + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" +) + +var ( + _ = svcapitypes.ServerlessCacheSnapshot{} + _ = acktags.NewTags() + ACKSystemTags = []string{"services.k8s.aws/namespace", "services.k8s.aws/controller-version"} +) + +// convertToOrderedACKTags converts the tags parameter into 'acktags.Tags' shape. +// This method helps in creating the hub(acktags.Tags) for merging +// default controller tags with existing resource tags. It also returns a slice +// of keys maintaining the original key Order when the tags are a list +func convertToOrderedACKTags(tags []*svcapitypes.Tag) (acktags.Tags, []string) { + result := acktags.NewTags() + keyOrder := []string{} + + if len(tags) == 0 { + return result, keyOrder + } + for _, t := range tags { + if t.Key != nil { + keyOrder = append(keyOrder, *t.Key) + if t.Value != nil { + result[*t.Key] = *t.Value + } else { + result[*t.Key] = "" + } + } + } + + return result, keyOrder +} + +// fromACKTags converts the tags parameter into []*svcapitypes.Tag shape. +// This method helps in setting the tags back inside AWSResource after merging +// default controller tags with existing resource tags. When a list, +// it maintains the order from original +func fromACKTags(tags acktags.Tags, keyOrder []string) []*svcapitypes.Tag { + result := []*svcapitypes.Tag{} + + for _, k := range keyOrder { + v, ok := tags[k] + if ok { + tag := svcapitypes.Tag{Key: &k, Value: &v} + result = append(result, &tag) + delete(tags, k) + } + } + for k, v := range tags { + tag := svcapitypes.Tag{Key: &k, Value: &v} + result = append(result, &tag) + } + + return result +} + +// ignoreSystemTags ignores tags that have keys that start with "aws:" +// and ACKSystemTags, to avoid patching them to the resourceSpec. +// Eg. resources created with cloudformation have tags that cannot be +// removed by an ACK controller +func ignoreSystemTags(tags acktags.Tags) { + for k := range tags { + if strings.HasPrefix(k, "aws:") || + slices.Contains(ACKSystemTags, k) { + delete(tags, k) + } + } +} + +// syncAWSTags ensures AWS-managed tags (prefixed with "aws:") from the latest resource state +// are preserved in the desired state. This prevents the controller from attempting to +// modify AWS-managed tags, which would result in an error. +// +// AWS-managed tags are automatically added by AWS services (e.g., CloudFormation, Service Catalog) +// and cannot be modified or deleted through normal tag operations. 
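
The helpers above round-trip between the CR's []*svcapitypes.Tag and the runtime's acktags.Tags while preserving key order. A same-package sketch that also shows ignoreSystemTags dropping an "aws:"-prefixed key before the tags are written back to the spec (tag values are placeholders):

func ExampleTagRoundTrip() {
	in := []*svcapitypes.Tag{
		{Key: aws.String("aws:cloudformation:stack-name"), Value: aws.String("my-stack")},
		{Key: aws.String("environment"), Value: aws.String("dev")},
	}
	ackTags, order := convertToOrderedACKTags(in)
	ignoreSystemTags(ackTags)
	out := fromACKTags(ackTags, order)
	for _, t := range out {
		fmt.Println(*t.Key, *t.Value) // only "environment dev" survives
	}
}
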
Common examples include: +// - aws:cloudformation:stack-name +// - aws:servicecatalog:productArn +// +// Parameters: +// - a: The target Tags map to be updated (typically desired state) +// - b: The source Tags map containing AWS-managed tags (typically latest state) +// +// Example: +// +// latest := Tags{"aws:cloudformation:stack-name": "my-stack", "environment": "prod"} +// desired := Tags{"environment": "dev"} +// SyncAWSTags(desired, latest) +// desired now contains {"aws:cloudformation:stack-name": "my-stack", "environment": "dev"} +func syncAWSTags(a acktags.Tags, b acktags.Tags) { + for k := range b { + if strings.HasPrefix(k, "aws:") { + a[k] = b[k] + } + } +} diff --git a/pkg/resource/snapshot/custom_create_api_test.go b/pkg/resource/snapshot/custom_create_api_test.go deleted file mode 100644 index 3c29c0d2..00000000 --- a/pkg/resource/snapshot/custom_create_api_test.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package snapshot - -import ( - "context" - svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" - "github.com/stretchr/testify/assert" - "testing" -) - -// Helper methods to setup tests -// provideResourceManager returns pointer to resourceManager -func provideResourceManager() *resourceManager { - return &resourceManager{ - rr: nil, - awsAccountID: "", - awsRegion: "", - sess: nil, - sdkapi: nil, - } -} - -// provideResource returns pointer to resource -func provideResource() *resource { - return &resource{ - ko: &svcapitypes.Snapshot{}, - } -} - -func Test_CustomCreateSnapshot_NotCopySnapshot(t *testing.T) { - assert := assert.New(t) - // Setup - rm := provideResourceManager() - - desired := provideResource() - - var ctx context.Context - - res, err := rm.CustomCreateSnapshot(ctx, desired) - assert.Nil(res) - assert.Nil(err) -} - -func Test_CustomCreateSnapshot_InvalidParam(t *testing.T) { - assert := assert.New(t) - // Setup - rm := provideResourceManager() - desired := provideResource() - sourceSnapshotName := "test-rg-backup" - rgId := "rgId" - desired.ko.Spec = svcapitypes.SnapshotSpec{SourceSnapshotName: &sourceSnapshotName, - ReplicationGroupID: &rgId} - var ctx context.Context - - res, err := rm.CustomCreateSnapshot(ctx, desired) - assert.Nil(res) - assert.NotNil(err) - assert.Equal(err.Error(), "InvalidParameterCombination: Cannot specify CacheClusteId or ReplicationGroupId while SourceSnapshotName is specified") -} diff --git a/pkg/resource/snapshot/custom_set_conditions.go b/pkg/resource/snapshot/custom_set_conditions.go deleted file mode 100644 index 4fdb00dd..00000000 --- a/pkg/resource/snapshot/custom_set_conditions.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. 
A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package snapshot - -import ( - svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" - ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" - corev1 "k8s.io/api/core/v1" -) - -// CustomUpdateConditions sets conditions (terminal) on supplied snapshot -// it examines supplied resource to determine conditions. -// It returns true if conditions are updated -func (rm *resourceManager) CustomUpdateConditions( - ko *svcapitypes.Snapshot, - r *resource, - err error, -) bool { - snapshotStatus := r.ko.Status.SnapshotStatus - if snapshotStatus == nil || *snapshotStatus != "failed" { - return false - } - // Terminal condition - var terminalCondition *ackv1alpha1.Condition = nil - if ko.Status.Conditions == nil { - ko.Status.Conditions = []*ackv1alpha1.Condition{} - } else { - for _, condition := range ko.Status.Conditions { - if condition.Type == ackv1alpha1.ConditionTypeTerminal { - terminalCondition = condition - break - } - } - if terminalCondition != nil && terminalCondition.Status == corev1.ConditionTrue { - // some other exception already put the resource in terminal condition - return false - } - } - if terminalCondition == nil { - terminalCondition = &ackv1alpha1.Condition{ - Type: ackv1alpha1.ConditionTypeTerminal, - } - ko.Status.Conditions = append(ko.Status.Conditions, terminalCondition) - } - terminalCondition.Status = corev1.ConditionTrue - errorMessage := "Snapshot status: failed" - terminalCondition.Message = &errorMessage - return true -} diff --git a/pkg/resource/snapshot/custom_set_output.go b/pkg/resource/snapshot/custom_set_output.go deleted file mode 100644 index 1e0981d8..00000000 --- a/pkg/resource/snapshot/custom_set_output.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. 
- -package snapshot - -import ( - "context" - svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" - ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" - "github.com/aws/aws-sdk-go/service/elasticache" - corev1 "k8s.io/api/core/v1" -) - -func (rm *resourceManager) CustomDescribeSnapshotSetOutput( - ctx context.Context, - r *resource, - resp *elasticache.DescribeSnapshotsOutput, - ko *svcapitypes.Snapshot, -) (*svcapitypes.Snapshot, error) { - if len(resp.Snapshots) == 0 { - return ko, nil - } - elem := resp.Snapshots[0] - rm.customSetOutput(r, elem, ko) - return ko, nil -} - -func (rm *resourceManager) CustomCreateSnapshotSetOutput( - ctx context.Context, - r *resource, - resp *elasticache.CreateSnapshotOutput, - ko *svcapitypes.Snapshot, -) (*svcapitypes.Snapshot, error) { - rm.customSetOutput(r, resp.Snapshot, ko) - return ko, nil -} - -func (rm *resourceManager) CustomCopySnapshotSetOutput( - r *resource, - resp *elasticache.CopySnapshotOutput, - ko *svcapitypes.Snapshot, -) *svcapitypes.Snapshot { - rm.customSetOutput(r, resp.Snapshot, ko) - return ko -} - -func (rm *resourceManager) customSetOutput( - r *resource, - respSnapshot *elasticache.Snapshot, - ko *svcapitypes.Snapshot, -) { - if respSnapshot.ReplicationGroupId != nil { - ko.Spec.ReplicationGroupID = respSnapshot.ReplicationGroupId - } - - if respSnapshot.KmsKeyId != nil { - ko.Spec.KMSKeyID = respSnapshot.KmsKeyId - } - - if respSnapshot.CacheClusterId != nil { - ko.Spec.CacheClusterID = respSnapshot.CacheClusterId - } - - if ko.Status.Conditions == nil { - ko.Status.Conditions = []*ackv1alpha1.Condition{} - } - snapshotStatus := respSnapshot.SnapshotStatus - syncConditionStatus := corev1.ConditionUnknown - if snapshotStatus != nil { - if *snapshotStatus == "available" || - *snapshotStatus == "failed" { - syncConditionStatus = corev1.ConditionTrue - } else { - // resource in "creating", "restoring","exporting" - syncConditionStatus = corev1.ConditionFalse - } - } - var resourceSyncedCondition *ackv1alpha1.Condition = nil - for _, condition := range ko.Status.Conditions { - if condition.Type == ackv1alpha1.ConditionTypeResourceSynced { - resourceSyncedCondition = condition - break - } - } - if resourceSyncedCondition == nil { - resourceSyncedCondition = &ackv1alpha1.Condition{ - Type: ackv1alpha1.ConditionTypeResourceSynced, - Status: syncConditionStatus, - } - ko.Status.Conditions = append(ko.Status.Conditions, resourceSyncedCondition) - } else { - resourceSyncedCondition.Status = syncConditionStatus - } -} diff --git a/pkg/resource/snapshot/custom_update_api.go b/pkg/resource/snapshot/custom_update_api.go deleted file mode 100644 index 19ed766f..00000000 --- a/pkg/resource/snapshot/custom_update_api.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. 
- -package snapshot - -import ( - "context" - ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" -) - -// Snapshot API has no update -func (rm *resourceManager) customUpdateSnapshot( - ctx context.Context, - desired *resource, - latest *resource, - delta *ackcompare.Delta, -) (*resource, error) { - return latest, nil -} diff --git a/pkg/resource/snapshot/delta.go b/pkg/resource/snapshot/delta.go index f6e21589..4d50a10f 100644 --- a/pkg/resource/snapshot/delta.go +++ b/pkg/resource/snapshot/delta.go @@ -78,7 +78,9 @@ func newResourceDelta( delta.Add("Spec.SourceSnapshotName", a.ko.Spec.SourceSnapshotName, b.ko.Spec.SourceSnapshotName) } } - if !ackcompare.MapStringStringEqual(ToACKTags(a.ko.Spec.Tags), ToACKTags(b.ko.Spec.Tags)) { + desiredACKTags, _ := convertToOrderedACKTags(a.ko.Spec.Tags) + latestACKTags, _ := convertToOrderedACKTags(b.ko.Spec.Tags) + if !ackcompare.MapStringStringEqual(desiredACKTags, latestACKTags) { delta.Add("Spec.Tags", a.ko.Spec.Tags, b.ko.Spec.Tags) } diff --git a/pkg/resource/snapshot/descriptor.go b/pkg/resource/snapshot/descriptor.go index 58f3bca1..a40d9ed1 100644 --- a/pkg/resource/snapshot/descriptor.go +++ b/pkg/resource/snapshot/descriptor.go @@ -20,6 +20,7 @@ import ( ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" rtclient "sigs.k8s.io/controller-runtime/pkg/client" k8sctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -27,7 +28,7 @@ import ( ) const ( - finalizerString = "finalizers.elasticache.services.k8s.aws/Snapshot" + FinalizerString = "finalizers.elasticache.services.k8s.aws/Snapshot" ) var ( @@ -43,10 +44,10 @@ var ( type resourceDescriptor struct { } -// GroupKind returns a Kubernetes metav1.GroupKind struct that describes the -// API Group and Kind of CRs described by the descriptor -func (d *resourceDescriptor) GroupKind() *metav1.GroupKind { - return &GroupKind +// GroupVersionKind returns a Kubernetes schema.GroupVersionKind struct that +// describes the API Group, Version and Kind of CRs described by the descriptor +func (d *resourceDescriptor) GroupVersionKind() schema.GroupVersionKind { + return svcapitypes.GroupVersion.WithKind(GroupKind.Kind) } // EmptyRuntimeObject returns an empty object prototype that may be used in @@ -87,8 +88,8 @@ func (d *resourceDescriptor) IsManaged( // https://github.com/kubernetes-sigs/controller-runtime/issues/994 is // fixed. This should be able to be: // - // return k8sctrlutil.ContainsFinalizer(obj, finalizerString) - return containsFinalizer(obj, finalizerString) + // return k8sctrlutil.ContainsFinalizer(obj, FinalizerString) + return containsFinalizer(obj, FinalizerString) } // Remove once https://github.com/kubernetes-sigs/controller-runtime/issues/994 @@ -117,7 +118,7 @@ func (d *resourceDescriptor) MarkManaged( // Should not happen. If it does, there is a bug in the code panic("nil RuntimeMetaObject in AWSResource") } - k8sctrlutil.AddFinalizer(obj, finalizerString) + k8sctrlutil.AddFinalizer(obj, FinalizerString) } // MarkUnmanaged removes the supplied resource from management by ACK. What @@ -132,7 +133,7 @@ func (d *resourceDescriptor) MarkUnmanaged( // Should not happen. 
If it does, there is a bug in the code panic("nil RuntimeMetaObject in AWSResource") } - k8sctrlutil.RemoveFinalizer(obj, finalizerString) + k8sctrlutil.RemoveFinalizer(obj, FinalizerString) } // MarkAdopted places descriptors on the custom resource that indicate the diff --git a/pkg/resource/snapshot/custom_create_api.go b/pkg/resource/snapshot/hooks.go similarity index 52% rename from pkg/resource/snapshot/custom_create_api.go rename to pkg/resource/snapshot/hooks.go index bccdc3e8..0278d450 100644 --- a/pkg/resource/snapshot/custom_create_api.go +++ b/pkg/resource/snapshot/hooks.go @@ -18,8 +18,13 @@ import ( svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticache" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + svcsdktypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" "github.com/aws/aws-sdk-go/aws/awserr" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -38,7 +43,7 @@ func (rm *resourceManager) CustomCreateSnapshot( return nil, err } - resp, respErr := rm.sdkapi.CopySnapshot(input) + resp, respErr := rm.sdkapi.CopySnapshot(ctx, input) rm.metrics.RecordAPICall("CREATE", "CopySnapshot", respErr) if respErr != nil { @@ -58,11 +63,12 @@ func (rm *resourceManager) CustomCreateSnapshot( if resp.Snapshot.AutoMinorVersionUpgrade != nil { ko.Status.AutoMinorVersionUpgrade = resp.Snapshot.AutoMinorVersionUpgrade } - if resp.Snapshot.AutomaticFailover != nil { - ko.Status.AutomaticFailover = resp.Snapshot.AutomaticFailover + if resp.Snapshot.AutomaticFailover != "" { + ko.Status.AutomaticFailover = aws.String(string(resp.Snapshot.AutomaticFailover)) } if resp.Snapshot.CacheClusterCreateTime != nil { - ko.Status.CacheClusterCreateTime = &metav1.Time{*resp.Snapshot.CacheClusterCreateTime} + cacheClusterCreateTime := metav1.Time{Time: *resp.Snapshot.CacheClusterCreateTime} + ko.Status.CacheClusterCreateTime = &cacheClusterCreateTime } if resp.Snapshot.CacheNodeType != nil { ko.Status.CacheNodeType = resp.Snapshot.CacheNodeType @@ -87,7 +93,7 @@ func (rm *resourceManager) CustomCreateSnapshot( f11elem.CacheClusterID = f11iter.CacheClusterId } if f11iter.CacheNodeCreateTime != nil { - f11elem.CacheNodeCreateTime = &metav1.Time{*f11iter.CacheNodeCreateTime} + f11elem.CacheNodeCreateTime = &metav1.Time{Time: *f11iter.CacheNodeCreateTime} } if f11iter.CacheNodeId != nil { f11elem.CacheNodeID = f11iter.CacheNodeId @@ -106,14 +112,14 @@ func (rm *resourceManager) CustomCreateSnapshot( if f11iter.NodeGroupConfiguration.ReplicaAvailabilityZones != nil { f11elemf4f2 := []*string{} for _, f11elemf4f2iter := range f11iter.NodeGroupConfiguration.ReplicaAvailabilityZones { - var f11elemf4f2elem string - f11elemf4f2elem = *f11elemf4f2iter - f11elemf4f2 = append(f11elemf4f2, &f11elemf4f2elem) + f11elemf4f2iter := f11elemf4f2iter // Create new variable to avoid referencing loop variable + f11elemf4f2 = append(f11elemf4f2, &f11elemf4f2iter) } f11elemf4.ReplicaAvailabilityZones = f11elemf4f2 } if f11iter.NodeGroupConfiguration.ReplicaCount != nil { - f11elemf4.ReplicaCount = f11iter.NodeGroupConfiguration.ReplicaCount + replicaCount := int64(*f11iter.NodeGroupConfiguration.ReplicaCount) + f11elemf4.ReplicaCount = &replicaCount } if 
f11iter.NodeGroupConfiguration.Slots != nil { f11elemf4.Slots = f11iter.NodeGroupConfiguration.Slots @@ -124,20 +130,23 @@ func (rm *resourceManager) CustomCreateSnapshot( f11elem.NodeGroupID = f11iter.NodeGroupId } if f11iter.SnapshotCreateTime != nil { - f11elem.SnapshotCreateTime = &metav1.Time{*f11iter.SnapshotCreateTime} + f11elem.SnapshotCreateTime = &metav1.Time{Time: *f11iter.SnapshotCreateTime} } f11 = append(f11, f11elem) } ko.Status.NodeSnapshots = f11 } if resp.Snapshot.NumCacheNodes != nil { - ko.Status.NumCacheNodes = resp.Snapshot.NumCacheNodes + numNodes := int64(*resp.Snapshot.NumCacheNodes) + ko.Status.NumCacheNodes = &numNodes } if resp.Snapshot.NumNodeGroups != nil { - ko.Status.NumNodeGroups = resp.Snapshot.NumNodeGroups + numNodeGroups := int64(*resp.Snapshot.NumNodeGroups) + ko.Status.NumNodeGroups = &numNodeGroups } if resp.Snapshot.Port != nil { - ko.Status.Port = resp.Snapshot.Port + port := int64(*resp.Snapshot.Port) + ko.Status.Port = &port } if resp.Snapshot.PreferredAvailabilityZone != nil { ko.Status.PreferredAvailabilityZone = resp.Snapshot.PreferredAvailabilityZone @@ -150,7 +159,8 @@ func (rm *resourceManager) CustomCreateSnapshot( } if resp.Snapshot.SnapshotRetentionLimit != nil { - ko.Status.SnapshotRetentionLimit = resp.Snapshot.SnapshotRetentionLimit + retentionLimit := int64(*resp.Snapshot.SnapshotRetentionLimit) + ko.Status.SnapshotRetentionLimit = &retentionLimit } if resp.Snapshot.SnapshotSource != nil { ko.Status.SnapshotSource = resp.Snapshot.SnapshotSource @@ -185,15 +195,146 @@ func (rm *resourceManager) newCopySnapshotPayload( res := &svcsdk.CopySnapshotInput{} if r.ko.Spec.SourceSnapshotName != nil { - res.SetSourceSnapshotName(*r.ko.Spec.SourceSnapshotName) + res.SourceSnapshotName = r.ko.Spec.SourceSnapshotName } if r.ko.Spec.KMSKeyID != nil { - res.SetKmsKeyId(*r.ko.Spec.KMSKeyID) + res.KmsKeyId = r.ko.Spec.KMSKeyID } - if r.ko.Spec.SnapshotName != nil { - res.SetTargetSnapshotName(*r.ko.Spec.SnapshotName) + res.TargetSnapshotName = r.ko.Spec.SnapshotName } return res, nil } + +// CustomUpdateConditions sets conditions (terminal) on supplied snapshot +// it examines supplied resource to determine conditions. 
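// --- Illustrative aside (not part of the generated diff) ---------------------
// The hunk above shows the aws-sdk-go-v2 calling convention used throughout
// this change: request inputs are built by assigning pointer fields directly
// (the v1 Set* helpers are gone) and every API call takes a context as its
// first argument. A minimal, self-contained sketch of the same CopySnapshot
// call, assuming default credential resolution and hypothetical snapshot names:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/elasticache"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := elasticache.NewFromConfig(cfg)

	// Direct field assignment replaces input.SetSourceSnapshotName(...) etc.
	input := &elasticache.CopySnapshotInput{
		SourceSnapshotName: aws.String("source-snapshot"), // hypothetical
		TargetSnapshotName: aws.String("copied-snapshot"), // hypothetical
	}
	if _, err := client.CopySnapshot(context.TODO(), input); err != nil {
		log.Fatal(err)
	}
}
// -----------------------------------------------------------------------------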
+// It returns true if conditions are updated +func (rm *resourceManager) CustomUpdateConditions( + ko *svcapitypes.Snapshot, + r *resource, + err error, +) bool { + snapshotStatus := r.ko.Status.SnapshotStatus + if snapshotStatus == nil || *snapshotStatus != "failed" { + return false + } + // Terminal condition + var terminalCondition *ackv1alpha1.Condition = nil + if ko.Status.Conditions == nil { + ko.Status.Conditions = []*ackv1alpha1.Condition{} + } else { + for _, condition := range ko.Status.Conditions { + if condition.Type == ackv1alpha1.ConditionTypeTerminal { + terminalCondition = condition + break + } + } + if terminalCondition != nil && terminalCondition.Status == corev1.ConditionTrue { + // some other exception already put the resource in terminal condition + return false + } + } + if terminalCondition == nil { + terminalCondition = &ackv1alpha1.Condition{ + Type: ackv1alpha1.ConditionTypeTerminal, + } + ko.Status.Conditions = append(ko.Status.Conditions, terminalCondition) + } + terminalCondition.Status = corev1.ConditionTrue + errorMessage := "Snapshot status: failed" + terminalCondition.Message = &errorMessage + return true +} + +func (rm *resourceManager) CustomDescribeSnapshotSetOutput( + ctx context.Context, + r *resource, + resp *elasticache.DescribeSnapshotsOutput, + ko *svcapitypes.Snapshot, +) (*svcapitypes.Snapshot, error) { + if len(resp.Snapshots) == 0 { + return ko, nil + } + elem := resp.Snapshots[0] + rm.customSetOutput(r, &elem, ko) + return ko, nil +} + +func (rm *resourceManager) CustomCreateSnapshotSetOutput( + ctx context.Context, + r *resource, + resp *elasticache.CreateSnapshotOutput, + ko *svcapitypes.Snapshot, +) (*svcapitypes.Snapshot, error) { + rm.customSetOutput(r, resp.Snapshot, ko) + return ko, nil +} + +func (rm *resourceManager) CustomCopySnapshotSetOutput( + r *resource, + resp *elasticache.CopySnapshotOutput, + ko *svcapitypes.Snapshot, +) *svcapitypes.Snapshot { + rm.customSetOutput(r, resp.Snapshot, ko) + return ko +} + +func (rm *resourceManager) customSetOutput( + r *resource, + respSnapshot *svcsdktypes.Snapshot, + ko *svcapitypes.Snapshot, +) { + if respSnapshot.ReplicationGroupId != nil { + ko.Spec.ReplicationGroupID = respSnapshot.ReplicationGroupId + } + + if respSnapshot.KmsKeyId != nil { + ko.Spec.KMSKeyID = respSnapshot.KmsKeyId + } + + if respSnapshot.CacheClusterId != nil { + ko.Spec.CacheClusterID = respSnapshot.CacheClusterId + } + + if ko.Status.Conditions == nil { + ko.Status.Conditions = []*ackv1alpha1.Condition{} + } + snapshotStatus := respSnapshot.SnapshotStatus + syncConditionStatus := corev1.ConditionUnknown + if snapshotStatus != nil { + if *snapshotStatus == "available" || + *snapshotStatus == "failed" { + syncConditionStatus = corev1.ConditionTrue + } else { + // resource in "creating", "restoring","exporting" + syncConditionStatus = corev1.ConditionFalse + } + } + var resourceSyncedCondition *ackv1alpha1.Condition = nil + for _, condition := range ko.Status.Conditions { + if condition.Type == ackv1alpha1.ConditionTypeResourceSynced { + resourceSyncedCondition = condition + break + } + } + if resourceSyncedCondition == nil { + resourceSyncedCondition = &ackv1alpha1.Condition{ + Type: ackv1alpha1.ConditionTypeResourceSynced, + Status: syncConditionStatus, + } + ko.Status.Conditions = append(ko.Status.Conditions, resourceSyncedCondition) + } else { + resourceSyncedCondition.Status = syncConditionStatus + } +} + +// Snapshot API has no update +func (rm *resourceManager) customUpdateSnapshot( + ctx context.Context, + 
desired *resource, + latest *resource, + delta *ackcompare.Delta, +) (*resource, error) { + return latest, nil +} diff --git a/pkg/resource/snapshot/manager.go b/pkg/resource/snapshot/manager.go index 644d0d46..c5b9c1dc 100644 --- a/pkg/resource/snapshot/manager.go +++ b/pkg/resource/snapshot/manager.go @@ -32,9 +32,8 @@ import ( acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" ackutil "github.com/aws-controllers-k8s/runtime/pkg/util" - "github.com/aws/aws-sdk-go/aws/session" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" - svcsdkapi "github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" @@ -59,6 +58,9 @@ type resourceManager struct { // cfg is a copy of the ackcfg.Config object passed on start of the service // controller cfg ackcfg.Config + // clientcfg is a copy of the client configuration passed on start of the + // service controller + clientcfg aws.Config // log refers to the logr.Logger object handling logging for the service // controller log logr.Logger @@ -73,12 +75,9 @@ type resourceManager struct { awsAccountID ackv1alpha1.AWSAccountID // The AWS Region that this resource manager targets awsRegion ackv1alpha1.AWSRegion - // sess is the AWS SDK Session object used to communicate with the backend - // AWS service API - sess *session.Session - // sdk is a pointer to the AWS service API interface exposed by the - // aws-sdk-go/services/{alias}/{alias}iface package. - sdkapi svcsdkapi.ElastiCacheAPI + // sdk is a pointer to the AWS service API client exposed by the + // aws-sdk-go-v2/services/{alias} package. + sdkapi *svcsdk.Client } // concreteResource returns a pointer to a resource from the supplied @@ -103,6 +102,7 @@ func (rm *resourceManager) ReadOne( panic("resource manager's ReadOne() method received resource with nil CR object") } observed, err := rm.sdkFind(ctx, r) + mirrorAWSTags(r, observed) if err != nil { if observed != nil { return rm.onError(observed, err) @@ -291,32 +291,76 @@ func (rm *resourceManager) EnsureTags( defaultTags := ackrt.GetDefaultTags(&rm.cfg, r.ko, md) var existingTags []*svcapitypes.Tag existingTags = r.ko.Spec.Tags - resourceTags := ToACKTags(existingTags) + resourceTags, keyOrder := convertToOrderedACKTags(existingTags) tags := acktags.Merge(resourceTags, defaultTags) - r.ko.Spec.Tags = FromACKTags(tags) + r.ko.Spec.Tags = fromACKTags(tags, keyOrder) return nil } +// FilterAWSTags ignores tags that have keys that start with "aws:" +// is needed to ensure the controller does not attempt to remove +// tags set by AWS. This function needs to be called after each Read +// operation. +// Eg. resources created with cloudformation have tags that cannot be +// removed by an ACK controller +func (rm *resourceManager) FilterSystemTags(res acktypes.AWSResource) { + r := rm.concreteResource(res) + if r == nil || r.ko == nil { + return + } + var existingTags []*svcapitypes.Tag + existingTags = r.ko.Spec.Tags + resourceTags, tagKeyOrder := convertToOrderedACKTags(existingTags) + ignoreSystemTags(resourceTags) + r.ko.Spec.Tags = fromACKTags(resourceTags, tagKeyOrder) +} + +// mirrorAWSTags ensures that AWS tags are included in the desired resource +// if they are present in the latest resource. This will ensure that the +// aws tags are not present in a diff. 
The logic of the controller will +// ensure these tags aren't patched to the resource in the cluster, and +// will only be present to make sure we don't try to remove these tags. +// +// Although there are a lot of similarities between this function and +// EnsureTags, they are very much different. +// While EnsureTags tries to make sure the resource contains the controller +// tags, mirrorAWSTags tries to make sure tags injected by AWS are mirrored +// from the latest resource to the desired resource. +func mirrorAWSTags(a *resource, b *resource) { + if a == nil || a.ko == nil || b == nil || b.ko == nil { + return + } + var existingLatestTags []*svcapitypes.Tag + var existingDesiredTags []*svcapitypes.Tag + existingDesiredTags = a.ko.Spec.Tags + existingLatestTags = b.ko.Spec.Tags + desiredTags, desiredTagKeyOrder := convertToOrderedACKTags(existingDesiredTags) + latestTags, _ := convertToOrderedACKTags(existingLatestTags) + syncAWSTags(desiredTags, latestTags) + a.ko.Spec.Tags = fromACKTags(desiredTags, desiredTagKeyOrder) +} + // newResourceManager returns a new struct implementing // acktypes.AWSResourceManager +// This is for AWS-SDK-GO-V2 - Created newResourceManager With AWS sdk-Go-ClientV2 func newResourceManager( cfg ackcfg.Config, + clientcfg aws.Config, log logr.Logger, metrics *ackmetrics.Metrics, rr acktypes.Reconciler, - sess *session.Session, id ackv1alpha1.AWSAccountID, region ackv1alpha1.AWSRegion, ) (*resourceManager, error) { return &resourceManager{ cfg: cfg, + clientcfg: clientcfg, log: log, metrics: metrics, rr: rr, awsAccountID: id, awsRegion: region, - sess: sess, - sdkapi: svcsdk.New(sess), + sdkapi: svcsdk.NewFromConfig(clientcfg), }, nil } diff --git a/pkg/resource/snapshot/manager_factory.go b/pkg/resource/snapshot/manager_factory.go index 6140bba4..e9011e66 100644 --- a/pkg/resource/snapshot/manager_factory.go +++ b/pkg/resource/snapshot/manager_factory.go @@ -23,7 +23,7 @@ import ( ackcfg "github.com/aws-controllers-k8s/runtime/pkg/config" ackmetrics "github.com/aws-controllers-k8s/runtime/pkg/metrics" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" - "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/go-logr/logr" svcresource "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource" @@ -47,14 +47,18 @@ func (f *resourceManagerFactory) ResourceDescriptor() acktypes.AWSResourceDescri // supplied AWS account func (f *resourceManagerFactory) ManagerFor( cfg ackcfg.Config, + clientcfg aws.Config, log logr.Logger, metrics *ackmetrics.Metrics, rr acktypes.Reconciler, - sess *session.Session, id ackv1alpha1.AWSAccountID, region ackv1alpha1.AWSRegion, + roleARN ackv1alpha1.AWSResourceName, ) (acktypes.AWSResourceManager, error) { - rmId := fmt.Sprintf("%s/%s", id, region) + // We use the account ID, region, and role ARN to uniquely identify a + // resource manager. This helps us to avoid creating multiple resource + // managers for the same account/region/roleARN combination.
+ rmId := fmt.Sprintf("%s/%s/%s", id, region, roleARN) f.RLock() rm, found := f.rmCache[rmId] f.RUnlock() @@ -66,7 +70,7 @@ func (f *resourceManagerFactory) ManagerFor( f.Lock() defer f.Unlock() - rm, err := newResourceManager(cfg, log, metrics, rr, sess, id, region) + rm, err := newResourceManager(cfg, clientcfg, log, metrics, rr, id, region) if err != nil { return nil, err } diff --git a/pkg/resource/snapshot/references.go b/pkg/resource/snapshot/references.go index 89d2adb9..78f25a3e 100644 --- a/pkg/resource/snapshot/references.go +++ b/pkg/resource/snapshot/references.go @@ -17,6 +17,7 @@ package snapshot import ( "context" + "sigs.k8s.io/controller-runtime/pkg/client" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" @@ -24,19 +25,29 @@ import ( svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" ) +// ClearResolvedReferences removes any reference values that were made +// concrete in the spec. It returns a copy of the input AWSResource which +// contains the original *Ref values, but none of their respective concrete +// values. +func (rm *resourceManager) ClearResolvedReferences(res acktypes.AWSResource) acktypes.AWSResource { + ko := rm.concreteResource(res).ko.DeepCopy() + + return &resource{ko} +} + // ResolveReferences finds if there are any Reference field(s) present -// inside AWSResource passed in the parameter and attempts to resolve -// those reference field(s) into target field(s). -// It returns an AWSResource with resolved reference(s), and an error if the -// passed AWSResource's reference field(s) cannot be resolved. -// This method also adds/updates the ConditionTypeReferencesResolved for the -// AWSResource. +// inside AWSResource passed in the parameter and attempts to resolve those +// reference field(s) into their respective target field(s). It returns a +// copy of the input AWSResource with resolved reference(s), a boolean which +// is set to true if the resource contains any references (regardless of if +// they are resolved successfully) and an error if the passed AWSResource's +// reference field(s) could not be resolved. 
func (rm *resourceManager) ResolveReferences( ctx context.Context, apiReader client.Reader, res acktypes.AWSResource, -) (acktypes.AWSResource, error) { - return res, nil +) (acktypes.AWSResource, bool, error) { + return res, false, nil } // validateReferenceFields validates the reference field and corresponding @@ -44,9 +55,3 @@ func (rm *resourceManager) ResolveReferences( func validateReferenceFields(ko *svcapitypes.Snapshot) error { return nil } - -// hasNonNilReferences returns true if resource contains a reference to another -// resource -func hasNonNilReferences(ko *svcapitypes.Snapshot) bool { - return false -} diff --git a/pkg/resource/snapshot/resource.go b/pkg/resource/snapshot/resource.go index b3054026..e585ddd6 100644 --- a/pkg/resource/snapshot/resource.go +++ b/pkg/resource/snapshot/resource.go @@ -16,6 +16,8 @@ package snapshot import ( + "fmt" + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" ackerrors "github.com/aws-controllers-k8s/runtime/pkg/errors" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" @@ -45,7 +47,7 @@ func (r *resource) Identifiers() acktypes.AWSResourceIdentifiers { } // IsBeingDeleted returns true if the Kubernetes resource has a non-zero -// deletion timestemp +// deletion timestamp func (r *resource) IsBeingDeleted() bool { return !r.ko.DeletionTimestamp.IsZero() } @@ -93,6 +95,17 @@ func (r *resource) SetIdentifiers(identifier *ackv1alpha1.AWSIdentifiers) error return nil } +// PopulateResourceFromAnnotation populates the fields passed from adoption annotation +func (r *resource) PopulateResourceFromAnnotation(fields map[string]string) error { + f3, ok := fields["snapshotName"] + if !ok { + return ackerrors.NewTerminalError(fmt.Errorf("required field missing: snapshotName")) + } + r.ko.Spec.SnapshotName = &f3 + + return nil +} + // DeepCopy will return a copy of the resource func (r *resource) DeepCopy() acktypes.AWSResource { koCopy := r.ko.DeepCopy() diff --git a/pkg/resource/snapshot/sdk.go b/pkg/resource/snapshot/sdk.go index d0353ff1..7f9bd481 100644 --- a/pkg/resource/snapshot/sdk.go +++ b/pkg/resource/snapshot/sdk.go @@ -28,8 +28,10 @@ import ( ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" - "github.com/aws/aws-sdk-go/aws" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + svcsdktypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" + smithy "github.com/aws/smithy-go" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -40,8 +42,7 @@ import ( var ( _ = &metav1.Time{} _ = strings.ToLower("") - _ = &aws.JSONValue{} - _ = &svcsdk.ElastiCache{} + _ = &svcsdk.Client{} _ = &svcapitypes.Snapshot{} _ = ackv1alpha1.AWSAccountID("") _ = &ackerr.NotFound @@ -49,6 +50,7 @@ var ( _ = &reflect.Value{} _ = fmt.Sprintf("") _ = &ackrequeue.NoRequeue{} + _ = &aws.Config{} ) // sdkFind returns SDK-specific information about a supplied resource @@ -73,10 +75,11 @@ func (rm *resourceManager) sdkFind( return nil, err } var resp *svcsdk.DescribeSnapshotsOutput - resp, err = rm.sdkapi.DescribeSnapshotsWithContext(ctx, input) + resp, err = rm.sdkapi.DescribeSnapshots(ctx, input) rm.metrics.RecordAPICall("READ_MANY", "DescribeSnapshots", err) if err != nil { - if awsErr, ok := ackerr.AWSError(err); ok && awsErr.Code() == "CacheClusterNotFound" { + var awsErr 
smithy.APIError + if errors.As(err, &awsErr) && awsErr.ErrorCode() == "CacheClusterNotFound" { return nil, ackerr.NotFound } return nil, err @@ -100,8 +103,8 @@ func (rm *resourceManager) sdkFind( } else { ko.Status.AutoMinorVersionUpgrade = nil } - if elem.AutomaticFailover != nil { - ko.Status.AutomaticFailover = elem.AutomaticFailover + if elem.AutomaticFailover != "" { + ko.Status.AutomaticFailover = aws.String(string(elem.AutomaticFailover)) } else { ko.Status.AutomaticFailover = nil } @@ -130,8 +133,8 @@ func (rm *resourceManager) sdkFind( } else { ko.Status.CacheSubnetGroupName = nil } - if elem.DataTiering != nil { - ko.Status.DataTiering = elem.DataTiering + if elem.DataTiering != "" { + ko.Status.DataTiering = aws.String(string(elem.DataTiering)) } else { ko.Status.DataTiering = nil } @@ -178,25 +181,14 @@ func (rm *resourceManager) sdkFind( f12elemf4.PrimaryOutpostARN = f12iter.NodeGroupConfiguration.PrimaryOutpostArn } if f12iter.NodeGroupConfiguration.ReplicaAvailabilityZones != nil { - f12elemf4f3 := []*string{} - for _, f12elemf4f3iter := range f12iter.NodeGroupConfiguration.ReplicaAvailabilityZones { - var f12elemf4f3elem string - f12elemf4f3elem = *f12elemf4f3iter - f12elemf4f3 = append(f12elemf4f3, &f12elemf4f3elem) - } - f12elemf4.ReplicaAvailabilityZones = f12elemf4f3 + f12elemf4.ReplicaAvailabilityZones = aws.StringSlice(f12iter.NodeGroupConfiguration.ReplicaAvailabilityZones) } if f12iter.NodeGroupConfiguration.ReplicaCount != nil { - f12elemf4.ReplicaCount = f12iter.NodeGroupConfiguration.ReplicaCount + replicaCountCopy := int64(*f12iter.NodeGroupConfiguration.ReplicaCount) + f12elemf4.ReplicaCount = &replicaCountCopy } if f12iter.NodeGroupConfiguration.ReplicaOutpostArns != nil { - f12elemf4f5 := []*string{} - for _, f12elemf4f5iter := range f12iter.NodeGroupConfiguration.ReplicaOutpostArns { - var f12elemf4f5elem string - f12elemf4f5elem = *f12elemf4f5iter - f12elemf4f5 = append(f12elemf4f5, &f12elemf4f5elem) - } - f12elemf4.ReplicaOutpostARNs = f12elemf4f5 + f12elemf4.ReplicaOutpostARNs = aws.StringSlice(f12iter.NodeGroupConfiguration.ReplicaOutpostArns) } if f12iter.NodeGroupConfiguration.Slots != nil { f12elemf4.Slots = f12iter.NodeGroupConfiguration.Slots @@ -216,17 +208,20 @@ func (rm *resourceManager) sdkFind( ko.Status.NodeSnapshots = nil } if elem.NumCacheNodes != nil { - ko.Status.NumCacheNodes = elem.NumCacheNodes + numCacheNodesCopy := int64(*elem.NumCacheNodes) + ko.Status.NumCacheNodes = &numCacheNodesCopy } else { ko.Status.NumCacheNodes = nil } if elem.NumNodeGroups != nil { - ko.Status.NumNodeGroups = elem.NumNodeGroups + numNodeGroupsCopy := int64(*elem.NumNodeGroups) + ko.Status.NumNodeGroups = &numNodeGroupsCopy } else { ko.Status.NumNodeGroups = nil } if elem.Port != nil { - ko.Status.Port = elem.Port + portCopy := int64(*elem.Port) + ko.Status.Port = &portCopy } else { ko.Status.Port = nil } @@ -261,7 +256,8 @@ func (rm *resourceManager) sdkFind( ko.Spec.SnapshotName = nil } if elem.SnapshotRetentionLimit != nil { - ko.Status.SnapshotRetentionLimit = elem.SnapshotRetentionLimit + snapshotRetentionLimitCopy := int64(*elem.SnapshotRetentionLimit) + ko.Status.SnapshotRetentionLimit = &snapshotRetentionLimitCopy } else { ko.Status.SnapshotRetentionLimit = nil } @@ -324,7 +320,7 @@ func (rm *resourceManager) newListRequestPayload( res := &svcsdk.DescribeSnapshotsInput{} if r.ko.Spec.SnapshotName != nil { - res.SetSnapshotName(*r.ko.Spec.SnapshotName) + res.SnapshotName = r.ko.Spec.SnapshotName } return res, nil @@ -353,7 +349,7 @@ func (rm 
*resourceManager) sdkCreate( var resp *svcsdk.CreateSnapshotOutput _ = resp - resp, err = rm.sdkapi.CreateSnapshotWithContext(ctx, input) + resp, err = rm.sdkapi.CreateSnapshot(ctx, input) rm.metrics.RecordAPICall("CREATE", "CreateSnapshot", err) if err != nil { return nil, err @@ -374,8 +370,8 @@ func (rm *resourceManager) sdkCreate( } else { ko.Status.AutoMinorVersionUpgrade = nil } - if resp.Snapshot.AutomaticFailover != nil { - ko.Status.AutomaticFailover = resp.Snapshot.AutomaticFailover + if resp.Snapshot.AutomaticFailover != "" { + ko.Status.AutomaticFailover = aws.String(string(resp.Snapshot.AutomaticFailover)) } else { ko.Status.AutomaticFailover = nil } @@ -404,8 +400,8 @@ func (rm *resourceManager) sdkCreate( } else { ko.Status.CacheSubnetGroupName = nil } - if resp.Snapshot.DataTiering != nil { - ko.Status.DataTiering = resp.Snapshot.DataTiering + if resp.Snapshot.DataTiering != "" { + ko.Status.DataTiering = aws.String(string(resp.Snapshot.DataTiering)) } else { ko.Status.DataTiering = nil } @@ -452,25 +448,14 @@ func (rm *resourceManager) sdkCreate( f12elemf4.PrimaryOutpostARN = f12iter.NodeGroupConfiguration.PrimaryOutpostArn } if f12iter.NodeGroupConfiguration.ReplicaAvailabilityZones != nil { - f12elemf4f3 := []*string{} - for _, f12elemf4f3iter := range f12iter.NodeGroupConfiguration.ReplicaAvailabilityZones { - var f12elemf4f3elem string - f12elemf4f3elem = *f12elemf4f3iter - f12elemf4f3 = append(f12elemf4f3, &f12elemf4f3elem) - } - f12elemf4.ReplicaAvailabilityZones = f12elemf4f3 + f12elemf4.ReplicaAvailabilityZones = aws.StringSlice(f12iter.NodeGroupConfiguration.ReplicaAvailabilityZones) } if f12iter.NodeGroupConfiguration.ReplicaCount != nil { - f12elemf4.ReplicaCount = f12iter.NodeGroupConfiguration.ReplicaCount + replicaCountCopy := int64(*f12iter.NodeGroupConfiguration.ReplicaCount) + f12elemf4.ReplicaCount = &replicaCountCopy } if f12iter.NodeGroupConfiguration.ReplicaOutpostArns != nil { - f12elemf4f5 := []*string{} - for _, f12elemf4f5iter := range f12iter.NodeGroupConfiguration.ReplicaOutpostArns { - var f12elemf4f5elem string - f12elemf4f5elem = *f12elemf4f5iter - f12elemf4f5 = append(f12elemf4f5, &f12elemf4f5elem) - } - f12elemf4.ReplicaOutpostARNs = f12elemf4f5 + f12elemf4.ReplicaOutpostARNs = aws.StringSlice(f12iter.NodeGroupConfiguration.ReplicaOutpostArns) } if f12iter.NodeGroupConfiguration.Slots != nil { f12elemf4.Slots = f12iter.NodeGroupConfiguration.Slots @@ -490,17 +475,20 @@ func (rm *resourceManager) sdkCreate( ko.Status.NodeSnapshots = nil } if resp.Snapshot.NumCacheNodes != nil { - ko.Status.NumCacheNodes = resp.Snapshot.NumCacheNodes + numCacheNodesCopy := int64(*resp.Snapshot.NumCacheNodes) + ko.Status.NumCacheNodes = &numCacheNodesCopy } else { ko.Status.NumCacheNodes = nil } if resp.Snapshot.NumNodeGroups != nil { - ko.Status.NumNodeGroups = resp.Snapshot.NumNodeGroups + numNodeGroupsCopy := int64(*resp.Snapshot.NumNodeGroups) + ko.Status.NumNodeGroups = &numNodeGroupsCopy } else { ko.Status.NumNodeGroups = nil } if resp.Snapshot.Port != nil { - ko.Status.Port = resp.Snapshot.Port + portCopy := int64(*resp.Snapshot.Port) + ko.Status.Port = &portCopy } else { ko.Status.Port = nil } @@ -535,7 +523,8 @@ func (rm *resourceManager) sdkCreate( ko.Spec.SnapshotName = nil } if resp.Snapshot.SnapshotRetentionLimit != nil { - ko.Status.SnapshotRetentionLimit = resp.Snapshot.SnapshotRetentionLimit + snapshotRetentionLimitCopy := int64(*resp.Snapshot.SnapshotRetentionLimit) + ko.Status.SnapshotRetentionLimit = &snapshotRetentionLimitCopy } else { 
ko.Status.SnapshotRetentionLimit = nil } @@ -583,30 +572,30 @@ func (rm *resourceManager) newCreateRequestPayload( res := &svcsdk.CreateSnapshotInput{} if r.ko.Spec.CacheClusterID != nil { - res.SetCacheClusterId(*r.ko.Spec.CacheClusterID) + res.CacheClusterId = r.ko.Spec.CacheClusterID } if r.ko.Spec.KMSKeyID != nil { - res.SetKmsKeyId(*r.ko.Spec.KMSKeyID) + res.KmsKeyId = r.ko.Spec.KMSKeyID } if r.ko.Spec.ReplicationGroupID != nil { - res.SetReplicationGroupId(*r.ko.Spec.ReplicationGroupID) + res.ReplicationGroupId = r.ko.Spec.ReplicationGroupID } if r.ko.Spec.SnapshotName != nil { - res.SetSnapshotName(*r.ko.Spec.SnapshotName) + res.SnapshotName = r.ko.Spec.SnapshotName } if r.ko.Spec.Tags != nil { - f4 := []*svcsdk.Tag{} + f4 := []svcsdktypes.Tag{} for _, f4iter := range r.ko.Spec.Tags { - f4elem := &svcsdk.Tag{} + f4elem := &svcsdktypes.Tag{} if f4iter.Key != nil { - f4elem.SetKey(*f4iter.Key) + f4elem.Key = f4iter.Key } if f4iter.Value != nil { - f4elem.SetValue(*f4iter.Value) + f4elem.Value = f4iter.Value } - f4 = append(f4, f4elem) + f4 = append(f4, *f4elem) } - res.SetTags(f4) + res.Tags = f4 } return res, nil @@ -639,7 +628,7 @@ func (rm *resourceManager) sdkDelete( } var resp *svcsdk.DeleteSnapshotOutput _ = resp - resp, err = rm.sdkapi.DeleteSnapshotWithContext(ctx, input) + resp, err = rm.sdkapi.DeleteSnapshot(ctx, input) rm.metrics.RecordAPICall("DELETE", "DeleteSnapshot", err) return nil, err } @@ -652,7 +641,7 @@ func (rm *resourceManager) newDeleteRequestPayload( res := &svcsdk.DeleteSnapshotInput{} if r.ko.Spec.SnapshotName != nil { - res.SetSnapshotName(*r.ko.Spec.SnapshotName) + res.SnapshotName = r.ko.Spec.SnapshotName } return res, nil @@ -762,11 +751,12 @@ func (rm *resourceManager) terminalAWSError(err error) bool { if err == nil { return false } - awsErr, ok := ackerr.AWSError(err) - if !ok { + + var terminalErr smithy.APIError + if !errors.As(err, &terminalErr) { return false } - switch awsErr.Code() { + switch terminalErr.ErrorCode() { case "InvalidParameter", "InvalidParameterValue", "InvalidParameterCombination", diff --git a/pkg/resource/snapshot/tags.go b/pkg/resource/snapshot/tags.go index 9ba952cb..e650549b 100644 --- a/pkg/resource/snapshot/tags.go +++ b/pkg/resource/snapshot/tags.go @@ -16,48 +16,104 @@ package snapshot import ( + "slices" + "strings" + acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" ) var ( - _ = svcapitypes.Snapshot{} - _ = acktags.NewTags() + _ = svcapitypes.Snapshot{} + _ = acktags.NewTags() + ACKSystemTags = []string{"services.k8s.aws/namespace", "services.k8s.aws/controller-version"} ) -// ToACKTags converts the tags parameter into 'acktags.Tags' shape. +// convertToOrderedACKTags converts the tags parameter into 'acktags.Tags' shape. // This method helps in creating the hub(acktags.Tags) for merging -// default controller tags with existing resource tags. -func ToACKTags(tags []*svcapitypes.Tag) acktags.Tags { +// default controller tags with existing resource tags. 
It also returns a slice +// of keys maintaining the original key Order when the tags are a list +func convertToOrderedACKTags(tags []*svcapitypes.Tag) (acktags.Tags, []string) { result := acktags.NewTags() - if tags == nil || len(tags) == 0 { - return result - } + keyOrder := []string{} + if len(tags) == 0 { + return result, keyOrder + } for _, t := range tags { if t.Key != nil { - if t.Value == nil { - result[*t.Key] = "" - } else { + keyOrder = append(keyOrder, *t.Key) + if t.Value != nil { result[*t.Key] = *t.Value + } else { + result[*t.Key] = "" } } } - return result + return result, keyOrder } -// FromACKTags converts the tags parameter into []*svcapitypes.Tag shape. +// fromACKTags converts the tags parameter into []*svcapitypes.Tag shape. // This method helps in setting the tags back inside AWSResource after merging -// default controller tags with existing resource tags. -func FromACKTags(tags acktags.Tags) []*svcapitypes.Tag { +// default controller tags with existing resource tags. When a list, +// it maintains the order from original +func fromACKTags(tags acktags.Tags, keyOrder []string) []*svcapitypes.Tag { result := []*svcapitypes.Tag{} + + for _, k := range keyOrder { + v, ok := tags[k] + if ok { + tag := svcapitypes.Tag{Key: &k, Value: &v} + result = append(result, &tag) + delete(tags, k) + } + } for k, v := range tags { - kCopy := k - vCopy := v - tag := svcapitypes.Tag{Key: &kCopy, Value: &vCopy} + tag := svcapitypes.Tag{Key: &k, Value: &v} result = append(result, &tag) } + return result } + +// ignoreSystemTags ignores tags that have keys that start with "aws:" +// and ACKSystemTags, to avoid patching them to the resourceSpec. +// Eg. resources created with cloudformation have tags that cannot be +// removed by an ACK controller +func ignoreSystemTags(tags acktags.Tags) { + for k := range tags { + if strings.HasPrefix(k, "aws:") || + slices.Contains(ACKSystemTags, k) { + delete(tags, k) + } + } +} + +// syncAWSTags ensures AWS-managed tags (prefixed with "aws:") from the latest resource state +// are preserved in the desired state. This prevents the controller from attempting to +// modify AWS-managed tags, which would result in an error. +// +// AWS-managed tags are automatically added by AWS services (e.g., CloudFormation, Service Catalog) +// and cannot be modified or deleted through normal tag operations. Common examples include: +// - aws:cloudformation:stack-name +// - aws:servicecatalog:productArn +// +// Parameters: +// - a: The target Tags map to be updated (typically desired state) +// - b: The source Tags map containing AWS-managed tags (typically latest state) +// +// Example: +// +// latest := Tags{"aws:cloudformation:stack-name": "my-stack", "environment": "prod"} +// desired := Tags{"environment": "dev"} +// SyncAWSTags(desired, latest) +// desired now contains {"aws:cloudformation:stack-name": "my-stack", "environment": "dev"} +func syncAWSTags(a acktags.Tags, b acktags.Tags) { + for k := range b { + if strings.HasPrefix(k, "aws:") { + a[k] = b[k] + } + } +} diff --git a/pkg/resource/user/custom_set_output.go b/pkg/resource/user/custom_set_output.go deleted file mode 100644 index 8846f03b..00000000 --- a/pkg/resource/user/custom_set_output.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. 
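// --- Illustrative aside (not part of the generated diff) ---------------------
// With aws-sdk-go-v2 the controller can no longer rely on ackerr.AWSError/awserr
// to read service error codes; the sdk.go hunks above switch sdkFind and
// terminalAWSError to errors.As with the smithy-go APIError interface. A
// minimal sketch of that pattern, using smithy.GenericAPIError only to
// fabricate a wrapped error for the example:

package main

import (
	"errors"
	"fmt"

	smithy "github.com/aws/smithy-go"
)

// errorCode returns the AWS service error code carried anywhere in the error
// chain, or "" when the error is not an API error.
func errorCode(err error) string {
	var apiErr smithy.APIError
	if errors.As(err, &apiErr) {
		return apiErr.ErrorCode()
	}
	return ""
}

func main() {
	err := fmt.Errorf("DescribeSnapshots failed: %w", &smithy.GenericAPIError{
		Code:    "CacheClusterNotFound",
		Message: "the cache cluster does not exist",
	})
	fmt.Println(errorCode(err)) // CacheClusterNotFound
}
// -----------------------------------------------------------------------------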
A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package user - -import ( - "context" - - svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" -) - -// set the custom Status fields upon creation -func (rm *resourceManager) CustomCreateUserSetOutput( - ctx context.Context, - r *resource, - resp *svcsdk.CreateUserOutput, - ko *svcapitypes.User, -) (*svcapitypes.User, error) { - return rm.CustomSetOutput(r, resp.AccessString, ko) -} - -// precondition: successful ModifyUserWithContext call -// By updating 'latest' Status fields, these changes should be applied to 'desired' -// upon patching -func (rm *resourceManager) CustomModifyUserSetOutput( - ctx context.Context, - r *resource, - resp *svcsdk.ModifyUserOutput, - ko *svcapitypes.User, -) (*svcapitypes.User, error) { - return rm.CustomSetOutput(r, resp.AccessString, ko) -} - -func (rm *resourceManager) CustomSetOutput( - r *resource, - responseAccessString *string, - ko *svcapitypes.User, -) (*svcapitypes.User, error) { - - lastRequested := *r.ko.Spec.AccessString - ko.Status.LastRequestedAccessString = &lastRequested - - expandedAccessStringValue := *responseAccessString - ko.Status.ExpandedAccessString = &expandedAccessStringValue - - return ko, nil -} diff --git a/pkg/resource/user/custom_update.go b/pkg/resource/user/custom_update.go deleted file mode 100644 index 7b5cefa5..00000000 --- a/pkg/resource/user/custom_update.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. 
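// --- Illustrative aside (not part of the generated diff) ---------------------
// convertToOrderedACKTags/fromACKTags above thread a keyOrder slice alongside
// the acktags.Tags map because Go maps iterate in randomized order; without
// it, each reconcile could reorder Spec.Tags and surface as a spurious delta.
// A self-contained sketch of the same round trip, assuming tags behave like a
// plain map[string]string (as acktags.Tags does in the ACK runtime):

package main

import "fmt"

type tag struct{ Key, Value string }

func toOrdered(tags []tag) (map[string]string, []string) {
	m := map[string]string{}
	order := []string{}
	for _, t := range tags {
		order = append(order, t.Key)
		m[t.Key] = t.Value
	}
	return m, order
}

func fromOrdered(m map[string]string, order []string) []tag {
	out := []tag{}
	for _, k := range order { // original keys keep their original position
		if v, ok := m[k]; ok {
			out = append(out, tag{k, v})
			delete(m, k)
		}
	}
	for k, v := range m { // keys added by a merge land at the end
		out = append(out, tag{k, v})
	}
	return out
}

func main() {
	in := []tag{{"env", "dev"}, {"team", "cache"}, {"cost-center", "42"}}
	m, order := toOrdered(in)
	fmt.Println(fromOrdered(m, order)) // [{env dev} {team cache} {cost-center 42}]
}
// -----------------------------------------------------------------------------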
- -package user - -import ( - "context" - "github.com/pkg/errors" - - ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" - "github.com/aws-controllers-k8s/runtime/pkg/requeue" -) - -// currently this function's only purpose is to requeue if the resource is currently unavailable -func (rm *resourceManager) CustomModifyUser( - ctx context.Context, - desired *resource, - latest *resource, - delta *ackcompare.Delta, -) (*resource, error) { - - // requeue if necessary - latestStatus := latest.ko.Status.Status - if latestStatus == nil || *latestStatus != "active" { - return nil, requeue.NeededAfter( - errors.New("User cannot be modified as its status is not 'active'."), - requeue.DefaultRequeueAfterDuration) - } - - return nil, nil -} diff --git a/pkg/resource/user/delta.go b/pkg/resource/user/delta.go index 746c7b38..1e81b1ab 100644 --- a/pkg/resource/user/delta.go +++ b/pkg/resource/user/delta.go @@ -64,7 +64,9 @@ func newResourceDelta( delta.Add("Spec.NoPasswordRequired", a.ko.Spec.NoPasswordRequired, b.ko.Spec.NoPasswordRequired) } } - if !ackcompare.MapStringStringEqual(ToACKTags(a.ko.Spec.Tags), ToACKTags(b.ko.Spec.Tags)) { + desiredACKTags, _ := convertToOrderedACKTags(a.ko.Spec.Tags) + latestACKTags, _ := convertToOrderedACKTags(b.ko.Spec.Tags) + if !ackcompare.MapStringStringEqual(desiredACKTags, latestACKTags) { delta.Add("Spec.Tags", a.ko.Spec.Tags, b.ko.Spec.Tags) } if ackcompare.HasNilDifference(a.ko.Spec.UserID, b.ko.Spec.UserID) { diff --git a/pkg/resource/user/delta_util.go b/pkg/resource/user/delta_util.go deleted file mode 100644 index f36de45e..00000000 --- a/pkg/resource/user/delta_util.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package user - -import ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" -import "github.com/aws-controllers-k8s/elasticache-controller/pkg/common" - -// remove differences which are not meaningful (i.e. 
ones that don't warrant a call to rm.Update) -func filterDelta( - delta *ackcompare.Delta, - desired *resource, - latest *resource, -) { - // the returned AccessString can be different than the specified one; as long as the last requested AccessString - // matches the currently desired one, remove this difference from the delta - if delta.DifferentAt("Spec.AccessString") { - if desired.ko.Spec.AccessString != nil && - desired.ko.Status.LastRequestedAccessString != nil && - *desired.ko.Spec.AccessString == *desired.ko.Status.LastRequestedAccessString { - - common.RemoveFromDelta(delta, "Spec.AccessString") - } - } -} diff --git a/pkg/resource/user/descriptor.go b/pkg/resource/user/descriptor.go index d416feed..73d0041b 100644 --- a/pkg/resource/user/descriptor.go +++ b/pkg/resource/user/descriptor.go @@ -20,6 +20,7 @@ import ( ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" rtclient "sigs.k8s.io/controller-runtime/pkg/client" k8sctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -27,7 +28,7 @@ import ( ) const ( - finalizerString = "finalizers.elasticache.services.k8s.aws/User" + FinalizerString = "finalizers.elasticache.services.k8s.aws/User" ) var ( @@ -43,10 +44,10 @@ var ( type resourceDescriptor struct { } -// GroupKind returns a Kubernetes metav1.GroupKind struct that describes the -// API Group and Kind of CRs described by the descriptor -func (d *resourceDescriptor) GroupKind() *metav1.GroupKind { - return &GroupKind +// GroupVersionKind returns a Kubernetes schema.GroupVersionKind struct that +// describes the API Group, Version and Kind of CRs described by the descriptor +func (d *resourceDescriptor) GroupVersionKind() schema.GroupVersionKind { + return svcapitypes.GroupVersion.WithKind(GroupKind.Kind) } // EmptyRuntimeObject returns an empty object prototype that may be used in @@ -87,8 +88,8 @@ func (d *resourceDescriptor) IsManaged( // https://github.com/kubernetes-sigs/controller-runtime/issues/994 is // fixed. This should be able to be: // - // return k8sctrlutil.ContainsFinalizer(obj, finalizerString) - return containsFinalizer(obj, finalizerString) + // return k8sctrlutil.ContainsFinalizer(obj, FinalizerString) + return containsFinalizer(obj, FinalizerString) } // Remove once https://github.com/kubernetes-sigs/controller-runtime/issues/994 @@ -117,7 +118,7 @@ func (d *resourceDescriptor) MarkManaged( // Should not happen. If it does, there is a bug in the code panic("nil RuntimeMetaObject in AWSResource") } - k8sctrlutil.AddFinalizer(obj, finalizerString) + k8sctrlutil.AddFinalizer(obj, FinalizerString) } // MarkUnmanaged removes the supplied resource from management by ACK. What @@ -132,7 +133,7 @@ func (d *resourceDescriptor) MarkUnmanaged( // Should not happen. If it does, there is a bug in the code panic("nil RuntimeMetaObject in AWSResource") } - k8sctrlutil.RemoveFinalizer(obj, finalizerString) + k8sctrlutil.RemoveFinalizer(obj, FinalizerString) } // MarkAdopted places descriptors on the custom resource that indicate the diff --git a/pkg/resource/user/hooks.go b/pkg/resource/user/hooks.go new file mode 100644 index 00000000..28e2b316 --- /dev/null +++ b/pkg/resource/user/hooks.go @@ -0,0 +1,166 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). 
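// --- Illustrative aside (not part of the generated diff) ---------------------
// The descriptor changes above replace GroupKind() with GroupVersionKind(),
// built from the API package's GroupVersion. For the resources in this
// controller that resolves to values like the following (group name taken
// from the finalizer strings in these hunks):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	gv := schema.GroupVersion{Group: "elasticache.services.k8s.aws", Version: "v1alpha1"}
	fmt.Println(gv.WithKind("User").String())     // elasticache.services.k8s.aws/v1alpha1, Kind=User
	fmt.Println(gv.WithKind("Snapshot").String()) // elasticache.services.k8s.aws/v1alpha1, Kind=Snapshot
}
// -----------------------------------------------------------------------------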
You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package user + +import ( + "context" + + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" + "github.com/aws-controllers-k8s/elasticache-controller/pkg/common" + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + "github.com/aws-controllers-k8s/runtime/pkg/requeue" +) + +// set the custom Status fields upon creation +func (rm *resourceManager) CustomCreateUserSetOutput( + ctx context.Context, + r *resource, + resp *svcsdk.CreateUserOutput, + ko *svcapitypes.User, +) (*svcapitypes.User, error) { + return rm.CustomSetOutput(r, resp.AccessString, ko) +} + +// precondition: successful ModifyUserWithContext call +// By updating 'latest' Status fields, these changes should be applied to 'desired' +// upon patching +func (rm *resourceManager) CustomModifyUserSetOutput( + ctx context.Context, + r *resource, + resp *svcsdk.ModifyUserOutput, + ko *svcapitypes.User, +) (*svcapitypes.User, error) { + return rm.CustomSetOutput(r, resp.AccessString, ko) +} + +func (rm *resourceManager) CustomSetOutput( + r *resource, + responseAccessString *string, + ko *svcapitypes.User, +) (*svcapitypes.User, error) { + + lastRequested := *r.ko.Spec.AccessString + ko.Status.LastRequestedAccessString = &lastRequested + + expandedAccessStringValue := *responseAccessString + ko.Status.ExpandedAccessString = &expandedAccessStringValue + + return ko, nil +} + +// currently this function's only purpose is to requeue if the resource is currently unavailable +func (rm *resourceManager) CustomModifyUser( + ctx context.Context, + desired *resource, + latest *resource, + delta *ackcompare.Delta, +) (*resource, error) { + + // requeue if necessary + latestStatus := latest.ko.Status.Status + if latestStatus == nil || *latestStatus != "active" { + return nil, requeue.NeededAfter( + errors.New("User cannot be modified as its status is not 'active'."), + requeue.DefaultRequeueAfterDuration) + } + + return nil, nil +} + +// TODO: this should be generated in the future. In general, it doesn't seem like a good idea to add every non-nil +// Spec field in desired.Spec to the payload (i.e. 
what we do when building most inputs), unless there is +// actually a difference in the Spec field between desired and latest +func (rm *resourceManager) populateUpdatePayload( + input *svcsdk.ModifyUserInput, + r *resource, + delta *ackcompare.Delta, +) { + if delta.DifferentAt("Spec.AccessString") && r.ko.Spec.AccessString != nil { + input.AccessString = r.ko.Spec.AccessString + } + + if delta.DifferentAt("Spec.NoPasswordRequired") && r.ko.Spec.NoPasswordRequired != nil { + input.NoPasswordRequired = r.ko.Spec.NoPasswordRequired + } + + //TODO: add update for passwords field once we have framework-level support + +} + +/* + functions to update the state of the resource where the generated code or the set_output + functions are insufficient +*/ + +// set the ResourceSynced condition based on the User's Status. r is a wrapper around the User resource which will +// eventually be returned as "latest" +func (rm *resourceManager) setSyncedCondition( + status *string, + r *resource, +) { + // determine whether the resource can be considered synced + syncedStatus := corev1.ConditionUnknown + if status != nil { + if *status == "active" { + syncedStatus = corev1.ConditionTrue + } else { + syncedStatus = corev1.ConditionFalse + } + + } + + // TODO: add utility function in a common repo to do the below as it's done at least once per resource + + // set existing condition to the above status (or create a new condition with this status) + ko := r.ko + var resourceSyncedCondition *ackv1alpha1.Condition = nil + for _, condition := range ko.Status.Conditions { + if condition.Type == ackv1alpha1.ConditionTypeResourceSynced { + resourceSyncedCondition = condition + break + } + } + if resourceSyncedCondition == nil { + resourceSyncedCondition = &ackv1alpha1.Condition{ + Type: ackv1alpha1.ConditionTypeResourceSynced, + Status: syncedStatus, + } + ko.Status.Conditions = append(ko.Status.Conditions, resourceSyncedCondition) + } else { + resourceSyncedCondition.Status = syncedStatus + } +} + +// remove differences which are not meaningful (i.e. 
ones that don't warrant a call to rm.Update) +func filterDelta( + delta *ackcompare.Delta, + desired *resource, + latest *resource, +) { + // the returned AccessString can be different than the specified one; as long as the last requested AccessString + // matches the currently desired one, remove this difference from the delta + if delta.DifferentAt("Spec.AccessString") { + if desired.ko.Spec.AccessString != nil && + desired.ko.Status.LastRequestedAccessString != nil && + *desired.ko.Spec.AccessString == *desired.ko.Status.LastRequestedAccessString { + + common.RemoveFromDelta(delta, "Spec.AccessString") + } + } +} diff --git a/pkg/resource/user/manager.go b/pkg/resource/user/manager.go index 79704ac9..6fabc4d0 100644 --- a/pkg/resource/user/manager.go +++ b/pkg/resource/user/manager.go @@ -32,9 +32,8 @@ import ( acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" ackutil "github.com/aws-controllers-k8s/runtime/pkg/util" - "github.com/aws/aws-sdk-go/aws/session" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" - svcsdkapi "github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" @@ -59,6 +58,9 @@ type resourceManager struct { // cfg is a copy of the ackcfg.Config object passed on start of the service // controller cfg ackcfg.Config + // clientcfg is a copy of the client configuration passed on start of the + // service controller + clientcfg aws.Config // log refers to the logr.Logger object handling logging for the service // controller log logr.Logger @@ -73,12 +75,9 @@ type resourceManager struct { awsAccountID ackv1alpha1.AWSAccountID // The AWS Region that this resource manager targets awsRegion ackv1alpha1.AWSRegion - // sess is the AWS SDK Session object used to communicate with the backend - // AWS service API - sess *session.Session - // sdk is a pointer to the AWS service API interface exposed by the - // aws-sdk-go/services/{alias}/{alias}iface package. - sdkapi svcsdkapi.ElastiCacheAPI + // sdk is a pointer to the AWS service API client exposed by the + // aws-sdk-go-v2/services/{alias} package. + sdkapi *svcsdk.Client } // concreteResource returns a pointer to a resource from the supplied @@ -103,6 +102,7 @@ func (rm *resourceManager) ReadOne( panic("resource manager's ReadOne() method received resource with nil CR object") } observed, err := rm.sdkFind(ctx, r) + mirrorAWSTags(r, observed) if err != nil { if observed != nil { return rm.onError(observed, err) @@ -291,32 +291,76 @@ func (rm *resourceManager) EnsureTags( defaultTags := ackrt.GetDefaultTags(&rm.cfg, r.ko, md) var existingTags []*svcapitypes.Tag existingTags = r.ko.Spec.Tags - resourceTags := ToACKTags(existingTags) + resourceTags, keyOrder := convertToOrderedACKTags(existingTags) tags := acktags.Merge(resourceTags, defaultTags) - r.ko.Spec.Tags = FromACKTags(tags) + r.ko.Spec.Tags = fromACKTags(tags, keyOrder) return nil } +// FilterAWSTags ignores tags that have keys that start with "aws:" +// is needed to ensure the controller does not attempt to remove +// tags set by AWS. This function needs to be called after each Read +// operation. +// Eg. 
resources created with cloudformation have tags that cannot be +// removed by an ACK controller +func (rm *resourceManager) FilterSystemTags(res acktypes.AWSResource) { + r := rm.concreteResource(res) + if r == nil || r.ko == nil { + return + } + var existingTags []*svcapitypes.Tag + existingTags = r.ko.Spec.Tags + resourceTags, tagKeyOrder := convertToOrderedACKTags(existingTags) + ignoreSystemTags(resourceTags) + r.ko.Spec.Tags = fromACKTags(resourceTags, tagKeyOrder) +} + +// mirrorAWSTags ensures that AWS tags are included in the desired resource +// if they are present in the latest resource. This will ensure that the +// aws tags are not present in a diff. The logic of the controller will +// ensure these tags aren't patched to the resource in the cluster, and +// will only be present to make sure we don't try to remove these tags. +// +// Although there are a lot of similarities between this function and +// EnsureTags, they are very much different. +// While EnsureTags tries to make sure the resource contains the controller +// tags, mirrowAWSTags tries to make sure tags injected by AWS are mirrored +// from the latest resoruce to the desired resource. +func mirrorAWSTags(a *resource, b *resource) { + if a == nil || a.ko == nil || b == nil || b.ko == nil { + return + } + var existingLatestTags []*svcapitypes.Tag + var existingDesiredTags []*svcapitypes.Tag + existingDesiredTags = a.ko.Spec.Tags + existingLatestTags = b.ko.Spec.Tags + desiredTags, desiredTagKeyOrder := convertToOrderedACKTags(existingDesiredTags) + latestTags, _ := convertToOrderedACKTags(existingLatestTags) + syncAWSTags(desiredTags, latestTags) + a.ko.Spec.Tags = fromACKTags(desiredTags, desiredTagKeyOrder) +} + // newResourceManager returns a new struct implementing // acktypes.AWSResourceManager +// This is for AWS-SDK-GO-V2 - Created newResourceManager With AWS sdk-Go-ClientV2 func newResourceManager( cfg ackcfg.Config, + clientcfg aws.Config, log logr.Logger, metrics *ackmetrics.Metrics, rr acktypes.Reconciler, - sess *session.Session, id ackv1alpha1.AWSAccountID, region ackv1alpha1.AWSRegion, ) (*resourceManager, error) { return &resourceManager{ cfg: cfg, + clientcfg: clientcfg, log: log, metrics: metrics, rr: rr, awsAccountID: id, awsRegion: region, - sess: sess, - sdkapi: svcsdk.New(sess), + sdkapi: svcsdk.NewFromConfig(clientcfg), }, nil } diff --git a/pkg/resource/user/manager_factory.go b/pkg/resource/user/manager_factory.go index d5b128a0..64f4166f 100644 --- a/pkg/resource/user/manager_factory.go +++ b/pkg/resource/user/manager_factory.go @@ -23,7 +23,7 @@ import ( ackcfg "github.com/aws-controllers-k8s/runtime/pkg/config" ackmetrics "github.com/aws-controllers-k8s/runtime/pkg/metrics" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" - "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/go-logr/logr" svcresource "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource" @@ -47,14 +47,18 @@ func (f *resourceManagerFactory) ResourceDescriptor() acktypes.AWSResourceDescri // supplied AWS account func (f *resourceManagerFactory) ManagerFor( cfg ackcfg.Config, + clientcfg aws.Config, log logr.Logger, metrics *ackmetrics.Metrics, rr acktypes.Reconciler, - sess *session.Session, id ackv1alpha1.AWSAccountID, region ackv1alpha1.AWSRegion, + roleARN ackv1alpha1.AWSResourceName, ) (acktypes.AWSResourceManager, error) { - rmId := fmt.Sprintf("%s/%s", id, region) + // We use the account ID, region, and role ARN to uniquely identify a + // resource manager. 
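
Editorial note: the switch from a *session.Session to an aws.Config threaded through newResourceManager follows the standard aws-sdk-go-v2 construction pattern seen in this hunk (svcsdk.NewFromConfig). A minimal, self-contained sketch of that pattern, outside the controller; the region and the DescribeUsers call are illustrative only, not part of this change:

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/elasticache"
    )

    func main() {
        ctx := context.Background()

        // Load a client configuration (credentials, region, retryer) once;
        // the controller passes an equivalent aws.Config into newResourceManager.
        cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion("us-west-2"))
        if err != nil {
            log.Fatal(err)
        }

        // NewFromConfig replaces the v1 svcsdk.New(sess) constructor.
        client := elasticache.NewFromConfig(cfg)

        // v2 operations drop the *WithContext suffix and take ctx first.
        out, err := client.DescribeUsers(ctx, &elasticache.DescribeUsersInput{})
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("users:", len(out.Users))
    }
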
This helps us to avoid creating multiple resource + // managers for the same account/region/roleARN combination. + rmId := fmt.Sprintf("%s/%s/%s", id, region, roleARN) f.RLock() rm, found := f.rmCache[rmId] f.RUnlock() @@ -66,7 +70,7 @@ func (f *resourceManagerFactory) ManagerFor( f.Lock() defer f.Unlock() - rm, err := newResourceManager(cfg, log, metrics, rr, sess, id, region) + rm, err := newResourceManager(cfg, clientcfg, log, metrics, rr, id, region) if err != nil { return nil, err } diff --git a/pkg/resource/user/post_build_request.go b/pkg/resource/user/post_build_request.go deleted file mode 100644 index 487328cb..00000000 --- a/pkg/resource/user/post_build_request.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package user - -import ( - ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" -) - -// TODO: this should be generated in the future. In general, it doesn't seem like a good idea to add every non-nil -// Spec field in desired.Spec to the payload (i.e. what we do when building most inputs), unless there is -// actually a difference in the Spec field between desired and latest -func (rm *resourceManager) populateUpdatePayload( - input *svcsdk.ModifyUserInput, - r *resource, - delta *ackcompare.Delta, -) { - if delta.DifferentAt("Spec.AccessString") && r.ko.Spec.AccessString != nil { - input.AccessString = r.ko.Spec.AccessString - } - - if delta.DifferentAt("Spec.NoPasswordRequired") && r.ko.Spec.NoPasswordRequired != nil { - input.NoPasswordRequired = r.ko.Spec.NoPasswordRequired - } - - //TODO: add update for passwords field once we have framework-level support - -} diff --git a/pkg/resource/user/post_set_output.go b/pkg/resource/user/post_set_output.go deleted file mode 100644 index 8c1d576e..00000000 --- a/pkg/resource/user/post_set_output.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package user - -import ( - ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" - corev1 "k8s.io/api/core/v1" -) - -/* - This file contains functions to update the state of the resource where the generated code or the set_output - functions are insufficient -*/ - -// set the ResourceSynced condition based on the User's Status. 
r is a wrapper around the User resource which will -// eventually be returned as "latest" -func (rm *resourceManager) setSyncedCondition( - status *string, - r *resource, -) { - // determine whether the resource can be considered synced - syncedStatus := corev1.ConditionUnknown - if status != nil { - if *status == "active" { - syncedStatus = corev1.ConditionTrue - } else { - syncedStatus = corev1.ConditionFalse - } - - } - - // TODO: add utility function in a common repo to do the below as it's done at least once per resource - - // set existing condition to the above status (or create a new condition with this status) - ko := r.ko - var resourceSyncedCondition *ackv1alpha1.Condition = nil - for _, condition := range ko.Status.Conditions { - if condition.Type == ackv1alpha1.ConditionTypeResourceSynced { - resourceSyncedCondition = condition - break - } - } - if resourceSyncedCondition == nil { - resourceSyncedCondition = &ackv1alpha1.Condition{ - Type: ackv1alpha1.ConditionTypeResourceSynced, - Status: syncedStatus, - } - ko.Status.Conditions = append(ko.Status.Conditions, resourceSyncedCondition) - } else { - resourceSyncedCondition.Status = syncedStatus - } -} diff --git a/pkg/resource/user/references.go b/pkg/resource/user/references.go index fcb4ff21..14bba7e8 100644 --- a/pkg/resource/user/references.go +++ b/pkg/resource/user/references.go @@ -17,6 +17,7 @@ package user import ( "context" + "sigs.k8s.io/controller-runtime/pkg/client" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" @@ -24,19 +25,29 @@ import ( svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" ) +// ClearResolvedReferences removes any reference values that were made +// concrete in the spec. It returns a copy of the input AWSResource which +// contains the original *Ref values, but none of their respective concrete +// values. +func (rm *resourceManager) ClearResolvedReferences(res acktypes.AWSResource) acktypes.AWSResource { + ko := rm.concreteResource(res).ko.DeepCopy() + + return &resource{ko} +} + // ResolveReferences finds if there are any Reference field(s) present -// inside AWSResource passed in the parameter and attempts to resolve -// those reference field(s) into target field(s). -// It returns an AWSResource with resolved reference(s), and an error if the -// passed AWSResource's reference field(s) cannot be resolved. -// This method also adds/updates the ConditionTypeReferencesResolved for the -// AWSResource. +// inside AWSResource passed in the parameter and attempts to resolve those +// reference field(s) into their respective target field(s). It returns a +// copy of the input AWSResource with resolved reference(s), a boolean which +// is set to true if the resource contains any references (regardless of if +// they are resolved successfully) and an error if the passed AWSResource's +// reference field(s) could not be resolved. 
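
Editorial note: ClearResolvedReferences pairs with the new ResolveReferences contract described above: resolution fills concrete values derived from *Ref fields, and clearing returns a copy that keeps the refs but drops those concrete values, so resolved data never leaks back into the stored spec. User has no reference fields (ResolveReferences is a no-op here), so the sketch below uses a hypothetical SecurityGroupRef-style pair purely to illustrate the shape:

    package main

    import "fmt"

    // toySpec stands in for a generated Spec with one reference field and its
    // concrete counterpart; all field names here are hypothetical.
    type toySpec struct {
        SecurityGroupRef *string // user-supplied reference to another CR
        SecurityGroupID  *string // concrete value filled in during resolution
    }

    // clearResolved mirrors the ClearResolvedReferences idea: keep the *Ref,
    // drop the concrete value, never mutate the input (the real code DeepCopies).
    func clearResolved(in toySpec) toySpec {
        out := in // shallow copy is enough for this toy struct
        out.SecurityGroupID = nil
        return out
    }

    func main() {
        ref := "my-security-group"
        id := "sg-0123456789abcdef0"
        resolved := toySpec{SecurityGroupRef: &ref, SecurityGroupID: &id}

        cleared := clearResolved(resolved)
        fmt.Println(*cleared.SecurityGroupRef, cleared.SecurityGroupID == nil) // my-security-group true
        fmt.Println(*resolved.SecurityGroupID)                                 // input left untouched
    }
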
func (rm *resourceManager) ResolveReferences( ctx context.Context, apiReader client.Reader, res acktypes.AWSResource, -) (acktypes.AWSResource, error) { - return res, nil +) (acktypes.AWSResource, bool, error) { + return res, false, nil } // validateReferenceFields validates the reference field and corresponding @@ -44,9 +55,3 @@ func (rm *resourceManager) ResolveReferences( func validateReferenceFields(ko *svcapitypes.User) error { return nil } - -// hasNonNilReferences returns true if resource contains a reference to another -// resource -func hasNonNilReferences(ko *svcapitypes.User) bool { - return false -} diff --git a/pkg/resource/user/resource.go b/pkg/resource/user/resource.go index 4b575c30..92ee55c0 100644 --- a/pkg/resource/user/resource.go +++ b/pkg/resource/user/resource.go @@ -16,6 +16,8 @@ package user import ( + "fmt" + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" ackerrors "github.com/aws-controllers-k8s/runtime/pkg/errors" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" @@ -45,7 +47,7 @@ func (r *resource) Identifiers() acktypes.AWSResourceIdentifiers { } // IsBeingDeleted returns true if the Kubernetes resource has a non-zero -// deletion timestemp +// deletion timestamp func (r *resource) IsBeingDeleted() bool { return !r.ko.DeletionTimestamp.IsZero() } @@ -93,6 +95,17 @@ func (r *resource) SetIdentifiers(identifier *ackv1alpha1.AWSIdentifiers) error return nil } +// PopulateResourceFromAnnotation populates the fields passed from adoption annotation +func (r *resource) PopulateResourceFromAnnotation(fields map[string]string) error { + f3, ok := fields["userID"] + if !ok { + return ackerrors.NewTerminalError(fmt.Errorf("required field missing: userID")) + } + r.ko.Spec.UserID = &f3 + + return nil +} + // DeepCopy will return a copy of the resource func (r *resource) DeepCopy() acktypes.AWSResource { koCopy := r.ko.DeepCopy() diff --git a/pkg/resource/user/sdk.go b/pkg/resource/user/sdk.go index e486baa0..70f9a74c 100644 --- a/pkg/resource/user/sdk.go +++ b/pkg/resource/user/sdk.go @@ -28,8 +28,10 @@ import ( ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" - "github.com/aws/aws-sdk-go/aws" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + svcsdktypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" + smithy "github.com/aws/smithy-go" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -40,8 +42,7 @@ import ( var ( _ = &metav1.Time{} _ = strings.ToLower("") - _ = &aws.JSONValue{} - _ = &svcsdk.ElastiCache{} + _ = &svcsdk.Client{} _ = &svcapitypes.User{} _ = ackv1alpha1.AWSAccountID("") _ = &ackerr.NotFound @@ -49,6 +50,7 @@ var ( _ = &reflect.Value{} _ = fmt.Sprintf("") _ = &ackrequeue.NoRequeue{} + _ = &aws.Config{} ) // sdkFind returns SDK-specific information about a supplied resource @@ -73,10 +75,11 @@ func (rm *resourceManager) sdkFind( return nil, err } var resp *svcsdk.DescribeUsersOutput - resp, err = rm.sdkapi.DescribeUsersWithContext(ctx, input) + resp, err = rm.sdkapi.DescribeUsers(ctx, input) rm.metrics.RecordAPICall("READ_MANY", "DescribeUsers", err) if err != nil { - if awsErr, ok := ackerr.AWSError(err); ok && awsErr.Code() == "UserNotFound" { + var awsErr smithy.APIError + if errors.As(err, &awsErr) && awsErr.ErrorCode() == "UserNotFound" { 
return nil, ackerr.NotFound } return nil, err @@ -103,10 +106,11 @@ func (rm *resourceManager) sdkFind( if elem.Authentication != nil { f2 := &svcapitypes.Authentication{} if elem.Authentication.PasswordCount != nil { - f2.PasswordCount = elem.Authentication.PasswordCount + passwordCountCopy := int64(*elem.Authentication.PasswordCount) + f2.PasswordCount = &passwordCountCopy } - if elem.Authentication.Type != nil { - f2.Type = elem.Authentication.Type + if elem.Authentication.Type != "" { + f2.Type = aws.String(string(elem.Authentication.Type)) } ko.Status.Authentication = f2 } else { @@ -128,13 +132,7 @@ func (rm *resourceManager) sdkFind( ko.Status.Status = nil } if elem.UserGroupIds != nil { - f6 := []*string{} - for _, f6iter := range elem.UserGroupIds { - var f6elem string - f6elem = *f6iter - f6 = append(f6, &f6elem) - } - ko.Status.UserGroupIDs = f6 + ko.Status.UserGroupIDs = aws.StringSlice(elem.UserGroupIds) } else { ko.Status.UserGroupIDs = nil } @@ -178,7 +176,7 @@ func (rm *resourceManager) newListRequestPayload( res := &svcsdk.DescribeUsersInput{} if r.ko.Spec.UserID != nil { - res.SetUserId(*r.ko.Spec.UserID) + res.UserId = r.ko.Spec.UserID } return res, nil @@ -203,7 +201,7 @@ func (rm *resourceManager) sdkCreate( var resp *svcsdk.CreateUserOutput _ = resp - resp, err = rm.sdkapi.CreateUserWithContext(ctx, input) + resp, err = rm.sdkapi.CreateUser(ctx, input) rm.metrics.RecordAPICall("CREATE", "CreateUser", err) if err != nil { return nil, err @@ -227,10 +225,11 @@ func (rm *resourceManager) sdkCreate( if resp.Authentication != nil { f2 := &svcapitypes.Authentication{} if resp.Authentication.PasswordCount != nil { - f2.PasswordCount = resp.Authentication.PasswordCount + passwordCountCopy := int64(*resp.Authentication.PasswordCount) + f2.PasswordCount = &passwordCountCopy } - if resp.Authentication.Type != nil { - f2.Type = resp.Authentication.Type + if resp.Authentication.Type != "" { + f2.Type = aws.String(string(resp.Authentication.Type)) } ko.Status.Authentication = f2 } else { @@ -252,13 +251,7 @@ func (rm *resourceManager) sdkCreate( ko.Status.Status = nil } if resp.UserGroupIds != nil { - f6 := []*string{} - for _, f6iter := range resp.UserGroupIds { - var f6elem string - f6elem = *f6iter - f6 = append(f6, &f6elem) - } - ko.Status.UserGroupIDs = f6 + ko.Status.UserGroupIDs = aws.StringSlice(resp.UserGroupIds) } else { ko.Status.UserGroupIDs = nil } @@ -292,16 +285,16 @@ func (rm *resourceManager) newCreateRequestPayload( res := &svcsdk.CreateUserInput{} if r.ko.Spec.AccessString != nil { - res.SetAccessString(*r.ko.Spec.AccessString) + res.AccessString = r.ko.Spec.AccessString } if r.ko.Spec.Engine != nil { - res.SetEngine(*r.ko.Spec.Engine) + res.Engine = r.ko.Spec.Engine } if r.ko.Spec.NoPasswordRequired != nil { - res.SetNoPasswordRequired(*r.ko.Spec.NoPasswordRequired) + res.NoPasswordRequired = r.ko.Spec.NoPasswordRequired } if r.ko.Spec.Passwords != nil { - f3 := []*string{} + f3 := []string{} for _, f3iter := range r.ko.Spec.Passwords { var f3elem string if f3iter != nil { @@ -313,29 +306,29 @@ func (rm *resourceManager) newCreateRequestPayload( f3elem = tmpSecret } } - f3 = append(f3, &f3elem) + f3 = append(f3, f3elem) } - res.SetPasswords(f3) + res.Passwords = f3 } if r.ko.Spec.Tags != nil { - f4 := []*svcsdk.Tag{} + f4 := []svcsdktypes.Tag{} for _, f4iter := range r.ko.Spec.Tags { - f4elem := &svcsdk.Tag{} + f4elem := &svcsdktypes.Tag{} if f4iter.Key != nil { - f4elem.SetKey(*f4iter.Key) + f4elem.Key = f4iter.Key } if f4iter.Value != nil { - 
f4elem.SetValue(*f4iter.Value) + f4elem.Value = f4iter.Value } - f4 = append(f4, f4elem) + f4 = append(f4, *f4elem) } - res.SetTags(f4) + res.Tags = f4 } if r.ko.Spec.UserID != nil { - res.SetUserId(*r.ko.Spec.UserID) + res.UserId = r.ko.Spec.UserID } if r.ko.Spec.UserName != nil { - res.SetUserName(*r.ko.Spec.UserName) + res.UserName = r.ko.Spec.UserName } return res, nil @@ -358,7 +351,7 @@ func (rm *resourceManager) sdkUpdate( if updated != nil || err != nil { return updated, err } - input, err := rm.newUpdateRequestPayload(ctx, desired) + input, err := rm.newUpdateRequestPayload(ctx, desired, delta) if err != nil { return nil, err } @@ -366,7 +359,7 @@ func (rm *resourceManager) sdkUpdate( var resp *svcsdk.ModifyUserOutput _ = resp - resp, err = rm.sdkapi.ModifyUserWithContext(ctx, input) + resp, err = rm.sdkapi.ModifyUser(ctx, input) rm.metrics.RecordAPICall("UPDATE", "ModifyUser", err) if err != nil { return nil, err @@ -390,10 +383,11 @@ func (rm *resourceManager) sdkUpdate( if resp.Authentication != nil { f2 := &svcapitypes.Authentication{} if resp.Authentication.PasswordCount != nil { - f2.PasswordCount = resp.Authentication.PasswordCount + passwordCountCopy := int64(*resp.Authentication.PasswordCount) + f2.PasswordCount = &passwordCountCopy } - if resp.Authentication.Type != nil { - f2.Type = resp.Authentication.Type + if resp.Authentication.Type != "" { + f2.Type = aws.String(string(resp.Authentication.Type)) } ko.Status.Authentication = f2 } else { @@ -415,13 +409,7 @@ func (rm *resourceManager) sdkUpdate( ko.Status.Status = nil } if resp.UserGroupIds != nil { - f6 := []*string{} - for _, f6iter := range resp.UserGroupIds { - var f6elem string - f6elem = *f6iter - f6 = append(f6, &f6elem) - } - ko.Status.UserGroupIDs = f6 + ko.Status.UserGroupIDs = aws.StringSlice(resp.UserGroupIds) } else { ko.Status.UserGroupIDs = nil } @@ -451,11 +439,15 @@ func (rm *resourceManager) sdkUpdate( func (rm *resourceManager) newUpdateRequestPayload( ctx context.Context, r *resource, + delta *ackcompare.Delta, ) (*svcsdk.ModifyUserInput, error) { res := &svcsdk.ModifyUserInput{} + if r.ko.Spec.Engine != nil { + res.Engine = r.ko.Spec.Engine + } if r.ko.Spec.UserID != nil { - res.SetUserId(*r.ko.Spec.UserID) + res.UserId = r.ko.Spec.UserID } return res, nil @@ -477,7 +469,7 @@ func (rm *resourceManager) sdkDelete( } var resp *svcsdk.DeleteUserOutput _ = resp - resp, err = rm.sdkapi.DeleteUserWithContext(ctx, input) + resp, err = rm.sdkapi.DeleteUser(ctx, input) rm.metrics.RecordAPICall("DELETE", "DeleteUser", err) return nil, err } @@ -490,7 +482,7 @@ func (rm *resourceManager) newDeleteRequestPayload( res := &svcsdk.DeleteUserInput{} if r.ko.Spec.UserID != nil { - res.SetUserId(*r.ko.Spec.UserID) + res.UserId = r.ko.Spec.UserID } return res, nil @@ -598,18 +590,18 @@ func (rm *resourceManager) terminalAWSError(err error) bool { if err == nil { return false } - awsErr, ok := ackerr.AWSError(err) - if !ok { + + var terminalErr smithy.APIError + if !errors.As(err, &terminalErr) { return false } - switch awsErr.Code() { + switch terminalErr.ErrorCode() { case "UserAlreadyExists", "UserQuotaExceeded", "DuplicateUserName", "InvalidParameterValue", "InvalidParameterCombination", - "InvalidUserState", - "DefaultUserAssociatedToUserGroup": + "InvalidUserState": return true default: return false diff --git a/pkg/resource/user/tags.go b/pkg/resource/user/tags.go index 5261cfa2..e6528883 100644 --- a/pkg/resource/user/tags.go +++ b/pkg/resource/user/tags.go @@ -16,48 +16,104 @@ package user import ( + 
"slices" + "strings" + acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" ) var ( - _ = svcapitypes.User{} - _ = acktags.NewTags() + _ = svcapitypes.User{} + _ = acktags.NewTags() + ACKSystemTags = []string{"services.k8s.aws/namespace", "services.k8s.aws/controller-version"} ) -// ToACKTags converts the tags parameter into 'acktags.Tags' shape. +// convertToOrderedACKTags converts the tags parameter into 'acktags.Tags' shape. // This method helps in creating the hub(acktags.Tags) for merging -// default controller tags with existing resource tags. -func ToACKTags(tags []*svcapitypes.Tag) acktags.Tags { +// default controller tags with existing resource tags. It also returns a slice +// of keys maintaining the original key Order when the tags are a list +func convertToOrderedACKTags(tags []*svcapitypes.Tag) (acktags.Tags, []string) { result := acktags.NewTags() - if tags == nil || len(tags) == 0 { - return result - } + keyOrder := []string{} + if len(tags) == 0 { + return result, keyOrder + } for _, t := range tags { if t.Key != nil { - if t.Value == nil { - result[*t.Key] = "" - } else { + keyOrder = append(keyOrder, *t.Key) + if t.Value != nil { result[*t.Key] = *t.Value + } else { + result[*t.Key] = "" } } } - return result + return result, keyOrder } -// FromACKTags converts the tags parameter into []*svcapitypes.Tag shape. +// fromACKTags converts the tags parameter into []*svcapitypes.Tag shape. // This method helps in setting the tags back inside AWSResource after merging -// default controller tags with existing resource tags. -func FromACKTags(tags acktags.Tags) []*svcapitypes.Tag { +// default controller tags with existing resource tags. When a list, +// it maintains the order from original +func fromACKTags(tags acktags.Tags, keyOrder []string) []*svcapitypes.Tag { result := []*svcapitypes.Tag{} + + for _, k := range keyOrder { + v, ok := tags[k] + if ok { + tag := svcapitypes.Tag{Key: &k, Value: &v} + result = append(result, &tag) + delete(tags, k) + } + } for k, v := range tags { - kCopy := k - vCopy := v - tag := svcapitypes.Tag{Key: &kCopy, Value: &vCopy} + tag := svcapitypes.Tag{Key: &k, Value: &v} result = append(result, &tag) } + return result } + +// ignoreSystemTags ignores tags that have keys that start with "aws:" +// and ACKSystemTags, to avoid patching them to the resourceSpec. +// Eg. resources created with cloudformation have tags that cannot be +// removed by an ACK controller +func ignoreSystemTags(tags acktags.Tags) { + for k := range tags { + if strings.HasPrefix(k, "aws:") || + slices.Contains(ACKSystemTags, k) { + delete(tags, k) + } + } +} + +// syncAWSTags ensures AWS-managed tags (prefixed with "aws:") from the latest resource state +// are preserved in the desired state. This prevents the controller from attempting to +// modify AWS-managed tags, which would result in an error. +// +// AWS-managed tags are automatically added by AWS services (e.g., CloudFormation, Service Catalog) +// and cannot be modified or deleted through normal tag operations. 
Common examples include: +// - aws:cloudformation:stack-name +// - aws:servicecatalog:productArn +// +// Parameters: +// - a: The target Tags map to be updated (typically desired state) +// - b: The source Tags map containing AWS-managed tags (typically latest state) +// +// Example: +// +// latest := Tags{"aws:cloudformation:stack-name": "my-stack", "environment": "prod"} +// desired := Tags{"environment": "dev"} +// SyncAWSTags(desired, latest) +// desired now contains {"aws:cloudformation:stack-name": "my-stack", "environment": "dev"} +func syncAWSTags(a acktags.Tags, b acktags.Tags) { + for k := range b { + if strings.HasPrefix(k, "aws:") { + a[k] = b[k] + } + } +} diff --git a/pkg/resource/user_group/custom_set_output.go b/pkg/resource/user_group/custom_set_output.go deleted file mode 100644 index 6f3f6fbf..00000000 --- a/pkg/resource/user_group/custom_set_output.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package user_group - -import ( - "context" - svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" - ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" - "github.com/aws/aws-sdk-go/service/elasticache" - corev1 "k8s.io/api/core/v1" -) - -func (rm *resourceManager) CustomDescribeUserGroupsSetOutput( - ctx context.Context, - r *resource, - resp *elasticache.DescribeUserGroupsOutput, - ko *svcapitypes.UserGroup, -) (*svcapitypes.UserGroup, error) { - elem := resp.UserGroups[0] - rm.customSetOutput(elem.UserIds, - elem.Engine, - elem.Status, - ko) - return ko, nil -} - -func (rm *resourceManager) CustomCreateUserGroupSetOutput( - ctx context.Context, - r *resource, - resp *elasticache.CreateUserGroupOutput, - ko *svcapitypes.UserGroup, -) (*svcapitypes.UserGroup, error) { - rm.customSetOutput(resp.UserIds, - resp.Engine, - resp.Status, - ko) - return ko, nil -} - -func (rm *resourceManager) customSetOutput( - userIds []*string, - engine *string, - status *string, - ko *svcapitypes.UserGroup, -) { - if userIds != nil { - ko.Spec.UserIDs = userIds - } - - if engine != nil { - ko.Spec.Engine = engine - } - - syncConditionStatus := corev1.ConditionUnknown - if status != nil { - if *status == "active" { - syncConditionStatus = corev1.ConditionTrue - } else { - syncConditionStatus = corev1.ConditionFalse - } - } - var resourceSyncedCondition *ackv1alpha1.Condition = nil - for _, condition := range ko.Status.Conditions { - if condition.Type == ackv1alpha1.ConditionTypeResourceSynced { - resourceSyncedCondition = condition - break - } - } - if resourceSyncedCondition == nil { - resourceSyncedCondition = &ackv1alpha1.Condition{ - Type: ackv1alpha1.ConditionTypeResourceSynced, - Status: syncConditionStatus, - } - ko.Status.Conditions = append(ko.Status.Conditions, resourceSyncedCondition) - } else { - resourceSyncedCondition.Status = syncConditionStatus - } -} diff --git a/pkg/resource/user_group/delta.go b/pkg/resource/user_group/delta.go index bf3f4d0c..4db66bbf 100644 --- 
a/pkg/resource/user_group/delta.go +++ b/pkg/resource/user_group/delta.go @@ -50,7 +50,9 @@ func newResourceDelta( delta.Add("Spec.Engine", a.ko.Spec.Engine, b.ko.Spec.Engine) } } - if !ackcompare.MapStringStringEqual(ToACKTags(a.ko.Spec.Tags), ToACKTags(b.ko.Spec.Tags)) { + desiredACKTags, _ := convertToOrderedACKTags(a.ko.Spec.Tags) + latestACKTags, _ := convertToOrderedACKTags(b.ko.Spec.Tags) + if !ackcompare.MapStringStringEqual(desiredACKTags, latestACKTags) { delta.Add("Spec.Tags", a.ko.Spec.Tags, b.ko.Spec.Tags) } if ackcompare.HasNilDifference(a.ko.Spec.UserGroupID, b.ko.Spec.UserGroupID) { @@ -60,8 +62,12 @@ func newResourceDelta( delta.Add("Spec.UserGroupID", a.ko.Spec.UserGroupID, b.ko.Spec.UserGroupID) } } - if !ackcompare.SliceStringPEqual(a.ko.Spec.UserIDs, b.ko.Spec.UserIDs) { + if len(a.ko.Spec.UserIDs) != len(b.ko.Spec.UserIDs) { delta.Add("Spec.UserIDs", a.ko.Spec.UserIDs, b.ko.Spec.UserIDs) + } else if len(a.ko.Spec.UserIDs) > 0 { + if !ackcompare.SliceStringPEqual(a.ko.Spec.UserIDs, b.ko.Spec.UserIDs) { + delta.Add("Spec.UserIDs", a.ko.Spec.UserIDs, b.ko.Spec.UserIDs) + } } return delta diff --git a/pkg/resource/user_group/descriptor.go b/pkg/resource/user_group/descriptor.go index f0cd5b8d..7e0965bc 100644 --- a/pkg/resource/user_group/descriptor.go +++ b/pkg/resource/user_group/descriptor.go @@ -20,6 +20,7 @@ import ( ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" rtclient "sigs.k8s.io/controller-runtime/pkg/client" k8sctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -27,7 +28,7 @@ import ( ) const ( - finalizerString = "finalizers.elasticache.services.k8s.aws/UserGroup" + FinalizerString = "finalizers.elasticache.services.k8s.aws/UserGroup" ) var ( @@ -43,10 +44,10 @@ var ( type resourceDescriptor struct { } -// GroupKind returns a Kubernetes metav1.GroupKind struct that describes the -// API Group and Kind of CRs described by the descriptor -func (d *resourceDescriptor) GroupKind() *metav1.GroupKind { - return &GroupKind +// GroupVersionKind returns a Kubernetes schema.GroupVersionKind struct that +// describes the API Group, Version and Kind of CRs described by the descriptor +func (d *resourceDescriptor) GroupVersionKind() schema.GroupVersionKind { + return svcapitypes.GroupVersion.WithKind(GroupKind.Kind) } // EmptyRuntimeObject returns an empty object prototype that may be used in @@ -87,8 +88,8 @@ func (d *resourceDescriptor) IsManaged( // https://github.com/kubernetes-sigs/controller-runtime/issues/994 is // fixed. This should be able to be: // - // return k8sctrlutil.ContainsFinalizer(obj, finalizerString) - return containsFinalizer(obj, finalizerString) + // return k8sctrlutil.ContainsFinalizer(obj, FinalizerString) + return containsFinalizer(obj, FinalizerString) } // Remove once https://github.com/kubernetes-sigs/controller-runtime/issues/994 @@ -117,7 +118,7 @@ func (d *resourceDescriptor) MarkManaged( // Should not happen. If it does, there is a bug in the code panic("nil RuntimeMetaObject in AWSResource") } - k8sctrlutil.AddFinalizer(obj, finalizerString) + k8sctrlutil.AddFinalizer(obj, FinalizerString) } // MarkUnmanaged removes the supplied resource from management by ACK. What @@ -132,7 +133,7 @@ func (d *resourceDescriptor) MarkUnmanaged( // Should not happen. 
If it does, there is a bug in the code panic("nil RuntimeMetaObject in AWSResource") } - k8sctrlutil.RemoveFinalizer(obj, finalizerString) + k8sctrlutil.RemoveFinalizer(obj, FinalizerString) } // MarkAdopted places descriptors on the custom resource that indicate the diff --git a/pkg/resource/user_group/custom_update_api.go b/pkg/resource/user_group/hooks.go similarity index 63% rename from pkg/resource/user_group/custom_update_api.go rename to pkg/resource/user_group/hooks.go index d3f56e01..a832dca8 100644 --- a/pkg/resource/user_group/custom_update_api.go +++ b/pkg/resource/user_group/hooks.go @@ -16,11 +16,13 @@ package user_group import ( "context" "errors" + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" "github.com/aws-controllers-k8s/runtime/pkg/requeue" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + corev1 "k8s.io/api/core/v1" ) // Implements custom logic for UpdateUserGroup @@ -59,31 +61,31 @@ func (rm *resourceManager) customUpdateUserGroup( // User Ids to add { - var userIdsToAdd []*string + var userIdsToAdd []string for userId, include := range requiredUserIdsMap { if include { - userIdsToAdd = append(userIdsToAdd, &userId) + userIdsToAdd = append(userIdsToAdd, userId) } } - input.SetUserIdsToAdd(userIdsToAdd) + input.UserIdsToAdd = userIdsToAdd } // User Ids to remove { - var userIdsToRemove []*string + var userIdsToRemove []string for userId, include := range existingUserIdsMap { if include { - userIdsToRemove = append(userIdsToRemove, &userId) + userIdsToRemove = append(userIdsToRemove, userId) } } - input.SetUserIdsToRemove(userIdsToRemove) + input.UserIdsToRemove = userIdsToRemove } - resp, respErr := rm.sdkapi.ModifyUserGroupWithContext(ctx, input) + resp, respErr := rm.sdkapi.ModifyUserGroup(ctx, input) rm.metrics.RecordAPICall("UPDATE", "ModifyUserGroup", respErr) if respErr != nil { return nil, respErr @@ -104,18 +106,14 @@ func (rm *resourceManager) customUpdateUserGroup( if resp.PendingChanges.UserIdsToAdd != nil { f2f0 := []*string{} for _, f2f0iter := range resp.PendingChanges.UserIdsToAdd { - var f2f0elem string - f2f0elem = *f2f0iter - f2f0 = append(f2f0, &f2f0elem) + f2f0 = append(f2f0, &f2f0iter) } f2.UserIDsToAdd = f2f0 } if resp.PendingChanges.UserIdsToRemove != nil { f2f1 := []*string{} for _, f2f1iter := range resp.PendingChanges.UserIdsToRemove { - var f2f1elem string - f2f1elem = *f2f1iter - f2f1 = append(f2f1, &f2f1elem) + f2f1 = append(f2f1, &f2f1iter) } f2.UserIDsToRemove = f2f1 } @@ -126,9 +124,7 @@ func (rm *resourceManager) customUpdateUserGroup( if resp.ReplicationGroups != nil { f3 := []*string{} for _, f3iter := range resp.ReplicationGroups { - var f3elem string - f3elem = *f3iter - f3 = append(f3, &f3elem) + f3 = append(f3, &f3iter) } ko.Status.ReplicationGroups = f3 } else { @@ -141,7 +137,11 @@ func (rm *resourceManager) customUpdateUserGroup( } rm.setStatusDefaults(ko) - rm.customSetOutput(resp.UserIds, resp.Engine, resp.Status, ko) + rm.customSetOutput( + stringSliceToPointers(resp.UserIds), + resp.Engine, + resp.Status, + ko) return &resource{ko}, nil } } @@ -172,8 +172,86 @@ func (rm *resourceManager) newUpdateRequestPayload( res := &svcsdk.ModifyUserGroupInput{} if r.ko.Spec.UserGroupID != nil { - res.SetUserGroupId(*r.ko.Spec.UserGroupID) + res.UserGroupId = r.ko.Spec.UserGroupID } return res, nil 
} + +func (rm *resourceManager) CustomDescribeUserGroupsSetOutput( + ctx context.Context, + r *resource, + resp *svcsdk.DescribeUserGroupsOutput, + ko *svcapitypes.UserGroup, +) (*svcapitypes.UserGroup, error) { + elem := resp.UserGroups[0] + rm.customSetOutput( + stringSliceToPointers(elem.UserIds), + elem.Engine, + elem.Status, + ko) + return ko, nil +} + +func (rm *resourceManager) CustomCreateUserGroupSetOutput( + ctx context.Context, + r *resource, + resp *svcsdk.CreateUserGroupOutput, + ko *svcapitypes.UserGroup, +) (*svcapitypes.UserGroup, error) { + rm.customSetOutput( + stringSliceToPointers(resp.UserIds), + resp.Engine, + resp.Status, + ko) + return ko, nil +} + +func (rm *resourceManager) customSetOutput( + userIds []*string, + engine *string, + status *string, + ko *svcapitypes.UserGroup, +) { + if userIds != nil { + ko.Spec.UserIDs = userIds + } + + if engine != nil { + ko.Spec.Engine = engine + } + + syncConditionStatus := corev1.ConditionUnknown + if status != nil { + if *status == "active" { + syncConditionStatus = corev1.ConditionTrue + } else { + syncConditionStatus = corev1.ConditionFalse + } + } + var resourceSyncedCondition *ackv1alpha1.Condition = nil + for _, condition := range ko.Status.Conditions { + if condition.Type == ackv1alpha1.ConditionTypeResourceSynced { + resourceSyncedCondition = condition + break + } + } + if resourceSyncedCondition == nil { + resourceSyncedCondition = &ackv1alpha1.Condition{ + Type: ackv1alpha1.ConditionTypeResourceSynced, + Status: syncConditionStatus, + } + ko.Status.Conditions = append(ko.Status.Conditions, resourceSyncedCondition) + } else { + resourceSyncedCondition.Status = syncConditionStatus + } +} + +func stringSliceToPointers(slice []string) []*string { + ptrs := make([]*string, len(slice)) + for i, s := range slice { + s := s // Create new variable to avoid referencing loop variable + ptrs[i] = &s + } + return ptrs +} diff --git a/pkg/resource/user_group/manager.go b/pkg/resource/user_group/manager.go index 320d777d..1b3b43fa 100644 --- a/pkg/resource/user_group/manager.go +++ b/pkg/resource/user_group/manager.go @@ -32,9 +32,8 @@ import ( acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" ackutil "github.com/aws-controllers-k8s/runtime/pkg/util" - "github.com/aws/aws-sdk-go/aws/session" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" - svcsdkapi "github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" @@ -59,6 +58,9 @@ type resourceManager struct { // cfg is a copy of the ackcfg.Config object passed on start of the service // controller cfg ackcfg.Config + // clientcfg is a copy of the client configuration passed on start of the + // service controller + clientcfg aws.Config // log refers to the logr.Logger object handling logging for the service // controller log logr.Logger @@ -73,12 +75,9 @@ type resourceManager struct { awsAccountID ackv1alpha1.AWSAccountID // The AWS Region that this resource manager targets awsRegion ackv1alpha1.AWSRegion - // sess is the AWS SDK Session object used to communicate with the backend - // AWS service API - sess *session.Session - // sdk is a pointer to the AWS service API interface exposed by the - // aws-sdk-go/services/{alias}/{alias}iface package. 
- sdkapi svcsdkapi.ElastiCacheAPI + // sdk is a pointer to the AWS service API client exposed by the + // aws-sdk-go-v2/services/{alias} package. + sdkapi *svcsdk.Client } // concreteResource returns a pointer to a resource from the supplied @@ -103,6 +102,7 @@ func (rm *resourceManager) ReadOne( panic("resource manager's ReadOne() method received resource with nil CR object") } observed, err := rm.sdkFind(ctx, r) + mirrorAWSTags(r, observed) if err != nil { if observed != nil { return rm.onError(observed, err) @@ -291,32 +291,76 @@ func (rm *resourceManager) EnsureTags( defaultTags := ackrt.GetDefaultTags(&rm.cfg, r.ko, md) var existingTags []*svcapitypes.Tag existingTags = r.ko.Spec.Tags - resourceTags := ToACKTags(existingTags) + resourceTags, keyOrder := convertToOrderedACKTags(existingTags) tags := acktags.Merge(resourceTags, defaultTags) - r.ko.Spec.Tags = FromACKTags(tags) + r.ko.Spec.Tags = fromACKTags(tags, keyOrder) return nil } +// FilterAWSTags ignores tags that have keys that start with "aws:" +// is needed to ensure the controller does not attempt to remove +// tags set by AWS. This function needs to be called after each Read +// operation. +// Eg. resources created with cloudformation have tags that cannot be +// removed by an ACK controller +func (rm *resourceManager) FilterSystemTags(res acktypes.AWSResource) { + r := rm.concreteResource(res) + if r == nil || r.ko == nil { + return + } + var existingTags []*svcapitypes.Tag + existingTags = r.ko.Spec.Tags + resourceTags, tagKeyOrder := convertToOrderedACKTags(existingTags) + ignoreSystemTags(resourceTags) + r.ko.Spec.Tags = fromACKTags(resourceTags, tagKeyOrder) +} + +// mirrorAWSTags ensures that AWS tags are included in the desired resource +// if they are present in the latest resource. This will ensure that the +// aws tags are not present in a diff. The logic of the controller will +// ensure these tags aren't patched to the resource in the cluster, and +// will only be present to make sure we don't try to remove these tags. +// +// Although there are a lot of similarities between this function and +// EnsureTags, they are very much different. +// While EnsureTags tries to make sure the resource contains the controller +// tags, mirrowAWSTags tries to make sure tags injected by AWS are mirrored +// from the latest resoruce to the desired resource. 
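
Editorial note on the tag hooks in this hunk: FilterSystemTags runs after reads so that keys owned by AWS ("aws:" prefix) or by the ACK runtime are never written back into Spec.Tags; it delegates the key check to ignoreSystemTags. A stand-alone, simplified sketch of that predicate, using a plain map in place of acktags.Tags (the system-tag keys match the ACKSystemTags values declared in tags.go):

    package main

    import (
        "fmt"
        "slices"
        "strings"
    )

    var ackSystemTags = []string{
        "services.k8s.aws/namespace",
        "services.k8s.aws/controller-version",
    }

    // dropSystemTags mirrors ignoreSystemTags: AWS-managed and ACK-managed keys
    // are removed in place so they are never patched onto the resource spec.
    func dropSystemTags(tags map[string]string) {
        for k := range tags {
            if strings.HasPrefix(k, "aws:") || slices.Contains(ackSystemTags, k) {
                delete(tags, k)
            }
        }
    }

    func main() {
        tags := map[string]string{
            "aws:cloudformation:stack-name":        "my-stack",
            "services.k8s.aws/controller-version":  "v1.0.0",
            "env":                                   "prod",
        }
        dropSystemTags(tags)
        fmt.Println(tags) // map[env:prod]
    }
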
+func mirrorAWSTags(a *resource, b *resource) { + if a == nil || a.ko == nil || b == nil || b.ko == nil { + return + } + var existingLatestTags []*svcapitypes.Tag + var existingDesiredTags []*svcapitypes.Tag + existingDesiredTags = a.ko.Spec.Tags + existingLatestTags = b.ko.Spec.Tags + desiredTags, desiredTagKeyOrder := convertToOrderedACKTags(existingDesiredTags) + latestTags, _ := convertToOrderedACKTags(existingLatestTags) + syncAWSTags(desiredTags, latestTags) + a.ko.Spec.Tags = fromACKTags(desiredTags, desiredTagKeyOrder) +} + // newResourceManager returns a new struct implementing // acktypes.AWSResourceManager +// This is for AWS-SDK-GO-V2 - Created newResourceManager With AWS sdk-Go-ClientV2 func newResourceManager( cfg ackcfg.Config, + clientcfg aws.Config, log logr.Logger, metrics *ackmetrics.Metrics, rr acktypes.Reconciler, - sess *session.Session, id ackv1alpha1.AWSAccountID, region ackv1alpha1.AWSRegion, ) (*resourceManager, error) { return &resourceManager{ cfg: cfg, + clientcfg: clientcfg, log: log, metrics: metrics, rr: rr, awsAccountID: id, awsRegion: region, - sess: sess, - sdkapi: svcsdk.New(sess), + sdkapi: svcsdk.NewFromConfig(clientcfg), }, nil } diff --git a/pkg/resource/user_group/manager_factory.go b/pkg/resource/user_group/manager_factory.go index db798305..3bf74221 100644 --- a/pkg/resource/user_group/manager_factory.go +++ b/pkg/resource/user_group/manager_factory.go @@ -23,7 +23,7 @@ import ( ackcfg "github.com/aws-controllers-k8s/runtime/pkg/config" ackmetrics "github.com/aws-controllers-k8s/runtime/pkg/metrics" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" - "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/go-logr/logr" svcresource "github.com/aws-controllers-k8s/elasticache-controller/pkg/resource" @@ -47,14 +47,18 @@ func (f *resourceManagerFactory) ResourceDescriptor() acktypes.AWSResourceDescri // supplied AWS account func (f *resourceManagerFactory) ManagerFor( cfg ackcfg.Config, + clientcfg aws.Config, log logr.Logger, metrics *ackmetrics.Metrics, rr acktypes.Reconciler, - sess *session.Session, id ackv1alpha1.AWSAccountID, region ackv1alpha1.AWSRegion, + roleARN ackv1alpha1.AWSResourceName, ) (acktypes.AWSResourceManager, error) { - rmId := fmt.Sprintf("%s/%s", id, region) + // We use the account ID, region, and role ARN to uniquely identify a + // resource manager. This helps us to avoid creating multiple resource + // managers for the same account/region/roleARN combination. + rmId := fmt.Sprintf("%s/%s/%s", id, region, roleARN) f.RLock() rm, found := f.rmCache[rmId] f.RUnlock() @@ -66,7 +70,7 @@ func (f *resourceManagerFactory) ManagerFor( f.Lock() defer f.Unlock() - rm, err := newResourceManager(cfg, log, metrics, rr, sess, id, region) + rm, err := newResourceManager(cfg, clientcfg, log, metrics, rr, id, region) if err != nil { return nil, err } diff --git a/pkg/resource/user_group/references.go b/pkg/resource/user_group/references.go index 549cfb22..8557d535 100644 --- a/pkg/resource/user_group/references.go +++ b/pkg/resource/user_group/references.go @@ -17,6 +17,7 @@ package user_group import ( "context" + "sigs.k8s.io/controller-runtime/pkg/client" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" @@ -24,19 +25,29 @@ import ( svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" ) +// ClearResolvedReferences removes any reference values that were made +// concrete in the spec. 
It returns a copy of the input AWSResource which +// contains the original *Ref values, but none of their respective concrete +// values. +func (rm *resourceManager) ClearResolvedReferences(res acktypes.AWSResource) acktypes.AWSResource { + ko := rm.concreteResource(res).ko.DeepCopy() + + return &resource{ko} +} + // ResolveReferences finds if there are any Reference field(s) present -// inside AWSResource passed in the parameter and attempts to resolve -// those reference field(s) into target field(s). -// It returns an AWSResource with resolved reference(s), and an error if the -// passed AWSResource's reference field(s) cannot be resolved. -// This method also adds/updates the ConditionTypeReferencesResolved for the -// AWSResource. +// inside AWSResource passed in the parameter and attempts to resolve those +// reference field(s) into their respective target field(s). It returns a +// copy of the input AWSResource with resolved reference(s), a boolean which +// is set to true if the resource contains any references (regardless of if +// they are resolved successfully) and an error if the passed AWSResource's +// reference field(s) could not be resolved. func (rm *resourceManager) ResolveReferences( ctx context.Context, apiReader client.Reader, res acktypes.AWSResource, -) (acktypes.AWSResource, error) { - return res, nil +) (acktypes.AWSResource, bool, error) { + return res, false, nil } // validateReferenceFields validates the reference field and corresponding @@ -44,9 +55,3 @@ func (rm *resourceManager) ResolveReferences( func validateReferenceFields(ko *svcapitypes.UserGroup) error { return nil } - -// hasNonNilReferences returns true if resource contains a reference to another -// resource -func hasNonNilReferences(ko *svcapitypes.UserGroup) bool { - return false -} diff --git a/pkg/resource/user_group/resource.go b/pkg/resource/user_group/resource.go index 75377340..1727c8cb 100644 --- a/pkg/resource/user_group/resource.go +++ b/pkg/resource/user_group/resource.go @@ -16,6 +16,8 @@ package user_group import ( + "fmt" + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" ackerrors "github.com/aws-controllers-k8s/runtime/pkg/errors" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" @@ -45,7 +47,7 @@ func (r *resource) Identifiers() acktypes.AWSResourceIdentifiers { } // IsBeingDeleted returns true if the Kubernetes resource has a non-zero -// deletion timestemp +// deletion timestamp func (r *resource) IsBeingDeleted() bool { return !r.ko.DeletionTimestamp.IsZero() } @@ -93,6 +95,17 @@ func (r *resource) SetIdentifiers(identifier *ackv1alpha1.AWSIdentifiers) error return nil } +// PopulateResourceFromAnnotation populates the fields passed from adoption annotation +func (r *resource) PopulateResourceFromAnnotation(fields map[string]string) error { + f2, ok := fields["userGroupID"] + if !ok { + return ackerrors.NewTerminalError(fmt.Errorf("required field missing: userGroupID")) + } + r.ko.Spec.UserGroupID = &f2 + + return nil +} + // DeepCopy will return a copy of the resource func (r *resource) DeepCopy() acktypes.AWSResource { koCopy := r.ko.DeepCopy() diff --git a/pkg/resource/user_group/sdk.go b/pkg/resource/user_group/sdk.go index a9c736d8..90dce7c0 100644 --- a/pkg/resource/user_group/sdk.go +++ b/pkg/resource/user_group/sdk.go @@ -28,8 +28,10 @@ import ( ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" - 
"github.com/aws/aws-sdk-go/aws" - svcsdk "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/aws/aws-sdk-go-v2/aws" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + svcsdktypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" + smithy "github.com/aws/smithy-go" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -40,8 +42,7 @@ import ( var ( _ = &metav1.Time{} _ = strings.ToLower("") - _ = &aws.JSONValue{} - _ = &svcsdk.ElastiCache{} + _ = &svcsdk.Client{} _ = &svcapitypes.UserGroup{} _ = ackv1alpha1.AWSAccountID("") _ = &ackerr.NotFound @@ -49,6 +50,7 @@ var ( _ = &reflect.Value{} _ = fmt.Sprintf("") _ = &ackrequeue.NoRequeue{} + _ = &aws.Config{} ) // sdkFind returns SDK-specific information about a supplied resource @@ -73,10 +75,11 @@ func (rm *resourceManager) sdkFind( return nil, err } var resp *svcsdk.DescribeUserGroupsOutput - resp, err = rm.sdkapi.DescribeUserGroupsWithContext(ctx, input) + resp, err = rm.sdkapi.DescribeUserGroups(ctx, input) rm.metrics.RecordAPICall("READ_MANY", "DescribeUserGroups", err) if err != nil { - if awsErr, ok := ackerr.AWSError(err); ok && awsErr.Code() == "UserGroupNotFound" { + var awsErr smithy.APIError + if errors.As(err, &awsErr) && awsErr.ErrorCode() == "UserGroupNotFound" { return nil, ackerr.NotFound } return nil, err @@ -108,35 +111,17 @@ func (rm *resourceManager) sdkFind( if elem.PendingChanges != nil { f3 := &svcapitypes.UserGroupPendingChanges{} if elem.PendingChanges.UserIdsToAdd != nil { - f3f0 := []*string{} - for _, f3f0iter := range elem.PendingChanges.UserIdsToAdd { - var f3f0elem string - f3f0elem = *f3f0iter - f3f0 = append(f3f0, &f3f0elem) - } - f3.UserIDsToAdd = f3f0 + f3.UserIDsToAdd = aws.StringSlice(elem.PendingChanges.UserIdsToAdd) } if elem.PendingChanges.UserIdsToRemove != nil { - f3f1 := []*string{} - for _, f3f1iter := range elem.PendingChanges.UserIdsToRemove { - var f3f1elem string - f3f1elem = *f3f1iter - f3f1 = append(f3f1, &f3f1elem) - } - f3.UserIDsToRemove = f3f1 + f3.UserIDsToRemove = aws.StringSlice(elem.PendingChanges.UserIdsToRemove) } ko.Status.PendingChanges = f3 } else { ko.Status.PendingChanges = nil } if elem.ReplicationGroups != nil { - f4 := []*string{} - for _, f4iter := range elem.ReplicationGroups { - var f4elem string - f4elem = *f4iter - f4 = append(f4, &f4elem) - } - ko.Status.ReplicationGroups = f4 + ko.Status.ReplicationGroups = aws.StringSlice(elem.ReplicationGroups) } else { ko.Status.ReplicationGroups = nil } @@ -151,13 +136,7 @@ func (rm *resourceManager) sdkFind( ko.Spec.UserGroupID = nil } if elem.UserIds != nil { - f7 := []*string{} - for _, f7iter := range elem.UserIds { - var f7elem string - f7elem = *f7iter - f7 = append(f7, &f7elem) - } - ko.Spec.UserIDs = f7 + ko.Spec.UserIDs = aws.StringSlice(elem.UserIds) } else { ko.Spec.UserIDs = nil } @@ -195,7 +174,7 @@ func (rm *resourceManager) newListRequestPayload( res := &svcsdk.DescribeUserGroupsInput{} if r.ko.Spec.UserGroupID != nil { - res.SetUserGroupId(*r.ko.Spec.UserGroupID) + res.UserGroupId = r.ko.Spec.UserGroupID } return res, nil @@ -220,7 +199,7 @@ func (rm *resourceManager) sdkCreate( var resp *svcsdk.CreateUserGroupOutput _ = resp - resp, err = rm.sdkapi.CreateUserGroupWithContext(ctx, input) + resp, err = rm.sdkapi.CreateUserGroup(ctx, input) rm.metrics.RecordAPICall("CREATE", "CreateUserGroup", err) if err != nil { return nil, err @@ -249,35 +228,17 @@ func (rm *resourceManager) sdkCreate( if resp.PendingChanges != nil { f3 := &svcapitypes.UserGroupPendingChanges{} if 
resp.PendingChanges.UserIdsToAdd != nil { - f3f0 := []*string{} - for _, f3f0iter := range resp.PendingChanges.UserIdsToAdd { - var f3f0elem string - f3f0elem = *f3f0iter - f3f0 = append(f3f0, &f3f0elem) - } - f3.UserIDsToAdd = f3f0 + f3.UserIDsToAdd = aws.StringSlice(resp.PendingChanges.UserIdsToAdd) } if resp.PendingChanges.UserIdsToRemove != nil { - f3f1 := []*string{} - for _, f3f1iter := range resp.PendingChanges.UserIdsToRemove { - var f3f1elem string - f3f1elem = *f3f1iter - f3f1 = append(f3f1, &f3f1elem) - } - f3.UserIDsToRemove = f3f1 + f3.UserIDsToRemove = aws.StringSlice(resp.PendingChanges.UserIdsToRemove) } ko.Status.PendingChanges = f3 } else { ko.Status.PendingChanges = nil } if resp.ReplicationGroups != nil { - f4 := []*string{} - for _, f4iter := range resp.ReplicationGroups { - var f4elem string - f4elem = *f4iter - f4 = append(f4, &f4elem) - } - ko.Status.ReplicationGroups = f4 + ko.Status.ReplicationGroups = aws.StringSlice(resp.ReplicationGroups) } else { ko.Status.ReplicationGroups = nil } @@ -292,13 +253,7 @@ func (rm *resourceManager) sdkCreate( ko.Spec.UserGroupID = nil } if resp.UserIds != nil { - f7 := []*string{} - for _, f7iter := range resp.UserIds { - var f7elem string - f7elem = *f7iter - f7 = append(f7, &f7elem) - } - ko.Spec.UserIDs = f7 + ko.Spec.UserIDs = aws.StringSlice(resp.UserIds) } else { ko.Spec.UserIDs = nil } @@ -321,33 +276,27 @@ func (rm *resourceManager) newCreateRequestPayload( res := &svcsdk.CreateUserGroupInput{} if r.ko.Spec.Engine != nil { - res.SetEngine(*r.ko.Spec.Engine) + res.Engine = r.ko.Spec.Engine } if r.ko.Spec.Tags != nil { - f1 := []*svcsdk.Tag{} + f1 := []svcsdktypes.Tag{} for _, f1iter := range r.ko.Spec.Tags { - f1elem := &svcsdk.Tag{} + f1elem := &svcsdktypes.Tag{} if f1iter.Key != nil { - f1elem.SetKey(*f1iter.Key) + f1elem.Key = f1iter.Key } if f1iter.Value != nil { - f1elem.SetValue(*f1iter.Value) + f1elem.Value = f1iter.Value } - f1 = append(f1, f1elem) + f1 = append(f1, *f1elem) } - res.SetTags(f1) + res.Tags = f1 } if r.ko.Spec.UserGroupID != nil { - res.SetUserGroupId(*r.ko.Spec.UserGroupID) + res.UserGroupId = r.ko.Spec.UserGroupID } if r.ko.Spec.UserIDs != nil { - f3 := []*string{} - for _, f3iter := range r.ko.Spec.UserIDs { - var f3elem string - f3elem = *f3iter - f3 = append(f3, &f3elem) - } - res.SetUserIds(f3) + res.UserIds = aws.ToStringSlice(r.ko.Spec.UserIDs) } return res, nil @@ -380,7 +329,7 @@ func (rm *resourceManager) sdkDelete( } var resp *svcsdk.DeleteUserGroupOutput _ = resp - resp, err = rm.sdkapi.DeleteUserGroupWithContext(ctx, input) + resp, err = rm.sdkapi.DeleteUserGroup(ctx, input) rm.metrics.RecordAPICall("DELETE", "DeleteUserGroup", err) return nil, err } @@ -393,7 +342,7 @@ func (rm *resourceManager) newDeleteRequestPayload( res := &svcsdk.DeleteUserGroupInput{} if r.ko.Spec.UserGroupID != nil { - res.SetUserGroupId(*r.ko.Spec.UserGroupID) + res.UserGroupId = r.ko.Spec.UserGroupID } return res, nil @@ -501,11 +450,12 @@ func (rm *resourceManager) terminalAWSError(err error) bool { if err == nil { return false } - awsErr, ok := ackerr.AWSError(err) - if !ok { + + var terminalErr smithy.APIError + if !errors.As(err, &terminalErr) { return false } - switch awsErr.Code() { + switch terminalErr.ErrorCode() { case "DuplicateUserNameFault", "UserGroupAlreadyExistsFault", "InvalidParameterCombination", diff --git a/pkg/resource/user_group/tags.go b/pkg/resource/user_group/tags.go index d6aa58e4..6ae21343 100644 --- a/pkg/resource/user_group/tags.go +++ b/pkg/resource/user_group/tags.go @@ -16,48 
+16,104 @@ package user_group import ( + "slices" + "strings" + acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" ) var ( - _ = svcapitypes.UserGroup{} - _ = acktags.NewTags() + _ = svcapitypes.UserGroup{} + _ = acktags.NewTags() + ACKSystemTags = []string{"services.k8s.aws/namespace", "services.k8s.aws/controller-version"} ) -// ToACKTags converts the tags parameter into 'acktags.Tags' shape. +// convertToOrderedACKTags converts the tags parameter into 'acktags.Tags' shape. // This method helps in creating the hub(acktags.Tags) for merging -// default controller tags with existing resource tags. -func ToACKTags(tags []*svcapitypes.Tag) acktags.Tags { +// default controller tags with existing resource tags. It also returns a slice +// of keys maintaining the original key Order when the tags are a list +func convertToOrderedACKTags(tags []*svcapitypes.Tag) (acktags.Tags, []string) { result := acktags.NewTags() - if tags == nil || len(tags) == 0 { - return result - } + keyOrder := []string{} + if len(tags) == 0 { + return result, keyOrder + } for _, t := range tags { if t.Key != nil { - if t.Value == nil { - result[*t.Key] = "" - } else { + keyOrder = append(keyOrder, *t.Key) + if t.Value != nil { result[*t.Key] = *t.Value + } else { + result[*t.Key] = "" } } } - return result + return result, keyOrder } -// FromACKTags converts the tags parameter into []*svcapitypes.Tag shape. +// fromACKTags converts the tags parameter into []*svcapitypes.Tag shape. // This method helps in setting the tags back inside AWSResource after merging -// default controller tags with existing resource tags. -func FromACKTags(tags acktags.Tags) []*svcapitypes.Tag { +// default controller tags with existing resource tags. When a list, +// it maintains the order from original +func fromACKTags(tags acktags.Tags, keyOrder []string) []*svcapitypes.Tag { result := []*svcapitypes.Tag{} + + for _, k := range keyOrder { + v, ok := tags[k] + if ok { + tag := svcapitypes.Tag{Key: &k, Value: &v} + result = append(result, &tag) + delete(tags, k) + } + } for k, v := range tags { - kCopy := k - vCopy := v - tag := svcapitypes.Tag{Key: &kCopy, Value: &vCopy} + tag := svcapitypes.Tag{Key: &k, Value: &v} result = append(result, &tag) } + return result } + +// ignoreSystemTags ignores tags that have keys that start with "aws:" +// and ACKSystemTags, to avoid patching them to the resourceSpec. +// Eg. resources created with cloudformation have tags that cannot be +// removed by an ACK controller +func ignoreSystemTags(tags acktags.Tags) { + for k := range tags { + if strings.HasPrefix(k, "aws:") || + slices.Contains(ACKSystemTags, k) { + delete(tags, k) + } + } +} + +// syncAWSTags ensures AWS-managed tags (prefixed with "aws:") from the latest resource state +// are preserved in the desired state. This prevents the controller from attempting to +// modify AWS-managed tags, which would result in an error. +// +// AWS-managed tags are automatically added by AWS services (e.g., CloudFormation, Service Catalog) +// and cannot be modified or deleted through normal tag operations. 
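
Editorial note on this tags.go (mirrored from the user package): convertToOrderedACKTags and fromACKTags carry a parallel keyOrder slice so that merging controller default tags does not reshuffle the user's tag list. A simplified, dependency-free sketch of that round trip, with a local map and string pairs standing in for acktags.Tags and svcapitypes.Tag:

    package main

    import "fmt"

    // toOrdered mirrors convertToOrderedACKTags: it returns both the map form
    // used for merging and the original key order.
    func toOrdered(tags [][2]string) (map[string]string, []string) {
        m, order := map[string]string{}, []string{}
        for _, t := range tags {
            order = append(order, t[0])
            m[t[0]] = t[1]
        }
        return m, order
    }

    // fromOrdered mirrors fromACKTags: known keys come back in their original
    // position, newly merged keys are appended at the end.
    func fromOrdered(m map[string]string, order []string) [][2]string {
        out := [][2]string{}
        for _, k := range order {
            if v, ok := m[k]; ok {
                out = append(out, [2]string{k, v})
                delete(m, k)
            }
        }
        for k, v := range m {
            out = append(out, [2]string{k, v})
        }
        return out
    }

    func main() {
        spec := [][2]string{{"team", "storage"}, {"env", "prod"}}
        m, order := toOrdered(spec)
        // Simulate acktags.Merge adding a controller default tag.
        m["services.k8s.aws/controller-version"] = "v1.0.0"
        fmt.Println(fromOrdered(m, order))
        // [[team storage] [env prod] [services.k8s.aws/controller-version v1.0.0]]
    }
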
Common examples include: +// - aws:cloudformation:stack-name +// - aws:servicecatalog:productArn +// +// Parameters: +// - a: The target Tags map to be updated (typically desired state) +// - b: The source Tags map containing AWS-managed tags (typically latest state) +// +// Example: +// +// latest := Tags{"aws:cloudformation:stack-name": "my-stack", "environment": "prod"} +// desired := Tags{"environment": "dev"} +// SyncAWSTags(desired, latest) +// desired now contains {"aws:cloudformation:stack-name": "my-stack", "environment": "dev"} +func syncAWSTags(a acktags.Tags, b acktags.Tags) { + for k := range b { + if strings.HasPrefix(k, "aws:") { + a[k] = b[k] + } + } +} diff --git a/pkg/testutil/test_suite_config.go b/pkg/testutil/test_suite_config.go deleted file mode 100644 index 01a1c366..00000000 --- a/pkg/testutil/test_suite_config.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package testutil - -// TestSuite represents instructions to run unit tests using test fixtures and mock service apis -type TestSuite struct { - Tests []TestConfig `json:"tests"` -} - -// TestConfig represents declarative unit test -type TestConfig struct { - Name string `json:"name"` - Description string `json:"description"` - Scenarios []TestScenario `json:"scenarios"` -} - -// TestScenario represents declarative test scenario details -type TestScenario struct { - Name string `json:"name"` - Description string `json:"description"` - // Fixture lets you specify test scenario given input fixtures - Fixture Fixture `json:"given"` - // UnitUnderTest lets you specify the unit to test - // For example resource manager API: ReadOne, Create, Update, Delete - UnitUnderTest string `json:"invoke"` - // Expect lets you specify test scenario expected outcome fixtures - Expect Expect `json:"expect"` -} - -// Fixture represents test scenario fixture to load from file paths -type Fixture struct { - // DesiredState lets you specify fixture path to load the desired state fixture - DesiredState string `json:"desired_state"` - // LatestState lets you specify fixture path to load the current state fixture - LatestState string `json:"latest_state"` - // ServiceAPIs lets you specify fixture path to mock service sdk api response - ServiceAPIs []ServiceAPI `json:"svc_api"` -} - -// ServiceAPI represents details about the the service sdk api and fixture path to mock its response -type ServiceAPI struct { - Operation string `json:"operation"` - Output string `json:"output_fixture"` - ServiceAPIError *ServiceAPIError `json:"error,omitempty"` -} - -// ServiceAPIError contains the specification for the error of the mock API response -type ServiceAPIError struct { - // Code here is usually the type of fault/error, not the HTTP status code - Code string `json:"code"` - Message string `json:"message"` -} - -// Expect represents test scenario expected outcome fixture to load from file path -type Expect struct { - LatestState string `json:"latest_state"` - // Error is a string matching 
the message of the expected error returned from the ResourceManager operation. - // Possible errors can be found in runtime/pkg/errors/error.go - Error string `json:"error"` -} diff --git a/pkg/testutil/test_suite_runner.go b/pkg/testutil/test_suite_runner.go deleted file mode 100644 index 6c120636..00000000 --- a/pkg/testutil/test_suite_runner.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package testutil - -import ( - "context" - "errors" - "fmt" - mocksvcsdkapi "github.com/aws-controllers-k8s/elasticache-controller/mocks/aws-sdk-go/elasticache" - acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "path/filepath" - "strings" - "testing" -) - -// TestSuiteRunner runs given test suite config with the help of delegate supplied to it -type TestSuiteRunner struct { - TestSuite *TestSuite - Delegate TestRunnerDelegate -} - -// fixtureContext is runtime context for test scenario given fixture. -type fixtureContext struct { - desired acktypes.AWSResource - latest acktypes.AWSResource - mocksdkapi *mocksvcsdkapi.ElastiCacheAPI - resourceManager acktypes.AWSResourceManager -} - -// TODO: remove if no longer used -// expectContext is runtime context for test scenario expectation fixture. -type expectContext struct { - latest acktypes.AWSResource - err error -} - -// TestRunnerDelegate provides interface for custom resource tests to implement. -// TestSuiteRunner depends on it to run tests for custom resource. -type TestRunnerDelegate interface { - ResourceDescriptor() acktypes.AWSResourceDescriptor - Equal(desired acktypes.AWSResource, latest acktypes.AWSResource) bool // remove it when ResourceDescriptor.Delta() is available - ResourceManager(*mocksvcsdkapi.ElastiCacheAPI) acktypes.AWSResourceManager - EmptyServiceAPIOutput(apiName string) (interface{}, error) - GoTestRunner() *testing.T -} - -// RunTests runs the tests from the test suite -func (runner *TestSuiteRunner) RunTests() { - if runner.TestSuite == nil || runner.Delegate == nil { - panic(errors.New("failed to run test suite")) - } - - for _, test := range runner.TestSuite.Tests { - fmt.Printf("Starting test: %s\n", test.Name) - for _, scenario := range test.Scenarios { - fmt.Printf("Running test scenario: %s\n", scenario.Name) - fixtureCxt := runner.setupFixtureContext(&scenario.Fixture) - runner.runTestScenario(scenario.Name, fixtureCxt, scenario.UnitUnderTest, &scenario.Expect) - } - fmt.Printf("Test: %s completed.\n", test.Name) - } -} - -// runTestScenario runs given test scenario which is expressed as: given fixture context, unit to test, expected fixture context. 
-func (runner *TestSuiteRunner) runTestScenario(scenarioName string, fixtureCxt *fixtureContext, unitUnderTest string, expectation *Expect) { - t := runner.Delegate.GoTestRunner() - t.Run(scenarioName, func(t *testing.T) { - rm := fixtureCxt.resourceManager - assert := assert.New(t) - - var actual acktypes.AWSResource = nil - var err error = nil - switch unitUnderTest { - case "ReadOne": - actual, err = rm.ReadOne(context.Background(), fixtureCxt.desired) - case "Create": - actual, err = rm.Create(context.Background(), fixtureCxt.desired) - case "Update": - delta := runner.Delegate.ResourceDescriptor().Delta(fixtureCxt.desired, fixtureCxt.latest) - actual, err = rm.Update(context.Background(), fixtureCxt.desired, fixtureCxt.latest, delta) - case "Delete": - actual, err = rm.Delete(context.Background(), fixtureCxt.desired) - default: - panic(errors.New(fmt.Sprintf("unit under test: %s not supported", unitUnderTest))) - } - runner.assertExpectations(assert, expectation, actual, err) - }) -} - -/* - assertExpectations validates the actual outcome against the expected outcome. - -There are two components to the expected outcome, corresponding to the return values of the resource manager's CRUD operation: - 1. the actual return value of type AWSResource ("expect.latest_state" in test_suite.yaml) - 2. the error ("expect.error" in test_suite.yaml) - -With each of these components, there are three possibilities in test_suite.yaml, which are interpreted as follows: - 1. the key does not exist, or was provided with no value: no explicit expectations, don't assert anything - 2. the key was provided with value "nil": explicit expectation; assert that the error or return value is nil - 3. the key was provided with value other than "nil": explicit expectation; assert that the value matches the - expected value - -However, if neither expect.latest_state nor error are provided, assertExpectations will fail the test case. -*/ -func (runner *TestSuiteRunner) assertExpectations(assert *assert.Assertions, expectation *Expect, actual acktypes.AWSResource, err error) { - if expectation.LatestState == "" && expectation.Error == "" { - fmt.Println("Invalid test case: no expectation given for either latest_state or error") - assert.True(false) - return - } - - // expectation exists for at least one of LatestState and Error; assert results independently - if expectation.LatestState == "nil" { - assert.Nil(actual) - } else if expectation.LatestState != "" { - expectedLatest := runner.loadAWSResource(expectation.LatestState) - assert.NotNil(actual) - - delta := runner.Delegate.ResourceDescriptor().Delta(expectedLatest, actual) - assert.Equal(0, len(delta.Differences)) - if len(delta.Differences) > 0 { - fmt.Println("Unexpected differences:") - for _, difference := range delta.Differences { - fmt.Printf("Path: %v, expected: %v, actual: %v\n", difference.Path, difference.A, difference.B) - } - } - - // Delta only contains `Spec` differences. Thus, we need Delegate.Equal to compare `Status`. - assert.True(runner.Delegate.Equal(expectedLatest, actual), "Expected status, spec details did not match with actual.") - } - - if expectation.Error == "nil" { - assert.Nil(err) - } else if expectation.Error != "" { - expectedError := errors.New(expectation.Error) - assert.NotNil(err) - - assert.Equal(expectedError.Error(), err.Error()) - } -} - -// setupFixtureContext provides runtime context for test scenario given fixture. 
-func (runner *TestSuiteRunner) setupFixtureContext(fixture *Fixture) *fixtureContext { - if fixture == nil { - return nil - } - var cxt = fixtureContext{} - if fixture.DesiredState != "" { - cxt.desired = runner.loadAWSResource(fixture.DesiredState) - } - if fixture.LatestState != "" { - cxt.latest = runner.loadAWSResource(fixture.LatestState) - } - mocksdkapi := &mocksvcsdkapi.ElastiCacheAPI{} - for _, serviceApi := range fixture.ServiceAPIs { - if serviceApi.Operation != "" { - - if serviceApi.ServiceAPIError != nil { - mockError := CreateAWSError(*serviceApi.ServiceAPIError) - mocksdkapi.On(serviceApi.Operation, mock.Anything, mock.Anything).Return(nil, mockError) - } else if serviceApi.Operation != "" && serviceApi.Output != "" { - var outputObj, err = runner.Delegate.EmptyServiceAPIOutput(serviceApi.Operation) - apiOutputFixturePath := append([]string{"testdata"}, strings.Split(serviceApi.Output, "/")...) - LoadFromFixture(filepath.Join(apiOutputFixturePath...), outputObj) - mocksdkapi.On(serviceApi.Operation, mock.Anything, mock.Anything).Return(outputObj, nil) - if err != nil { - panic(err) - } - } - } - } - cxt.mocksdkapi = mocksdkapi - cxt.resourceManager = runner.Delegate.ResourceManager(mocksdkapi) - return &cxt -} - -// loadAWSResource loads AWSResource from the supplied fixture file. -func (runner *TestSuiteRunner) loadAWSResource(resourceFixtureFilePath string) acktypes.AWSResource { - if resourceFixtureFilePath == "" { - panic(errors.New(fmt.Sprintf("resourceFixtureFilePath not specified"))) - } - var rd = runner.Delegate.ResourceDescriptor() - ro := rd.EmptyRuntimeObject() - path := append([]string{"testdata"}, strings.Split(resourceFixtureFilePath, "/")...) - LoadFromFixture(filepath.Join(path...), ro) - return rd.ResourceFromRuntimeObject(ro) -} diff --git a/pkg/testutil/util.go b/pkg/testutil/util.go deleted file mode 100644 index 25006859..00000000 --- a/pkg/testutil/util.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package testutil - -import ( - "encoding/json" - "errors" - "fmt" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/ghodss/yaml" - "io/ioutil" - "path" - "strings" -) - -// LoadFromFixture fills an empty pointer variable with the -// data from a fixture JSON/YAML file. -func LoadFromFixture( - fixturePath string, - output interface{}, // output should be an addressable type (i.e. a pointer) -) { - contents, err := ioutil.ReadFile(fixturePath) - if err != nil { - panic(err) - } - if strings.HasSuffix(fixturePath, ".json") { - err = json.Unmarshal(contents, output) - } else if strings.HasSuffix(fixturePath, ".yaml") || - strings.HasSuffix(fixturePath, ".yml") { - err = yaml.Unmarshal(contents, output) - } else { - panic(errors.New( - fmt.Sprintf("fixture file format not supported: %s", path.Base(fixturePath)))) - } - if err != nil { - panic(err) - } -} - -// CreateAWSError is used for mocking the types of errors received from aws-sdk-go -// so that the expected code path executes. 
Support for specifying the HTTP status code and request ID -// can be added in the future if needed -func CreateAWSError(awsError ServiceAPIError) awserr.RequestFailure { - error := awserr.New(awsError.Code, awsError.Message, nil) - return awserr.NewRequestFailure(error, 0, "") -} diff --git a/pkg/testutil/util_test.go b/pkg/testutil/util_test.go deleted file mode 100644 index 2fedba01..00000000 --- a/pkg/testutil/util_test.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package testutil - -import ( - ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" - "github.com/stretchr/testify/assert" - "testing" -) - -func TestCreateAWSError(t *testing.T) { - assert := assert.New(t) - - // Basic case to test type conversion and extraction of error code/message - t.Run("CreateAWSError", func(t *testing.T) { - errorSpec := ServiceAPIError{Code: "ReplicationGroupNotFoundFault", Message: "ReplicationGroup rg-cmd not found"} - respErr := CreateAWSError(errorSpec) - - awsErr, ok := ackerr.AWSError(respErr) - - assert.True(ok) - assert.Equal("ReplicationGroupNotFoundFault", awsErr.Code()) - assert.Equal("ReplicationGroup rg-cmd not found", awsErr.Message()) - }) - -} diff --git a/pkg/util/engine_version.go b/pkg/util/engine_version.go new file mode 100644 index 00000000..e83b6034 --- /dev/null +++ b/pkg/util/engine_version.go @@ -0,0 +1,60 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package util + +import ( + "strconv" + "strings" +) + +// EngineVersionsMatch returns true if desired and latest engine versions match and false otherwise +// precondition: both desiredEV and latestEV are non-nil +// this handles the case where only the major EV is specified, e.g. "6.x" (or similar), +// but the latest version shows the minor version, e.g. "6.0.5". +func EngineVersionsMatch(desiredEV, latestEV string) bool { + if desiredEV == latestEV { + return true + } + + dMaj, dMin := versionNumbersFromString(desiredEV) + lMaj, lMin := versionNumbersFromString(latestEV) + last := len(desiredEV) - 1 + + // if the last character of desiredEV is "x" or the major version is higher than 5, ignore patch version when comparing. 
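(Illustrative aside, not part of the generated diff: the "major-only" comparison rule described in the comment above can be sketched as a hypothetical table-driven test against the EngineVersionsMatch function added in pkg/util/engine_version.go; the cases below assume only the function as shown in this change.)

```go
package util_test

import (
	"testing"

	"github.com/aws-controllers-k8s/elasticache-controller/pkg/util"
)

// TestEngineVersionsMatchExamples is a hypothetical illustration of the
// major-only matching behaviour described above; it is not generated code.
func TestEngineVersionsMatchExamples(t *testing.T) {
	cases := []struct {
		desired, latest string
		want            bool
	}{
		{"6.x", "6.0.5", true},    // "x" placeholder: only the major version is compared
		{"7.1", "7.1.0", true},    // major > 5: the patch portion of the latest version is ignored
		{"7.1", "7.2.4", false},   // minor versions differ
		{"5.0.6", "5.0.6", true},  // identical strings always match
		{"5.0.5", "5.0.6", false}, // engine 5 and below require an exact match
	}
	for _, c := range cases {
		if got := util.EngineVersionsMatch(c.desired, c.latest); got != c.want {
			t.Errorf("EngineVersionsMatch(%q, %q) = %v, want %v", c.desired, c.latest, got, c.want)
		}
	}
}
```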
+ // See https://github.com/aws-controllers-k8s/community/issues/1737 + if dMaj > 5 || desiredEV[last:] == "x" { + return dMaj == lMaj && (dMin < 0 || dMin == lMin) + } + + return false +} + +// versionNumbersFromString takes a version string like "6.2", "6.x" or "7.0.4" and +// returns the major and minor version numbers. If the minor version is missing or +// uses the "x" placeholder, -1 is returned for it. +func versionNumbersFromString(version string) (int, int) { + parts := strings.Split(version, ".") + major := -1 + minor := -1 + if len(parts) == 0 { + return major, minor + } + major, _ = strconv.Atoi(parts[0]) + if len(parts) > 1 { + if !strings.EqualFold(parts[1], "x") { + minor, _ = strconv.Atoi(parts[1]) + } + } + return major, minor +} diff --git a/pkg/util/tags.go b/pkg/util/tags.go new file mode 100644 index 00000000..ae81352c --- /dev/null +++ b/pkg/util/tags.go @@ -0,0 +1,158 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package util + +import ( + "context" + "errors" + + ackv1alpha1 "github.com/aws-controllers-k8s/runtime/apis/core/v1alpha1" + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" + "github.com/aws-controllers-k8s/runtime/pkg/metrics" + ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" + ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" + acktags "github.com/aws-controllers-k8s/runtime/pkg/tags" + svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache" + svcsdktypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" + + svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1" +) + +var requeueWaitWhileTagUpdated = ackrequeue.NeededAfter( + errors.New("tags Update is in progress"), + ackrequeue.DefaultRequeueAfterDuration, +) + +// GetTags retrieves the resource's associated tags. +func GetTags( + ctx context.Context, + sdkapi *svcsdk.Client, + metrics *metrics.Metrics, + resourceARN string, +) ([]*svcapitypes.Tag, error) { + resp, err := sdkapi.ListTagsForResource( + ctx, + &svcsdk.ListTagsForResourceInput{ + ResourceName: &resourceARN, + }, + ) + metrics.RecordAPICall("GET", "ListTagsForResource", err) + if err != nil { + return nil, err + } + tags := make([]*svcapitypes.Tag, 0, len(resp.TagList)) + for _, tag := range resp.TagList { + tags = append(tags, &svcapitypes.Tag{ + Key: tag.Key, + Value: tag.Value, + }) + } + return tags, nil +} + +// SyncTags keeps the resource's tags in sync +// +// NOTE(jaypipes): Elasticache's Tagging APIs differ from other AWS APIs in the +// following ways: +// +// 1. The names of the tagging API operations are different. Other APIs use the +// Tagris `ListTagsForResource`, `TagResource` and `UntagResource` API +// calls. ElastiCache uses `ListTagsForResource`, `AddTagsToResource` and +// `RemoveTagsFromResource`. +// +// 2. Even though the name of the `ListTagsForResource` API call is the same, +// the structure of the input and the output are different from other APIs.
+// For the input, instead of a `ResourceArn` field, Elasticache names the field +// `ResourceName`, but actually expects an ARN, not the cache cluster +// name. This is the same for the `AddTagsToResource` and +// `RemoveTagsFromResource` input shapes. For the output shape, the field is +// called `TagList` instead of `Tags` but is otherwise the same struct with +// a `Key` and `Value` member field. +func SyncTags( + ctx context.Context, + desiredTags []*svcapitypes.Tag, + latestTags []*svcapitypes.Tag, + latestACKResourceMetadata *ackv1alpha1.ResourceMetadata, + toACKTags func(tags []*svcapitypes.Tag) (acktags.Tags, []string), + sdkapi *svcsdk.Client, + metrics *metrics.Metrics, +) (err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.syncTags") + defer func() { exit(err) }() + + arn := (*string)(latestACKResourceMetadata.ARN) + + from, _ := toACKTags(latestTags) + to, _ := toACKTags(desiredTags) + + added, _, removed := ackcompare.GetTagsDifference(from, to) + + // NOTE(jaypipes): According to the elasticache API documentation, adding a tag + // with a new value overwrites any existing tag with the same key. So, we + // don't need to do anything to "update" a Tag. Simply including it in the + // AddTagsToResource call is enough. + for key := range removed { + if _, ok := added[key]; ok { + delete(removed, key) + } + } + + // Modify tags causing the cache cluster to be modified and become unavailable temporarily + // so after adding or removing tags, we have to wait for the cache cluster to be available again + // process: add tags -> requeue -> remove tags -> requeue -> other update + if len(added) > 0 { + toAdd := make([]svcsdktypes.Tag, 0, len(added)) + for key, val := range added { + key, val := key, val + toAdd = append(toAdd, svcsdktypes.Tag{ + Key: &key, + Value: &val, + }) + } + + rlog.Debug("adding tags to cache cluster", "tags", added) + _, err = sdkapi.AddTagsToResource( + ctx, + &svcsdk.AddTagsToResourceInput{ + ResourceName: arn, + Tags: toAdd, + }, + ) + metrics.RecordAPICall("UPDATE", "AddTagsToResource", err) + if err != nil { + return err + } + } else if len(removed) > 0 { + toRemove := make([]string, 0, len(removed)) + for key := range removed { + key := key + toRemove = append(toRemove, key) + } + rlog.Debug("removing tags from cache cluster", "tags", removed) + _, err = sdkapi.RemoveTagsFromResource( + ctx, + &svcsdk.RemoveTagsFromResourceInput{ + ResourceName: arn, + TagKeys: toRemove, + }, + ) + metrics.RecordAPICall("UPDATE", "RemoveTagsFromResource", err) + if err != nil { + return err + } + } + + return requeueWaitWhileTagUpdated +} diff --git a/scripts/install-mockery.sh b/scripts/install-mockery.sh deleted file mode 100755 index 462283ad..00000000 --- a/scripts/install-mockery.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env bash - -# A script that installs the mockery CLI tool that is used to build Go mocks -# for our interfaces to use in unit testing. This script installs mockery into -# the bin/mockery path and really should just be used in testing scripts. - -set -eo pipefail - -SCRIPTS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -ROOT_DIR="$SCRIPTS_DIR/.." -BIN_DIR="$ROOT_DIR/bin" - -OS=$(uname -s) -ARCH=$(uname -m) -VERSION=2.2.2 -MOCKERY_RELEASE_URL="https://github.com/vektra/mockery/releases/download/v${VERSION}/mockery_${VERSION}_${OS}_${ARCH}.tar.gz" - -if [[ ! -f $BIN_DIR/mockery ]]; then - echo -n "Installing mockery into bin/mockery ... 
" - mkdir -p $BIN_DIR - cd $BIN_DIR - wget -q --no-check-certificate --content-disposition $MOCKERY_RELEASE_URL -O mockery.tar.gz - tar -xf mockery.tar.gz - echo "ok." -fi diff --git a/templates/hooks/cache_cluster/sdk_create_post_set_output.go.tpl b/templates/hooks/cache_cluster/sdk_create_post_set_output.go.tpl new file mode 100644 index 00000000..967aaeb8 --- /dev/null +++ b/templates/hooks/cache_cluster/sdk_create_post_set_output.go.tpl @@ -0,0 +1,6 @@ + if isCreating(&resource{ko}) { + // Setting resource synced condition to false will trigger a requeue of + // the resource. No need to return a requeue error here. + ackcondition.SetSynced(&resource{ko}, corev1.ConditionFalse, nil, nil) + return &resource{ko}, nil + } diff --git a/templates/hooks/cache_cluster/sdk_delete_pre_build_request.go.tpl b/templates/hooks/cache_cluster/sdk_delete_pre_build_request.go.tpl new file mode 100644 index 00000000..11633009 --- /dev/null +++ b/templates/hooks/cache_cluster/sdk_delete_pre_build_request.go.tpl @@ -0,0 +1,32 @@ + if isDeleting(r) { + // Setting resource synced condition to false will trigger a requeue of + // the resource. + ackcondition.SetSynced( + r, + corev1.ConditionFalse, + &condMsgCurrentlyDeleting, + nil, + ) + // Need to return a requeue error here, otherwise: + // - reconciler.deleteResource() marks the resource unmanaged + // - reconciler.HandleReconcileError() does not update status for unmanaged resource + // - reconciler.handleRequeues() is not invoked for delete code path. + // TODO: return err as nil when reconciler is updated. + return r, requeueWaitWhileDeleting + } + if isModifying(r) { + // Setting resource synced condition to false will trigger a requeue of + // the resource. + ackcondition.SetSynced( + r, + corev1.ConditionFalse, + &condMsgNoDeleteWhileModifying, + nil, + ) + // Need to return a requeue error here, otherwise: + // - reconciler.deleteResource() marks the resource unmanaged + // - reconciler.HandleReconcileError() does not update status for unmanaged resource + // - reconciler.handleRequeues() is not invoked for delete code path. + // TODO: return err as nil when reconciler is updated. 
+ return r, requeueWaitWhileModifying + } diff --git a/templates/hooks/cache_cluster/sdk_read_many_post_build_request.go.tpl b/templates/hooks/cache_cluster/sdk_read_many_post_build_request.go.tpl new file mode 100644 index 00000000..f2bc4b47 --- /dev/null +++ b/templates/hooks/cache_cluster/sdk_read_many_post_build_request.go.tpl @@ -0,0 +1,2 @@ + // Include cache node info to get endpoint details for clusters + input.ShowCacheNodeInfo = aws.Bool(true) \ No newline at end of file diff --git a/templates/hooks/cache_cluster/sdk_read_many_post_set_output.go.tpl b/templates/hooks/cache_cluster/sdk_read_many_post_set_output.go.tpl new file mode 100644 index 00000000..cf067441 --- /dev/null +++ b/templates/hooks/cache_cluster/sdk_read_many_post_set_output.go.tpl @@ -0,0 +1,27 @@ + if pendingModifications := ko.Status.PendingModifiedValues; pendingModifications != nil { + if pendingModifications.NumCacheNodes != nil { + ko.Spec.NumCacheNodes = pendingModifications.NumCacheNodes + } + if pendingModifications.CacheNodeType != nil { + ko.Spec.CacheNodeType = pendingModifications.CacheNodeType + } + if pendingModifications.TransitEncryptionEnabled != nil { + ko.Spec.TransitEncryptionEnabled = pendingModifications.TransitEncryptionEnabled + } + } + if isAvailable(r) { + ackcondition.SetSynced(&resource{ko}, corev1.ConditionTrue, nil, nil) + } else { + // Setting resource synced condition to false will trigger a requeue of + // the resource. No need to return a requeue error here. + ackcondition.SetSynced(&resource{ko}, corev1.ConditionFalse, nil, nil) + return &resource{ko}, nil + } + if ko.Status.ACKResourceMetadata != nil && ko.Status.ACKResourceMetadata.ARN != nil { + resourceARN := (*string)(ko.Status.ACKResourceMetadata.ARN) + tags, err := rm.getTags(ctx, *resourceARN) + if err != nil { + return nil, err + } + ko.Spec.Tags = tags + } diff --git a/templates/hooks/cache_cluster/sdk_update_post_build_request.go.tpl b/templates/hooks/cache_cluster/sdk_update_post_build_request.go.tpl new file mode 100644 index 00000000..c571ff4b --- /dev/null +++ b/templates/hooks/cache_cluster/sdk_update_post_build_request.go.tpl @@ -0,0 +1,3 @@ + if err := rm.updateCacheClusterPayload(input, desired, latest, delta); err != nil { + return nil, ackerr.NewTerminalError(err) + } diff --git a/templates/hooks/cache_cluster/sdk_update_post_set_output.go.tpl b/templates/hooks/cache_cluster/sdk_update_post_set_output.go.tpl new file mode 100644 index 00000000..07ffb00b --- /dev/null +++ b/templates/hooks/cache_cluster/sdk_update_post_set_output.go.tpl @@ -0,0 +1,11 @@ + if pendingModifications := resp.CacheCluster.PendingModifiedValues; pendingModifications != nil { + if pendingModifications.NumCacheNodes != nil { + ko.Spec.NumCacheNodes = Int64OrNil(pendingModifications.NumCacheNodes) + } + if pendingModifications.CacheNodeType != nil { + ko.Spec.CacheNodeType = pendingModifications.CacheNodeType + } + if pendingModifications.TransitEncryptionEnabled != nil { + ko.Spec.TransitEncryptionEnabled = pendingModifications.TransitEncryptionEnabled + } + } diff --git a/templates/hooks/cache_cluster/sdk_update_pre_build_request.go.tpl b/templates/hooks/cache_cluster/sdk_update_pre_build_request.go.tpl new file mode 100644 index 00000000..a1f55996 --- /dev/null +++ b/templates/hooks/cache_cluster/sdk_update_pre_build_request.go.tpl @@ -0,0 +1,9 @@ + if delta.DifferentAt("Spec.Tags") { + if err = rm.syncTags(ctx, desired, latest); err != nil { + return nil, err + } + } else if !delta.DifferentExcept("Spec.Tags") { + // If the only 
difference between the desired and latest is in the + // Spec.Tags field, we can skip the ModifyCacheCluster call. + return desired, nil + } diff --git a/templates/hooks/cache_subnet_group/sdk_read_many_post_set_output.go.tpl b/templates/hooks/cache_subnet_group/sdk_read_many_post_set_output.go.tpl new file mode 100644 index 00000000..93b3e520 --- /dev/null +++ b/templates/hooks/cache_subnet_group/sdk_read_many_post_set_output.go.tpl @@ -0,0 +1,8 @@ + + subnets := make([]*string, 0, len(ko.Status.Subnets)) + for _, subnetIdIter := range ko.Status.Subnets { + if subnetIdIter.SubnetIdentifier != nil { + subnets = append(subnets, subnetIdIter.SubnetIdentifier) + } + } + ko.Spec.SubnetIDs = subnets diff --git a/templates/hooks/replication_group/sdk_delete_post_request.go.tpl b/templates/hooks/replication_group/sdk_delete_post_request.go.tpl index e0ee7bb9..37fcb092 100644 --- a/templates/hooks/replication_group/sdk_delete_post_request.go.tpl +++ b/templates/hooks/replication_group/sdk_delete_post_request.go.tpl @@ -1,6 +1,6 @@ // delete call successful if err == nil { - rp, _ := rm.setReplicationGroupOutput(r, resp.ReplicationGroup) + rp, _ := rm.setReplicationGroupOutput(ctx, r, resp.ReplicationGroup) // Setting resource synced condition to false will trigger a requeue of // the resource. ackcondition.SetSynced( diff --git a/templates/hooks/replication_group/sdk_file_end.go.tpl b/templates/hooks/replication_group/sdk_file_end.go.tpl index bf8e8c80..45595375 100644 --- a/templates/hooks/replication_group/sdk_file_end.go.tpl +++ b/templates/hooks/replication_group/sdk_file_end.go.tpl @@ -1,8 +1,9 @@ // This method copies the data from given {{ .CRD.Names.Camel }} by populating it // into copy of supplied resource and returns that. func (rm *resourceManager) set{{ .CRD.Names.Camel }}Output ( + ctx context.Context, r *resource, - obj *svcsdk.{{ .CRD.Names.Camel }}, + obj *svcsdktypes.{{ .CRD.Names.Camel }}, ) (*resource, error) { if obj == nil || r == nil || diff --git a/templates/hooks/replication_group/sdk_file_end_set_output_post_populate.go.tpl b/templates/hooks/replication_group/sdk_file_end_set_output_post_populate.go.tpl new file mode 100644 index 00000000..ae519213 --- /dev/null +++ b/templates/hooks/replication_group/sdk_file_end_set_output_post_populate.go.tpl @@ -0,0 +1 @@ + rm.customSetOutput(ctx, *obj, ko) \ No newline at end of file diff --git a/templates/hooks/replication_group/sdk_read_many_post_set_output.go.tpl b/templates/hooks/replication_group/sdk_read_many_post_set_output.go.tpl index 85f6a11a..93c482db 100644 --- a/templates/hooks/replication_group/sdk_read_many_post_set_output.go.tpl +++ b/templates/hooks/replication_group/sdk_read_many_post_set_output.go.tpl @@ -1,3 +1,5 @@ + + rm.updateSpecFields(ctx, resp.ReplicationGroups[0], &resource{ko}) if isDeleting(r) { // Setting resource synced condition to false will trigger a requeue of @@ -21,6 +23,19 @@ ) return &resource{ko}, nil } + + if isCreating(r){ + // Setting resource synced condition to false will trigger a requeue of + // the resource. No need to return a requeue error here. + ackcondition.SetSynced( + &resource{ko}, + corev1.ConditionFalse, + &condMsgCurrentlyCreating, + nil, + ) + return &resource{ko}, nil + } + if isCreateFailed(r) { // This is a terminal state and by setting a Terminal condition on the // resource, we will prevent it from being requeued. 
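(Illustrative aside, not part of the generated diff: the hook templates above and the hunk below call an rm.getTags helper on the resource manager. A minimal, hedged sketch of what such a helper is assumed to look like, delegating to the util.GetTags function introduced in pkg/util/tags.go earlier in this change; the actual generated helper may differ.)

```go
package example

import (
	"context"

	"github.com/aws-controllers-k8s/runtime/pkg/metrics"
	svcsdk "github.com/aws/aws-sdk-go-v2/service/elasticache"

	svcapitypes "github.com/aws-controllers-k8s/elasticache-controller/apis/v1alpha1"
	"github.com/aws-controllers-k8s/elasticache-controller/pkg/util"
)

// resourceManager is a trimmed-down stand-in for the generated resource manager,
// shown only to illustrate how the shared util.GetTags helper is wired in.
type resourceManager struct {
	sdkapi  *svcsdk.Client
	metrics *metrics.Metrics
}

// getTags mirrors what the per-resource getTags helpers referenced by the hook
// templates are assumed to do: list tags for the resource's ARN via util.GetTags.
func (rm *resourceManager) getTags(
	ctx context.Context,
	resourceARN string,
) ([]*svcapitypes.Tag, error) {
	return util.GetTags(ctx, rm.sdkapi, rm.metrics, resourceARN)
}
```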
@@ -32,3 +47,12 @@ ) return &resource{ko}, nil } + + if ko.Status.ACKResourceMetadata != nil && ko.Status.ACKResourceMetadata.ARN != nil { + resourceARN := (*string)(ko.Status.ACKResourceMetadata.ARN) + tags, err := rm.getTags(ctx, *resourceARN) + if err != nil { + return nil, err + } + ko.Spec.Tags = tags + } diff --git a/templates/hooks/replication_group/sdk_update_post_build_request.go.tpl b/templates/hooks/replication_group/sdk_update_post_build_request.go.tpl index d8c6113e..a3adf04d 100644 --- a/templates/hooks/replication_group/sdk_update_post_build_request.go.tpl +++ b/templates/hooks/replication_group/sdk_update_post_build_request.go.tpl @@ -1,5 +1,8 @@ if !delta.DifferentAt("Spec.LogDeliveryConfigurations") { - input.SetLogDeliveryConfigurations(nil) + input.LogDeliveryConfigurations = nil + } + if !delta.DifferentAt("Spec.TransitEncryptionEnabled") { + input.TransitEncryptionEnabled = nil } if delta.DifferentAt("UserGroupIDs") { for _, diff := range delta.Differences { @@ -9,7 +12,7 @@ // User groups to add { - var userGroupsToAdd []*string + var userGroupsToAdd []string for _, requiredUserGroup := range requiredUserGroups { found := false @@ -21,16 +24,18 @@ } if !found { - userGroupsToAdd = append(userGroupsToAdd, requiredUserGroup) + if requiredUserGroup != nil { + userGroupsToAdd = append(userGroupsToAdd, *requiredUserGroup) + } } } - input.SetUserGroupIdsToAdd(userGroupsToAdd) + input.UserGroupIdsToAdd = userGroupsToAdd } // User groups to remove { - var userGroupsToRemove []*string + var userGroupsToRemove []string for _, existingUserGroup := range existingUserGroups { found := false @@ -42,12 +47,14 @@ } if !found { - userGroupsToRemove = append(userGroupsToRemove, existingUserGroup) + if existingUserGroup != nil { + userGroupsToRemove = append(userGroupsToRemove, *existingUserGroup) + } } } - input.SetUserGroupIdsToRemove(userGroupsToRemove) + input.UserGroupIdsToRemove = userGroupsToRemove } } } - } \ No newline at end of file + } diff --git a/templates/hooks/replication_group/sdk_update_pre_build_request.go.tpl b/templates/hooks/replication_group/sdk_update_pre_build_request.go.tpl new file mode 100644 index 00000000..4c3f8647 --- /dev/null +++ b/templates/hooks/replication_group/sdk_update_pre_build_request.go.tpl @@ -0,0 +1,5 @@ + if delta.DifferentAt("Spec.Tags") { + if err = rm.syncTags(ctx, desired, latest); err != nil { + return nil, err + } + } diff --git a/templates/hooks/serverless_cache/sdk_read_many_post_set_output.go.tpl b/templates/hooks/serverless_cache/sdk_read_many_post_set_output.go.tpl new file mode 100644 index 00000000..e05a001d --- /dev/null +++ b/templates/hooks/serverless_cache/sdk_read_many_post_set_output.go.tpl @@ -0,0 +1,9 @@ +// Get the ARN from the resource metadata +if ko.Status.ACKResourceMetadata != nil && ko.Status.ACKResourceMetadata.ARN != nil { + // Retrieve the tags for the resource + resourceARN := string(*ko.Status.ACKResourceMetadata.ARN) + tags, err := rm.getTags(ctx, resourceARN) + if err == nil { + ko.Spec.Tags = tags + } +} \ No newline at end of file diff --git a/templates/hooks/serverless_cache_snapshot/sdk_create_post_set_output.go.tpl b/templates/hooks/serverless_cache_snapshot/sdk_create_post_set_output.go.tpl new file mode 100644 index 00000000..e3ec1b8b --- /dev/null +++ b/templates/hooks/serverless_cache_snapshot/sdk_create_post_set_output.go.tpl @@ -0,0 +1,4 @@ +// If tags are specified, mark the resource as needing a sync +if ko.Spec.Tags != nil { + ackcondition.SetSynced(&resource{ko}, corev1.ConditionFalse, nil, 
nil) +} \ No newline at end of file diff --git a/templates/hooks/serverless_cache_snapshot/sdk_read_many_post_set_output.go.tpl b/templates/hooks/serverless_cache_snapshot/sdk_read_many_post_set_output.go.tpl new file mode 100644 index 00000000..dff6ee3e --- /dev/null +++ b/templates/hooks/serverless_cache_snapshot/sdk_read_many_post_set_output.go.tpl @@ -0,0 +1,11 @@ + // Only fetch tags if the snapshot is available + // ListTagsForResource fails when snapshot is still creating + if ko.Status.ACKResourceMetadata != nil && ko.Status.ACKResourceMetadata.ARN != nil && + isServerlessCacheSnapshotAvailable(&resource{ko}) { + resourceARN := (*string)(ko.Status.ACKResourceMetadata.ARN) + tags, err := rm.getTags(ctx, *resourceARN) + if err != nil { + return nil, err + } + ko.Spec.Tags = tags + } \ No newline at end of file diff --git a/test/e2e/bootstrap_resources.py b/test/e2e/bootstrap_resources.py index 205b8fd9..5a11e71f 100644 --- a/test/e2e/bootstrap_resources.py +++ b/test/e2e/bootstrap_resources.py @@ -16,8 +16,10 @@ """ from dataclasses import dataclass -from acktest.resources import read_bootstrap_config from e2e import bootstrap_directory +import yaml +import logging +from pathlib import Path @dataclass class TestBootstrapResources: @@ -60,3 +62,33 @@ def get_bootstrap_resources(bootstrap_file_name: str = "bootstrap.yaml"): **read_bootstrap_config(bootstrap_directory, bootstrap_file_name=bootstrap_file_name), ) return _bootstrap_resources + + +def write_bootstrap_config(bootstrap: dict, output_path: Path, bootstrap_file_name: str = "bootstrap.yaml"): + """ Dumps the bootstrap object into a YAML file at a given path. + + Args: + bootstrap: The bootstrap object. + output_path: The directory in which to dump the bootstrap yaml. + bootstrap_file_name: The name of the created bootstrap yaml file. + """ + path = output_path / bootstrap_file_name + logging.info(f"Wrote bootstrap to {path}") + with open(path, "w") as stream: + yaml.safe_dump(bootstrap, stream) + + +def read_bootstrap_config(config_dir: Path, bootstrap_file_name: str = "bootstrap.yaml") -> dict: + """ Reads a bootstrap dictionary from a given bootstrap file. + + Args: + config_dir: The directory in which the bootstrap yaml exists. + bootstrap_file_name: The name of the created bootstrap yaml file. + + Returns: + dict: The bootstrap dictionary read from the file. + """ + path = config_dir / bootstrap_file_name + with open(path, "r") as stream: + bootstrap = yaml.safe_load(stream) + return bootstrap \ No newline at end of file diff --git a/test/e2e/cache_parameter_group/e2e.sh b/test/e2e/cache_parameter_group/e2e.sh deleted file mode 100755 index 69d10ffc..00000000 --- a/test/e2e/cache_parameter_group/e2e.sh +++ /dev/null @@ -1,176 +0,0 @@ -#!/usr/bin/env bash - -############################################## -# Tests for AWS ElastiCache Cache Parameter Group -############################################## - -set -u - -THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -ROOT_DIR="$THIS_DIR/../../../.." -SCRIPTS_DIR="$ROOT_DIR/scripts" - -source "$SCRIPTS_DIR/lib/common.sh" -source "$SCRIPTS_DIR/lib/k8s.sh" -source "$SCRIPTS_DIR/lib/testutil.sh" -source "$SCRIPTS_DIR/lib/aws/elasticache.sh" - -check_is_installed jq "Please install jq before running this script." 
- -test_name="$( filenoext "${BASH_SOURCE[0]}" )" -ack_ctrl_pod_id=$( controller_pod_id ) -debug_msg "executing test: $service_name/$test_name" - -setup_cache_parameter_group_fields() { - # uses non local variable for later use in tests - cpg_name="ack-test-cpg-1" - cpg_description="$cpg_name description" - cpg_parameter_1_name="activedefrag" - cpg_parameter_1_value="yes" - cpg_parameter_2_name="active-defrag-cycle-max" - cpg_parameter_2_value="74" - cpg_parameter_3_name="active-defrag-cycle-min" - cpg_parameter_3_value="10" -} -setup_cache_parameter_group_fields - -################################################# -# create cache parameter group -################################################# - -ack_create_cache_parameter_group() { - local cpg_yaml="$(provide_cache_parameter_group_yaml)" - echo "$cpg_yaml" | kubectl apply -f - - sleep 10 -} -assert_k8s_status_cache_parameters_defaults() { - assert_k8s_status_parameters_value_source "$cpg_name" "activedefrag" "no" "system" - assert_k8s_status_parameters_value_source "$cpg_name" "active-defrag-cycle-max" "75" "system" - assert_k8s_status_parameters_value_source "$cpg_name" "active-defrag-cycle-min" "5" "system" -} - -debug_msg "Testing create Cache Parameter Group: $cpg_name." -assert_cache_parameter_group_does_not_exist "$cpg_name" -ack_create_cache_parameter_group -assert_cache_parameter_group_exists "$cpg_name" -assert_k8s_status_cache_parameters_defaults - -################################################# -# modify cache parameter group -################################################# -debug_msg "Testing modify Cache Parameter Group: $cpg_name." -######################### -## Add parameters -######################### -debug_msg "Testing Add Parameters to Cache Parameter Group: $cpg_name." - -assert_no_custom_cache_parameters() { - local actual_value=$(aws_get_cache_parameters_property "$cpg_name" ".Parameters" "user" | jq length) - assert_equal "0" "$actual_value" "Expected: 0 actual: $actual_value found for user parameters in cache parameter group $cpg_name" || \ - log_and_exit "cacheparametergroups/$cpg_name" -} - -ack_set_custom_cache_parameters() { - local cpg_yaml="$(provide_custom_cache_parameters_group_yaml)" - echo "$cpg_yaml" | kubectl apply -f - - sleep 10 -} - -assert_custom_cache_parameters() { - local actual_parameters=$(aws_get_cache_parameters_property "$cpg_name" ".Parameters" "user") - assert_parameters_name_value "$actual_parameters" "$cpg_parameter_1_name" "$cpg_parameter_1_value" - assert_parameters_name_value "$actual_parameters" "$cpg_parameter_2_name" "$cpg_parameter_2_value" - assert_parameters_name_value "$actual_parameters" "$cpg_parameter_3_name" "$cpg_parameter_3_value" -} - -assert_k8s_status_cache_parameters_custom() { - assert_k8s_status_parameters_value_source "$cpg_name" "$cpg_parameter_1_name" "$cpg_parameter_1_value" "user" - assert_k8s_status_parameters_value_source "$cpg_name" "$cpg_parameter_2_name" "$cpg_parameter_2_value" "user" - assert_k8s_status_parameters_value_source "$cpg_name" "$cpg_parameter_3_name" "$cpg_parameter_3_value" "user" -} - -assert_no_custom_cache_parameters -ack_set_custom_cache_parameters -assert_custom_cache_parameters -assert_k8s_status_cache_parameters_custom - -######################### -## Update parameter -######################### -debug_msg "Testing Update Parameters to Cache Parameter Group: $cpg_name." 
-update_cache_parameter_group_fields() { - # uses non local variable for later use in tests - cpg_parameter_1_value="no" - cpg_parameter_2_value="70" - cpg_parameter_3_value="15" -} - -update_cache_parameter_group_fields -ack_set_custom_cache_parameters -assert_custom_cache_parameters -assert_cpg_events "$cpg_name" - -######################### -## Remove parameter -######################### -debug_msg "Testing Remove Parameters to Cache Parameter Group: $cpg_name." -ack_remove_custom_cache_parameters() { - # keeps only parameter1. removes parameter2, sets parameter 3 to "" - local cpg_yaml="$(provide_custom_remove_cache_parameters_group_yaml)" - echo "$cpg_yaml" | kubectl apply -f - - sleep 10 -} - -assert_remove_custom_cache_parameters() { - # verify only parameter 1 is of source type 'user' - local actual_parameters=$(aws_get_cache_parameters_property "$cpg_name" ".Parameters" "user") - assert_parameters_name_value "$actual_parameters" "$cpg_parameter_1_name" "$cpg_parameter_1_value" - assert_parameters_name_value "$actual_parameters" "$cpg_parameter_2_name" "" - assert_parameters_name_value "$actual_parameters" "$cpg_parameter_3_name" "" - - # validate that the parameter 2 and 3 are now system default - local actual_parameters=$(aws_get_cache_parameters_property "$cpg_name" ".Parameters" "system") - assert_parameters_name_value "$actual_parameters" "$cpg_parameter_2_name" "75" - assert_parameters_name_value "$actual_parameters" "$cpg_parameter_3_name" "5" -} - -assert_k8s_status_cache_parameters_remove_custom() { - assert_k8s_status_parameters_value_source "$cpg_name" "$cpg_parameter_1_name" "$cpg_parameter_1_value" "user" - assert_k8s_status_parameters_value_source "$cpg_name" "$cpg_parameter_2_name" "75" "system" - assert_k8s_status_parameters_value_source "$cpg_name" "$cpg_parameter_3_name" "5" "system" -} - -ack_remove_custom_cache_parameters -assert_remove_custom_cache_parameters -assert_k8s_status_cache_parameters_remove_custom - -################################################# -# reset cache parameter group -# (remove all parameters) -################################################# -debug_msg "Testing Reset Parameters to Cache Parameter Group: $cpg_name." - -reset_all_custom_cache_parameters() { - # yaml has no parameters - local cpg_yaml="$(provide_cache_parameter_group_yaml)" - echo "$cpg_yaml" | kubectl apply -f - - sleep 10 -} -reset_all_custom_cache_parameters -assert_no_custom_cache_parameters -assert_k8s_status_cache_parameters_defaults - -################################################# -# delete cache parameter group -################################################# -debug_msg "Testing delete Cache Parameter Group: $cpg_name." - -ack_delete_cache_parameter_group() { - kubectl delete CacheParameterGroup/"$cpg_name" 2>/dev/null - assert_equal "0" "$?" "Expected success from kubectl delete but got $?" || log_and_exit "cacheparametergroups/$cpg_name" - sleep 5 -} -ack_delete_cache_parameter_group -assert_cache_parameter_group_does_not_exist "$cpg_name" - -debug_msg "Test completed." diff --git a/test/e2e/cache_subnet_group/smoke.sh b/test/e2e/cache_subnet_group/smoke.sh deleted file mode 100755 index 32a8fd9b..00000000 --- a/test/e2e/cache_subnet_group/smoke.sh +++ /dev/null @@ -1,132 +0,0 @@ -#!/usr/bin/env bash - -############################################## -# Tests for AWS ElastiCache Cache Subnet Group -############################################## - -THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -ROOT_DIR="$THIS_DIR/../../../.." 
-SCRIPTS_DIR="$ROOT_DIR/scripts" - -source "$SCRIPTS_DIR/lib/common.sh" -source "$SCRIPTS_DIR/lib/k8s.sh" -source "$SCRIPTS_DIR/lib/testutil.sh" -source "$SCRIPTS_DIR/lib/aws/elasticache.sh" - -test_name="$( filenoext "${BASH_SOURCE[0]}" )" -service_name="elasticache" -ack_ctrl_pod_id=$( controller_pod_id ) -debug_msg "executing test: $service_name/$test_name" - -aws_resource_name="ack-test-smoke-${service_name}-subnet-gp" -k8s_resource_name="cachesubnetgroups/$aws_resource_name" - -# pre-req: a subnet id to create cache subnet group -if ! aws_subnet_ids_json="$(get_default_subnets)"; then - echo "FAIL: No default subnet id found to run the test. Ensure that subnet id is available to run the test." - exit 1 -else - aws_subnet_id="$(echo "$aws_subnet_ids_json" | jq -r -e '.[0]')" -fi - -if [ -z "$aws_subnet_id" ]; then - echo "no subnet id found to run the test. Ensure that subnet id is available to run the test." - exit 1 -else - echo "using subnet id: ${aws_subnet_id} for test." -fi - - -describe_subnet_json() { - daws elasticache describe-cache-subnet-groups --cache-subnet-group-name "$aws_resource_name" --output json >/dev/null 2>&1 -} -get_subnet_group_description() { - if [[ $# -ne 1 ]]; then - echo "FAIL: Wrong number of arguments passed to get_subnet_group_description" - echo "Usage: get_subnet_group_description $subnet_group_name" - exit 1 - fi - local subnet_group_name="$1" - local subnet_group_desc="$(aws elasticache describe-cache-subnet-groups --cache-subnet-group-name "$subnet_group_name" --output json | jq -r -e '.CacheSubnetGroups[] | .CacheSubnetGroupDescription')" - echo "$subnet_group_desc" -} - -# PRE-CHECKS -describe_subnet_json -if [[ $? -ne 255 && $? -ne 254 ]]; then - echo "FAIL: expected $aws_resource_name to not exist in ${service_name}. Did previous test run cleanup?" - exit 1 -fi - -if k8s_resource_exists "$k8s_resource_name"; then - echo "FAIL: expected $k8s_resource_name to not exist on K8s cluster. Did previous test run cleanup?" - exit 1 -fi - -# TEST ACTIONS and ASSERTIONS - -## Create -debug_msg "Creating subnet group $aws_resource_name in ${service_name}" -cat </dev/null -assert_equal "0" "$?" "Expected success from kubectl delete but got $?" || exit 1 - -sleep 20 - -describe_subnet_json -if [[ $? -ne 255 && $? -ne 254 ]]; then - echo "FAIL: expected $aws_resource_name to be deleted in ${service_name}" - kubectl logs -n ack-system "$ack_ctrl_pod_id" - exit 1 -fi - -# pod may restart upon refresh of credentials, remove this check for now -# assert_pod_not_restarted $ack_ctrl_pod_id diff --git a/test/e2e/declarative_test_fwk/__init__.py b/test/e2e/declarative_test_fwk/__init__.py deleted file mode 100644 index 5d489e5d..00000000 --- a/test/e2e/declarative_test_fwk/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"). You may -# not use this file except in compliance with the License. A copy of the -# License is located at -# -# http://aws.amazon.com/apache2.0/ -# -# or in the "license" file accompanying this file. This file is distributed -# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -# express or implied. See the License for the specific language governing -# permissions and limitations under the License. 
diff --git a/test/e2e/declarative_test_fwk/helper.py b/test/e2e/declarative_test_fwk/helper.py deleted file mode 100644 index 7705f38e..00000000 --- a/test/e2e/declarative_test_fwk/helper.py +++ /dev/null @@ -1,261 +0,0 @@ -# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"). You may -# not use this file except in compliance with the License. A copy of the -# License is located at -# -# http://aws.amazon.com/apache2.0/ -# -# or in the "license" file accompanying this file. This file is distributed -# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -# express or implied. See the License for the specific language governing -# permissions and limitations under the License. - -"""Helper for Declarative tests framework for custom resources -""" - -from e2e.declarative_test_fwk import model - -import logging -from typing import Tuple -from time import sleep -from acktest.k8s import resource as k8s - -# holds custom resource helper references -TEST_HELPERS = dict() - - -def register_resource_helper(resource_kind: str, resource_plural: str): - """Decorator to discover Custom Resource Helper - - Args: - resource_kind: custom resource kind - resource_plural: custom resource kind plural - - Returns: - wrapper - """ - - def registrar(cls): - global TEST_HELPERS - if issubclass(cls, ResourceHelper): - TEST_HELPERS[resource_kind.lower()] = cls - cls.resource_plural = resource_plural.lower() - logging.info(f"Registered ResourceHelper: {cls.__name__} for custom resource kind: {resource_kind}") - else: - msg = f"Unable to register helper for {resource_kind} resource: {cls} is not a subclass of ResourceHelper" - logging.error(msg) - raise Exception(msg) - return registrar - - -class ResourceHelper: - """Provides generic verb (create, patch, delete) methods for custom resources. - Keep its methods stateless. Methods are on instance to allow specialization. - """ - - DEFAULT_WAIT_SECS = 30 - - def create(self, input_data: dict, input_replacements: dict = {}) -> Tuple[k8s.CustomResourceReference, dict]: - """Creates custom resource inside Kubernetes cluster per the specifications in input data. - - Args: - input_data: custom resource details - input_replacements: input replacements - - Returns: - k8s.CustomResourceReference, created custom resource - """ - - reference = self.custom_resource_reference(input_data, input_replacements) - _ = k8s.create_custom_resource(reference, input_data) - resource = k8s.wait_resource_consumed_by_controller(reference, wait_periods=10) - assert resource is not None - return reference, resource - - def patch(self, input_data: dict, input_replacements: dict = {}) -> Tuple[k8s.CustomResourceReference, dict]: - """Patches custom resource inside Kubernetes cluster per the specifications in input data. 
- - Args: - input_data: custom resource patch details - input_replacements: input replacements - - Returns: - k8s.CustomResourceReference, created custom resource - """ - - reference = self.custom_resource_reference(input_data, input_replacements) - _ = k8s.patch_custom_resource(reference, input_data) - sleep(self.DEFAULT_WAIT_SECS) # required as controller has likely not placed the resource in modifying - resource = k8s.wait_resource_consumed_by_controller(reference, wait_periods=10) - assert resource is not None - return reference, resource - - def delete(self, reference: k8s.CustomResourceReference) -> None: - """Deletes custom resource inside Kubernetes cluster and waits for delete completion - - Args: - reference: custom resource reference - - Returns: - None - """ - - resource = k8s.get_resource(reference) - if not resource: - logging.warning(f"ResourceReference {reference} not found. Not invoking k8s delete api.") - return - - k8s.delete_custom_resource(reference, wait_periods=30, period_length=60) # throws exception if wait fails - sleep(self.DEFAULT_WAIT_SECS) - self.wait_for_delete(reference) # throws exception if wait fails - - def assert_expectations(self, verb: str, input_data: dict, expectations: model.ExpectDict, reference: k8s.CustomResourceReference) -> None: - """Asserts custom resource reference inside Kubernetes cluster against the supplied expectations - - :param verb: expectations after performing the verb (apply, patch, delete) - :param input_data: input data to verb - :param expectations: expectations to assert - :param reference: custom resource reference - :return: None - """ - self._assert_conditions(expectations, reference, wait=False) - # conditions expectations met, now check current resource against expectations - resource = k8s.get_resource(reference) - self.assert_items(expectations.get("status"), resource.get("status")) - - # self._assert_state(expectations.get("spec"), resource) # uncomment to support spec assertions - - def wait_for(self, wait_expectations: dict, reference: k8s.CustomResourceReference) -> None: - """Waits for custom resource reference details inside Kubernetes cluster to match supplied config, - currently supports wait on "status.conditions", - it can be enhanced later for wait on any/other properties. - - Args: - wait_expectations: properties to wait for - reference: custom resource reference - - Returns: - None - """ - - # wait for conditions - self._assert_conditions(wait_expectations, reference, wait=True) - - def _assert_conditions(self, expectations: dict, reference: k8s.CustomResourceReference, wait: bool = True) -> None: - expect_conditions: dict = {} - if "status" in expectations and "conditions" in expectations["status"]: - expect_conditions = expectations["status"]["conditions"] - - default_wait_periods = 60 - # period_length = 1 will result in condition check every second - default_period_length = 1 - for (condition_name, expected_value) in expect_conditions.items(): - if type(expected_value) is str: - # Example: ACK.Terminal: "True" - if wait: - assert k8s.wait_on_condition(reference, condition_name, expected_value, - wait_periods=default_wait_periods, period_length=default_period_length) - else: - actual_condition = k8s.get_resource_condition(reference, condition_name) - assert actual_condition is not None - assert expected_value == actual_condition.get("status"), f"Condition status mismatch. 
Expected condition: {condition_name} - {expected_value} but found {actual_condition}" - - elif type(expected_value) is dict: - # Example: - # ACK.ResourceSynced: - # status: "False" - # message: "Expected message ..." - # timeout: 60 # seconds - condition_value = expected_value.get("status") - condition_message = expected_value.get("message") - # default wait 60 seconds - wait_timeout = expected_value.get("timeout", default_wait_periods) - - if wait: - assert k8s.wait_on_condition(reference, condition_name, condition_value, - wait_periods=wait_timeout, period_length=default_period_length) - - actual_condition = k8s.get_resource_condition(reference, - condition_name) - assert actual_condition is not None - assert condition_value == actual_condition.get("status"), f"Condition status mismatch. Expected condition: {condition_name} - {expected_value} but found {actual_condition}" - if condition_message is not None: - assert condition_message == actual_condition.get("message"), f"Condition message mismatch. Expected condition: {condition_name} - {expected_value} but found {actual_condition}" - - else: - raise Exception(f"Condition {condition_name} is provided with invalid value: {expected_value} ") - - def assert_items(self, expectations: dict, state: dict) -> None: - """Asserts state against supplied expectations - Override it as needed for custom verifications - - Args: - expectations: dictionary with items (expected) to assert in state - state: dictionary with items (actual) - - Returns: - None - """ - - if not expectations: - # nothing to assert as there are no expectations - return - if not state: - # there are expectations but no given state to validate - # following assert will fail and assert introspection will provide useful information for debugging - assert expectations == state - - for (key, value) in expectations.items(): - # conditions are processed separately - if key == "conditions": - continue - assert (key, value) == (key, state.get(key)) - - def custom_resource_reference(self, input_data: dict, input_replacements: dict = {}) -> k8s.CustomResourceReference: - """Helper method to provide k8s.CustomResourceReference for supplied input - - Args: - input_data: custom resource input data - input_replacements: input replacements - - Returns: - k8s.CustomResourceReference - """ - - resource_name = input_data.get("metadata").get("name") - crd_group = input_replacements.get("CRD_GROUP") - crd_version = input_replacements.get("CRD_VERSION") - - reference = k8s.CustomResourceReference( - crd_group, crd_version, self.resource_plural, resource_name, namespace="default") - return reference - - def wait_for_delete(self, reference: k8s.CustomResourceReference) -> None: - """Override this method to implement custom wail logic on resource delete. 
- - Args: - reference: custom resource reference - - Returns: - None - """ - - logging.debug(f"No-op wait_for_delete()") - - -def get_resource_helper(resource_kind: str) -> ResourceHelper: - """Provides ResourceHelper for supplied custom resource kind - If no helper is registered for the supplied resource kind then returns default ResourceHelper - - Args: - resource_kind: custom resource kind string - - Returns: - custom resource helper instance - """ - - helper_cls = TEST_HELPERS.get(resource_kind.lower()) - if helper_cls: - return helper_cls() - return ResourceHelper() diff --git a/test/e2e/declarative_test_fwk/loader.py b/test/e2e/declarative_test_fwk/loader.py deleted file mode 100644 index cfac760d..00000000 --- a/test/e2e/declarative_test_fwk/loader.py +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"). You may -# not use this file except in compliance with the License. A copy of the -# License is located at -# -# http://aws.amazon.com/apache2.0/ -# -# or in the "license" file accompanying this file. This file is distributed -# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -# express or implied. See the License for the specific language governing -# permissions and limitations under the License. - -"""Test Scenarios loader for Declarative tests framework for custom resources -""" - -from e2e.declarative_test_fwk import model -import pytest -import os -import glob -from typing import Iterable, List -from pathlib import Path -from os.path import isfile, join, isdir -from acktest.resources import load_resource_file, random_suffix_name - - -def list_scenarios(scenarios_directory: Path) -> Iterable: - """Lists test scenarios from given directory - - Args: - scenarios_directory: directory containing scenarios yaml files - - Returns: - Iterable scenarios for pytest parameterized fixture - """ - - scenarios_list = [] - scenario_files = glob.glob(str(scenarios_directory) + "/**/*.yaml", recursive=True) - - for scenario_file in scenario_files: - scenarios_list.append(pytest.param(Path(scenario_file), marks=marks(Path(scenario_file)))) - - return scenarios_list - - -def load_scenario(scenario_file: Path, resource_directory: Path = None, replacements: dict = {}) -> model.Scenario: - """Loads scenario from given scenario_file - - Args: - scenario_file: yaml file containing scenario - resource_directory: Path to custom resources directory - replacements: input replacements - - Returns: - Scenario reference - """ - - scenario_name = scenario_file.stem - replacements = replacements.copy() - replacements["RANDOM_SUFFIX"] = random_suffix_name("", 16) - scenario = model.Scenario(resource_directory, load_resource_file( - scenario_file.parent, scenario_name, additional_replacements=replacements), replacements) - return scenario - - -def idfn(scenario_file_full_path: Path) -> str: - """Provides scenario file name as scenario test id - - Args: - scenario_file_full_path: test scenario file path - - Returns: - scenario test id string - """ - - return scenario_file_full_path.name - - -def marks(scenario_file_path: Path) -> List: - """Provides pytest markers for the given scenario - - Args: - scenario_file_path: test scenario file path - - Returns: - pytest markers for the scenario - """ - - scenario_config = load_resource_file( - scenario_file_path.parent, scenario_file_path.stem) - - markers = [] - for mark in scenario_config.get("marks", []): - if mark == 
"canary": - markers.append(pytest.mark.canary) - elif mark == "slow": - markers.append(pytest.mark.slow) - elif mark == "blocked": - markers.append(pytest.mark.blocked) - return markers diff --git a/test/e2e/declarative_test_fwk/model.py b/test/e2e/declarative_test_fwk/model.py deleted file mode 100644 index 9940edeb..00000000 --- a/test/e2e/declarative_test_fwk/model.py +++ /dev/null @@ -1,187 +0,0 @@ -# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"). You may -# not use this file except in compliance with the License. A copy of the -# License is located at -# -# http://aws.amazon.com/apache2.0/ -# -# or in the "license" file accompanying this file. This file is distributed -# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -# express or implied. See the License for the specific language governing -# permissions and limitations under the License. - -"""Model for Declarative tests framework for custom resources -""" - - -from enum import Enum, auto -from typing import TypedDict, Dict, Union, List -from pathlib import Path -from os.path import join -from acktest.resources import load_resource_file - - -class Verb(Enum): - """ - Verb for custom resource in a test step. - """ - create = auto() - patch = auto() - delete = auto() - - -# fields for 'resource' field in a test Scenario -class ResourceDict(TypedDict, total=False): - apiVersion: str - kind: str - metadata: Dict - - -# fields for 'create' Verb in a test step -class CreateDict(ResourceDict): - spec: Dict - - -# fields for 'patch' Verb in a test step -class PatchDict(ResourceDict): - spec: Dict - - -# fields for 'delete' Verb in a test step -class DeleteDict(ResourceDict): - pass - - -# fields for 'expect' field in a test step -class ExpectDict(TypedDict, total=False): - spec: Dict - status: Dict - - -# fields in a test step -class StepDict(TypedDict, total=False): - id: str - description: str - create: Union[str, CreateDict] - patch: Union[str, PatchDict] - delete: Union[str, DeleteDict] - wait: Union[int, Dict] - expect: ExpectDict - - -class Step: - """ - Represents a declarative test step - """ - - def __init__(self, resource_directory: Path, config: StepDict, custom_resource_details: dict, replacements: dict = {}): - self.config = config - self.custom_resource_details = custom_resource_details - self.replacements = replacements - - self.verb = None - self.input_data = {} - self.expectations: ExpectDict = None - - # (k8s.CustomResourceReference, ko) to teardown - self.teardown_list = [] - - # validate: only one verb per step - step_verb = None - for verb in list(Verb): - if verb.name in self.config: - if not step_verb: - step_verb = verb - else: - raise ValueError(f"Multiple verbs specified for step: {self.id}." - f" Please specify only one verb from" - f" supported verbs: { {verb.name for verb in list(Verb)} }.") - - # a step with no verb can be used to assert preconditions - # thus, verb is optional. 
- if step_verb: - self.verb = step_verb - self.input_data = self.config.get(step_verb.name) - if type(self.input_data) is str: - if self.input_data.endswith(".yaml"): - # load input data from resource file - resource_file_name = self.input_data - resource_file_path = Path(join(resource_directory, resource_file_name)) - self.input_data = load_resource_file( - resource_file_path.parent, resource_file_path.stem, additional_replacements=replacements) - else: - # consider the input as resource name string - # confirm that self.custom_resource_details must be provided with same name - if self.custom_resource_details["metadata"]["name"] != self.input_data: - raise ValueError(f"Unable to determine input data for '{self.verb}' at step: {self.id}") - # self.custom_resource_details will be mixed in into self.input_data - self.input_data = {} - - if len(self.input_data) == 0 and not self.custom_resource_details: - raise ValueError(f"Unable to determine custom resource at step: {self.id}") - - if self.custom_resource_details: - self.input_data = {**self.custom_resource_details, **self.input_data} - - self.wait = self.config.get("wait") - self.expectations = self.config.get("expect") - - @property - def id(self) -> str: - return self.config.get("id", "") - - @property - def description(self) -> str: - return self.config.get("description", "") - - @property - def resource_kind(self) -> str: - return self.input_data.get("kind") - - def __str__(self) -> str: - return f"Step(id='{self.id}')" - - def __repr__(self) -> str: - return str(self) - - -# fields in a test scenario -class ScenarioDict(TypedDict, total=False): - id: str - description: str - marks: List[str] - resource: ResourceDict - steps: List[StepDict] - - -class Scenario: - """ - Represents a declarative test scenario with steps - """ - - def __init__(self, resource_directory: Path, config: ScenarioDict, replacements: dict = {}): - self.config = config - self.test_steps = [] - self.replacements = replacements - custom_resource_details = self.config.get("resource", {}) - for step_config in self.config.get("steps", []): - self.test_steps.append(Step(resource_directory, step_config, custom_resource_details.copy(), replacements)) - - @property - def id(self) -> str: - return self.config.get("id", "") - - @property - def description(self) -> str: - return self.config.get("description", "") - - @property - def steps(self): - return self.test_steps - - def __str__(self) -> str: - return f"Scenario(id='{self.id}')" - - def __repr__(self) -> str: - return str(self) diff --git a/test/e2e/declarative_test_fwk/runner.py b/test/e2e/declarative_test_fwk/runner.py deleted file mode 100644 index 4492c13e..00000000 --- a/test/e2e/declarative_test_fwk/runner.py +++ /dev/null @@ -1,228 +0,0 @@ -# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"). You may -# not use this file except in compliance with the License. A copy of the -# License is located at -# -# http://aws.amazon.com/apache2.0/ -# -# or in the "license" file accompanying this file. This file is distributed -# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -# express or implied. See the License for the specific language governing -# permissions and limitations under the License. 
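A minimal sketch of a scenario config that the deleted Scenario/Step classes above would accept, assembled from the ScenarioDict/StepDict fields and the condition names used elsewhere in these tests (all names and values here are illustrative, not taken from the repository):

    # Illustrative scenario config mirroring ScenarioDict/StepDict; values are made up.
    scenario_config = {
        "id": "RG_EXAMPLE",
        "description": "create, verify and delete a replication group",
        "resource": {
            "apiVersion": "$CRD_GROUP/$CRD_VERSION",
            "kind": "ReplicationGroup",
            "metadata": {"name": "example$RANDOM_SUFFIX"},
        },
        "steps": [
            {
                "id": "create_rg",
                "create": {"spec": {"replicationGroupID": "example$RANDOM_SUFFIX",
                                    "description": "example"}},
                "wait": {"status": {"conditions": {"ACK.ResourceSynced": "True"}}},
                "expect": {"status": {"status": "available"}},
            },
            {"id": "delete_rg", "delete": "example$RANDOM_SUFFIX"},
        ],
    }
    # Scenario(resource_directory, scenario_config, replacements) yields one Step per
    # entry in "steps", and Step.__init__ enforces exactly one verb per step.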
- -"""Runner for Declarative tests framework scenarios for custom resources -""" - -from e2e.declarative_test_fwk import model, helper -import pytest -import sys -import logging -from time import sleep -from acktest.k8s import resource as k8s - - -def run(scenario: model.Scenario) -> None: - """Runs steps in the given scenario - - Args: - scenario: the scenario to run - - Returns: - None - """ - - logging.info(f"Execute: {scenario}") - for step in scenario.steps: - run_step(step) - - -def teardown(scenario: model.Scenario) -> None: - """Teardown steps in the given scenario in reverse run order - - Args: - scenario: the scenario to teardown - - Returns: - None - """ - - logging.info(f"Teardown: {scenario}") - teardown_failures = [] - # tear down steps in reverse order - for step in reversed(scenario.steps): - try: - teardown_step(step) - except: - error = f"Failed to teardown: {step}. " \ - f"Unexpected error: {sys.exc_info()[0]}" - teardown_failures.append(error) - - if len(teardown_failures) != 0: - teardown_failures.insert(0, f"Failures during teardown: {scenario}") - failures = "\n\t- ".join(teardown_failures) - logging.error(failures) - pytest.fail(failures) - - -def run_step(step: model.Step) -> None: - """Runs a test scenario step - - Args: - step: the step to run - - Returns: - None - """ - - logging.info(f"Execute: {step}") - if step.verb == model.Verb.create: - create_resource(step) - elif step.verb == model.Verb.patch: - patch_resource(step) - elif step.verb == model.Verb.delete: - delete_resource(step) - wait(step) - assert_expectations(step) - - -def create_resource(step: model.Step) -> None: - """Perform the Verb "create" for given test step. - It results in creating custom resource inside Kubernetes cluster per the specification from the step. - - Args: - step: test step - - Returns: - None - """ - - logging.debug(f"create: {step}") - if not step.input_data: - return - resource_helper = helper.get_resource_helper(step.resource_kind) - (reference, ko) = resource_helper.create(step.input_data, step.replacements) - # track created reference to teardown later - step.teardown_list.append((reference, ko)) - - -def patch_resource(step: model.Step) -> None: - """Perform the Verb "patch" for given test step. - It results in patching custom resource inside Kubernetes cluster per the specification from the step. - - Args: - step: test step - - Returns: - None - """ - - logging.debug(f"patch: {step}") - if not step.input_data: - return - - resource_helper = helper.get_resource_helper(step.resource_kind) - (reference, ko) = resource_helper.patch(step.input_data, step.replacements) - # no need to teardown patched reference, its creator should tear it down. - - -def delete_resource(step: model.Step, reference: k8s.CustomResourceReference = None) -> None: - """Perform the Verb "delete" for given custom resource reference in given test step. - It results in deleting the custom resource inside Kubernetes cluster. 
- - Args: - step: test step - reference: custom resource reference to delete - - Returns: - None - """ - - resource_helper = helper.get_resource_helper(step.resource_kind) - if not reference: - logging.debug(f"delete: {step}") - reference = resource_helper.custom_resource_reference(step.input_data, step.replacements) - if k8s.get_resource_exists(reference): - logging.debug(f"deleting resource: {reference}") - resource_helper.delete(reference) - else: - logging.info(f"Resource already deleted: {reference}") - - -def wait(step: model.Step) -> None: - """Performs wait logic for the given step. - The step provides the wait details (properties/conditions values to wait for) - - Args: - step: test step - - Returns: - None - """ - - logging.debug(f"wait: {step}") - if not step.wait: - return - - if type(step.wait) is int: - interval_seconds = step.wait - logging.debug(f"Going to sleep for {interval_seconds} seconds during step {step}") - sleep(interval_seconds) - return - - resource_helper = helper.get_resource_helper(step.resource_kind) - reference = resource_helper.custom_resource_reference(step.input_data, step.replacements) - try: - resource_helper.wait_for(step.wait, reference) - except AssertionError as ae: - logging.error(f"Wait failed, AssertionError at {step}") - raise ae - except Exception as e: - logging.error(f"Wait failed, Exception at {step}") - raise e - - -def assert_expectations(step: model.Step) -> None: - """Asserts expectations as specified in the Step. - - Args: - step: test step - - Returns: - None - """ - - logging.info(f"assert: {step}") - if not step.expectations: - return - - resource_helper = helper.get_resource_helper(step.resource_kind) - reference = resource_helper.custom_resource_reference(step.input_data, step.replacements) - try: - resource_helper.assert_expectations(step.verb, step.input_data, step.expectations, reference) - except AssertionError as ae: - logging.error(f"AssertionError at {step}") - raise ae - - -def teardown_step(step: model.Step) -> None: - """Teardown custom resources that were created during step execution (run) inside Kubernetes cluster. - - Args: - step: test step - - Returns: - None - """ - - if not step or len(step.teardown_list) == 0: - return - - logging.info(f"teardown: {step}") - - for (reference, _) in step.teardown_list: - if reference: - delete_resource(step, reference) - - # clear list - step.teardown_list = [] diff --git a/test/e2e/replication_group/creation.sh b/test/e2e/replication_group/creation.sh deleted file mode 100755 index a436b7c6..00000000 --- a/test/e2e/replication_group/creation.sh +++ /dev/null @@ -1,218 +0,0 @@ -#!/usr/bin/env bash - -# replication group creation tests: testing valid and invalid inputs - -THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -ROOT_DIR="$THIS_DIR/../../../.." -SCRIPTS_DIR="$ROOT_DIR/scripts" - -source "$SCRIPTS_DIR/lib/common.sh" -source "$SCRIPTS_DIR/lib/k8s.sh" -source "$SCRIPTS_DIR/lib/testutil.sh" -source "$SCRIPTS_DIR/lib/aws/elasticache.sh" - -check_is_installed jq "Please install jq before running this script." 
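A sketch of how a pytest module would have exercised the deleted loader and runner shown above; the fixture, test, and directory names are illustrative, while the loader/runner calls follow the signatures in the removed files:

    # Hypothetical pytest wiring for the deleted declarative framework.
    from pathlib import Path
    import pytest
    from e2e.declarative_test_fwk import loader, runner

    SCENARIOS_DIR = Path("scenarios")    # illustrative locations
    RESOURCES_DIR = Path("resources")
    REPLACEMENTS = {"CRD_GROUP": "elasticache.services.k8s.aws", "CRD_VERSION": "v1alpha1"}

    @pytest.fixture(params=loader.list_scenarios(SCENARIOS_DIR), ids=loader.idfn)
    def scenario(request):
        s = loader.load_scenario(request.param, RESOURCES_DIR, REPLACEMENTS)
        yield s
        runner.teardown(s)   # tear down created resources in reverse step order

    def test_scenario(scenario):
        runner.run(scenario)  # run_step: apply the verb, wait, then assert expectations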
-AWS_REGION=${AWS_REGION:-"us-west-2"} -test_name="$( filenoext "${BASH_SOURCE[0]}" )" -ack_ctrl_pod_id=$( controller_pod_id ) -debug_msg "executing test group: $service_name/$test_name------------------------------" -debug_msg "selected AWS region: $AWS_REGION" - -# attempt creation of replication group with numeric name: negative test, expect failure -test_create_rg_numeric_name() { - debug_msg "executing ${FUNCNAME[0]}" - - # generate and apply yaml for replication group creation - clear_rg_parameter_variables - rg_id="12345" - output_msg=$(provide_replication_group_yaml | kubectl apply -f - 2>&1) - error_code=$? - - # kubectl apply should fail given a numeric resource name - if [ $error_code -eq 0 ]; then - echo "FAIL: expected creation of replication group $rg_id to have failed due to numeric name" - exit 1 - fi - - # check that error message is the one we expect - if [[ $output_msg != *"unable to decode \"STDIN\""* ]]; then - echo "FAIL: creation of replication group $rg_id failed as expected, but error message different than expected:" - echo "$output_msg" - exit 1 - fi -} - -# attempt creation of RG with invalid name (has space): negative test, expect failure -test_create_rg_name_contains_spaces() { - debug_msg "executing ${FUNCNAME[0]}" - - # generate and apply yaml for replication group creation - clear_rg_parameter_variables - rg_id="new rg" - output_msg=$(provide_replication_group_yaml | kubectl apply -f - 2>&1) - error_code=$? - - # kubectl apply should fail given a resource name with spaces - if [ $error_code -eq 0 ]; then - echo "FAIL: expected creation of replication group $rg_id to have failed since name contains spaces" - exit 1 - fi - - # check that error message is the one we expect - if [[ $output_msg != *"a DNS-1123 subdomain must consist of"* ]]; then - echo "FAIL: creation of replication group $rg_id failed as expected, but error message different than expected:" - echo "$output_msg" - exit 1 - fi -} - -# attempt creation of RG with capital letters in name: negative test, expect failure -test_create_rg_mixed_case_name() { - debug_msg "executing ${FUNCNAME[0]}" - - # generate and apply yaml for replication group creation - clear_rg_parameter_variables - rg_id="newRG" - output_msg=$(provide_replication_group_yaml | kubectl apply -f - 2>&1) - error_code=$? - - # kubectl apply should fail given a mixed-case resource name - if [ $error_code -eq 0 ]; then - echo "FAIL: expected creation of replication group $rg_id to have failed due to mixed-case name" - exit 1 - fi - - # check that error message is the one we expect - if [[ $output_msg != *"a DNS-1123 subdomain must consist of"* ]]; then - echo "FAIL: creation of replication group $rg_id failed as expected, but error message different than expected:" - echo "$output_msg" - exit 1 - fi -} - -# create replication group with one node group (cluster mode disabled), no replicas -test_create_rg_single_shard_no_replicas() { - debug_msg "executing ${FUNCNAME[0]}" - - # generate and apply yaml for replication group creation - clear_rg_parameter_variables - rg_id="single-shard-no-replicas" - num_node_groups=1 - replicas_per_node_group=0 - automatic_failover_enabled="false" - multi_az_enabled="false" - provide_replication_group_yaml | kubectl apply -f - 2>&1 - exit_if_rg_config_application_failed $? "$rg_id" - - # ensure resource successfully created and available - wait_and_assert_replication_group_synced_and_available "$rg_id" - - # ensure node type modification list exists. 
- assert_replication_list_allowed_node_type_modifications "$rg_id" - - # ensure events exist. - assert_replication_group_events "$rg_id" -} - -# create RG with custom node group specification where ID isn't surrounded by quotes: negative test, -# expect failure -test_create_rg_specify_node_group_no_quotes() { - debug_msg "executing ${FUNCNAME[0]}" - - # generate and apply yaml for replication group creation - clear_rg_parameter_variables - rg_id="rg-custom-node-no-quotes" - automatic_failover_enabled="true" - num_node_groups=1 - replicas_per_node_group=1 - multi_az_enabled="false" - yaml_base="$(provide_replication_group_yaml)" - rg_yaml=$(cat <&1) - error_code=$? - - # kubectl apply should fail if node group ID is not placed within quotes - if [ $error_code -eq 0 ]; then - echo "FAIL: expected config application for replication group $rg_id to have failed" - exit 1 - fi - - # check that error message is the one we expect - if [[ $output_msg != *"spec.nodeGroupConfiguration.nodeGroupID: Invalid value"* ]]; then - echo "FAIL: creation of replication group $rg_id failed as expected, but error message different than expected:" - echo "$output_msg" - exit 1 - fi -} - -# create replication group with custom nodeGroupConfiguration -test_create_rg_custom_node_config() { - debug_msg "executing ${FUNCNAME[0]}" - - # generate and apply yaml for replication group creation - clear_rg_parameter_variables - rg_id="rg-custom-node-config" - num_node_groups=1 - replicas_per_node_group=2 - yaml_base="$(provide_replication_group_yaml)" - rg_yaml=$(cat <&1 - exit_if_rg_config_application_failed $? "$rg_id" - - # ensure resource successfully created and available - wait_and_assert_replication_group_synced_and_available "$rg_id" -} - -# create RG with custom param group -test_create_rg_custom_param_group() { - debug_msg "executing ${FUNCNAME[0]}" - - # create custom param group - k8s_controller_reload_credentials "elasticache" - daws elasticache create-cache-parameter-group --cache-parameter-group-name "pgtest" --cache-parameter-group-family "redis6.x" --description "test" 1>/dev/null 2>&1 - daws elasticache modify-cache-parameter-group --cache-parameter-group-name "pgtest" --parameter-name-values "ParameterName=reserved-memory-percent,ParameterValue=30" 1>/dev/null 2>&1 - - # generate and apply yaml for replication group creation - clear_rg_parameter_variables - rg_id="rg-custom-param-group" - num_node_groups=1 - replicas_per_node_group=2 - yaml_base="$(provide_replication_group_yaml)" - rg_yaml=$(cat <&1 - exit_if_rg_config_application_failed $? 
"$rg_id" - - # ensure resource successfully created and available, check resource is as expected - wait_and_assert_replication_group_synced_and_available "$rg_id" - aws_assert_rg_param_group "$rg_id" "pgtest" -} - -# run tests -test_create_rg_numeric_name -test_create_rg_name_contains_spaces -test_create_rg_mixed_case_name -test_create_rg_single_shard_no_replicas -test_create_rg_specify_node_group_no_quotes -test_create_rg_custom_node_config -test_create_rg_custom_param_group - -k8s_perform_rg_test_cleanup \ No newline at end of file diff --git a/test/e2e/replication_group/e2e.sh b/test/e2e/replication_group/e2e.sh deleted file mode 100755 index dac42dd0..00000000 --- a/test/e2e/replication_group/e2e.sh +++ /dev/null @@ -1,126 +0,0 @@ -#!/usr/bin/env bash - -############################################## -# Tests for AWS ElastiCache Replication Group -############################################## - -set -u - -THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -ROOT_DIR="$THIS_DIR/../../../.." -SCRIPTS_DIR="$ROOT_DIR/scripts" - -source "$SCRIPTS_DIR/lib/common.sh" -source "$SCRIPTS_DIR/lib/k8s.sh" -source "$SCRIPTS_DIR/lib/testutil.sh" -source "$SCRIPTS_DIR/lib/aws/elasticache.sh" - -check_is_installed jq "Please install jq before running this script." - -test_name="$( filenoext "${BASH_SOURCE[0]}" )" -ack_ctrl_pod_id=$( controller_pod_id ) -debug_msg "executing test: $service_name/$test_name" -debug_msg "selected AWS region: $AWS_REGION" - -setup_replication_group_fields() { - # uses non local variable for later use in tests - # cluster mode enabled replication group - rg_id="ack-test-rg-1" - rg_description="$rg_id description" - num_node_groups="2" - replicas_per_node_group="1" -} -setup_replication_group_fields - -ack_apply_replication_group_yaml() { - rg_yaml="$(provide_replication_group_yaml)" - echo "$rg_yaml" | kubectl apply -f - -} - -ack_apply_replication_group_with_node_groups_yaml() { - rg_yaml="$(provide_replication_group_detailed_yaml)" # helps determine node groups to retain during decrease - echo "$rg_yaml" | kubectl apply -f - -} - -k8s_controller_reload_credentials "$service_name" - -################################################# -# create replication group -################################################# -ack_create_replication_group() { - setup_replication_group_fields - debug_msg "Testing create replication group: $rg_id." - ack_apply_replication_group_yaml -} - -ack_create_replication_group -wait_and_assert_replication_group_synced_and_available "$rg_id" - -################################################# -# modify replication group -################################################# -k8s_assert_replication_group_status_property "$rg_id" ".description" "$rg_description" -ack_modify_replication_group() { - # uses non local variable for later use in tests - rg_description="$rg_id description updated" - debug_msg "Testing modify replication group: $rg_id." 
- ack_apply_replication_group_yaml -} -ack_modify_replication_group -wait_and_assert_replication_group_synced_and_available "$rg_id" -k8s_assert_replication_group_status_property "$rg_id" ".description" "$rg_description" - -################################################# -# modify replication group shards count -################################################# -test_update_shards_count_increase() { - k8s_assert_replication_group_shard_count "$rg_id" "$num_node_groups" # assert current value - # uses non local variable for later use in tests - num_node_groups="3" # increases from 2 to 3 - debug_msg "Testing modify replication group: $rg_id shards count to new value: $num_node_groups." - ack_apply_replication_group_yaml - wait_and_assert_replication_group_synced_and_available "$rg_id" - k8s_assert_replication_group_shard_count "$rg_id" "$num_node_groups" # assert updated value -} -test_update_shards_count_increase - -test_update_shards_count_decrease() { - k8s_assert_replication_group_shard_count "$rg_id" "$num_node_groups" # assert current value - # uses non local variable for later use in tests - num_node_groups="2" # decreases from 3 to 2 - debug_msg "Testing modify replication group: $rg_id shards count to new value: $num_node_groups." - ack_apply_replication_group_with_node_groups_yaml - wait_and_assert_replication_group_synced_and_available "$rg_id" - k8s_assert_replication_group_shard_count "$rg_id" "$num_node_groups" # assert updated value -} -test_update_shards_count_decrease - -################################################# -# modify replication group replica count -################################################# -ack_modify_replication_group_replica_count() { - # uses non local variable for later use in tests - replicas_per_node_group="$1" - debug_msg "Testing modify replication group: $rg_id replica count to new value: $replicas_per_node_group." - ack_apply_replication_group_yaml -} -test_update_replica_count() { - k8s_assert_replication_group_replica_count "$rg_id" "$replicas_per_node_group" # assert current value - ack_modify_replication_group_replica_count "$1" - wait_and_assert_replication_group_synced_and_available "$rg_id" - k8s_assert_replication_group_replica_count "$rg_id" "$replicas_per_node_group" # assert updated value -} -### increase replicas count -test_update_replica_count "2" -### decrease replicas count -test_update_replica_count "1" - -################################################# -# delete replication group -################################################# -debug_msg "Testing delete replication group: $rg_id." -kubectl delete ReplicationGroup/"$rg_id" 2>/dev/null -assert_equal "0" "$?" "Expected success from kubectl delete but got $?" || exit 1 -sleep 5 -aws_wait_replication_group_deleted "$rg_id" "FAIL: expected replication group $rg_id to have been deleted in ${service_name}" - diff --git a/test/e2e/replication_group/misc.sh b/test/e2e/replication_group/misc.sh deleted file mode 100644 index acce3dcb..00000000 --- a/test/e2e/replication_group/misc.sh +++ /dev/null @@ -1,95 +0,0 @@ -#!/usr/bin/env bash - -# replication group miscellaneous tests - -THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -ROOT_DIR="$THIS_DIR/../../../.." -SCRIPTS_DIR="$ROOT_DIR/scripts" - -source "$SCRIPTS_DIR/lib/common.sh" -source "$SCRIPTS_DIR/lib/k8s.sh" -source "$SCRIPTS_DIR/lib/testutil.sh" -source "$SCRIPTS_DIR/lib/aws/elasticache.sh" - -check_is_installed jq "Please install jq before running this script." 
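The shard and node-count assertions in these shell tests all reduce to the same arithmetic; a small sketch of the expected totals for a uniform replicasPerNodeGroup configuration (the uneven-shard case further below sums each node group's replicaCount instead):

    # Expected totals behind k8s_assert_replication_group_total_node_count:
    # one primary per shard plus that shard's replicas.
    def expected_total_nodes(num_node_groups: int, replicas_per_node_group: int) -> int:
        return num_node_groups * (1 + replicas_per_node_group)

    assert expected_total_nodes(2, 1) == 4   # e.g. the cluster-mode-enabled scale-out baseline
    assert expected_total_nodes(3, 2) == 9   # after scaling out and adding replicas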
- -test_name="$( filenoext "${BASH_SOURCE[0]}" )" -ack_ctrl_pod_id=$( controller_pod_id ) -debug_msg "executing test group: $service_name/$test_name------------------------------" -debug_msg "selected AWS region: $AWS_REGION" - -# test creation of replication group with explicit security group -test_create_rg_specify_sg() { - - # create security group in default VPC to use - local vpc_json=$(daws ec2 describe-vpcs | jq -r '.Vpcs[] | select( .IsDefault == true )') - local default_vpc_id=$(echo "$vpc_json" | jq -r '.VpcId') - daws ec2 create-security-group --group-name "test-sg-default" --description "sg for automated elasticache ACK test" --vpc-id "$default_vpc_id" 1>/dev/null 2>&1 - - # retrieve security group ID from newly created security group - local sg_id=$(daws ec2 describe-security-groups | jq -r -e '.SecurityGroups[] | select( .GroupName == "test-sg-default" ) | .GroupId') - assert_equal "0" "$?" "Could not find security group ID for security group test-sg-default" || exit 1 - - # generate and apply yaml for replication group creation - clear_rg_parameter_variables - rg_id="rg-specify-sg" - num_node_groups=1 - replicas_per_node_group=0 - automatic_failover_enabled="false" - multi_az_enabled="false" - yaml_base=$(provide_replication_group_yaml "$rg_id") - rg_yaml=$(cat <&1 - exit_if_rg_config_application_failed $? "$rg_id" - - # ensure resource successfully created and available, check resource is as expected - wait_and_assert_replication_group_synced_and_available "$rg_id" - local primary_cluster=$(aws_get_replication_group_json "$rg_id" | jq -r -e ".MemberClusters[0]") - assert_equal "0" "$?" "Could not find cache cluster for replication group $rg_id" || log_and_exit "replicationgroups/$rg_id" - daws elasticache describe-cache-clusters --cache-cluster-id "$primary_cluster" | jq -r '.CacheClusters[0]' | grep "$sg_id" - assert_equal "0" "$?" "Expected replication group $rg_id to have security group $sg_id" || log_and_exit "replicationgroups/$rg_id" -} - -# create multiple RGs and check deletion succeeds -test_rg_deletion_multiple() { - debug_msg "executing ${FUNCNAME[0]}" - - # generate and apply yaml for creation of first replication group - clear_rg_parameter_variables - rg_id="rg-deletion-1" - num_node_groups=1 - replicas_per_node_group=0 - automatic_failover_enabled="false" - multi_az_enabled="false" - provide_replication_group_yaml | kubectl apply -f - 2>&1 - exit_if_rg_config_application_failed $? "$rg_id" - - # ensure first RG successfully created and available - wait_and_assert_replication_group_synced_and_available "$rg_id" - - # generate and apply yaml for creation of second replication group - rg_id="rg-deletion-2" - provide_replication_group_yaml | kubectl apply -f - 2>&1 - exit_if_rg_config_application_failed $? "$rg_id" - - # ensure second RG successfully created and available - wait_and_assert_replication_group_synced_and_available "$rg_id" - - # delete and wait for deletion to complete - kubectl delete ReplicationGroup --all 2>/dev/null - assert_equal "0" "$?" "Expected success from kubectl delete but got $?" 
|| exit 1 - - aws_wait_replication_group_deleted "rg-deletion-1" "FAIL: expected replication group rg-deletion-1 to have been deleted in ${service_name}" - aws_wait_replication_group_deleted "rg-deletion-2" "FAIL: expected replication group rg-deletion-2 to have been deleted in ${service_name}" -} - -# run tests -test_create_rg_specify_sg # failing -test_rg_deletion_multiple - -k8s_perform_rg_test_cleanup \ No newline at end of file diff --git a/test/e2e/replication_group/replication.sh b/test/e2e/replication_group/replication.sh deleted file mode 100755 index 6f64242b..00000000 --- a/test/e2e/replication_group/replication.sh +++ /dev/null @@ -1,363 +0,0 @@ -#!/usr/bin/env bash - -# tests covering "replication", i.e. adding/removing replicas, auto failover, etc. - -THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -ROOT_DIR="$THIS_DIR/../../../.." -SCRIPTS_DIR="$ROOT_DIR/scripts" - -source "$SCRIPTS_DIR/lib/common.sh" -source "$SCRIPTS_DIR/lib/k8s.sh" -source "$SCRIPTS_DIR/lib/testutil.sh" -source "$SCRIPTS_DIR/lib/aws/elasticache.sh" - -check_is_installed jq "Please install jq before running this script." - -AWS_REGION=${AWS_REGION:-"us-west-2"} - -test_name="$( filenoext "${BASH_SOURCE[0]}" )" -ack_ctrl_pod_id=$( controller_pod_id ) -debug_msg "executing test group: $service_name/$test_name------------------------------" -debug_msg "selected AWS region: $AWS_REGION" - -# create cluster mode disabled replication group without replicas, attempt to modify to -# negative replica count: negative test, expect failure -test_modify_rg_negative_replicas() { - debug_msg "executing ${FUNCNAME[0]}" - - # generate and apply yaml for replication group creation - clear_rg_parameter_variables - rg_id="test-rg-modify-negative-replicas" - automatic_failover_enabled="false" - num_node_groups="1" - replicas_per_node_group="0" - multi_az_enabled="false" - provide_replication_group_yaml | kubectl apply -f - 2>&1 - exit_if_rg_config_application_failed $? "$rg_id" - - # ensure resource successfully created and available, check resource is as expected - wait_and_assert_replication_group_synced_and_available "$rg_id" - k8s_assert_replication_group_replica_count "$rg_id" 0 - - # update config and apply: attempt to change to negative replica count - replicas_per_node_group="-1" - provide_replication_group_yaml | kubectl apply -f - 2>&1 - exit_if_rg_config_application_failed $? "$rg_id" - - check_rg_terminal_condition_true "$rg_id" "New replica count must be between" -} - -# modify replication group to enable auto failover -test_modify_rg_enable_auto_failover() { - debug_msg "executing ${FUNCNAME[0]}" - - # generate and apply yaml for replication group creation - clear_rg_parameter_variables - rg_id="test-enable-failover" - automatic_failover_enabled="false" - multi_az_enabled="false" - num_node_groups=1 - replicas_per_node_group=1 - provide_replication_group_yaml | kubectl apply -f - 2>&1 - exit_if_rg_config_application_failed $? "$rg_id" - - # ensure resource successfully created and available, check property as expected - wait_and_assert_replication_group_synced_and_available "$rg_id" - k8s_assert_replication_group_status_property "$rg_id" ".automaticFailover" "disabled" - - # update configuration and apply - automatic_failover_enabled="true" - provide_replication_group_yaml | kubectl apply -f - 2>&1 - exit_if_rg_config_application_failed $? 
"$rg_id" - - # wait until RG available again then check value updated - wait_and_assert_replication_group_synced_and_available "$rg_id" - k8s_assert_replication_group_status_property "$rg_id" ".automaticFailover" "enabled" -} - -# create cluster mode disabled replication group with one replica and auto failover enabled, attempt to remove -# replica while keeping auto failover enabled: negative test, expect failure -test_modify_rg_remove_replica_with_af_enabled() { - debug_msg "executing ${FUNCNAME[0]}" - - # generate and apply yaml for replication group creation - clear_rg_parameter_variables - rg_id="test-rg-remove-last-replica-af-enabled" - automatic_failover_enabled="true" - num_node_groups="1" - replicas_per_node_group="1" - multi_az_enabled="false" - provide_replication_group_yaml | kubectl apply -f - 2>&1 - exit_if_rg_config_application_failed $? "$rg_id" - - # ensure resource successfully created and available, check resource is as expected - wait_and_assert_replication_group_synced_and_available "$rg_id" - k8s_assert_replication_group_replica_count "$rg_id" 1 - k8s_assert_replication_group_status_property "$rg_id" ".automaticFailover" "enabled" - - # update config and apply: attempt to remove replica while keeping auto failover enabled - replicas_per_node_group="0" - provide_replication_group_yaml | kubectl apply -f - 2>&1 - exit_if_rg_config_application_failed $? "$rg_id" - - check_rg_terminal_condition_true "$rg_id" "Must have at least 1 replica when cluster mode is disabled with auto failover enabled" -} - -# create cluster mode disabled replication group with one replica and auto failover enabled, then remove the -# replica while disabling auto failover -test_modify_rg_remove_replica_disable_af() { - debug_msg "executing ${FUNCNAME[0]}" - - # generate and apply yaml for replication group creation - clear_rg_parameter_variables - rg_id="test-rg-remove-last-replica-disable-af" - automatic_failover_enabled="true" - num_node_groups="1" - replicas_per_node_group="1" - multi_az_enabled="false" - provide_replication_group_yaml | kubectl apply -f - 2>&1 - exit_if_rg_config_application_failed $? "$rg_id" - - # ensure resource successfully created and available, check resource is as expected - wait_and_assert_replication_group_synced_and_available "$rg_id" - k8s_assert_replication_group_replica_count "$rg_id" 1 - k8s_assert_replication_group_status_property "$rg_id" ".automaticFailover" "enabled" - k8s_assert_replication_group_total_node_count "$rg_id" 2 - - # update config and apply: remove replica while disabling auto failover - replicas_per_node_group="0" - automatic_failover_enabled="false" - provide_replication_group_yaml | kubectl apply -f - 2>&1 - exit_if_rg_config_application_failed $? 
"$rg_id" - - # wait and assert new state - wait_and_assert_replication_group_synced_and_available "$rg_id" - k8s_assert_replication_group_replica_count "$rg_id" 0 - k8s_assert_replication_group_status_property "$rg_id" ".automaticFailover" "disabled" - k8s_assert_replication_group_total_node_count "$rg_id" 1 -} - -# create 1 shard/no replica RG, enable autofailover while adding replicas -test_modify_rg_enable_af_add_replicas() { - debug_msg "executing ${FUNCNAME[0]}" - - # generate and apply yaml for replication group creation - clear_rg_parameter_variables - rg_id="test-enable-af-add-replicas" - num_node_groups=1 - replicas_per_node_group=0 - automatic_failover_enabled="false" - multi_az_enabled="false" - provide_replication_group_yaml | kubectl apply -f - 2>&1 - exit_if_rg_config_application_failed $? "$rg_id" - - # ensure resource successfully created and available, check resource is as expected - wait_and_assert_replication_group_synced_and_available "$rg_id" - k8s_assert_replication_group_shard_count "$rg_id" 1 - k8s_assert_replication_group_replica_count "$rg_id" 0 - k8s_assert_replication_group_status_property "$rg_id" ".automaticFailover" "disabled" - k8s_assert_replication_group_status_property "$rg_id" ".multiAZ" "disabled" - k8s_assert_replication_group_total_node_count "$rg_id" 1 - - # update config and apply: enable autofailover and add replicas to satisfy enabling condition - replicas_per_node_group=2 - automatic_failover_enabled="true" - provide_replication_group_yaml | kubectl apply -f - 2>&1 - exit_if_rg_config_application_failed $? "$rg_id" - - # wait and assert new state - wait_and_assert_replication_group_synced_and_available "$rg_id" - k8s_assert_replication_group_shard_count "$rg_id" 1 - k8s_assert_replication_group_replica_count "$rg_id" 2 - k8s_assert_replication_group_status_property "$rg_id" ".automaticFailover" "enabled" - k8s_assert_replication_group_status_property "$rg_id" ".multiAZ" "disabled" - k8s_assert_replication_group_total_node_count "$rg_id" 3 -} - -# create replication group with one shard and one replica with custom node group configuration. Increase -# replicas per node group and specify preferred AZ -test_modify_rg_increase_replica_specify_az() { - debug_msg "executing ${FUNCNAME[0]}" - - # generate and apply yaml for replication group creation - clear_rg_parameter_variables - rg_id="rg-inc-replica-specify-az" - num_node_groups=1 - yaml_base="$(provide_replication_group_yaml_for_replica_config)" - rg_yaml=$(cat <&1 - exit_if_rg_config_application_failed $? "$rg_id" - - # ensure resource successfully created and available, check resource is as expected - wait_and_assert_replication_group_synced_and_available "$rg_id" - k8s_assert_replication_group_shard_count "$rg_id" 1 - k8s_assert_replication_group_replica_count "$rg_id" 1 - k8s_assert_replication_group_total_node_count "$rg_id" 2 - - # update config and apply: increase replica count, specify additional AZ - yaml_base="$(provide_replication_group_yaml_for_replica_config)" - rg_yaml=$(cat <&1 - exit_if_rg_config_application_failed $? 
"$rg_id" - - # wait and assert resource state - wait_and_assert_replication_group_synced_and_available "$rg_id" - k8s_assert_replication_group_shard_count "$rg_id" 1 - k8s_assert_replication_group_replica_count "$rg_id" 2 - k8s_assert_replication_group_total_node_count "$rg_id" 3 -} - -# create a cluster mode enabled RG, then scale up while adding replicas -test_modify_rg_cme_scale_up_add_replicas() { - debug_msg "executing ${FUNCNAME[0]}" - - # generate and apply yaml for replication group creation - clear_rg_parameter_variables - rg_id="rg-cme-scale-up-add-replicas" - cache_node_type="cache.t3.micro" - num_node_groups="2" - yaml_base=$(provide_replication_group_yaml_for_replica_config) - rg_yaml=$(cat <&1 - exit_if_rg_config_application_failed $? "$rg_id" - - # ensure resource successfully created and available, check resource is as expected - wait_and_assert_replication_group_synced_and_available "$rg_id" - k8s_assert_replication_group_shard_count "$rg_id" 2 - k8s_assert_replication_group_replica_count "$rg_id" 1 - k8s_assert_replication_group_total_node_count "$rg_id" 4 - aws_assert_replication_group_property "$rg_id" ".CacheNodeType" "cache.t3.micro" - - # update config and apply: scale out and add replicas - cache_node_type="cache.t3.small" - yaml_base=$(provide_replication_group_yaml_for_replica_config "$rg_id") - rg_yaml=$(cat <&1 - exit_if_rg_config_application_failed $? "$rg_id" - - # wait and assert new resource state - wait_and_assert_replication_group_synced_and_available "$rg_id" - k8s_assert_replication_group_shard_count "$rg_id" 2 - k8s_assert_replication_group_replica_count "$rg_id" 2 - k8s_assert_replication_group_total_node_count "$rg_id" 6 - aws_assert_replication_group_property "$rg_id" ".CacheNodeType" "cache.t3.small" -} - -# ensure node roles are correct after failover: create a cluster mode disabled RG with one replica and -# invoke the test-failover API. Ensure node roles from k8s are in sync with node roles from AWS CLI -test_rg_failover_roles() { - debug_msg "executing ${FUNCNAME[0]}" - - # generate and apply yaml for replication group creation - clear_rg_parameter_variables - rg_id="rg-failover-roles" - num_node_groups=1 - replicas_per_node_group=1 - provide_replication_group_yaml | kubectl apply -f - 2>&1 - exit_if_rg_config_application_failed $? "$rg_id" - - # ensure second RG successfully created and available, assert initial node roles - wait_and_assert_replication_group_synced_and_available "$rg_id" - local shard_json=$(aws_get_replication_group_json "$rg_id" | jq -r '.NodeGroups[0]') - local node1_role=$(echo "$shard_json" | jq -r '.NodeGroupMembers[] | select(.CacheClusterId=="rg-failover-roles-001") | .CurrentRole') - local node2_role=$(echo "$shard_json" | jq -r '.NodeGroupMembers[] | select(.CacheClusterId=="rg-failover-roles-002") | .CurrentRole') - assert_equal "primary" "$node1_role" "Node $rg_id-001 has role $node1_role, but expected primary before failover" || \ - log_and_exit "replicationgroups/$rg_id" - assert_equal "replica" "$node2_role" "Node $rg_id-002 has role $node2_role, but expected replica before failover" || \ - log_and_exit "replicationgroups/$rg_id" - - # call test-failover API to trigger failover to replica - daws elasticache test-failover --replication-group-id "$rg_id" --node-group-id "0001" 1>/dev/null 2>&1 - local err_code=$? 
- assert_equal "0" "$err_code" "Expected success from test-failover call but got $err_code" || exit 1 - - # wait for failover to complete (initial primary node takes role "replica") - local wait_failed="true" - for i in $(seq 0 9); do - sleep 30 - k8s_controller_reload_credentials "$service_name" - local shard_json=$(aws_get_replication_group_json "$rg_id" | jq -r '.NodeGroups[0]') - local node1_role=$(echo "$shard_json" | jq -r -e '.NodeGroupMembers[] | select(.CacheClusterId=="rg-failover-roles-001") | .CurrentRole') - assert_equal "0" "$?" "Node roles missing in replication group $rg_id" || log_and_exit "replicationgroups/$rg_id" - if [[ "$node1_role" == "replica" ]]; then - wait_failed="false" - break - fi - done - if [[ $wait_failed == "true" ]]; then - echo "FAIL: node $rg_id-001 should have role replica after failover operation" - log_and_exit "replicationgroups/$rg_id" - fi - - # roles updated in service at this point, ensure roles in k8s status match - local shard_k8s=$(k8s_get_rg_field "$rg_id" ".status .nodeGroups[0]") - local node1_role_k8s=$(echo "$shard_k8s" | jq -r '.nodeGroupMembers[] | select(.cacheClusterID=="rg-failover-roles-001") | .currentRole') - local node2_role_k8s=$(echo "$shard_k8s" | jq -r '.nodeGroupMembers[] | select(.cacheClusterID=="rg-failover-roles-002") | .currentRole') - - assert_equal "replica" "$node1_role_k8s" "Node $rg_id-001 has role $node1_role, but expected replica after failover" || \ - log_and_exit "replicationgroups/$rg_id" - assert_equal "primary" "$node2_role_k8s" "Node $rg_id-002 has role $node2_role, but expected primary after failover" || \ - log_and_exit "replicationgroups/$rg_id" -} - -# run tests -test_modify_rg_negative_replicas -test_modify_rg_enable_auto_failover -test_modify_rg_remove_replica_with_af_enabled - -test_modify_rg_remove_replica_disable_af -test_modify_rg_enable_af_add_replicas -test_modify_rg_increase_replica_specify_az -test_modify_rg_cme_scale_up_add_replicas -test_rg_failover_roles - -k8s_perform_rg_test_cleanup \ No newline at end of file diff --git a/test/e2e/replication_group/scaling.sh b/test/e2e/replication_group/scaling.sh deleted file mode 100755 index 5ecfdce8..00000000 --- a/test/e2e/replication_group/scaling.sh +++ /dev/null @@ -1,295 +0,0 @@ -#!/usr/bin/env bash - -# replication group scaling tests: horizontal and vertical scaling - -THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -ROOT_DIR="$THIS_DIR/../../../.." -SCRIPTS_DIR="$ROOT_DIR/scripts" - -source "$SCRIPTS_DIR/lib/common.sh" -source "$SCRIPTS_DIR/lib/k8s.sh" -source "$SCRIPTS_DIR/lib/testutil.sh" -source "$SCRIPTS_DIR/lib/aws/elasticache.sh" - -check_is_installed jq "Please install jq before running this script." -AWS_REGION=${AWS_REGION:-"us-west-2"} - -test_name="$( filenoext "${BASH_SOURCE[0]}" )" -ack_ctrl_pod_id=$( controller_pod_id ) -debug_msg "executing test group: $service_name/$test_name------------------------------" -debug_msg "selected AWS region: $AWS_REGION" - -# attempt to scale out a cluster mode disabled RG with no replicas: negative test, expect failure -test_modify_rg_cmd_scale_out() { - debug_msg "executing ${FUNCNAME[0]}" - - # generate and apply yaml for replication group creation - clear_rg_parameter_variables - rg_id="test-cmd-scale-out" - automatic_failover_enabled="false" - num_node_groups="1" - replicas_per_node_group="0" - multi_az_enabled="false" - provide_replication_group_yaml | kubectl apply -f - 2>&1 - exit_if_rg_config_application_failed $? 
"$rg_id" - - # ensure resource successfully created and available, check resource is as expected - wait_and_assert_replication_group_synced_and_available "$rg_id" - k8s_assert_replication_group_shard_count "$rg_id" 1 - k8s_assert_replication_group_replica_count "$rg_id" 0 - - # update config and apply: attempt to scale out - # config application should actually succeed in this case, but leave RG with Terminal Condition set True - num_node_groups=2 - provide_replication_group_yaml | kubectl apply -f - 2>&1 - exit_if_rg_config_application_failed $? "$rg_id" - - # ensure terminal condition exists, is set true, and has expected message - check_rg_terminal_condition_true "$rg_id" "Operation is only applicable for cluster mode enabled" -} - -# create a cluster mode disabled RG with 3 replicas, and scale up -test_modify_rg_cmd_scale_up() { - debug_msg "executing ${FUNCNAME[0]}" - - # generate and apply yaml for replication group creation - clear_rg_parameter_variables - rg_id="test-cmd-scale-up" - automatic_failover_enabled="true" - cache_node_type="cache.t3.micro" - num_node_groups=1 - replicas_per_node_group=3 - multi_az_enabled="false" - provide_replication_group_yaml | kubectl apply -f - 2>&1 - exit_if_rg_config_application_failed $? "$rg_id" - - # ensure resource successfully created and available, check resource is as expected - wait_and_assert_replication_group_synced_and_available "$rg_id" - aws_assert_replication_group_property "$rg_id" ".CacheNodeType" "cache.t3.micro" - - # update config and apply: scale up to larger instance - cache_node_type="cache.t3.small" - provide_replication_group_yaml | kubectl apply -f - 2>&1 - exit_if_rg_config_application_failed $? "$rg_id" - - # wait and assert new state - wait_and_assert_replication_group_synced_and_available "$rg_id" - aws_assert_replication_group_property "$rg_id" ".CacheNodeType" "cache.t3.small" -} - -# create a cluster mode enabled RG, then attempt to scale out and increase replica count -test_modify_rg_cme_scale_out_add_replica() { - debug_msg "executing ${FUNCNAME[0]}" - - # generate and apply yaml for replication group creation - clear_rg_parameter_variables - rg_id="rg-cme-scale-out-add-replica" - num_node_groups="2" - yaml_base="$(provide_replication_group_yaml_for_replica_config)" - rg_yaml=$(cat <&1 - exit_if_rg_config_application_failed $? "$rg_id" - - # ensure resource successfully created and available, check resource is as expected - wait_and_assert_replication_group_synced_and_available "$rg_id" - k8s_assert_replication_group_shard_count "$rg_id" 2 - k8s_assert_replication_group_replica_count "$rg_id" 1 - k8s_assert_replication_group_total_node_count "$rg_id" 4 - - # update config and apply: scale out and add replicas - num_node_groups="3" - yaml_base="$(provide_replication_group_yaml_for_replica_config)" - rg_yaml=$(cat <&1 - exit_if_rg_config_application_failed $? "$rg_id" - - # wait and assert new resource state - wait_and_assert_replication_group_synced_and_available "$rg_id" - k8s_assert_replication_group_shard_count "$rg_id" 3 - k8s_assert_replication_group_replica_count "$rg_id" 2 - k8s_assert_replication_group_total_node_count "$rg_id" 9 -} - -# scale out a cluster mode enabled RG where replica count is uneven between shards (i.e. 
there is a replicaCount -# specified for each node group rather than one replicasPerNodeGroup property for the entire RG) -test_modify_rg_cme_scale_out_uneven_shards() { - debug_msg "executing ${FUNCNAME[0]}" - - # generate and apply yaml for replication group creation - clear_rg_parameter_variables - rg_id="rg-cme-scale-out-uneven-shards" - yaml_base=$(provide_replication_group_yaml_basic "$rg_id") - rg_yaml=$(cat <&1 - exit_if_rg_config_application_failed $? "$rg_id" - - # ensure resource successfully created and available, check resource is as expected - wait_and_assert_replication_group_synced_and_available "$rg_id" - k8s_assert_replication_group_shard_count "$rg_id" 2 - k8s_assert_replication_group_total_node_count "$rg_id" 5 #skip checking each node group for now - - # update config and apply: scale out and add replicas - yaml_base=$(provide_replication_group_yaml_basic "$rg_id") - rg_yaml=$(cat <&1 - exit_if_rg_config_application_failed $? "$rg_id" - - # wait and assert new resource state - wait_and_assert_replication_group_synced_and_available "$rg_id" - k8s_assert_replication_group_shard_count "$rg_id" 3 - k8s_assert_replication_group_total_node_count "$rg_id" 7 -} - -# basic scale out test for cluster mode enabled replication groups, # replicas/node group unchanged -test_modify_rg_cme_scale_out_basic() { - debug_msg "executing ${FUNCNAME[0]}" - - # generate and apply yaml for replication group creation - clear_rg_parameter_variables - rg_id="rg-cme-scale-out-basic" - num_node_groups="2" - yaml_base="$(provide_replication_group_yaml_for_replica_config)" - rg_yaml=$(cat <&1 - exit_if_rg_config_application_failed $? "$rg_id" - - # ensure resource successfully created and available, check resource is as expected - wait_and_assert_replication_group_synced_and_available "$rg_id" - k8s_assert_replication_group_shard_count "$rg_id" 2 - k8s_assert_replication_group_replica_count "$rg_id" 1 - k8s_assert_replication_group_total_node_count "$rg_id" 4 - - # update config and apply: scale out - num_node_groups="3" - yaml_base="$(provide_replication_group_yaml_for_replica_config)" - rg_yaml=$(cat <&1 - exit_if_rg_config_application_failed $? "$rg_id" - - # wait and assert resource state - wait_and_assert_replication_group_synced_and_available "$rg_id" - k8s_assert_replication_group_shard_count "$rg_id" 3 - k8s_assert_replication_group_replica_count "$rg_id" 1 - k8s_assert_replication_group_total_node_count "$rg_id" 6 -} - -# run tests -test_modify_rg_cmd_scale_out -test_modify_rg_cmd_scale_up -test_modify_rg_cme_scale_out_add_replica # failing, terminal condition shows "2 validation errors" after new config - issue with distribution of AZs in test case? -test_modify_rg_cme_scale_out_uneven_shards -test_modify_rg_cme_scale_out_basic - -k8s_perform_rg_test_cleanup \ No newline at end of file diff --git a/test/e2e/requirements.txt b/test/e2e/requirements.txt index 17018829..e469fd1c 100644 --- a/test/e2e/requirements.txt +++ b/test/e2e/requirements.txt @@ -1 +1 @@ -acktest @ git+https://github.com/aws-controllers-k8s/test-infra.git@955d7831ee374a212250179e95a5f3b75e555fd9 \ No newline at end of file +acktest @ git+https://github.com/aws-controllers-k8s/test-infra.git@5a09bbdb961ea14a65b15b63769134125023ac61 \ No newline at end of file diff --git a/test/e2e/resources/cache_cluster_simple.yaml b/test/e2e/resources/cache_cluster_simple.yaml new file mode 100644 index 00000000..8c437627 --- /dev/null +++ b/test/e2e/resources/cache_cluster_simple.yaml @@ -0,0 +1,16 @@ +# A simple CacheCluster manifest. 
+apiVersion: elasticache.services.k8s.aws/v1alpha1 +kind: CacheCluster +metadata: + name: $CACHE_CLUSTER_ID +spec: + cacheClusterID: $CACHE_CLUSTER_ID + cacheNodeType: cache.t3.micro + numCacheNodes: 2 + engine: memcached + autoMinorVersionUpgrade: false + tags: + - key: t1 + value: v1 + - key: t2 + value: v2 diff --git a/test/e2e/resources/replicationgroup_authtoken.yaml b/test/e2e/resources/replicationgroup_authtoken.yaml deleted file mode 100644 index 47483beb..00000000 --- a/test/e2e/resources/replicationgroup_authtoken.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -metadata: - name: $RG_ID -spec: - engine: redis - replicationGroupID: $RG_ID - description: Auth token test - cacheNodeType: cache.t3.micro - numNodeGroups: 1 - replicasPerNodeGroup: 0 - transitEncryptionEnabled: true - cacheSubnetGroupName: default - authToken: - namespace: default - name: $NAME - key: $KEY \ No newline at end of file diff --git a/test/e2e/resources/replicationgroup_cme_ngc.yaml b/test/e2e/resources/replicationgroup_cme_ngc.yaml deleted file mode 100644 index 6a36f200..00000000 --- a/test/e2e/resources/replicationgroup_cme_ngc.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# CME replication group. "NGC" means that the field nodeGroupConfiguration is specified, with a detailed configuration -# for each shard -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -metadata: - name: $RG_ID -spec: - cacheNodeType: cache.t3.micro - engine: redis - nodeGroupConfiguration: - - nodeGroupID: $NGID1 - primaryAvailabilityZone: us-west-2a - replicaAvailabilityZones: - - us-west-2b - - us-west-2c - replicaCount: 2 - - nodeGroupID: $NGID2 - primaryAvailabilityZone: us-west-2b - replicaAvailabilityZones: - - us-west-2c - - us-west-2a - replicaCount: 2 - description: cluster-mode enabled RG - replicationGroupID: $RG_ID \ No newline at end of file diff --git a/test/e2e/resources/replicationgroup_cmd_update.yaml b/test/e2e/resources/replicationgroup_create_delete.yaml similarity index 100% rename from test/e2e/resources/replicationgroup_cmd_update.yaml rename to test/e2e/resources/replicationgroup_create_delete.yaml diff --git a/test/e2e/resources/replicationgroup_input_coverage.yaml b/test/e2e/resources/replicationgroup_input_coverage.yaml deleted file mode 100644 index 544d4d28..00000000 --- a/test/e2e/resources/replicationgroup_input_coverage.yaml +++ /dev/null @@ -1,56 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -metadata: - name: $RG_ID -spec: - atRestEncryptionEnabled: true - autoMinorVersionUpgrade: true - automaticFailoverEnabled: true - cacheNodeType: cache.t3.small - cacheParameterGroupName: default.redis6.x.cluster.on - cacheSubnetGroupName: default - engine: redis - engineVersion: 6.x - kmsKeyID: $KMS_KEY_ID - multiAZEnabled: true - nodeGroupConfiguration: - - nodeGroupID: "1111" - primaryAvailabilityZone: us-west-2a - replicaAvailabilityZones: - - us-west-2b - replicaCount: 1 - slots: 0-5999 - - nodeGroupID: "2222" - primaryAvailabilityZone: us-west-2c - replicaAvailabilityZones: - - us-west-2a - - us-west-2c - - us-west-2b - replicaCount: 3 - slots: 6000-16383 - notificationTopicARN: $SNS_TOPIC_ARN - numNodeGroups: 2 - port: 6380 - preferredMaintenanceWindow: sun:23:00-mon:01:30 - description: test replication group for input field coverage - replicationGroupID: $RG_ID - securityGroupIDs: - - $SG_ID - snapshotRetentionLimit: 5 - snapshotWindow: 05:00-06:00 - tags: - - key: service - value: 
elasticache - - key: region - value: us-west-2 - transitEncryptionEnabled: true - userGroupIDs: - - $USERGROUP_ID - logDeliveryConfigurations: - - - destinationType: cloudwatch-logs - logFormat: json - logType: slow-log - destinationDetails: - cloudWatchLogsDetails: - logGroup: $LOG_GROUP \ No newline at end of file diff --git a/test/e2e/resources/replicationgroup_largecluster.yaml b/test/e2e/resources/replicationgroup_largecluster.yaml deleted file mode 100644 index 2daf10be..00000000 --- a/test/e2e/resources/replicationgroup_largecluster.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -metadata: - name: $RG_ID -spec: - cacheNodeType: cache.t3.micro - engine: redis - numNodeGroups: $NUM_NODE_GROUPS - replicasPerNodeGroup: $REPLICAS_PER_NODE_GROUP - description: large cluster mode enabled RG - replicationGroupID: $RG_ID \ No newline at end of file diff --git a/test/e2e/resources/replicationgroup_rpng.yaml b/test/e2e/resources/replicationgroup_rpng.yaml deleted file mode 100644 index 555e2b00..00000000 --- a/test/e2e/resources/replicationgroup_rpng.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# "RPNG" means that the field replicasPerNodeGroup is set (instead of nodeGroupConfiguration), -# meaning that this replication group has a uniform configuration across all shards. This is one of the more -# basic configurations as very few fields are specified and the resulting RG can either be CME or CMD -# (depending on the number of specified node groups). -apiVersion: elasticache.services.k8s.aws/v1alpha1 -kind: ReplicationGroup -metadata: - name: $RG_ID -spec: - cacheNodeType: cache.t3.micro - engine: redis - numNodeGroups: $NUM_NODE_GROUPS - replicasPerNodeGroup: $REPLICAS_PER_NODE_GROUP - description: cluster-mode enabled RG - replicationGroupID: $RG_ID \ No newline at end of file diff --git a/test/e2e/resources/replicationgroup_cme_misc.yaml b/test/e2e/resources/replicationgroup_update.yaml similarity index 63% rename from test/e2e/resources/replicationgroup_cme_misc.yaml rename to test/e2e/resources/replicationgroup_update.yaml index 6f47dc29..24798dd8 100644 --- a/test/e2e/resources/replicationgroup_cme_misc.yaml +++ b/test/e2e/resources/replicationgroup_update.yaml @@ -1,5 +1,3 @@ -# CME replication group with some optional fields specified. The modification of these fields is typically quicker -# than other actions (such as scaling) which require provisioning/deletion of nodes. 
apiVersion: elasticache.services.k8s.aws/v1alpha1 kind: ReplicationGroup metadata: @@ -7,6 +5,8 @@ metadata: spec: cacheNodeType: cache.t3.micro engine: redis + ipDiscovery: $IP_DISCOVERY + networkType: $NETWORK_TYPE numNodeGroups: 2 preferredMaintenanceWindow: $PMW replicasPerNodeGroup: 1 @@ -14,3 +14,8 @@ spec: replicationGroupID: $RG_ID snapshotRetentionLimit: $SRL snapshotWindow: $SW + tags: + - key: tag_to_remove + value: should_be_removed + - key: tag_to_update + value: old_value \ No newline at end of file diff --git a/test/e2e/resources/serverless_cache_basic.yaml b/test/e2e/resources/serverless_cache_basic.yaml new file mode 100644 index 00000000..6715b324 --- /dev/null +++ b/test/e2e/resources/serverless_cache_basic.yaml @@ -0,0 +1,13 @@ +# Basic ServerlessCache for testing basic creation and deletion +apiVersion: elasticache.services.k8s.aws/v1alpha1 +kind: ServerlessCache +metadata: + name: $SC_NAME +spec: + serverlessCacheName: $SC_NAME + engine: $ENGINE + majorEngineVersion: "$MAJOR_ENGINE_VERSION" + cacheUsageLimits: + eCPUPerSecond: + minimum: $ECPU_MIN + maximum: $ECPU_MAX \ No newline at end of file diff --git a/test/e2e/resources/serverless_cache_snapshot_basic.yaml b/test/e2e/resources/serverless_cache_snapshot_basic.yaml new file mode 100644 index 00000000..f3d9c71c --- /dev/null +++ b/test/e2e/resources/serverless_cache_snapshot_basic.yaml @@ -0,0 +1,8 @@ +# Basic ServerlessCacheSnapshot for testing basic creation and deletion +apiVersion: elasticache.services.k8s.aws/v1alpha1 +kind: ServerlessCacheSnapshot +metadata: + name: $SNAPSHOT_NAME +spec: + serverlessCacheSnapshotName: $SNAPSHOT_NAME + serverlessCacheName: $SC_NAME \ No newline at end of file diff --git a/test/e2e/scenarios/Resharding/cme_scale_in_config_rollback.yaml b/test/e2e/scenarios/Resharding/cme_scale_in_config_rollback.yaml deleted file mode 100644 index a412d4e2..00000000 --- a/test/e2e/scenarios/Resharding/cme_scale_in_config_rollback.yaml +++ /dev/null @@ -1,52 +0,0 @@ -id: "RG_CME_SCALE_IN_CONFIG_ROLLBACK" -description: "In this test we execute scale in which will rollback" -#marks: -# - slow -# - blocked -resource: - apiVersion: $CRD_GROUP/$CRD_VERSION - kind: ReplicationGroup - metadata: - name: reshard$RANDOM_SUFFIX -steps: - - id: "create_CME_replication_group" - description: "Initial config" - create: - spec: - engine: redis - replicationGroupID: reshard$RANDOM_SUFFIX - description: Scaling in rollback - cacheNodeType: cache.m5.large - snapshotName: test-scale-in-config-rollback - cacheParameterGroupName: default.redis6.x.cluster.on - nodeGroupConfiguration: - - nodeGroupID: "0001" - replicaCount: 0 - - nodeGroupID: "0002" - replicaCount: 0 - wait: - status: - conditions: - ACK.ResourceSynced: - status: "True" - timeout: 2800 - expect: - status: - status: "available" - - id: "scale_in" - description: "Scaling in to 1 shard" - patch: - spec: - nodeGroupConfiguration: - - nodeGroupID: "0001" - replicaCount: 0 - wait: 300 - expect: - status: - conditions: - ACK.Terminal: "True" - ACK.ResourceSynced: - status: "False" - - id: "delete_CME_RG" - description: "Delete cluster mode enabled replication group" - delete: reshard$RANDOM_SUFFIX diff --git a/test/e2e/scenarios/Resharding/cme_scale_in_rollback.yaml b/test/e2e/scenarios/Resharding/cme_scale_in_rollback.yaml deleted file mode 100644 index 69f60843..00000000 --- a/test/e2e/scenarios/Resharding/cme_scale_in_rollback.yaml +++ /dev/null @@ -1,45 +0,0 @@ -id: "RG_CME_SCALE_IN_ROLLBACK" -description: "In this test we execute scale in which will 
rollback" -#marks: -# - slow -# - blocked -resource: - apiVersion: $CRD_GROUP/$CRD_VERSION - kind: ReplicationGroup - metadata: - name: reshard$RANDOM_SUFFIX -steps: - - id: "create_CME_replication_group" - description: "Initial config" - create: - spec: - engine: redis - replicationGroupID: reshard$RANDOM_SUFFIX - description: Scaling in rollback - cacheNodeType: cache.m5.large - numNodeGroups: 2 - snapshotName: test-scale-in-rollback - wait: - status: - conditions: - ACK.ResourceSynced: - status: "True" - timeout: 2800 - expect: - status: - status: "available" - - id: "scale_in" - description: "Scaling in to 1 shard" - patch: - spec: - numNodeGroups: 1 - wait: 300 - expect: - status: - conditions: - ACK.Terminal: "True" - ACK.ResourceSynced: - status: "False" - - id: "delete_CME_RG" - description: "Delete cluster mode enabled replication group" - delete: reshard$RANDOM_SUFFIX diff --git a/test/e2e/scenarios/ScaleUpAndDown/cmd_basic_create_update.yaml b/test/e2e/scenarios/ScaleUpAndDown/cmd_basic_create_update.yaml deleted file mode 100644 index 256f5846..00000000 --- a/test/e2e/scenarios/ScaleUpAndDown/cmd_basic_create_update.yaml +++ /dev/null @@ -1,45 +0,0 @@ -id: "RG_CMD_basic_create_update" -description: "In this test we create RG with absolutely minimum configuration and try to update it." -#marks: -# - slow -# - blocked -resource: - apiVersion: $CRD_GROUP/$CRD_VERSION - kind: ReplicationGroup - metadata: - name: scaling$RANDOM_SUFFIX -steps: - - id: "create_CMD_replication_group" - description: "Initial config" - create: - spec: - engine: redis - replicationGroupID: scaling$RANDOM_SUFFIX - description: Basic create and update of CMD RG - cacheNodeType: cache.t3.micro - numNodeGroups: 1 - wait: - status: - conditions: - ACK.ResourceSynced: - status: "True" - timeout: 2800 - expect: - status: - status: "available" - - id: "invalid_cacheNodeType_on_CMD_replication_group_causes_Terminal_condition" - description: "Negative test include transitEncryptionEnabled" - patch: - spec: - cacheNodeType: cache.micro # invalid value - wait: 60 - expect: - status: - conditions: - ACK.Terminal: "True" - ACK.ResourceSynced: - status: "True" - # message: "Expected message ..." 
- - id: "delete_CMD_RG" - description: "Delete cluster mode disabled replication group" - delete: scaling$RANDOM_SUFFIX diff --git a/test/e2e/scenarios/ScaleUpAndDown/cmd_scale_down.yaml b/test/e2e/scenarios/ScaleUpAndDown/cmd_scale_down.yaml deleted file mode 100644 index 3879b9a9..00000000 --- a/test/e2e/scenarios/ScaleUpAndDown/cmd_scale_down.yaml +++ /dev/null @@ -1,43 +0,0 @@ -id: "CMD_SCALE_DOWN" -description: "Scale down CMD" -resource: - apiVersion: $CRD_GROUP/$CRD_VERSION - kind: ReplicationGroup - metadata: - name: scaling$RANDOM_SUFFIX -steps: - - id: "create_CMD_replication_group" - description: "Initial config" - create: - spec: - engine: redis - replicationGroupID: scaling$RANDOM_SUFFIX - description: Scale down for CMD - cacheNodeType: cache.t3.medium - numNodeGroups: 1 - wait: - status: - conditions: - ACK.ResourceSynced: - status: "True" - timeout: 2800 - expect: - status: - status: "available" - - id: "scale_down" - description: "Scale down to t3.micro" - patch: - spec: - cacheNodeType: cache.t3.micro - wait: - status: - conditions: - ACK.ResourceSynced: - status: "True" - timeout: 2800 - expect: - status: - status: "available" - - id: "delete_CMD_RG" - description: "Delete cluster mode disabled replication group" - delete: scaling$RANDOM_SUFFIX diff --git a/test/e2e/scenarios/ScaleUpAndDown/cmd_scale_up.yaml b/test/e2e/scenarios/ScaleUpAndDown/cmd_scale_up.yaml deleted file mode 100644 index 3c808fcd..00000000 --- a/test/e2e/scenarios/ScaleUpAndDown/cmd_scale_up.yaml +++ /dev/null @@ -1,43 +0,0 @@ -id: "CMD_SCALE_UP" -description: "Scale up CMD" -resource: - apiVersion: $CRD_GROUP/$CRD_VERSION - kind: ReplicationGroup - metadata: - name: scaling$RANDOM_SUFFIX -steps: - - id: "create_CMD_replication_group" - description: "Initial config" - create: - spec: - engine: redis - replicationGroupID: scaling$RANDOM_SUFFIX - description: Scale up for CMD - cacheNodeType: cache.t3.micro - numNodeGroups: 1 - wait: - status: - conditions: - ACK.ResourceSynced: - status: "True" - timeout: 2800 - expect: - status: - status: "available" - - id: "scale_up" - description: "Scale up from t3.micro to t3.medium" - patch: - spec: - cacheNodeType: cache.t3.medium - wait: - status: - conditions: - ACK.ResourceSynced: - status: "True" - timeout: 2800 - expect: - status: - status: "available" - - id: "delete_CMD_RG" - description: "Delete cluster mode disabled replication group" - delete: scaling$RANDOM_SUFFIX diff --git a/test/e2e/scenarios/ScaleUpAndDown/cme_scale_down.yaml b/test/e2e/scenarios/ScaleUpAndDown/cme_scale_down.yaml deleted file mode 100644 index 3d97520d..00000000 --- a/test/e2e/scenarios/ScaleUpAndDown/cme_scale_down.yaml +++ /dev/null @@ -1,43 +0,0 @@ -id: "CME_SCALE_DOWN" -description: "Scale Down CME" -resource: - apiVersion: $CRD_GROUP/$CRD_VERSION - kind: ReplicationGroup - metadata: - name: scaling$RANDOM_SUFFIX -steps: - - id: "create_CMD_replication_group" - description: "Initial config" - create: - spec: - engine: redis - replicationGroupID: scaling$RANDOM_SUFFIX - description: Scale down CME - cacheNodeType: cache.t3.medium - numNodeGroups: 2 - wait: - status: - conditions: - ACK.ResourceSynced: - status: "True" - timeout: 2800 - expect: - status: - status: "available" - - id: "scale_down" - description: "Scale down from t3.medium to t3.micro" - patch: - spec: - cacheNodeType: cache.t3.micro - wait: - status: - conditions: - ACK.ResourceSynced: - status: "True" - timeout: 2800 - expect: - status: - status: "available" - - id: "delete_CED_RG" - description: "Delete 
cluster mode enabled replication group" - delete: scaling$RANDOM_SUFFIX diff --git a/test/e2e/scenarios/ScaleUpAndDown/cme_scale_down_rollback.yaml b/test/e2e/scenarios/ScaleUpAndDown/cme_scale_down_rollback.yaml deleted file mode 100644 index d9f2891d..00000000 --- a/test/e2e/scenarios/ScaleUpAndDown/cme_scale_down_rollback.yaml +++ /dev/null @@ -1,45 +0,0 @@ -id: "RG_CME_SCALE_DOWN_ROLLBACK" -description: "In this test we execute scale down which will rollback" -#marks: -# - slow -# - blocked -resource: - apiVersion: $CRD_GROUP/$CRD_VERSION - kind: ReplicationGroup - metadata: - name: scaling$RANDOM_SUFFIX -steps: - - id: "create_CME_replication_group" - description: "Initial config" - create: - spec: - engine: redis - replicationGroupID: scaling$RANDOM_SUFFIX - description: Scaling down rollback - cacheNodeType: cache.m5.large - numNodeGroups: 2 - snapshotName: test-scale-down-rollback - wait: - status: - conditions: - ACK.ResourceSynced: - status: "True" - timeout: 2800 - expect: - status: - status: "available" - - id: "smaller_instance_scale_down" - description: "Scaling down with smaller instance" - patch: - spec: - cacheNodeType: cache.t3.micro - wait: 300 - expect: - status: - conditions: - ACK.Terminal: "True" - ACK.ResourceSynced: - status: "False" - - id: "delete_CME_RG" - description: "Delete cluster mode enabled replication group" - delete: scaling$RANDOM_SUFFIX diff --git a/test/e2e/scenarios/ScaleUpAndDown/cme_scale_up.yaml b/test/e2e/scenarios/ScaleUpAndDown/cme_scale_up.yaml deleted file mode 100644 index a40fd417..00000000 --- a/test/e2e/scenarios/ScaleUpAndDown/cme_scale_up.yaml +++ /dev/null @@ -1,43 +0,0 @@ -id: "CME_SCALE_UP" -description: "Scale Up CME" -resource: - apiVersion: $CRD_GROUP/$CRD_VERSION - kind: ReplicationGroup - metadata: - name: scaling$RANDOM_SUFFIX -steps: - - id: "create_CMD_replication_group" - description: "Initial config" - create: - spec: - engine: redis - replicationGroupID: scaling$RANDOM_SUFFIX - description: Scale Up CME - cacheNodeType: cache.t3.micro - numNodeGroups: 2 - wait: - status: - conditions: - ACK.ResourceSynced: - status: "True" - timeout: 2800 - expect: - status: - status: "available" - - id: "scale_up" - description: "Scale up from t3.micro to t3.medium" - patch: - spec: - cacheNodeType: cache.t3.medium - wait: - status: - conditions: - ACK.ResourceSynced: - status: "True" - timeout: 2800 - expect: - status: - status: "available" - - id: "delete_CME_RG" - description: "Delete cluster mode enabled replication group" - delete: scaling$RANDOM_SUFFIX diff --git a/test/e2e/service_bootstrap.py b/test/e2e/service_bootstrap.py index 63f5fdd3..206667da 100755 --- a/test/e2e/service_bootstrap.py +++ b/test/e2e/service_bootstrap.py @@ -14,6 +14,7 @@ """ import boto3 +import yaml import logging import re from dataclasses import dataclass @@ -23,7 +24,7 @@ from acktest import resources from e2e import bootstrap_directory from e2e.util import wait_usergroup_active, wait_snapshot_available -from e2e.bootstrap_resources import TestBootstrapResources +from e2e.bootstrap_resources import TestBootstrapResources, write_bootstrap_config def create_sns_topic() -> str: topic_name = random_suffix_name("ack-sns-topic", 32) @@ -171,4 +172,4 @@ def service_bootstrap() -> dict: if __name__ == "__main__": config = service_bootstrap() - resources.write_bootstrap_config(config, bootstrap_directory) \ No newline at end of file + write_bootstrap_config(config, bootstrap_directory) \ No newline at end of file diff --git a/test/e2e/service_cleanup.py 
b/test/e2e/service_cleanup.py index d11785a3..76743a0f 100644 --- a/test/e2e/service_cleanup.py +++ b/test/e2e/service_cleanup.py @@ -18,7 +18,7 @@ from acktest import resources from e2e import bootstrap_directory -from e2e.bootstrap_resources import TestBootstrapResources +from e2e.bootstrap_resources import TestBootstrapResources, read_bootstrap_config def delete_sns_topic(topic_ARN: str): sns = boto3.client("sns") @@ -146,6 +146,6 @@ def service_cleanup(config: dict): logging.exception(f"Unable to delete Elasticache cache parameter group {resources.CPGName}") -if __name__ == "__main__": - bootstrap_config = resources.read_bootstrap_config(bootstrap_directory) - service_cleanup(bootstrap_config) \ No newline at end of file +if __name__ == "__main__": + bootstrap_config = read_bootstrap_config(bootstrap_directory) + service_cleanup(bootstrap_config) \ No newline at end of file diff --git a/test/e2e/snapshot/e2e.sh b/test/e2e/snapshot/e2e.sh deleted file mode 100755 index a66ca82d..00000000 --- a/test/e2e/snapshot/e2e.sh +++ /dev/null @@ -1,221 +0,0 @@ -#!/usr/bin/env bash - -# snapshot: basic e2e tests - -THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -ROOT_DIR="$THIS_DIR/../../../.." -SCRIPTS_DIR="$ROOT_DIR/scripts" - -source "$SCRIPTS_DIR/lib/common.sh" -source "$SCRIPTS_DIR/lib/k8s.sh" -source "$SCRIPTS_DIR/lib/testutil.sh" -source "$SCRIPTS_DIR/lib/aws/elasticache.sh" - -check_is_installed jq "Please install jq before running this script." - -test_name="$( filenoext "${BASH_SOURCE[0]}" )" -ack_ctrl_pod_id=$( controller_pod_id ) -debug_msg "executing test group: $service_name/$test_name------------------------------" -debug_msg "selected AWS region: $AWS_REGION" - -# basic test covering the four snapshot APIs -test_snapshot_CRUD() { - debug_msg "executing ${FUNCNAME[0]}" - - # delete snapshots if they already exist - no need to wait due to upcoming replication group wait - local snapshot_name="snapshot-test" - local copied_snapshot_name="snapshot-copy" - daws elasticache delete-snapshot --snapshot-name "$snapshot_name" 1>/dev/null 2>&1 - daws elasticache delete-snapshot --snapshot-name "$copied_snapshot_name" 1>/dev/null 2>&1 - - # delete replication group if it already exists (we want it to be created to below specification) - clear_rg_parameter_variables - rg_id="rg-snapshot-test" # non-local because for now, provide_replication_group_yaml uses unscoped variables - daws elasticache describe-replication-groups --replication-group-id "$rg_id" 1>/dev/null 2>&1 - if [[ "$?" == "0" ]]; then - daws elasticache delete-replication-group --replication-group-id "$rg_id" 1>/dev/null 2>&1 - aws_wait_replication_group_deleted "$rg_id" "FAIL: expected replication group $rg_id to have been deleted in ${service_name}" - fi - - # create replication group for snapshot - num_node_groups=1 - replicas_per_node_group=0 - automatic_failover_enabled="false" - multi_az_enabled="false" - provide_replication_group_yaml | kubectl apply -f - 2>&1 - exit_if_rg_config_application_failed $? "$rg_id" - wait_and_assert_replication_group_synced_and_available "$rg_id" - - # proceed to CRUD test: create first snapshot - local cc_id="$rg_id-001" - local snapshot_yaml=$(cat </dev/null 2>&1 - if [[ "$?" 
== "0" ]]; then - daws elasticache delete-replication-group --replication-group-id "$rg_id" 1>/dev/null 2>&1 - aws_wait_replication_group_deleted "$rg_id" "FAIL: expected replication group $rg_id to have been deleted in ${service_name}" - fi - - # create cluster mode disabled replication group for snapshot - num_node_groups=1 - replicas_per_node_group=0 - automatic_failover_enabled="false" - multi_az_enabled="false" - provide_replication_group_yaml | kubectl apply -f - 2>&1 - exit_if_rg_config_application_failed $? "$rg_id" - wait_and_assert_replication_group_synced_and_available "$rg_id" - - # case 1: specify only the replication group - should fail as RG snapshot not permitted for CMD RG - local snapshot_name="snapshot-cmd" - daws elasticache delete-snapshot --snapshot-name "$snapshot_name" 1>/dev/null 2>&1 - sleep 10 - local snapshot_yaml=$(cat </dev/null 2>&1 - sleep 10 - local cc_id="$rg_id-001" - local snapshot_yaml=$(cat </dev/null 2>&1 - if [[ "$?" == "0" ]]; then - daws elasticache delete-replication-group --replication-group-id "$rg_id" 1>/dev/null 2>&1 - aws_wait_replication_group_deleted "$rg_id" "FAIL: expected replication group $rg_id to have been deleted in ${service_name}" - fi - - # create cluster mode enabled replication group for snapshot - num_node_groups=2 - replicas_per_node_group=1 - automatic_failover_enabled="true" - multi_az_enabled="true" - provide_replication_group_yaml | kubectl apply -f - 2>&1 - exit_if_rg_config_application_failed $? "$rg_id" - wait_and_assert_replication_group_synced_and_available "$rg_id" - - # case 1: specify only RG - local snapshot_name="snapshot-cme" - daws elasticache delete-snapshot --snapshot-name "$snapshot_name" 1>/dev/null 2>&1 - sleep 10 - local snapshot_yaml=$(cat </dev/null 2>&1 - sleep 10 - local cc_id="$rg_id-0001-001" # Replication group has two node group, picking first node group. - local snapshot_yaml=$(cat <cleanup() invokes patchResource() - # assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "False", wait_periods=1) - rg_deletion_waiter.wait(ReplicationGroupId=rg_deletion_input["RG_ID"]) + rg_deletion_waiter.wait(ReplicationGroupId=input_dict["RG_ID"]) diff --git a/test/e2e/tests/test_replicationgroup_largecluster.py b/test/e2e/tests/test_replicationgroup_largecluster.py deleted file mode 100644 index ff056b68..00000000 --- a/test/e2e/tests/test_replicationgroup_largecluster.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"). You may -# not use this file except in compliance with the License. A copy of the -# License is located at -# -# http://aws.amazon.com/apache2.0/ -# -# or in the "license" file accompanying this file. This file is distributed -# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -# express or implied. See the License for the specific language governing -# permissions and limitations under the License. 
- -"""Large cluster test for replication group resource -""" - -import pytest - -from time import sleep -from acktest.k8s import resource as k8s - -from e2e.tests.test_replicationgroup import make_replication_group, rg_deletion_waiter, make_rg_name, DEFAULT_WAIT_SECS -from e2e.util import provide_node_group_configuration - -@pytest.fixture(scope="module") -def rg_largecluster_input(make_rg_name): - return { - "RG_ID": make_rg_name("rg-large-cluster"), - "NUM_NODE_GROUPS": "125", - "REPLICAS_PER_NODE_GROUP": "3" - } - -@pytest.fixture(scope="module") -def rg_largecluster(rg_largecluster_input, make_replication_group, rg_deletion_waiter): - input_dict = rg_largecluster_input - - (reference, resource) = make_replication_group("replicationgroup_largecluster", input_dict, input_dict["RG_ID"]) - yield (reference, resource) - - # teardown - k8s.delete_custom_resource(reference) - sleep(DEFAULT_WAIT_SECS) - rg_deletion_waiter.wait(ReplicationGroupId=input_dict["RG_ID"]) - -class TestReplicationGroupLargeCluster: - - @pytest.mark.slow - def test_rg_largecluster(self, rg_largecluster_input, rg_largecluster): - (reference, _) = rg_largecluster - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=240) - - # assertions after initial creation - desired_node_groups = int(rg_largecluster_input['NUM_NODE_GROUPS']) - desired_replica_count = int(rg_largecluster_input['REPLICAS_PER_NODE_GROUP']) - desired_total_nodes = (desired_node_groups * (1 + desired_replica_count)) - resource = k8s.get_resource(reference) - assert resource['status']['status'] == "available" - assert len(resource['status']['nodeGroups']) == desired_node_groups - assert len(resource['status']['memberClusters']) == desired_total_nodes - - # update, wait for resource to sync - desired_node_groups = desired_node_groups - 10 - desired_total_nodes = (desired_node_groups * (1 + desired_replica_count)) - patch = {"spec": {"numNodeGroups": desired_node_groups, - "nodeGroupConfiguration": provide_node_group_configuration(desired_node_groups)}} - _ = k8s.patch_custom_resource(reference, patch) - sleep(DEFAULT_WAIT_SECS) # required as controller has likely not placed the resource in modifying - assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True", wait_periods=240) - - # assert new state after scaling in - resource = k8s.get_resource(reference) - assert resource['status']['status'] == "available" - assert len(resource['status']['nodeGroups']) == desired_node_groups - assert len(resource['status']['memberClusters']) == desired_total_nodes \ No newline at end of file diff --git a/test/e2e/tests/test_scenarios.py b/test/e2e/tests/test_scenarios.py deleted file mode 100644 index bbaf37c4..00000000 --- a/test/e2e/tests/test_scenarios.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"). You may -# not use this file except in compliance with the License. A copy of the -# License is located at -# -# http://aws.amazon.com/apache2.0/ -# -# or in the "license" file accompanying this file. This file is distributed -# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -# express or implied. See the License for the specific language governing -# permissions and limitations under the License. - -""" -Tests for custom resources. -Uses declarative tests framework for custom resources. - -To add test: add scenario yaml to scenarios/ directory. 
-""" - -from e2e.declarative_test_fwk import runner, loader, helper - -import pytest -import boto3 -import logging - -from e2e import service_marker, scenarios_directory, resource_directory, CRD_VERSION, CRD_GROUP, SERVICE_NAME -from e2e.bootstrap_resources import get_bootstrap_resources - -from acktest.k8s import resource as k8s - - -@helper.register_resource_helper(resource_kind="ReplicationGroup", resource_plural="ReplicationGroups") -class ReplicationGroupHelper(helper.ResourceHelper): - """ - Helper for replication group scenarios. - Overrides methods as required for custom resources. - """ - - def wait_for_delete(self, reference: k8s.CustomResourceReference): - logging.debug(f"ReplicationGroupHelper - wait_for_delete()") - ec = boto3.client("elasticache") - waiter = ec.get_waiter('replication_group_deleted') - # throws exception if wait fails - waiter.wait(ReplicationGroupId=reference.name) - - -@pytest.fixture(scope="session") -def input_replacements(): - """ - provides input replacements for test scenarios. - """ - resource_replacements = get_bootstrap_resources().replacement_dict() - replacements = { - "CRD_VERSION": CRD_VERSION, - "CRD_GROUP": CRD_GROUP, - "SERVICE_NAME": SERVICE_NAME - } - yield {**resource_replacements, **replacements} - - -@pytest.fixture(params=loader.list_scenarios(scenarios_directory), ids=loader.idfn) -def scenario(request, input_replacements): - """ - Parameterized pytest fixture - Provides test scenarios to execute - Supports parallel execution of test scenarios - """ - scenario_file_path = request.param - scenario = loader.load_scenario(scenario_file_path, resource_directory, input_replacements) - yield scenario - runner.teardown(scenario) - - -@service_marker -class TestScenarios: - """ - Declarative scenarios based test suite - """ - def test_scenario(self, scenario): - runner.run(scenario) diff --git a/test/e2e/tests/test_serverless_cache.py b/test/e2e/tests/test_serverless_cache.py new file mode 100644 index 00000000..e25078fa --- /dev/null +++ b/test/e2e/tests/test_serverless_cache.py @@ -0,0 +1,223 @@ +# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You may +# not use this file except in compliance with the License. A copy of the +# License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Integration tests for the Elasticache ServerlessCache resource +""" + +import pytest +import boto3 +import logging +from time import sleep + +from acktest.resources import random_suffix_name +from acktest.k8s import resource as k8s +from acktest.k8s import condition +from e2e import service_marker, CRD_GROUP, CRD_VERSION, load_elasticache_resource +from e2e.replacement_values import REPLACEMENT_VALUES + +RESOURCE_PLURAL = "serverlesscaches" +MODIFY_WAIT_AFTER_SECONDS = 120 +CHECK_STATUS_WAIT_SECONDS = 120 + + +def wait_for_serverless_cache_available(elasticache_client, serverless_cache_name): + """Wait for serverless cache to reach 'available' state using boto3 waiter. 
+ """ + waiter = elasticache_client.get_waiter('serverless_cache_available') + waiter.config.delay = 5 + waiter.config.max_attempts = 240 + waiter.wait(ServerlessCacheName=serverless_cache_name) + + +def wait_until_deleted(elasticache_client, serverless_cache_name): + """Wait for serverless cache to be fully deleted using boto3 waiter. + """ + waiter = elasticache_client.get_waiter('serverless_cache_deleted') + waiter.config.delay = 5 + waiter.config.max_attempts = 240 + waiter.wait(ServerlessCacheName=serverless_cache_name) + + +def get_and_assert_status(ref: k8s.CustomResourceReference, expected_status: str, expected_synced: bool): + """Get the serverless cache status and assert it matches the expected status. + """ + cr = k8s.get_resource(ref) + assert cr is not None + assert 'status' in cr + + assert cr['status']['status'] == expected_status + + if expected_synced: + condition.assert_synced(ref) + else: + condition.assert_not_synced(ref) + + +@pytest.fixture(scope="module") +def elasticache_client(): + return boto3.client('elasticache') + + +def _create_serverless_cache(elasticache_client, name_prefix): + serverless_cache_name = random_suffix_name(name_prefix, 32) + + replacements = REPLACEMENT_VALUES.copy() + replacements["SC_NAME"] = serverless_cache_name + replacements["ENGINE"] = "redis" + replacements["MAJOR_ENGINE_VERSION"] = "7" + replacements["ECPU_MIN"] = "10000" + replacements["ECPU_MAX"] = "100000" + + resource_data = load_elasticache_resource( + "serverless_cache_basic", + additional_replacements=replacements, + ) + logging.debug(resource_data) + + ref = k8s.CustomResourceReference( + CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL, + serverless_cache_name, namespace="default", + ) + _ = k8s.create_custom_resource(ref, resource_data) + cr = k8s.wait_resource_consumed_by_controller(ref) + + assert cr is not None + return ref, cr + + +@pytest.fixture +def simple_serverless_cache(elasticache_client): + ref, cr = _create_serverless_cache(elasticache_client, "simple-serverless-cache") + yield ref, cr + + # Teardown + _ = k8s.delete_custom_resource(ref) + try: + serverless_cache_name = cr["spec"]["serverlessCacheName"] + wait_until_deleted(elasticache_client, serverless_cache_name) + except Exception as e: + logging.warning(f"Failed to wait for serverless cache deletion: {e}") + + +@pytest.fixture +def upgrade_serverless_cache(elasticache_client): + ref, cr = _create_serverless_cache(elasticache_client, "upgrade-serverless-cache") + yield ref, cr + + # Teardown + _ = k8s.delete_custom_resource(ref) + try: + serverless_cache_name = cr["spec"]["serverlessCacheName"] + wait_until_deleted(elasticache_client, serverless_cache_name) + except Exception as e: + logging.warning(f"Failed to wait for serverless cache deletion: {e}") + + +@service_marker +class TestServerlessCache: + def test_create_update_delete_serverless_cache(self, simple_serverless_cache, elasticache_client): + (ref, _) = simple_serverless_cache + + assert k8s.wait_on_condition( + ref, "ACK.ResourceSynced", "True", wait_periods=90 + ) + get_and_assert_status(ref, "available", True) + + cr = k8s.get_resource(ref) + serverless_cache_name = cr["spec"]["serverlessCacheName"] + + try: + wait_for_serverless_cache_available(elasticache_client, serverless_cache_name) + except Exception as e: + logging.warning(f"Failed to wait for serverless cache availability: {e}") + + # Test update - modify description, change max to 90000, and add a tag + new_description = "Updated serverless cache description" + patch = { + "spec": { + 
"description": new_description, + "cacheUsageLimits": { + "eCPUPerSecond": { + "minimum": 10000, + "maximum": 90000 + } + }, + "tags": [ + {"key": "Environment", "value": "test"} + ] + } + } + _ = k8s.patch_custom_resource(ref, patch) + sleep(MODIFY_WAIT_AFTER_SECONDS) + + # Wait for update to be synced + assert k8s.wait_on_condition( + ref, "ACK.ResourceSynced", "True", wait_periods=90 + ) + + # Verify the update was applied + cr = k8s.get_resource(ref) + assert cr["spec"]["description"] == new_description + assert cr["spec"]["cacheUsageLimits"]["eCPUPerSecond"]["maximum"] == 90000 + assert len(cr["spec"]["tags"]) == 1 + assert cr["spec"]["tags"][0]["key"] == "Environment" + assert cr["spec"]["tags"][0]["value"] == "test" + + def test_upgrade_redis_to_valkey(self, upgrade_serverless_cache, elasticache_client): + (ref, _) = upgrade_serverless_cache + + # Wait for the serverless cache to be created and become available + assert k8s.wait_on_condition( + ref, "ACK.ResourceSynced", "True", wait_periods=90 + ) + get_and_assert_status(ref, "available", True) + + cr = k8s.get_resource(ref) + serverless_cache_name = cr["spec"]["serverlessCacheName"] + + # Verify initial state - Redis 7 + assert cr["spec"]["engine"] == "redis" + assert cr["spec"]["majorEngineVersion"] == "7" + + try: + wait_for_serverless_cache_available(elasticache_client, serverless_cache_name) + except Exception as e: + logging.warning(f"Failed to wait for serverless cache availability: {e}") + + # Upgrade from Redis 7 to Valkey 8 + patch = { + "spec": { + "engine": "valkey", + "majorEngineVersion": "8" + } + } + _ = k8s.patch_custom_resource(ref, patch) + sleep(MODIFY_WAIT_AFTER_SECONDS) + + # Wait for upgrade to be synced + assert k8s.wait_on_condition( + ref, "ACK.ResourceSynced", "True", wait_periods=90 + ) + + # Wait for it to be available again after upgrade + get_and_assert_status(ref, "available", True) + + try: + wait_for_serverless_cache_available(elasticache_client, serverless_cache_name) + except Exception as e: + logging.warning(f"Failed to wait for serverless cache availability after upgrade: {e}") + + # Verify the upgrade was applied + cr = k8s.get_resource(ref) + assert cr["spec"]["engine"] == "valkey" + assert cr["spec"]["majorEngineVersion"] == "8" \ No newline at end of file diff --git a/test/e2e/tests/test_serverless_cache_snapshot.py b/test/e2e/tests/test_serverless_cache_snapshot.py new file mode 100644 index 00000000..c645d34f --- /dev/null +++ b/test/e2e/tests/test_serverless_cache_snapshot.py @@ -0,0 +1,139 @@ +# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You may +# not use this file except in compliance with the License. A copy of the +# License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. 
+ +"""Integration tests for the Elasticache ServerlessCacheSnapshot resource +""" + +import pytest +import boto3 +import logging +import time + +from acktest.resources import random_suffix_name +from acktest.k8s import resource as k8s +from acktest import tags as tagutil +from e2e import service_marker, CRD_GROUP, CRD_VERSION, load_elasticache_resource +from e2e.replacement_values import REPLACEMENT_VALUES + +RESOURCE_PLURAL = "serverlesscachesnapshots" +SERVERLESS_CACHE_PLURAL = "serverlesscaches" +UPDATE_WAIT_SECS = 180 + + +@pytest.fixture(scope="module") +def elasticache_client(): + return boto3.client('elasticache') + + +@pytest.fixture +def serverless_cache_for_snapshot(elasticache_client): + """Fixture to create a serverless cache for snapshot testing""" + serverless_cache_name = random_suffix_name("snapshot-test-sc", 32) + + replacements = REPLACEMENT_VALUES.copy() + replacements["SC_NAME"] = serverless_cache_name + replacements["ENGINE"] = "redis" + replacements["MAJOR_ENGINE_VERSION"] = "7" + replacements["ECPU_MIN"] = "10000" + replacements["ECPU_MAX"] = "100000" + + resource_data = load_elasticache_resource( + "serverless_cache_basic", + additional_replacements=replacements, + ) + logging.debug(resource_data) + + ref = k8s.CustomResourceReference( + CRD_GROUP, CRD_VERSION, SERVERLESS_CACHE_PLURAL, + serverless_cache_name, namespace="default", + ) + _ = k8s.create_custom_resource(ref, resource_data) + cr = k8s.wait_resource_consumed_by_controller(ref) + + assert cr is not None + + # Wait for serverless cache to be available + assert k8s.wait_on_condition( + ref, "ACK.ResourceSynced", "True", wait_periods=90 + ) + + yield ref, cr + + # Teardown + _ = k8s.delete_custom_resource(ref) + + +@pytest.fixture +def simple_serverless_cache_snapshot(elasticache_client, serverless_cache_for_snapshot): + """Fixture to create a simple serverless cache snapshot for testing""" + sc_ref, sc_cr = serverless_cache_for_snapshot + serverless_cache_name = sc_cr["spec"]["serverlessCacheName"] + + snapshot_name = random_suffix_name("simple-snapshot", 32) + + replacements = REPLACEMENT_VALUES.copy() + replacements["SNAPSHOT_NAME"] = snapshot_name + replacements["SC_NAME"] = serverless_cache_name + + resource_data = load_elasticache_resource( + "serverless_cache_snapshot_basic", + additional_replacements=replacements, + ) + logging.debug(resource_data) + + ref = k8s.CustomResourceReference( + CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL, + snapshot_name, namespace="default", + ) + _ = k8s.create_custom_resource(ref, resource_data) + cr = k8s.wait_resource_consumed_by_controller(ref) + + assert cr is not None + yield ref, cr + + # Teardown + _ = k8s.delete_custom_resource(ref) + + +@service_marker +class TestServerlessCacheSnapshot: + def test_create_delete_serverless_cache_snapshot(self, simple_serverless_cache_snapshot, elasticache_client): + """Test basic creation and deletion of a serverless cache snapshot""" + (ref, _) = simple_serverless_cache_snapshot + + assert k8s.wait_on_condition( + ref, "ACK.ResourceSynced", "True", wait_periods=120 + ) + + tag_updates = { + "spec": { + "tags": [ + {"key": "Environment", "value": "test"}, + {"key": "Purpose", "value": "e2e-testing"} + ] + } + } + + k8s.patch_custom_resource(ref, tag_updates) + + time.sleep(UPDATE_WAIT_SECS) + + final_cr = k8s.get_resource(ref) + snapshot_arn = final_cr['status']['ackResourceMetadata']['arn'] + + tag_list = elasticache_client.list_tags_for_resource(ResourceName=snapshot_arn) + aws_tags = tagutil.clean(tag_list['TagList']) + + 
expected_tags = [{"Key": "Environment", "Value": "test"}, {"Key": "Purpose", "Value": "e2e-testing"}] + assert len(aws_tags) == 2 + assert aws_tags == expected_tags \ No newline at end of file diff --git a/test/e2e/tests/test_user.py b/test/e2e/tests/test_user.py index f85a1852..b0f5a9e4 100644 --- a/test/e2e/tests/test_user.py +++ b/test/e2e/tests/test_user.py @@ -128,8 +128,6 @@ def test_user_nopass(self, user_nopass, user_nopass_input): resource = k8s.get_resource(reference) assert resource["status"]["lastRequestedAccessString"] == new_access_string - #TODO: add terminal condition checks - # test creation with Passwords specified (as k8s secrets) def test_user_password(self, user_password, user_password_input): (reference, resource) = user_password diff --git a/test/e2e/util.py b/test/e2e/util.py index c01c8a59..a1412256 100644 --- a/test/e2e/util.py +++ b/test/e2e/util.py @@ -22,8 +22,8 @@ def wait_usergroup_active(usergroup_id: str, - wait_periods: int = 10, - period_length: int = 60) -> bool: + wait_periods: int = 10, + period_length: int = 60) -> bool: for i in range(wait_periods): logging.debug(f"Waiting for user group {usergroup_id} to be active ({i})") response = ec.describe_user_groups(UserGroupId=usergroup_id) @@ -91,16 +91,6 @@ def assert_user_deletion(user_id: str): except ec.exceptions.UserNotFoundFault: pass # we only expect this particular exception (if deletion has already completed) - -# given "rg" (the k8s object representing a replication group), assert that: -# 1) there are non-zero amount of node groups -# 2) the number of replicas in every node group equals desired_replica_count -def assert_even_shards_replica_count(rg, desired_replica_count): - assert len(rg['status']['nodeGroups']) != 0 - for ng in rg['status']['nodeGroups']: - assert len(ng['nodeGroupMembers']) == (desired_replica_count + 1) - - # TODO: move to common repository # given the latest state of the resource, assert that the terminal condition is set def assert_terminal_condition_set(resource): @@ -112,6 +102,7 @@ def assert_terminal_condition_set(resource): assert terminal is not None assert terminal['status'] == "True" + # given the latest state of the resource, assert that the recoverable condition is set def assert_recoverable_condition_set(resource): recoverable = None @@ -122,10 +113,11 @@ def assert_recoverable_condition_set(resource): assert recoverable is not None assert recoverable['status'] == "True" + # provide a basic nodeGroupConfiguration object of desired size def provide_node_group_configuration(size: int): ngc = [] - for i in range(1, size+1): + for i in range(1, size + 1): ngc.append({"nodeGroupID": str(i).rjust(4, '0')}) return ngc @@ -143,4 +135,11 @@ def retrieve_cache_cluster(rg_id: str): def retrieve_replication_group(rg_id: str): rg_response = ec.describe_replication_groups(ReplicationGroupId=rg_id) - return rg_response['ReplicationGroups'][0] \ No newline at end of file + return rg_response['ReplicationGroups'][0] + + +def retrieve_replication_group_tags(rg_arn: str): + taglist_response = ec.list_tags_for_resource(ResourceName=rg_arn) + return taglist_response['TagList'] + +
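The tag helpers added to test/e2e/util.py, together with the tags now present in replicationgroup_update.yaml, point at a tag-reconciliation assertion in the Python e2e tests. A minimal sketch under stated assumptions follows: the assert_replication_group_tags helper and the example tag values are illustrative additions for this note, not part of the change above; only retrieve_replication_group_tags, k8s.get_resource, and the status.ackResourceMetadata.arn field come from the diff itself.

# Illustrative sketch only: assert_replication_group_tags is a hypothetical helper
# built on the retrieve_replication_group_tags function added in test/e2e/util.py.
from acktest.k8s import resource as k8s
from e2e.util import retrieve_replication_group_tags

def assert_replication_group_tags(reference: k8s.CustomResourceReference, expected: dict):
    # Read the replication group ARN from the custom resource status, then
    # compare the AWS-side tags against the expected key/value pairs.
    cr = k8s.get_resource(reference)
    rg_arn = cr["status"]["ackResourceMetadata"]["arn"]
    actual = {t["Key"]: t["Value"] for t in retrieve_replication_group_tags(rg_arn)}
    assert actual == expected

# Example usage inside a test, assuming "tag_to_update" was patched to "new_value":
# assert_replication_group_tags(reference, {"tag_to_update": "new_value"})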