From 80ba57b259e0437f380226897d51e59826f4dd34 Mon Sep 17 00:00:00 2001 From: Wilma Wang Date: Tue, 3 Sep 2024 10:55:40 -0700 Subject: [PATCH 1/4] fix: wait for async instance update --- .../resource_ibm_resource_instance.go | 48 +++++++++++-------- 1 file changed, 28 insertions(+), 20 deletions(-) diff --git a/ibm/service/resourcecontroller/resource_ibm_resource_instance.go b/ibm/service/resourcecontroller/resource_ibm_resource_instance.go index 26cadf6549..9ea23f88f1 100644 --- a/ibm/service/resourcecontroller/resource_ibm_resource_instance.go +++ b/ibm/service/resourcecontroller/resource_ibm_resource_instance.go @@ -15,7 +15,7 @@ import ( rc "github.com/IBM/platform-services-go-sdk/resourcecontrollerv2" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/IBM-Cloud/bluemix-go/models" @@ -25,13 +25,14 @@ import ( ) const ( - RsInstanceSuccessStatus = "active" - RsInstanceProgressStatus = "in progress" - RsInstanceProvisioningStatus = "provisioning" - RsInstanceInactiveStatus = "inactive" - RsInstanceFailStatus = "failed" - RsInstanceRemovedStatus = "removed" - RsInstanceReclamation = "pending_reclamation" + RsInstanceSuccessStatus = "active" + RsInstanceProgressStatus = "in progress" + RsInstanceProvisioningStatus = "provisioning" + RsInstanceInactiveStatus = "inactive" + RsInstanceFailStatus = "failed" + RsInstanceRemovedStatus = "removed" + RsInstanceReclamation = "pending_reclamation" + RsInstanceUpdateSuccessStatus = "succeeded" ) func ResourceIBMResourceInstance() *schema.Resource { @@ -855,7 +856,7 @@ func waitForResourceInstanceCreate(d *schema.ResourceData, meta interface{}) (in ID: &instanceID, } - stateConf := &resource.StateChangeConf{ + stateConf := &retry.StateChangeConf{ Pending: []string{RsInstanceProgressStatus, RsInstanceInactiveStatus, RsInstanceProvisioningStatus}, Target: []string{RsInstanceSuccessStatus}, Refresh: func() (interface{}, string, error) { @@ -867,7 +868,7 @@ func waitForResourceInstanceCreate(d *schema.ResourceData, meta interface{}) (in return nil, "", fmt.Errorf("[ERROR] Get the resource instance %s failed with resp code: %s, err: %v", d.Id(), resp, err) } if *instance.State == RsInstanceFailStatus { - return instance, *instance.State, fmt.Errorf("[ERROR] The resource instance %s failed: %v", d.Id(), err) + return instance, *instance.State, fmt.Errorf("[ERROR] The resource instance '%s' creation failed: %v", d.Id(), err) } return instance, *instance.State, nil }, @@ -876,7 +877,7 @@ func waitForResourceInstanceCreate(d *schema.ResourceData, meta interface{}) (in MinTimeout: 10 * time.Second, } - return stateConf.WaitForState() + return stateConf.WaitForStateContext(context.Background()) } func waitForResourceInstanceUpdate(d *schema.ResourceData, meta interface{}) (interface{}, error) { @@ -889,9 +890,9 @@ func waitForResourceInstanceUpdate(d *schema.ResourceData, meta interface{}) (in ID: &instanceID, } - stateConf := &resource.StateChangeConf{ + stateConf := &retry.StateChangeConf{ Pending: []string{RsInstanceProgressStatus, RsInstanceInactiveStatus}, - Target: []string{RsInstanceSuccessStatus}, + Target: []string{RsInstanceSuccessStatus, RsInstanceUpdateSuccessStatus}, Refresh: func() (interface{}, string, error) { instance, resp, err := rsConClient.GetResourceInstance(&resourceInstanceGet) if err != nil { @@ -900,17 +901,24 @@ func 
waitForResourceInstanceUpdate(d *schema.ResourceData, meta interface{}) (in } return nil, "", fmt.Errorf("[ERROR] Get the resource instance %s failed with resp code: %s, err: %v", d.Id(), resp, err) } - if *instance.State == RsInstanceFailStatus { - return instance, *instance.State, fmt.Errorf("[ERROR] The resource instance %s failed: %v", d.Id(), err) + if *instance.LastOperation.Async { + if *instance.LastOperation.State == RsInstanceFailStatus { + return instance, *instance.LastOperation.State, fmt.Errorf("[ERROR] The resource instance '%s' update failed: %v", d.Id(), err) + } + return instance, *instance.LastOperation.State, nil + } else { + if *instance.State == RsInstanceFailStatus { + return instance, *instance.State, fmt.Errorf("[ERROR] The resource instance '%s' update failed: %v", d.Id(), err) + } + return instance, *instance.State, nil } - return instance, *instance.State, nil }, Timeout: d.Timeout(schema.TimeoutUpdate), Delay: 10 * time.Second, MinTimeout: 10 * time.Second, } - return stateConf.WaitForState() + return stateConf.WaitForStateContext(context.Background()) } func waitForResourceInstanceDelete(d *schema.ResourceData, meta interface{}) (interface{}, error) { @@ -922,7 +930,7 @@ func waitForResourceInstanceDelete(d *schema.ResourceData, meta interface{}) (in resourceInstanceGet := rc.GetResourceInstanceOptions{ ID: &instanceID, } - stateConf := &resource.StateChangeConf{ + stateConf := &retry.StateChangeConf{ Pending: []string{RsInstanceProgressStatus, RsInstanceInactiveStatus, RsInstanceSuccessStatus}, Target: []string{RsInstanceRemovedStatus, RsInstanceReclamation}, Refresh: func() (interface{}, string, error) { @@ -934,7 +942,7 @@ func waitForResourceInstanceDelete(d *schema.ResourceData, meta interface{}) (in return nil, "", fmt.Errorf("[ERROR] Get the resource instance %s failed with resp code: %s, err: %v", d.Id(), resp, err) } if *instance.State == RsInstanceFailStatus { - return instance, *instance.State, fmt.Errorf("[ERROR] The resource instance %s failed to delete: %v", d.Id(), err) + return instance, *instance.State, fmt.Errorf("[ERROR] The resource instance '%s' deletion failed: %v", d.Id(), err) } return instance, *instance.State, nil }, @@ -943,7 +951,7 @@ func waitForResourceInstanceDelete(d *schema.ResourceData, meta interface{}) (in MinTimeout: 10 * time.Second, } - return stateConf.WaitForState() + return stateConf.WaitForStateContext(context.Background()) } func FilterDeployments(deployments []models.ServiceDeployment, location string) ([]models.ServiceDeployment, map[string]bool) { From f78fb641d7f528df537532695c818400f2c4fbea Mon Sep 17 00:00:00 2001 From: Kenneth Cox Date: Mon, 2 Sep 2024 19:30:24 -0500 Subject: [PATCH 2/4] feat: document tagging and enhanced metrics --- examples/ibm-event-streams/README.md | 194 ++++++++++++++------ examples/ibm-event-streams/main.tf | 108 +++++------ examples/ibm-event-streams/provider.tf | 6 +- examples/ibm-event-streams/terraform.tfvars | 4 +- 4 files changed, 197 insertions(+), 115 deletions(-) diff --git a/examples/ibm-event-streams/README.md b/examples/ibm-event-streams/README.md index 7a1aa0679e..2e3dab13b1 100644 --- a/examples/ibm-event-streams/README.md +++ b/examples/ibm-event-streams/README.md @@ -1,70 +1,101 @@ # IBM Event Streams examples -This example shows 3 usage scenarios. +This example shows several Event Streams usage scenarios. -#### Scenario 1: Create an Event Streams service instance and topic. 
+
+## Creating Event Streams instances
+
+Event Streams service instances are created with the `"ibm_resource_instance"` resource type.
+
+The following `"ibm_resource_instance"` arguments are required:
+
+- `name`: The service instance name, as it will appear in the Event Streams UI and CLI.
+
+- `service`: Use `"messagehub"` for an Event Streams instance.
+
+- `plan`: One of `"lite"`, `"standard"`, or `"enterprise-3nodes-2tb"`. For more information about the plans, see [Choosing your plan](https://cloud.ibm.com/docs/EventStreams?topic=EventStreams-plan_choose). Note: `"enterprise-3nodes-2tb"` selects the Enterprise plan.
+
+- `location`: The region where the service instance will be provisioned. For a list of regions, see [Region and data center locations](https://cloud.ibm.com/docs/overview?topic=overview-locations).
+
+- `resource_group_id`: The ID of the resource group in which the instance will be provisioned. For more information about resource groups, see [Managing resource groups](https://cloud.ibm.com/docs/account?topic=account-rgs).
+
+The `parameters` argument is optional and provides additional provisioning or update options. Supported parameters are:
+
+- `throughput`: One of `"150"` (the default), `"300"`, or `"450"`. The maximum capacity in MB/s for producing or consuming messages. For more information, see [Scaling Enterprise plan capacity](https://cloud.ibm.com/docs/EventStreams?topic=EventStreams-ES_scaling_capacity). *Note:* See [Scaling combinations](https://cloud.ibm.com/docs/EventStreams?topic=EventStreams-ES_scaling_capacity#ES_scaling_combinations) for allowed combinations of `throughput` and `storage_size`.
+  - Example: `throughput = "300"`
+
+- `storage_size`: One of `"2048"` (the default), `"4096"`, `"6144"`, `"8192"`, `"10240"`, or `"12288"`. The amount of storage capacity in GB. For more information, see [Scaling Enterprise plan capacity](https://cloud.ibm.com/docs/EventStreams?topic=EventStreams-ES_scaling_capacity). *Note:* See [Scaling combinations](https://cloud.ibm.com/docs/EventStreams?topic=EventStreams-ES_scaling_capacity#ES_scaling_combinations) for allowed combinations of `throughput` and `storage_size`.
+  - Example: `storage_size = "4096"`
+
+- `service-endpoints`: One of `"public"` (the default), `"private"`, or `"public-and-private"`. For enterprise instances only. For more information, see [Restricting network access](https://cloud.ibm.com/docs/EventStreams?topic=EventStreams-restrict_access).
+  - Example: `service-endpoints = "private"`
+
+- `private_ip_allowlist`: **Deprecated.** An array of CIDRs specifying a private IP allowlist. For enterprise instances only. For more information, see [Specifying an IP allowlist](https://cloud.ibm.com/docs/EventStreams?topic=EventStreams-restrict_access#specify_allowlist). This feature has been deprecated in favor of context-based restrictions.
+  - Example: `private_ip_allowlist = "[10.0.0.0/32,10.0.0.1/32]"`
+
+- `metrics`: An array of strings; allowed values are `"topic"`, `"partition"`, and `"consumers"`. Enables additional enhanced metrics for the instance. For enterprise instances only. For more information on enhanced metrics, see [Enabling enhanced Event Streams metrics](https://cloud.ibm.com/docs/EventStreams?topic=EventStreams-metrics#opt_in_enhanced_metrics).
+  - Example: `metrics = "[topic,partition]"`
+
+- `kms_key_crn`: The CRN (as a string) of a customer-managed root key provisioned with an IBM Cloud Key Protect or Hyper Protect Crypto Service. If provided, this key is used to encrypt all data at rest. 
For more information on customer-managed encryption, see [Managing encryption in Event Streams](https://cloud.ibm.com/docs/EventStreams?topic=EventStreams-managing_encryption). + - Example: `kms_key_crn = "crn:v1:prod:public:kms:us-south:a/6db1b0d0b5c54ee5c201552547febcd8:20adf7eb-e095-4dec-08cf-0b7d81e32db6:key:3fa9d921-d3b6-3516-a1ec-d54e27e7638b"` + +The `timeouts` argument is used to specify how long the IBM Cloud terraform provider will wait for the provision, update, or deprovision of the service instance. Values of 15 minutes are sufficient for standard and lite plans. For enterprise plans: +- Use "3h" for create. Add an additional 1 hour for each level of non-default throughput, and an additional 30 minutes for each level of non-default storage size. For example with `throughput = "300"` (one level over default) and `storage_size = "8192"` (three levels over default), use 3 hours + 1 * 1 hour + 3 * 30 minutes = 5.5 hours. +- Use "1h" for update. If increasing the throughput or storage size, add an additional 1 hour for each level of non-default throughput, and an additional 30 minutes for each level of non-default storage size. +- Use "1h" for delete. + +## Scenarios + +#### Scenario 1: Create an Event Streams standard-plan service instance. + +This creates a standard plan instance in us-south. ```terraform resource "ibm_resource_instance" "es_instance_1" { name = "terraform-integration-1" service = "messagehub" - plan = "standard" # "lite", "enterprise-3nodes-2tb" - location = "us-south" # "us-east", "eu-gb", "eu-de", "jp-tok", "au-syd" + plan = "standard" + location = "us-south" resource_group_id = data.ibm_resource_group.group.id - # parameters = { - # service-endpoints = "private" # for enterprise instance only, Options are: "public", "public-and-private", "private". Default is "public" when not specified. - # private_ip_allowlist = "[10.0.0.0/32,10.0.0.1/32]" # for enterprise instance only. Specify 1 or more IP range in CIDR format - # # document about using private service endpoint and IP allowlist to restrict access: https://cloud.ibm.com/docs/EventStreams?topic=EventStreams-restrict_access - - # throughput = "150" # for enterprise instance only. Options are: "150", "300", "450". Default is "150" when not specified. - # storage_size = "2048" # for enterprise instance only. Options are: "2048", "4096", "6144", "8192", "10240", "12288". Default is "2048" when not specified. 
- # # Note: when throughput is "300", storage_size starts from "4096", when throughput is "450", storage_size starts from "6144" - # # document about supported combinations of throughput and storage_size: https://cloud.ibm.com/docs/EventStreams?topic=EventStreams-ES_scaling_capacity#ES_scaling_combinations - # } - - # timeouts { - # create = "15m" # use 3h when creating enterprise instance, add additional 1h for each level of non-default throughput, add additional 30m for each level of non-default storage_size - # update = "15m" # use 1h when updating enterprise instance, add additional 1h for each level of non-default throughput, add additional 30m for each level of non-default storage_size - # delete = "15m" - # } -} - -resource "ibm_event_streams_topic" "es_topic_1" { - resource_instance_id = ibm_resource_instance.es_instance_1.id - name = "my-es-topic" - partitions = 1 - config = { - "cleanup.policy" = "compact,delete" - "retention.ms" = "86400000" - "retention.bytes" = "1073741824" - "segment.bytes" = "536870912" + timeouts { + create = "15m" + update = "15m" + delete = "15m" } } ``` -#### Scenario 2: Create a topic on an existing Event Streams instance. +#### Scenario 2: Create an Event Streams enterprise service instance with non-default attributes + +This creates an enterprise plan instance in us-east with 300 MB/s throughput, 4 TB storage, private endpoints with an allowlist, and enhanced metrics for topics and consumer groups. The timeouts are calculated as described above. ```terraform -data "ibm_resource_instance" "es_instance_2" { +resource "ibm_resource_instance" "es_instance_2" { name = "terraform-integration-2" + service = "messagehub" + plan = "enterprise-3nodes-2tb" + location = "us-east" resource_group_id = data.ibm_resource_group.group.id -} -resource "ibm_event_streams_topic" "es_topic_2" { - resource_instance_id = data.ibm_resource_instance.es_instance_2.id - name = "my-es-topic" - partitions = 1 - config = { - "cleanup.policy" = "compact,delete" - "retention.ms" = "86400000" - "retention.bytes" = "1073741824" - "segment.bytes" = "536870912" + parameters = { + throughput = "300" + storage_size = "4096" + service-endpoints = "private" + private_ip_allowlist = "[10.0.0.0/32,10.0.0.1/32]" + metrics = "[topic,consumers]" + } + + timeouts { + create = "330m" # 5.5h + update = "210m" # 3.5h + delete = "1h" } } ``` -#### Scenario 3: Create a kafka consumer application connecting to an existing Event Streams instance and its topics. +#### Scenario 3: Create a topic on an existing Event Streams instance. + +For more information on topics and topic parameters, see [Topics and partitions](https://cloud.ibm.com/docs/EventStreams?topic=EventStreams-apache_kafka&interface=ui#kafka_topics_partitions) and [Using the administration Kafka Java client API](https://cloud.ibm.com/docs/EventStreams?topic=EventStreams-kafka_java_api). 
```terraform data "ibm_resource_instance" "es_instance_3" { @@ -72,20 +103,23 @@ data "ibm_resource_instance" "es_instance_3" { resource_group_id = data.ibm_resource_group.group.id } -data "ibm_event_streams_topic" "es_topic_3" { +resource "ibm_event_streams_topic" "es_topic_3" { resource_instance_id = data.ibm_resource_instance.es_instance_3.id name = "my-es-topic" -} - -resource "kafka_consumer_app" "es_kafka_app" { - bootstrap_server = lookup(data.ibm_resource_instance.es_instance_3.extensions, "kafka_brokers_sasl", []) - topics = [data.ibm_event_streams_topic.es_topic_3.name] - apikey = var.es_reader_api_key + partitions = 1 + config = { + "cleanup.policy" = "compact,delete" + "retention.ms" = "86400000" + "retention.bytes" = "1073741824" + "segment.bytes" = "536870912" + } } ``` #### Scenario 4: Create a schema on an existing Event Streams Enterprise instance +For more information on the Event Streams schema registry, see [Using Event Streams Schema Registry](https://cloud.ibm.com/docs/EventStreams?topic=EventStreams-ES_schema_registry). + ```terraform data "ibm_resource_instance" "es_instance_4" { name = "terraform-integration-4" @@ -108,6 +142,60 @@ resource "ibm_event_streams_schema" "es_schema" { } ``` +#### Scenario 5: Apply access tags to an Event Streams service instance + +Tags are applied using the `"ibm_resource_tag"` terraform resource. +For more information about tagging, see the documentation for the `"ibm_resource_tag"` resource and [Tagging](https://cloud.ibm.com/apidocs/tagging). + +```terraform +data "ibm_resource_instance" "es_instance_5" { + name = "terraform-integration-5" + resource_group_id = data.ibm_resource_group.group.id +} + +resource "ibm_resource_tag" "tag_example_on_es" { + tags = ["example:tag"] + tag_type = "access" + resource_id = data.ibm_resource_instance.es_instance_5.id +} +``` + +#### Scenario 6: Connect to an existing Event Streams instance and its topics. + +This scenario uses a fictitious `"kafka_consumer_app"` resource to demonstrate how a consumer application could be configured. +The resource uses three configuration properties: + +1. The Kafka broker hostnames used to connect to the service instance. +2. An API key for reading from the topics. +3. The names of the topic(s) which the consumer should read. + +The broker hostnames would be required by any consumer or producer application. After the Event Streams service instance has been created, they are available in the `extensions` attribute of the service instance, as an array named `"kafka_brokers_sasl"`. This is shown in the example. + +An API key would also be required by any application. This key would typically be created with reduced permissions to restrict the operations it can perform, for example only allowing it to read from certain topics. See [Managing authentication to your Event Streams instance](https://cloud.ibm.com/docs/EventStreams?topic=EventStreams-security) for more information on creating keys. The example assumes the key is provided as a terraform variable. + +The topic names can be provided as strings, or can be taken from topic data sources as shown in the example. 
```terraform
# Use an existing instance
data "ibm_resource_instance" "es_instance_6" {
  name              = "terraform-integration-6"
  resource_group_id = data.ibm_resource_group.group.id
}

# Use an existing topic on that instance
data "ibm_event_streams_topic" "es_topic_6" {
  resource_instance_id = data.ibm_resource_instance.es_instance_6.id
  name                 = "my-es-topic"
}

# The FICTITIOUS consumer application, configured with brokers, API key, and topics
resource "kafka_consumer_app" "es_kafka_app" {
  bootstrap_server = lookup(data.ibm_resource_instance.es_instance_6.extensions, "kafka_brokers_sasl", [])
  apikey           = var.es_reader_api_key
  topics           = [data.ibm_event_streams_topic.es_topic_6.name]
}
```

## Dependencies

- The owner of the `ibmcloud_api_key` has permission to create Event Streams instance under specified resource group and has Manager role to the created instance in order to create topic.
@@ -116,9 +204,7 @@ resource "ibm_event_streams_schema" "es_schema" {

## Configuration

-- `ibmcloud_api_key` - An API key for IBM Cloud services. If you don't have one already, go to https://cloud.ibm.com/iam/#/apikeys and create a new key.
-
-- `es_reader_api_key` - An service ID API key with reduced permission in scenario 3 if user wish to scope the access to Event Streams instance and topics.
+- `ibmcloud_api_key` - An API key for IBM Cloud services. If you don't have one already, go to https://cloud.ibm.com/iam/apikeys and create a new key.

## Running the configuration

diff --git a/examples/ibm-event-streams/main.tf b/examples/ibm-event-streams/main.tf
index 44c3e91214..a16618467b 100644
--- a/examples/ibm-event-streams/main.tf
+++ b/examples/ibm-event-streams/main.tf
@@ -1,83 +1,72 @@
+# This is not functional terraform code. It is intended as a template for users to remove
+# unneeded scenarios and edit the other sections.
+
+# Replace the resource group name with the one in which your resources should be created
 data "ibm_resource_group" "group" {
   name = "Default"
 }

-#### Scenario 1: Create Event Streams service instance and topic
+#### Scenario 1: Create an Event Streams standard-plan service instance.
 resource "ibm_resource_instance" "es_instance_1" {
   name              = "terraform-integration-1"
   service           = "messagehub"
-  plan              = "standard" # "lite", "enterprise-3nodes-2tb"
-  location          = "us-south" # "us-east", "eu-gb", "eu-de", "jp-tok", "au-syd"
+  plan              = "standard"
+  location          = "us-south"
   resource_group_id = data.ibm_resource_group.group.id

-  # parameters = {
-  #   service-endpoints = "private" # for enterprise instance only, Options are: "public", "public-and-private", "private". Default is "public" when not specified.
-  #   private_ip_allowlist = "[10.0.0.0/32,10.0.0.1/32]" # for enterprise instance only. Specify 1 or more IP range in CIDR format
-  #   # document about using private service endpoint and IP allowlist to restrict access: https://cloud.ibm.com/docs/EventStreams?topic=EventStreams-restrict_access
-
-  #   throughput = "150" # for enterprise instance only. Options are: "150", "300", "450". Default is "150" when not specified.
-  #   storage_size = "2048" # for enterprise instance only. Options are: "2048", "4096", "6144", "8192", "10240", "12288". Default is "2048" when not specified.
-  #   kms_key_crn = "crn:v1:bluemix:public:kms:us-south:a/6db1b0d0b5c54ee5c201552547febcd8:0aa69b09-941b-41b2-bbf9-9f9f0f6a6f79:key:dd37a0b6-eff4-4708-8459-e29ae0a8f256" # for enterprise instance only. Specify the CRN of a root key from a Key Management Service instance used to encrypt disks. 
- # # Note: when throughput is "300", storage_size starts from "4096", when throughput is "450", storage_size starts from "6144" - # # document about supported combinations of throughput and storage_size: https://cloud.ibm.com/docs/EventStreams?topic=EventStreams-ES_scaling_capacity#ES_scaling_combinations - # } - - # timeouts { - # create = "15m" # use 3h when creating enterprise instance, add additional 1h for each level of non-default throughput, add additional 30m for each level of non-default storage_size - # update = "15m" # use 1h when updating enterprise instance, add additional 1h for each level of non-default throughput, add additional 30m for each level of non-default storage_size - # delete = "15m" - # } -} - -resource "ibm_event_streams_topic" "es_topic_1" { - resource_instance_id = ibm_resource_instance.es_instance_1.id - name = "my-es-topic" - partitions = 1 - config = { - "cleanup.policy" = "compact,delete" - "retention.ms" = "86400000" - "retention.bytes" = "1073741824" - "segment.bytes" = "536870912" + timeouts { + create = "15m" + update = "15m" + delete = "15m" } } -#### Scenario 2: Create topic on an existing Event Streams instance -data "ibm_resource_instance" "es_instance_2" { +#### Scenario 2: Create an Event Streams enterprise service instance with non-default attributes +resource "ibm_resource_instance" "es_instance_2" { name = "terraform-integration-2" + service = "messagehub" + plan = "enterprise-3nodes-2tb" + location = "us-east" resource_group_id = data.ibm_resource_group.group.id -} -resource "ibm_event_streams_topic" "es_topic_2" { - resource_instance_id = data.ibm_resource_instance.es_instance_2.id - name = "my-es-topic" - partitions = 1 - config = { - "cleanup.policy" = "compact,delete" - "retention.ms" = "86400000" - "retention.bytes" = "1073741824" - "segment.bytes" = "536870912" + parameters = { + throughput = "300" + storage_size = "4096" + service-endpoints = "private" + private_ip_allowlist = "[10.0.0.0/32,10.0.0.1/32]" + metrics = "[topic,consumers]" + } + + timeouts { + create = "330m" # 5.5h + update = "210m" # 3.5h + delete = "1h" } } -#### Scenario 3: Create a kafka consumer application connecting to an existing Event Streams instance and its topics +#### Scenario 3: Create a topic on an existing Event Streams instance. 
+ +# the existing instance data "ibm_resource_instance" "es_instance_3" { name = "terraform-integration-3" resource_group_id = data.ibm_resource_group.group.id } -data "ibm_event_streams_topic" "es_topic_3" { +resource "ibm_event_streams_topic" "es_topic_3" { resource_instance_id = data.ibm_resource_instance.es_instance_3.id name = "my-es-topic" + partitions = 1 + config = { + "cleanup.policy" = "compact,delete" + "retention.ms" = "86400000" + "retention.bytes" = "1073741824" + "segment.bytes" = "536870912" + } } -resource "kafka_consumer_app" "es_kafka_app" { - bootstrap_server = lookup(data.ibm_resource_instance.es_instance_3.extensions, "kafka_brokers_sasl", []) - topics = [data.ibm_event_streams_topic.es_topic_3.name] - apikey = var.es_reader_api_key -} +#### Scenario 4: Create a schema on an existing Event Streams Enterprise instance -#### Scenario 4 Create a schema on an existing Event Streams Enterprise instance data "ibm_resource_instance" "es_instance_4" { name = "terraform-integration-4" resource_group_id = data.ibm_resource_group.group.id @@ -85,7 +74,7 @@ data "ibm_resource_instance" "es_instance_4" { resource "ibm_event_streams_schema" "es_schema" { resource_instance_id = data.ibm_resource_instance.es_instance_4.id - schema_id = "my-es-schema" + schema_id = "tf_schema" schema = < Date: Wed, 4 Sep 2024 07:46:24 +0200 Subject: [PATCH 3/4] move wait_till logic into function, integrate it into cluster datasource (#5540) * move wait_till logic into function, integrate it into cluster datasource * add state to doc * review comments --------- Co-authored-by: Zoltan Illes --- .../data_source_ibm_container_cluster.go | 43 ++++++ .../data_source_ibm_container_cluster_test.go | 54 +++++++- .../resource_ibm_container_cluster.go | 131 +++++------------- .../resource_ibm_container_cluster_feature.go | 20 ++- .../resource_ibm_container_cluster_test.go | 28 ++-- .../docs/d/container_cluster.html.markdown | 3 + 6 files changed, 168 insertions(+), 111 deletions(-) diff --git a/ibm/service/kubernetes/data_source_ibm_container_cluster.go b/ibm/service/kubernetes/data_source_ibm_container_cluster.go index 12f0e640de..7e9e51c66b 100644 --- a/ibm/service/kubernetes/data_source_ibm_container_cluster.go +++ b/ibm/service/kubernetes/data_source_ibm_container_cluster.go @@ -7,11 +7,13 @@ import ( "fmt" "log" "strings" + "time" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex" "github.com/IBM-Cloud/terraform-provider-ibm/ibm/validate" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func DataSourceIBMContainerCluster() *schema.Resource { @@ -35,6 +37,23 @@ func DataSourceIBMContainerCluster() *schema.Resource { "ibm_container_cluster", "name"), }, + "wait_till": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{oneWorkerNodeReady, clusterNormal}, true), + Description: "wait_till can be configured for Master Ready, One worker Ready, Ingress Ready or Normal", + }, + "wait_till_timeout": { + Type: schema.TypeInt, + Optional: true, + Default: "20", + Description: "timeout for wait_till in minutes", + RequiredWith: []string{"wait_till"}, + }, + "state": { + Type: schema.TypeString, + Computed: true, + }, "worker_count": { Description: "Number of workers", Type: schema.TypeInt, @@ -390,6 +409,16 @@ func dataSourceIBMContainerClusterRead(d *schema.ResourceData, meta interface{}) if v, ok := d.GetOk("name"); ok { name = 
v.(string) } + + // timeoutStage will define the timeout stage + var timeoutStage string + var timeout time.Duration = 20 * time.Minute + if v, ok := d.GetOk("wait_till"); ok { + timeoutStage = strings.ToLower(v.(string)) + timeoutInt := d.Get("wait_till_timeout").(int) + timeout = time.Duration(timeoutInt) * time.Minute + } + clusterFields, err := csAPI.Find(name, targetEnv) if err != nil { return fmt.Errorf("[ERROR] Error retrieving cluster: %s", err) @@ -434,6 +463,20 @@ func dataSourceIBMContainerClusterRead(d *schema.ResourceData, meta interface{}) filteredAlbs := flex.FlattenAlbs(albs, filterType) d.SetId(clusterFields.ID) + + if timeoutStage != "" { + err = waitForCluster(d, timeoutStage, timeout, meta) + if err != nil { + return err + } + + clusterFields, err = csAPI.Find(name, targetEnv) + if err != nil { + return fmt.Errorf("[ERROR] Error retrieving cluster after waitForCluster: %s", err) + } + } + + d.Set("state", clusterFields.State) d.Set("worker_count", clusterFields.WorkerCount) d.Set("workers", workers) d.Set("region", clusterFields.Region) diff --git a/ibm/service/kubernetes/data_source_ibm_container_cluster_test.go b/ibm/service/kubernetes/data_source_ibm_container_cluster_test.go index 07a1b5252c..55b9ca36dd 100644 --- a/ibm/service/kubernetes/data_source_ibm_container_cluster_test.go +++ b/ibm/service/kubernetes/data_source_ibm_container_cluster_test.go @@ -16,6 +16,34 @@ import ( ) func TestAccIBMContainerClusterDataSource_basic(t *testing.T) { + clusterName := fmt.Sprintf("tf-cluster-%d", acctest.RandIntRange(10, 100)) + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + Providers: acc.TestAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMContainerClusterDataSourceBasic(clusterName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet( + "data.ibm_container_cluster.testacc_ds_cluster", "id"), + resource.TestCheckResourceAttr( + "data.ibm_container_cluster.testacc_ds_cluster", "state", "deploying"), + ), + }, + { + Config: testAccCheckIBMContainerClusterDataSourceBasic_update(clusterName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet( + "data.ibm_container_cluster.testacc_ds_cluster", "id"), + resource.TestCheckResourceAttr( + "data.ibm_container_cluster.testacc_ds_cluster", "state", "normal"), + ), + }, + }, + }) +} + +func TestAccIBMContainerClusterDataSourceBindServiceBasic(t *testing.T) { clusterName := fmt.Sprintf("tf-cluster-%d", acctest.RandIntRange(10, 100)) serviceName := fmt.Sprintf("tf-cluster-%d", acctest.RandIntRange(10, 100)) resource.Test(t, resource.TestCase{ @@ -23,7 +51,7 @@ func TestAccIBMContainerClusterDataSource_basic(t *testing.T) { Providers: acc.TestAccProviders, Steps: []resource.TestStep{ { - Config: testAccCheckIBMContainerClusterDataSource(clusterName, serviceName), + Config: testAccCheckIBMContainerClusterDataSourceBindServiceBasic(clusterName, serviceName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrSet( "data.ibm_container_cluster.testacc_ds_cluster", "id"), @@ -56,14 +84,34 @@ func testAccIBMClusterVlansCheck(n string) resource.TestCheckFunc { return nil } } -func testAccCheckIBMContainerClusterDataSource(clusterName, serviceName string) string { + +func testAccCheckIBMContainerClusterDataSourceBasic(clusterName string) string { + return testAccCheckIBMContainerClusterBasic(clusterName, "IngressReady") + ` + data "ibm_container_cluster" "testacc_ds_cluster" { + cluster_name_id = 
ibm_container_cluster.testacc_cluster.id + list_bounded_services = "false" + } + ` +} + +func testAccCheckIBMContainerClusterDataSourceBasic_update(clusterName string) string { + return testAccCheckIBMContainerClusterBasic(clusterName, "IngressReady") + ` + data "ibm_container_cluster" "testacc_ds_cluster" { + cluster_name_id = ibm_container_cluster.testacc_cluster.id + list_bounded_services = "false" + wait_till = "normal" + } + ` +} + +func testAccCheckIBMContainerClusterDataSourceBindServiceBasic(clusterName, serviceName string) string { return testAccCheckIBMContainerBindServiceBasic(clusterName, serviceName) + ` data "ibm_container_cluster" "testacc_ds_cluster" { cluster_name_id = ibm_container_cluster.testacc_cluster.id } data "ibm_container_bind_service" "bind_service" { cluster_name_id = ibm_container_bind_service.bind_service.cluster_name_id - service_instance_id = ibm_container_bind_service.bind_service.service_instance_id + service_instance_id = ibm_container_bind_service.bind_service.service_instance_id namespace_id = "default" } ` diff --git a/ibm/service/kubernetes/resource_ibm_container_cluster.go b/ibm/service/kubernetes/resource_ibm_container_cluster.go index c82a01bb9d..99445c8a96 100644 --- a/ibm/service/kubernetes/resource_ibm_container_cluster.go +++ b/ibm/service/kubernetes/resource_ibm_container_cluster.go @@ -701,26 +701,15 @@ func resourceIBMContainerClusterCreate(d *schema.ResourceData, meta interface{}) } } - _, err = waitForClusterMasterAvailable(d, meta) + _, err = waitForClusterMasterAvailable(d, meta, d.Timeout(schema.TimeoutCreate)) if err != nil { return err } - waitForState := strings.ToLower(d.Get("wait_till").(string)) - - switch waitForState { - case strings.ToLower(oneWorkerNodeReady): - _, err = waitForClusterOneWorkerAvailable(d, meta) - if err != nil { - return err - } - - case strings.ToLower(clusterNormal): - pendingStates := []string{clusterDeploying, clusterRequested, clusterPending, clusterDeployed, clusterCritical, clusterWarning} - _, err = waitForClusterState(d, meta, waitForState, pendingStates) - if err != nil { - return err - } + timeoutStage := strings.ToLower(d.Get("wait_till").(string)) + err = waitForCluster(d, timeoutStage, d.Timeout(schema.TimeoutCreate), meta) + if err != nil { + return err } d.Set("force_delete_storage", d.Get("force_delete_storage").(bool)) @@ -759,6 +748,31 @@ func resourceIBMContainerClusterCreate(d *schema.ResourceData, meta interface{}) return resourceIBMContainerClusterUpdate(d, meta) } +func waitForCluster(d *schema.ResourceData, timeoutStage string, timeout time.Duration, meta interface{}) error { + switch timeoutStage { + case strings.ToLower(masterNodeReady): + _, err := waitForClusterMasterAvailable(d, meta, timeout) + if err != nil { + return err + } + + case strings.ToLower(oneWorkerNodeReady): + _, err := waitForClusterOneWorkerAvailable(d, meta, timeout) + if err != nil { + return err + } + + case clusterNormal: + pendingStates := []string{clusterDeploying, clusterRequested, clusterPending, clusterDeployed, clusterCritical, clusterWarning} + _, err := waitForClusterState(d, meta, clusterNormal, pendingStates, timeout) + if err != nil { + return err + } + } + + return nil +} + func resourceIBMContainerClusterRead(d *schema.ResourceData, meta interface{}) error { csClient, err := meta.(conns.ClientSession).ContainerAPI() if err != nil { @@ -1275,46 +1289,8 @@ func waitForClusterDelete(d *schema.ResourceData, meta interface{}) (interface{} return stateConf.WaitForState() } -// WaitForClusterAvailable 
Waits for cluster creation -func WaitForClusterAvailable(d *schema.ResourceData, meta interface{}, target v1.ClusterTargetHeader) (interface{}, error) { - csClient, err := meta.(conns.ClientSession).ContainerAPI() - if err != nil { - return nil, err - } - log.Printf("Waiting for cluster (%s) to be available.", d.Id()) - id := d.Id() - - stateConf := &resource.StateChangeConf{ - Pending: []string{"retry", clusterProvisioning}, - Target: []string{clusterNormal}, - Refresh: clusterStateRefreshFunc(csClient.Clusters(), id, target), - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 10 * time.Second, - MinTimeout: 10 * time.Second, - } - - return stateConf.WaitForState() -} - -func clusterStateRefreshFunc(client v1.Clusters, instanceID string, target v1.ClusterTargetHeader) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - clusterFields, err := client.FindWithOutShowResourcesCompatible(instanceID, target) - if err != nil { - return nil, "", fmt.Errorf("[ERROR] clusterStateRefreshFunc Error retrieving cluster: %s", err) - } - // Check active transactions - log.Println("Checking cluster") - //Check for cluster state to be normal - log.Println("Checking cluster state", strings.Compare(clusterFields.State, clusterNormal)) - if strings.Compare(clusterFields.State, clusterNormal) != 0 { - return clusterFields, clusterProvisioning, nil - } - return clusterFields, clusterNormal, nil - } -} - // waitForClusterMasterAvailable Waits for cluster creation -func waitForClusterMasterAvailable(d *schema.ResourceData, meta interface{}) (interface{}, error) { +func waitForClusterMasterAvailable(d *schema.ResourceData, meta interface{}, timeout time.Duration) (interface{}, error) { targetEnv, err := getClusterTargetHeader(d, meta) if err != nil { return nil, err @@ -1339,7 +1315,7 @@ func waitForClusterMasterAvailable(d *schema.ResourceData, meta interface{}) (in } return clusterFields, deployInProgress, nil }, - Timeout: d.Timeout(schema.TimeoutCreate), + Timeout: timeout, Delay: 10 * time.Second, MinTimeout: 10 * time.Second, } @@ -1347,7 +1323,7 @@ func waitForClusterMasterAvailable(d *schema.ResourceData, meta interface{}) (in return stateConf.WaitForState() } -func waitForClusterState(d *schema.ResourceData, meta interface{}, waitForState string, pendingState []string) (interface{}, error) { +func waitForClusterState(d *schema.ResourceData, meta interface{}, waitForState string, pendingState []string, timeout time.Duration) (interface{}, error) { targetEnv, err := getClusterTargetHeader(d, meta) if err != nil { return nil, err @@ -1376,7 +1352,7 @@ func waitForClusterState(d *schema.ResourceData, meta interface{}, waitForState return cls, cls.State, nil }, - Timeout: d.Timeout(schema.TimeoutCreate), + Timeout: timeout, Delay: 10 * time.Second, MinTimeout: 10 * time.Second, } @@ -1385,7 +1361,7 @@ func waitForClusterState(d *schema.ResourceData, meta interface{}, waitForState } // waitForClusterOneWorkerAvailable Waits for cluster creation -func waitForClusterOneWorkerAvailable(d *schema.ResourceData, meta interface{}) (interface{}, error) { +func waitForClusterOneWorkerAvailable(d *schema.ResourceData, meta interface{}, timeout time.Duration) (interface{}, error) { targetEnv, err := getClusterTargetHeader(d, meta) if err != nil { return nil, err @@ -1435,7 +1411,7 @@ func waitForClusterOneWorkerAvailable(d *schema.ResourceData, meta interface{}) } return nil, normal, nil }, - Timeout: d.Timeout(schema.TimeoutCreate), + Timeout: timeout, Delay: 10 * time.Second, MinTimeout: 10 * 
time.Second, } @@ -1483,41 +1459,6 @@ func workerStateRefreshFunc(client v1.Workers, instanceID string, target v1.Clus } } -func WaitForClusterCreation(d *schema.ResourceData, meta interface{}, target v1.ClusterTargetHeader) (interface{}, error) { - csClient, err := meta.(conns.ClientSession).ContainerAPI() - if err != nil { - return nil, err - } - log.Printf("Waiting for cluster (%s) to be available.", d.Id()) - ClusterID := d.Id() - - stateConf := &resource.StateChangeConf{ - Pending: []string{"retry", clusterProvisioning}, - Target: []string{clusterNormal}, - Refresh: func() (interface{}, string, error) { - workerFields, err := csClient.Workers().List(ClusterID, target) - log.Println("Total workers: ", len(workerFields)) - if err != nil { - return nil, "", fmt.Errorf("[ERROR] Error retrieving workers for cluster: %s", err) - } - log.Println("Checking workers...") - //verifying for atleast sing node to be in normal state - for _, e := range workerFields { - log.Println("Worker node status: ", e.State) - if e.State == workerNormal { - return workerFields, workerNormal, nil - } - } - return workerFields, workerProvisioning, nil - }, - Timeout: d.Timeout(schema.TimeoutCreate), - Delay: 10 * time.Second, - MinTimeout: 10 * time.Second, - } - - return stateConf.WaitForState() -} - func WaitForSubnetAvailable(d *schema.ResourceData, meta interface{}, target v1.ClusterTargetHeader) (interface{}, error) { csClient, err := meta.(conns.ClientSession).ContainerAPI() if err != nil { diff --git a/ibm/service/kubernetes/resource_ibm_container_cluster_feature.go b/ibm/service/kubernetes/resource_ibm_container_cluster_feature.go index d1079a132c..72c718705e 100644 --- a/ibm/service/kubernetes/resource_ibm_container_cluster_feature.go +++ b/ibm/service/kubernetes/resource_ibm_container_cluster_feature.go @@ -6,6 +6,7 @@ package kubernetes import ( "fmt" "log" + "strings" "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -326,7 +327,7 @@ func resourceIBMContainerClusterFeatureUpdate(d *schema.ResourceData, meta inter return resourceIBMContainerClusterFeatureRead(d, meta) } -// WaitForClusterAvailable Waits for cluster creation +// WaitForClusterAvailableForFeatureUpdate Waits for cluster creation func WaitForClusterAvailableForFeatureUpdate(cluster string, timeout time.Duration, meta interface{}, target v1.ClusterTargetHeader) (interface{}, error) { csClient, err := meta.(conns.ClientSession).ContainerAPI() if err != nil { @@ -347,6 +348,23 @@ func WaitForClusterAvailableForFeatureUpdate(cluster string, timeout time.Durati return stateConf.WaitForState() } +func clusterStateRefreshFunc(client v1.Clusters, instanceID string, target v1.ClusterTargetHeader) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + clusterFields, err := client.FindWithOutShowResourcesCompatible(instanceID, target) + if err != nil { + return nil, "", fmt.Errorf("[ERROR] clusterStateRefreshFunc Error retrieving cluster: %s", err) + } + // Check active transactions + log.Println("Checking cluster") + //Check for cluster state to be normal + log.Println("Checking cluster state", strings.Compare(clusterFields.State, clusterNormal)) + if strings.Compare(clusterFields.State, clusterNormal) != 0 { + return clusterFields, clusterProvisioning, nil + } + return clusterFields, clusterNormal, nil + } +} + func WaitForWorkerAvailableForFeatureUpdate(cluster string, timeout time.Duration, meta interface{}, target v1.ClusterTargetHeader) (interface{}, error) { csClient, err := 
meta.(conns.ClientSession).ContainerAPI() if err != nil { diff --git a/ibm/service/kubernetes/resource_ibm_container_cluster_test.go b/ibm/service/kubernetes/resource_ibm_container_cluster_test.go index de6aaa134c..017de8e89b 100644 --- a/ibm/service/kubernetes/resource_ibm_container_cluster_test.go +++ b/ibm/service/kubernetes/resource_ibm_container_cluster_test.go @@ -27,7 +27,7 @@ func TestAccIBMContainerCluster_basic(t *testing.T) { CheckDestroy: testAccCheckIBMContainerClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccCheckIBMContainerClusterBasic(clusterName), + Config: testAccCheckIBMContainerClusterBasic(clusterName, "masterNodeReady"), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr( "ibm_container_cluster.testacc_cluster", "name", clusterName), @@ -42,9 +42,9 @@ func TestAccIBMContainerCluster_basic(t *testing.T) { resource.TestCheckResourceAttr( "ibm_container_cluster.testacc_cluster", "labels.%", "2"), resource.TestCheckResourceAttr( - "ibm_container_cluster.testacc_cluster", "tags.#", "1"), + "ibm_container_cluster.testacc_cluster", "image_security_enforcement", "false"), resource.TestCheckResourceAttr( - "ibm_container_cluster.testacc_cluster", "workers_info.#", "2"), + "ibm_container_cluster.testacc_cluster", "workers_info.#", "1"), ), }, { @@ -53,7 +53,7 @@ func TestAccIBMContainerCluster_basic(t *testing.T) { resource.TestCheckResourceAttr( "ibm_container_cluster.testacc_cluster", "name", clusterName), resource.TestCheckResourceAttr( - "ibm_container_cluster.testacc_cluster", "default_pool_size", "2"), + "ibm_container_cluster.testacc_cluster", "default_pool_size", "1"), resource.TestCheckResourceAttr( "ibm_container_cluster.testacc_cluster", "hardware", "shared"), resource.TestCheckResourceAttr( @@ -61,11 +61,11 @@ func TestAccIBMContainerCluster_basic(t *testing.T) { resource.TestCheckResourceAttrSet( "ibm_container_cluster.testacc_cluster", "resource_group_id"), resource.TestCheckResourceAttr( - "ibm_container_cluster.testacc_cluster", "labels.%", "3"), + "ibm_container_cluster.testacc_cluster", "labels.%", "2"), resource.TestCheckResourceAttr( - "ibm_container_cluster.testacc_cluster", "tags.#", "2"), + "ibm_container_cluster.testacc_cluster", "image_security_enforcement", "true"), resource.TestCheckResourceAttr( - "ibm_container_cluster.testacc_cluster", "workers_info.#", "4"), + "ibm_container_cluster.testacc_cluster", "workers_info.#", "1"), ), }, }, @@ -227,7 +227,7 @@ func testAccCheckIBMContainerClusterDestroy(s *terraform.State) error { return nil } -func testAccCheckIBMContainerClusterBasic(clusterName string) string { +func testAccCheckIBMContainerClusterBasic(clusterName, wait_till string) string { return fmt.Sprintf(` data "ibm_resource_group" "testacc_ds_resource_group" { @@ -245,13 +245,17 @@ resource "ibm_container_cluster" "testacc_cluster" { public_vlan_id = "%s" private_vlan_id = "%s" no_subnet = true - tags = ["test"] + labels = { + "test" = "test-label" + "test1" = "test-label1" + } + wait_till = "%s" timeouts { create = "720m" update = "720m" } -} `, clusterName, acc.Datacenter, acc.KubeVersion, acc.MachineType, acc.PublicVlanID, acc.PrivateVlanID) +} `, clusterName, acc.Datacenter, acc.KubeVersion, acc.MachineType, acc.PublicVlanID, acc.PrivateVlanID, wait_till) } func testAccCheckIBMContainerClusterKmsEnable(clusterName, kmsInstanceName, rootKeyName string) string { @@ -331,7 +335,7 @@ data "ibm_resource_group" "testacc_ds_resource_group" { resource "ibm_container_cluster" "testacc_cluster" { name = "%s" 
datacenter = "%s"
-  default_pool_size = 2
+  default_pool_size = 2 # default_pool_size is apply-once, so it should not be modified on update
  hardware          = "shared"
  resource_group_id = data.ibm_resource_group.testacc_ds_resource_group.id
  kube_version      = "%s"
@@ -340,7 +344,7 @@ resource "ibm_container_cluster" "testacc_cluster" {
  private_vlan_id = "%s"
  no_subnet       = true
  update_all_workers = true
-  tags = ["test", "once"]
+  image_security_enforcement = true
  timeouts {
    create = "720m"
    update = "720m"
diff --git a/website/docs/d/container_cluster.html.markdown b/website/docs/d/container_cluster.html.markdown
index fed8bf814b..21b7656011 100644
--- a/website/docs/d/container_cluster.html.markdown
+++ b/website/docs/d/container_cluster.html.markdown
@@ -33,6 +33,8 @@ Review the argument references that you can specify for your data source.
- `name` - (Optional, String) The name or ID of the cluster.
- `list_bounded_services`- (Optional, Bool) If set to **false** services which are bound to the cluster are not going to be listed. The default value is **true**.
- `resource_group_id` - (Optional, String) The ID of the resource group where your cluster is provisioned into. To list resource groups, run `ibmcloud resource groups` or use the `ibm_resource_group` data source.
+- `wait_till` - (Optional, String) Cluster creation happens in multiple stages; to avoid long waits during resource execution, this argument makes the data source wait until the specified stage is reached before completing. The supported stages are `MasterNodeReady` (wait until the master node is ready), `OneWorkerNodeReady` (wait until one worker node is in the ready state), and `Normal` (complete when the cluster is in a [Normal](https://cloud.ibm.com/docs/containers?topic=containers-cluster-states-reference#cluster-state-normal) state). At the moment, wait_till `Normal` also tolerates the critical and warning states that temporarily occur during cluster creation, but it cannot distinguish them from actual critical or warning states. If you do not specify this option, the provider will not wait.
+- `wait_till_timeout` - (Optional, Int) This parameter can be used to set the `wait_till` timeout in minutes. The `wait_till_timeout` can only be used with `wait_till`. The default value is 20 minutes.

**Deprecated reference**

@@ -72,6 +74,7 @@ In addition to all argument reference list, you can access the following attribu
- `public_service_endpoint_url` - (String) The URL of the public service endpoint for your cluster.
- `private_service_endpoint` - (Bool) Indicates if the private service endpoint is enabled (**true**) or disabled (**false**) for a cluster.
- `private_service_endpoint_url` - (String) The URL of the private service endpoint for your cluster.
+- `state` - (String) The state of the cluster.
- `vlans`- (List of objects) A list of VLANs that are attached to the cluster. 
Nested scheme for `vlans`: From 90f03de95c9c5ac11eba92be7ebe682de028e179 Mon Sep 17 00:00:00 2001 From: Ujjwal Kumar Date: Wed, 4 Sep 2024 08:07:34 +0530 Subject: [PATCH 4/4] Updated ibm_is_subnet_reserved_ip docs --- .../r/is_subnet_reserved_ip.html.markdown | 22 ++++++++++--------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/website/docs/r/is_subnet_reserved_ip.html.markdown b/website/docs/r/is_subnet_reserved_ip.html.markdown index 8bdc6aab21..0cbd06732a 100644 --- a/website/docs/r/is_subnet_reserved_ip.html.markdown +++ b/website/docs/r/is_subnet_reserved_ip.html.markdown @@ -7,7 +7,7 @@ description: |- --- # ibm_is_subnet_reserved_ip -Create, update, or delete a subnet. For more information, about associated reserved IP subnet, see [reserved IP subnet](https://cloud.ibm.com/docs/vpc?topic=vpc-troubleshoot-reserved-ip). +Create, update, or delete a subnet reserved IP. For more information, about associated reserved IP subnet, see [reserved IP subnet](https://cloud.ibm.com/docs/vpc?topic=vpc-troubleshoot-reserved-ip). **Note:** VPC infrastructure services are a regional specific based endpoint, by default targets to `us-south`. Please make sure to target right region in the provider block as shown in the `provider.tf` file, if VPC service is created in region other than `us-south`. @@ -90,34 +90,36 @@ resource "ibm_is_subnet_reserved_ip" "example5" { ## Argument reference Review the argument references that you can specify for your resource. -- `address` - (Optional, Forces new resource, String) The IP address. -- `auto_delete`- (Optional, Bool) If reserved IP is auto deleted. -- `name` - (Optional, String) The name of the reserved IP. +- `address` - (Optional, Forces new resource, String) The IP address to reserve, which must not already be reserved on the subnet. If unspecified, an available address on the subnet will automatically be selected. +- `auto_delete`- (Optional, Bool) Indicates whether this reserved IP member will be automatically deleted when either target is deleted, or the reserved IP is unbound. Must be false if the reserved IP is unbound. +- `name` - (Optional, String) The name for this reserved IP. The name must not be used by another reserved IP in the subnet. Names starting with ibm- are reserved for provider-owned resources, and are not allowed. If unspecified, the name will be a hyphenated list of randomly-selected words. ~> **NOTE:** raise error if name is given with a prefix `ibm- `. - `subnet` - (Required, Forces new resource, String) The subnet ID for the reserved IP. -- `target` - (Optional, string) The ID for the target endpoint gateway for the reserved IP. - +- `target` - (Optional, string) The target to bind this reserved IP to. The target must be in the same VPC. If unspecified, the reserved IP will be created unbound. The following targets are supported: + - An endpoint gateway not already bound to a reserved IP in the subnet's zone. + - A virtual network interface. + ## Attribute reference In addition to all argument reference list, you can access the following attribute reference after your resource is created. - `created_at` - (Timestamp) The date and time that the reserved IP was created.", - `href` - (String) The URL for this reserved IP. - `id` - (String) The combination of the subnet ID and reserved IP ID, separated by **/**. -- `lifecycle_state` - (String) The lifecycle state of the reserved IP. 
[ deleting, failed, pending, stable, suspended, updating, waiting ]
+- `lifecycle_state` - (String) The lifecycle state of the reserved IP. [ **deleting**, **failed**, **pending**, **stable**, **suspended**, **updating**, **waiting** ]
- `owner` - (String) The owner of a reserved IP, defining whether it is managed by the user or the provider.
-- `reserved_ip` - (String) The reserved IP.
+- `reserved_ip` - (String) The unique identifier for this reserved IP.
- `resource_type` - (String) The resource type.
- `target` - (String) The ID for the target for the reserved IP.
- `target_crn` - (String) The crn of the target for the reserved IP.

## Import

-The `ibm_is_subnet_reserved_ip` and `ibm_is_subnet` resource can be imported by using subnet ID and reserved IP ID separated by **/**.
+The `ibm_is_subnet_reserved_ip` resource can be imported by using subnet ID and reserved IP ID separated by **/**.

**Syntax**

```
$ terraform import ibm_is_subnet_reserved_ip.example <subnet_ID>/<reserved_IP_ID>
```

**Example**
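
A hypothetical command with placeholder IDs (substitute the IDs of your own subnet and reserved IP):

```
$ terraform import ibm_is_subnet_reserved_ip.example 0716-13315f58-d338-4a21-9733-4d4f0a2c58c1/0716-617de4d8-2169-4f14-941a-3e08cdf6246d
```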