Merge pull request #5335 from powervs-ibm/resource-placement-group
Refactor ibm_pi_placement_group resource
yussufsh authored May 14, 2024
2 parents 9c34be4 + bd3cc03 commit 2aa8b25
Showing 5 changed files with 197 additions and 122 deletions.
7 changes: 7 additions & 0 deletions ibm/service/power/ibm_pi_constants.go
@@ -29,6 +29,7 @@ const (
Arg_NetworkName = "pi_network_name"
Arg_PIInstanceSharedProcessorPool = "pi_shared_processor_pool"
Arg_PlacementGroupName = "pi_placement_group_name"
Arg_PlacementGroupPolicy = "pi_placement_group_policy"
Arg_PVMInstanceActionType = "pi_action"
Arg_PVMInstanceHealthStatus = "pi_health_status"
Arg_PVMInstanceId = "pi_instance_id"
@@ -320,9 +321,15 @@ const (
Attr_WWN = "wwn"
OS_IBMI = "ibmi"

// Affinity Values
Affinity = "affinity"
AntiAffinity = "anti-affinity"

// States
State_Active = "active"
State_ACTIVE = "ACTIVE"
State_Added = "added"
State_Adding = "adding"
State_Available = "available"
State_BUILD = "BUILD"
State_Creating = "creating"
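
The new Arg_PlacementGroupPolicy constant and the Affinity/AntiAffinity values are consumed by the refactored placement group resource, which is not part of the files shown in this excerpt. As a rough, hypothetical sketch of how they might back the policy argument (the actual schema in resource_ibm_pi_placement_group.go may use different helpers and field settings):

```go
package power

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)

// placementGroupPolicySchema is a hypothetical helper showing how the new
// Arg_PlacementGroupPolicy constant and the Affinity/AntiAffinity values
// could be wired into the resource schema; it is not part of this commit.
func placementGroupPolicySchema() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		Arg_PlacementGroupPolicy: {
			Type:         schema.TypeString,
			Required:     true,
			ForceNew:     true,
			ValidateFunc: validation.StringInSlice([]string{Affinity, AntiAffinity}, false),
			Description:  "Policy of the placement group: affinity or anti-affinity.",
		},
	}
}
```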
108 changes: 90 additions & 18 deletions ibm/service/power/resource_ibm_pi_instance.go
@@ -12,7 +12,7 @@ import (
"time"

"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

st "github.com/IBM-Cloud/power-go-client/clients/instance"
@@ -184,7 +184,6 @@ func ResourceIBMPIInstance() *schema.Resource {
},
helpers.PIPlacementGroupID: {
Type: schema.TypeString,
Computed: true,
Optional: true,
Description: "Placement group ID",
},
@@ -788,12 +787,17 @@ func resourceIBMPIInstanceUpdate(ctx context.Context, d *schema.ResourceData, me
body := &models.PlacementGroupServer{
ID: &instanceID,
}
_, err := pgClient.DeleteMember(placementGroupID, body)
pgID, err := pgClient.DeleteMember(placementGroupID, body)
if err != nil {
// ignore delete member error where the server is already not in the PG
if !strings.Contains(err.Error(), "is not part of placement-group") {
return diag.FromErr(err)
}
} else {
_, err = isWaitForPIInstancePlacementGroupDelete(ctx, pgClient, *pgID.ID, instanceID)
if err != nil {
return diag.FromErr(err)
}
}
}

@@ -803,9 +807,14 @@ func resourceIBMPIInstanceUpdate(ctx context.Context, d *schema.ResourceData, me
body := &models.PlacementGroupServer{
ID: &instanceID,
}
_, err := pgClient.AddMember(placementGroupID, body)
pgID, err := pgClient.AddMember(placementGroupID, body)
if err != nil {
return diag.FromErr(err)
} else {
_, err = isWaitForPIInstancePlacementGroupAdd(ctx, pgClient, *pgID.ID, instanceID)
if err != nil {
return diag.FromErr(err)
}
}
}
}
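
With these changes the update path no longer returns as soon as AddMember or DeleteMember responds; it polls until the membership change is visible on the placement group. A minimal, self-contained sketch of the Pending/Target/Refresh polling pattern the new waits rely on, using a toy refresh function in place of the Power VS client (all names below are illustrative only):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
)

// fakeMembershipRefresh reports "adding" for the first two polls and
// "added" afterwards, standing in for a placement group membership check.
func fakeMembershipRefresh() retry.StateRefreshFunc {
	polls := 0
	return func() (interface{}, string, error) {
		polls++
		if polls < 3 {
			return polls, "adding", nil
		}
		return polls, "added", nil
	}
}

func main() {
	stateConf := &retry.StateChangeConf{
		Pending:    []string{"adding"},
		Target:     []string{"added"},
		Refresh:    fakeMembershipRefresh(),
		Delay:      time.Second,
		MinTimeout: time.Second,
		Timeout:    time.Minute,
	}
	// Blocks until the refresh function reports a target state or times out.
	result, err := stateConf.WaitForStateContext(context.Background())
	fmt.Println(result, err)
}
```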
@@ -877,7 +886,7 @@ func isWaitForPIInstanceDeleted(ctx context.Context, client *st.IBMPIInstanceCli

log.Printf("Waiting for (%s) to be deleted.", id)

stateConf := &resource.StateChangeConf{
stateConf := &retry.StateChangeConf{
Pending: []string{"retry", helpers.PIInstanceDeleting},
Target: []string{helpers.PIInstanceNotFound},
Refresh: isPIInstanceDeleteRefreshFunc(client, id),
@@ -889,7 +898,7 @@
return stateConf.WaitForStateContext(ctx)
}

func isPIInstanceDeleteRefreshFunc(client *st.IBMPIInstanceClient, id string) resource.StateRefreshFunc {
func isPIInstanceDeleteRefreshFunc(client *st.IBMPIInstanceClient, id string) retry.StateRefreshFunc {
return func() (interface{}, string, error) {
pvm, err := client.Get(id)
if err != nil {
@@ -908,7 +917,7 @@ func isWaitForPIInstanceAvailable(ctx context.Context, client *st.IBMPIInstanceC
queryTimeOut = warningTimeOut
}

stateConf := &resource.StateChangeConf{
stateConf := &retry.StateChangeConf{
Pending: []string{"PENDING", helpers.PIInstanceBuilding, helpers.PIInstanceHealthWarning},
Target: []string{helpers.PIInstanceAvailable, helpers.PIInstanceHealthOk, "ERROR", "", "SHUTOFF"},
Refresh: isPIInstanceRefreshFunc(client, id, instanceReadyStatus),
@@ -920,7 +929,7 @@ func isWaitForPIInstanceAvailable(ctx context.Context, client *st.IBMPIInstanceC
return stateConf.WaitForStateContext(ctx)
}

func isPIInstanceRefreshFunc(client *st.IBMPIInstanceClient, id, instanceReadyStatus string) resource.StateRefreshFunc {
func isPIInstanceRefreshFunc(client *st.IBMPIInstanceClient, id, instanceReadyStatus string) retry.StateRefreshFunc {
return func() (interface{}, string, error) {

pvm, err := client.Get(id)
@@ -944,12 +953,76 @@ func isPIInstanceRefreshFunc(client *st.IBMPIInstanceClient, id, instanceReadySt
}
}

func isWaitForPIInstancePlacementGroupAdd(ctx context.Context, client *st.IBMPIPlacementGroupClient, pgID string, id string) (interface{}, error) {
log.Printf("Waiting for PIInstance Placement Group (%s) to be updated ", id)

queryTimeOut := activeTimeOut

stateConf := &retry.StateChangeConf{
Pending: []string{State_Adding},
Target: []string{State_Added},
Refresh: isPIInstancePlacementGroupAddRefreshFunc(client, pgID, id),
Delay: 30 * time.Second,
MinTimeout: queryTimeOut,
Timeout: 10 * time.Minute,
}

return stateConf.WaitForStateContext(ctx)
}

func isPIInstancePlacementGroupAddRefreshFunc(client *st.IBMPIPlacementGroupClient, pgID string, id string) retry.StateRefreshFunc {
return func() (interface{}, string, error) {
pg, err := client.Get(pgID)
if err != nil {
return nil, "", err
}
for _, x := range pg.Members {
if x == id {
return pg, State_Added, nil
}
}
return pg, State_Adding, nil
}
}

func isWaitForPIInstancePlacementGroupDelete(ctx context.Context, client *st.IBMPIPlacementGroupClient, pgID string, id string) (interface{}, error) {
log.Printf("Waiting for PIInstance Placement Group (%s) to be updated ", id)

queryTimeOut := activeTimeOut

stateConf := &retry.StateChangeConf{
Pending: []string{State_Deleting},
Target: []string{State_Deleted},
Refresh: isPIInstancePlacementGroupDeleteRefreshFunc(client, pgID, id),
Delay: 30 * time.Second,
MinTimeout: queryTimeOut,
Timeout: 10 * time.Minute,
}

return stateConf.WaitForStateContext(ctx)
}

func isPIInstancePlacementGroupDeleteRefreshFunc(client *st.IBMPIPlacementGroupClient, pgID string, id string) retry.StateRefreshFunc {
return func() (interface{}, string, error) {
pg, err := client.Get(pgID)
if err != nil {
return nil, "", err
}
for _, x := range pg.Members {
if x == id {
return pg, State_Deleting, nil
}
}
return pg, State_Deleted, nil
}
}
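
The add and delete refresh functions above are mirror images: both list the placement group's members and report a terminal state once the instance is (or is no longer) present. A hypothetical consolidation, not part of this commit, could look like the following sketch (same package, reusing the client type, state constants, and activeTimeOut shown above):

```go
// waitForPlacementGroupMembership is a hypothetical helper covering both
// directions: wantMember=true waits for the instance to appear in the
// placement group, wantMember=false waits for it to disappear.
func waitForPlacementGroupMembership(ctx context.Context, client *st.IBMPIPlacementGroupClient, pgID, instanceID string, wantMember bool) (interface{}, error) {
	pending, target := State_Adding, State_Added
	if !wantMember {
		pending, target = State_Deleting, State_Deleted
	}
	stateConf := &retry.StateChangeConf{
		Pending: []string{pending},
		Target:  []string{target},
		Refresh: func() (interface{}, string, error) {
			pg, err := client.Get(pgID)
			if err != nil {
				return nil, "", err
			}
			member := false
			for _, m := range pg.Members {
				if m == instanceID {
					member = true
					break
				}
			}
			if member == wantMember {
				return pg, target, nil
			}
			return pg, pending, nil
		},
		Delay:      30 * time.Second,
		MinTimeout: activeTimeOut,
		Timeout:    10 * time.Minute,
	}
	return stateConf.WaitForStateContext(ctx)
}
```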

func isWaitForPIInstanceSoftwareLicenses(ctx context.Context, client *st.IBMPIInstanceClient, id string, softwareLicenses *models.SoftwareLicenses) (interface{}, error) {
log.Printf("Waiting for PIInstance Software Licenses (%s) to be updated ", id)

queryTimeOut := activeTimeOut

stateConf := &resource.StateChangeConf{
stateConf := &retry.StateChangeConf{
Pending: []string{"notdone"},
Target: []string{"done"},
Refresh: isPIInstanceSoftwareLicensesRefreshFunc(client, id, softwareLicenses),
@@ -961,7 +1034,7 @@ func isWaitForPIInstanceSoftwareLicenses(ctx context.Context, client *st.IBMPIIn
return stateConf.WaitForStateContext(ctx)
}

func isPIInstanceSoftwareLicensesRefreshFunc(client *st.IBMPIInstanceClient, id string, softwareLicenses *models.SoftwareLicenses) resource.StateRefreshFunc {
func isPIInstanceSoftwareLicensesRefreshFunc(client *st.IBMPIInstanceClient, id string, softwareLicenses *models.SoftwareLicenses) retry.StateRefreshFunc {
return func() (interface{}, string, error) {

pvm, err := client.Get(id)
@@ -1005,7 +1078,7 @@ func isWaitForPIInstanceShutoff(ctx context.Context, client *st.IBMPIInstanceCli
queryTimeOut = warningTimeOut
}

stateConf := &resource.StateChangeConf{
stateConf := &retry.StateChangeConf{
Pending: []string{StatusPending, helpers.PIInstanceBuilding, helpers.PIInstanceHealthWarning},
Target: []string{helpers.PIInstanceHealthOk, StatusError, "", StatusShutoff},
Refresh: isPIInstanceShutoffRefreshFunc(client, id, instanceReadyStatus),
@@ -1016,7 +1089,8 @@ func isWaitForPIInstanceShutoff(ctx context.Context, client *st.IBMPIInstanceCli

return stateConf.WaitForStateContext(ctx)
}
func isPIInstanceShutoffRefreshFunc(client *st.IBMPIInstanceClient, id, instanceReadyStatus string) resource.StateRefreshFunc {

func isPIInstanceShutoffRefreshFunc(client *st.IBMPIInstanceClient, id, instanceReadyStatus string) retry.StateRefreshFunc {
return func() (interface{}, string, error) {

pvm, err := client.Get(id)
@@ -1051,7 +1125,7 @@ func encodeBase64(userData string) string {
func isWaitForPIInstanceStopped(ctx context.Context, client *st.IBMPIInstanceClient, id string) (interface{}, error) {
log.Printf("Waiting for PIInstance (%s) to be stopped and powered off ", id)

stateConf := &resource.StateChangeConf{
stateConf := &retry.StateChangeConf{
Pending: []string{"STOPPING", "RESIZE", "VERIFY_RESIZE", helpers.PIInstanceHealthWarning},
Target: []string{"OK", "SHUTOFF"},
Refresh: isPIInstanceRefreshFuncOff(client, id),
@@ -1063,7 +1137,7 @@ func isWaitForPIInstanceStopped(ctx context.Context, client *st.IBMPIInstanceCli
return stateConf.WaitForStateContext(ctx)
}

func isPIInstanceRefreshFuncOff(client *st.IBMPIInstanceClient, id string) resource.StateRefreshFunc {
func isPIInstanceRefreshFuncOff(client *st.IBMPIInstanceClient, id string) retry.StateRefreshFunc {
return func() (interface{}, string, error) {

log.Printf("Calling the check Refresh status of the pvm instance %s", id)
@@ -1094,7 +1168,6 @@ func stopLparForResourceChange(ctx context.Context, client *st.IBMPIInstanceClie
}

// Start the lpar

func startLparAfterResourceChange(ctx context.Context, client *st.IBMPIInstanceClient, id string) error {
body := &models.PVMInstanceAction{
Action: flex.PtrToString("start"),
@@ -1110,7 +1183,6 @@ func startLparAfterResourceChange(ctx context.Context, client *st.IBMPIInstanceC
}

// Stop / Modify / Start only when the lpar is off limits

func performChangeAndReboot(ctx context.Context, client *st.IBMPIInstanceClient, id, cloudInstanceID string, mem, procs float64) error {
/*
These are the steps
@@ -1156,7 +1228,7 @@ func performChangeAndReboot(ctx context.Context, client *st.IBMPIInstanceClient,
func isWaitforPIInstanceUpdate(ctx context.Context, client *st.IBMPIInstanceClient, id string) (interface{}, error) {
log.Printf("Waiting for PIInstance (%s) to be ACTIVE or SHUTOFF AFTER THE RESIZE Due to DLPAR Operation ", id)

stateConf := &resource.StateChangeConf{
stateConf := &retry.StateChangeConf{
Pending: []string{"RESIZE", "VERIFY_RESIZE"},
Target: []string{"ACTIVE", "SHUTOFF", helpers.PIInstanceHealthOk},
Refresh: isPIInstanceShutAfterResourceChange(client, id),
@@ -1168,7 +1240,7 @@ func isWaitforPIInstanceUpdate(ctx context.Context, client *st.IBMPIInstanceClie
return stateConf.WaitForStateContext(ctx)
}

func isPIInstanceShutAfterResourceChange(client *st.IBMPIInstanceClient, id string) resource.StateRefreshFunc {
func isPIInstanceShutAfterResourceChange(client *st.IBMPIInstanceClient, id string) retry.StateRefreshFunc {
return func() (interface{}, string, error) {

pvm, err := client.Get(id)
