// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
// ----------------------------------------------------------------------------
//
// *** AUTO GENERATED CODE *** Type: MMv1 ***
//
// ----------------------------------------------------------------------------
//
// This file is automatically generated by Magic Modules and manual
// changes will be clobbered when the file is regenerated.
//
// Please read more about how to change this file in
// .github/CONTRIBUTING.md.
//
// ----------------------------------------------------------------------------
package netapp
import (
"fmt"
"log"
"reflect"
"strings"
"time"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource"
transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport"
"github.com/hashicorp/terraform-provider-google-beta/google-beta/verify"
)
// NetAppVolumeReplicationWaitForMirror is a custom function which polls the replication until mirrorState reaches the given target state
func NetAppVolumeReplicationWaitForMirror(d *schema.ResourceData, meta interface{}, targetState string) error {
config := meta.(*transport_tpg.Config)
userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
if err != nil {
return err
}
url, err := tpgresource.ReplaceVars(d, config, "{{NetappBasePath}}projects/{{project}}/locations/{{location}}/volumes/{{volume_name}}/replications/{{name}}")
if err != nil {
return err
}
billingProject := ""
project, err := tpgresource.GetProject(d, config)
if err != nil {
return fmt.Errorf("Error fetching project for volume replication: %s", err)
}
billingProject = project
// err == nil indicates that the billing_project value was found
if bp, err := tpgresource.GetBillingProject(d, config); err == nil {
billingProject = bp
}
for {
res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
Config: config,
Method: "GET",
Project: billingProject,
RawURL: url,
UserAgent: userAgent,
})
if err != nil {
return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("NetappVolumeReplication %q", d.Id()))
}
log.Printf("[DEBUG] waiting for mirrorState. actual: %v, target: %v", res["mirrorState"], targetState)
if res["mirrorState"] == targetState {
break
}
time.Sleep(30 * time.Second)
// This method can potentially run for days, e.g. when setting up a replication for a source volume
// with dozens of TiB of data. Timeout handling yes/no?
}
return nil
}
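// ResourceNetappVolumeReplication defines the Terraform schema, CRUD functions, timeouts and custom diffs
// for the NetApp volume replication resource.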
func ResourceNetappVolumeReplication() *schema.Resource {
return &schema.Resource{
Create: resourceNetappVolumeReplicationCreate,
Read: resourceNetappVolumeReplicationRead,
Update: resourceNetappVolumeReplicationUpdate,
Delete: resourceNetappVolumeReplicationDelete,
Importer: &schema.ResourceImporter{
State: resourceNetappVolumeReplicationImport,
},
Timeouts: &schema.ResourceTimeout{
Create: schema.DefaultTimeout(20 * time.Minute),
Update: schema.DefaultTimeout(20 * time.Minute),
Delete: schema.DefaultTimeout(20 * time.Minute),
},
CustomizeDiff: customdiff.All(
tpgresource.SetLabelsDiff,
tpgresource.DefaultProviderProject,
),
Schema: map[string]*schema.Schema{
"location": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
Description: `Name of region for this resource. The resource needs to be created in the region of the destination volume.`,
},
"name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
Description: `The name of the replication. Needs to be unique per location.`,
},
"replication_schedule": {
Type: schema.TypeString,
Required: true,
ValidateFunc: verify.ValidateEnum([]string{"EVERY_10_MINUTES", "HOURLY", "DAILY"}),
Description: `Specifies the replication interval. Possible values: ["EVERY_10_MINUTES", "HOURLY", "DAILY"]`,
},
"volume_name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
Description: `The name of the existing source volume.`,
},
"description": {
Type: schema.TypeString,
Optional: true,
Description: `A description of this resource.`,
},
"destination_volume_parameters": {
Type: schema.TypeList,
Optional: true,
Description: `Destination volume parameters.`,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"storage_pool": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
Description: `Name of an existing storage pool for the destination volume with format: 'projects/{{project}}/locations/{{location}}/storagePools/{{poolId}}'`,
},
"description": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
Description: `Description for the destination volume.`,
},
"share_name": {
Type: schema.TypeString,
Computed: true,
Optional: true,
ForceNew: true,
Description: `Share name for the destination volume. If not specified, the share name of the source volume will be used.`,
},
"volume_id": {
Type: schema.TypeString,
Computed: true,
Optional: true,
ForceNew: true,
Description: `Name for the destination volume to be created. If not specified, the name of the source volume will be used.`,
},
},
},
},
"labels": {
Type: schema.TypeMap,
Optional: true,
Description: `Labels as key value pairs. Example: '{ "owner": "Bob", "department": "finance", "purpose": "testing" }'
**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field 'effective_labels' for all of the labels present on the resource.`,
Elem: &schema.Schema{Type: schema.TypeString},
},
"create_time": {
Type: schema.TypeString,
Computed: true,
Description: `Create time of the replication. A timestamp in RFC3339 UTC "Zulu" format. Examples: "2023-06-22T09:13:01.617Z".`,
},
"destination_volume": {
Type: schema.TypeString,
Computed: true,
Description: `Full resource name of destination volume with format: 'projects/{{project}}/locations/{{location}}/volumes/{{volumeId}}'`,
},
"effective_labels": {
Type: schema.TypeMap,
Computed: true,
Description: `All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`,
Elem: &schema.Schema{Type: schema.TypeString},
},
"healthy": {
Type: schema.TypeBool,
Computed: true,
Description: `Condition of the relationship. Can be one of the following:
- true: The replication relationship is healthy. It has not missed the most recent scheduled transfer.
- false: The replication relationship is not healthy. It has missed the most recent scheduled transfer.`,
},
"mirror_state": {
Type: schema.TypeString,
Computed: true,
Description: `Indicates the state of the mirror between source and destination volumes. Depending on the amount of data
in your source volume, the PREPARING phase can take hours or days. mirrorState = MIRRORED indicates that your baseline
transfer has ended and the destination volume has become accessible read-only. TRANSFERRING means a MIRRORED volume
is currently receiving an update. Updated every 5 minutes.`,
},
"role": {
Type: schema.TypeString,
Computed: true,
Description: `Reverting a replication can swap source and destination volume roles. This field indicates if the 'location' hosts
the source or destination volume. For resume and revert-and-resume operations it is critical to understand
which volume is the source volume, since it will overwrite changes made to the destination volume.`,
},
"source_volume": {
Type: schema.TypeString,
Computed: true,
Description: `Full resource name of source volume with format: 'projects/{{project}}/locations/{{location}}/volumes/{{volumeId}}'`,
},
"state": {
Type: schema.TypeString,
Computed: true,
Description: `Indicates the state of the replication resource. The state of the mirror itself is indicated in mirrorState.`,
},
"state_details": {
Type: schema.TypeString,
Computed: true,
Description: `State details of the replication resource.`,
},
"terraform_labels": {
Type: schema.TypeMap,
Computed: true,
Description: `The combination of labels configured directly on the resource
and default labels configured on the provider.`,
Elem: &schema.Schema{Type: schema.TypeString},
},
"transfer_stats": {
Type: schema.TypeList,
Computed: true,
Description: `Replication transfer statistics. All statistics are updated every 5 minutes.`,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"lag_duration": {
Type: schema.TypeString,
Computed: true,
Description: `The elapsed time since the creation of the snapshot on the source volume that was last replicated
to the destination volume. Lag time represents the difference in age of the destination volume
data in relation to the source volume data.`,
},
"last_transfer_bytes": {
Type: schema.TypeString,
Computed: true,
Description: `Size of last completed transfer in bytes.`,
},
"last_transfer_duration": {
Type: schema.TypeString,
Computed: true,
Description: `Time taken during last completed transfer.`,
},
"last_transfer_end_time": {
Type: schema.TypeString,
Computed: true,
Description: `Time when last transfer completed. A timestamp in RFC3339 UTC "Zulu" format. Examples: "2023-06-22T09:13:01.617Z".`,
},
"last_transfer_error": {
Type: schema.TypeString,
Computed: true,
Description: `A message describing the cause of the last transfer failure.`,
},
"total_transfer_duration": {
Type: schema.TypeString,
Computed: true,
Description: `Total time taken so far during current transfer.`,
},
"transfer_bytes": {
Type: schema.TypeString,
Computed: true,
Description: `Number of bytes transferred so far in current transfer.`,
},
"update_time": {
Type: schema.TypeString,
Computed: true,
Description: `Time when progress was updated last. A timestamp in RFC3339 UTC "Zulu" format. Examples: "2023-06-22T09:13:01.617Z".`,
},
},
},
},
"delete_destination_volume": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: `A destination volume is created as part of replication creation. The destination volume will not become
managed by Terraform unless you import it manually. If you delete the replication, this volume
will remain.
Setting this parameter to true will delete the *current* destination volume when destroying the
replication. If you have reversed the replication direction, this will be your former source volume!
For production use, it is recommended to keep this parameter false to avoid accidental volume
deletion. Handle with care. Default is false.`,
},
"replication_enabled": {
Type: schema.TypeBool,
Optional: true,
Default: true,
Description: `Set to false to stop/break the mirror. Stopping the mirror makes the destination volume read-write
and lets it act independently of the source volume.
Set to true to enable/resume the mirror. WARNING: Resuming a mirror overwrites any changes
made to the destination volume with the content of the source volume.`,
},
"force_stopping": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: `Only replications with mirror_state=MIRRORED can be stopped. A replication in mirror_state=TRANSFERRING
is currently receiving an update and stopping the update might be undesirable. Set this parameter to true
to stop anyway. All data transferred to the destination will be discarded and the content of the destination
volume will remain at the state of the last successful update. Default is false.`,
},
"wait_for_mirror": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: `Replication resource state is independent of mirror_state. With enough data, it can take many hours
for mirror_state to reach MIRRORED. If you want Terraform to wait for the mirror to finish on
create/stop/resume operations, set this parameter to true. Default is false.`,
},
"project": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
},
},
UseJSONNumber: true,
}
}
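// resourceNetappVolumeReplicationCreate builds the request body from the configured fields, creates the replication
// via POST, waits for the long-running operation to complete and, if wait_for_mirror is true, also waits for
// mirrorState to reach MIRRORED.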
func resourceNetappVolumeReplicationCreate(d *schema.ResourceData, meta interface{}) error {
config := meta.(*transport_tpg.Config)
userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
if err != nil {
return err
}
obj := make(map[string]interface{})
replicationScheduleProp, err := expandNetappVolumeReplicationReplicationSchedule(d.Get("replication_schedule"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("replication_schedule"); !tpgresource.IsEmptyValue(reflect.ValueOf(replicationScheduleProp)) && (ok || !reflect.DeepEqual(v, replicationScheduleProp)) {
obj["replicationSchedule"] = replicationScheduleProp
}
destinationVolumeParametersProp, err := expandNetappVolumeReplicationDestinationVolumeParameters(d.Get("destination_volume_parameters"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("destination_volume_parameters"); !tpgresource.IsEmptyValue(reflect.ValueOf(destinationVolumeParametersProp)) && (ok || !reflect.DeepEqual(v, destinationVolumeParametersProp)) {
obj["destinationVolumeParameters"] = destinationVolumeParametersProp
}
descriptionProp, err := expandNetappVolumeReplicationDescription(d.Get("description"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) {
obj["description"] = descriptionProp
}
labelsProp, err := expandNetappVolumeReplicationEffectiveLabels(d.Get("effective_labels"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) {
obj["labels"] = labelsProp
}
url, err := tpgresource.ReplaceVars(d, config, "{{NetappBasePath}}projects/{{project}}/locations/{{location}}/volumes/{{volume_name}}/replications?replicationId={{name}}")
if err != nil {
return err
}
log.Printf("[DEBUG] Creating new VolumeReplication: %#v", obj)
billingProject := ""
project, err := tpgresource.GetProject(d, config)
if err != nil {
return fmt.Errorf("Error fetching project for VolumeReplication: %s", err)
}
billingProject = project
// err == nil indicates that the billing_project value was found
if bp, err := tpgresource.GetBillingProject(d, config); err == nil {
billingProject = bp
}
res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
Config: config,
Method: "POST",
Project: billingProject,
RawURL: url,
UserAgent: userAgent,
Body: obj,
Timeout: d.Timeout(schema.TimeoutCreate),
})
if err != nil {
return fmt.Errorf("Error creating VolumeReplication: %s", err)
}
// Store the ID now
id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/volumes/{{volume_name}}/replications/{{name}}")
if err != nil {
return fmt.Errorf("Error constructing id: %s", err)
}
d.SetId(id)
err = NetappOperationWaitTime(
config, res, project, "Creating VolumeReplication", userAgent,
d.Timeout(schema.TimeoutCreate))
if err != nil {
// The resource didn't actually create
d.SetId("")
return fmt.Errorf("Error waiting to create VolumeReplication: %s", err)
}
if d.Get("wait_for_mirror").(bool) == true {
// Wait for mirrorState=MIRRORED before treating the resource as created
err = NetAppVolumeReplicationWaitForMirror(d, meta, "MIRRORED")
if err != nil {
return fmt.Errorf("Error waiting for volume replication to reach mirror_state==MIRRORED: %s", err)
}
}
log.Printf("[DEBUG] Finished creating VolumeReplication %q: %#v", d.Id(), res)
return resourceNetappVolumeReplicationRead(d, meta)
}
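// resourceNetappVolumeReplicationRead fetches the replication via GET, sets defaults for unset virtual
// (Terraform-only) fields and maps the API response fields into state.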
func resourceNetappVolumeReplicationRead(d *schema.ResourceData, meta interface{}) error {
config := meta.(*transport_tpg.Config)
userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
if err != nil {
return err
}
url, err := tpgresource.ReplaceVars(d, config, "{{NetappBasePath}}projects/{{project}}/locations/{{location}}/volumes/{{volume_name}}/replications/{{name}}")
if err != nil {
return err
}
billingProject := ""
project, err := tpgresource.GetProject(d, config)
if err != nil {
return fmt.Errorf("Error fetching project for VolumeReplication: %s", err)
}
billingProject = project
// err == nil indicates that the billing_project value was found
if bp, err := tpgresource.GetBillingProject(d, config); err == nil {
billingProject = bp
}
res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
Config: config,
Method: "GET",
Project: billingProject,
RawURL: url,
UserAgent: userAgent,
})
if err != nil {
return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("NetappVolumeReplication %q", d.Id()))
}
// Explicitly set virtual fields to default values if unset
if _, ok := d.GetOkExists("delete_destination_volume"); !ok {
if err := d.Set("delete_destination_volume", false); err != nil {
return fmt.Errorf("Error setting delete_destination_volume: %s", err)
}
}
if _, ok := d.GetOkExists("replication_enabled"); !ok {
if err := d.Set("replication_enabled", true); err != nil {
return fmt.Errorf("Error setting replication_enabled: %s", err)
}
}
if _, ok := d.GetOkExists("force_stopping"); !ok {
if err := d.Set("force_stopping", false); err != nil {
return fmt.Errorf("Error setting force_stopping: %s", err)
}
}
if _, ok := d.GetOkExists("wait_for_mirror"); !ok {
if err := d.Set("wait_for_mirror", false); err != nil {
return fmt.Errorf("Error setting wait_for_mirror: %s", err)
}
}
if err := d.Set("project", project); err != nil {
return fmt.Errorf("Error reading VolumeReplication: %s", err)
}
if err := d.Set("state", flattenNetappVolumeReplicationState(res["state"], d, config)); err != nil {
return fmt.Errorf("Error reading VolumeReplication: %s", err)
}
if err := d.Set("state_details", flattenNetappVolumeReplicationStateDetails(res["stateDetails"], d, config)); err != nil {
return fmt.Errorf("Error reading VolumeReplication: %s", err)
}
if err := d.Set("role", flattenNetappVolumeReplicationRole(res["role"], d, config)); err != nil {
return fmt.Errorf("Error reading VolumeReplication: %s", err)
}
if err := d.Set("replication_schedule", flattenNetappVolumeReplicationReplicationSchedule(res["replicationSchedule"], d, config)); err != nil {
return fmt.Errorf("Error reading VolumeReplication: %s", err)
}
if err := d.Set("mirror_state", flattenNetappVolumeReplicationMirrorState(res["mirrorState"], d, config)); err != nil {
return fmt.Errorf("Error reading VolumeReplication: %s", err)
}
if err := d.Set("create_time", flattenNetappVolumeReplicationCreateTime(res["createTime"], d, config)); err != nil {
return fmt.Errorf("Error reading VolumeReplication: %s", err)
}
if err := d.Set("destination_volume", flattenNetappVolumeReplicationDestinationVolume(res["destinationVolume"], d, config)); err != nil {
return fmt.Errorf("Error reading VolumeReplication: %s", err)
}
if err := d.Set("transfer_stats", flattenNetappVolumeReplicationTransferStats(res["transferStats"], d, config)); err != nil {
return fmt.Errorf("Error reading VolumeReplication: %s", err)
}
if err := d.Set("labels", flattenNetappVolumeReplicationLabels(res["labels"], d, config)); err != nil {
return fmt.Errorf("Error reading VolumeReplication: %s", err)
}
if err := d.Set("source_volume", flattenNetappVolumeReplicationSourceVolume(res["sourceVolume"], d, config)); err != nil {
return fmt.Errorf("Error reading VolumeReplication: %s", err)
}
if err := d.Set("healthy", flattenNetappVolumeReplicationHealthy(res["healthy"], d, config)); err != nil {
return fmt.Errorf("Error reading VolumeReplication: %s", err)
}
if err := d.Set("description", flattenNetappVolumeReplicationDescription(res["description"], d, config)); err != nil {
return fmt.Errorf("Error reading VolumeReplication: %s", err)
}
if err := d.Set("terraform_labels", flattenNetappVolumeReplicationTerraformLabels(res["labels"], d, config)); err != nil {
return fmt.Errorf("Error reading VolumeReplication: %s", err)
}
if err := d.Set("effective_labels", flattenNetappVolumeReplicationEffectiveLabels(res["labels"], d, config)); err != nil {
return fmt.Errorf("Error reading VolumeReplication: %s", err)
}
return nil
}
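// resourceNetappVolumeReplicationUpdate sends a PATCH for changed fields and then, depending on replication_enabled
// and the current mirror_state, issues a :stop or :resume action against the replication.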
func resourceNetappVolumeReplicationUpdate(d *schema.ResourceData, meta interface{}) error {
config := meta.(*transport_tpg.Config)
userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
if err != nil {
return err
}
billingProject := ""
project, err := tpgresource.GetProject(d, config)
if err != nil {
return fmt.Errorf("Error fetching project for VolumeReplication: %s", err)
}
billingProject = project
obj := make(map[string]interface{})
replicationScheduleProp, err := expandNetappVolumeReplicationReplicationSchedule(d.Get("replication_schedule"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("replication_schedule"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, replicationScheduleProp)) {
obj["replicationSchedule"] = replicationScheduleProp
}
destinationVolumeParametersProp, err := expandNetappVolumeReplicationDestinationVolumeParameters(d.Get("destination_volume_parameters"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("destination_volume_parameters"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, destinationVolumeParametersProp)) {
obj["destinationVolumeParameters"] = destinationVolumeParametersProp
}
descriptionProp, err := expandNetappVolumeReplicationDescription(d.Get("description"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) {
obj["description"] = descriptionProp
}
labelsProp, err := expandNetappVolumeReplicationEffectiveLabels(d.Get("effective_labels"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) {
obj["labels"] = labelsProp
}
url, err := tpgresource.ReplaceVars(d, config, "{{NetappBasePath}}projects/{{project}}/locations/{{location}}/volumes/{{volume_name}}/replications/{{name}}")
if err != nil {
return err
}
log.Printf("[DEBUG] Updating VolumeReplication %q: %#v", d.Id(), obj)
updateMask := []string{}
if d.HasChange("replication_schedule") {
updateMask = append(updateMask, "replicationSchedule")
}
if d.HasChange("destination_volume_parameters") {
updateMask = append(updateMask, "destinationVolumeParameters")
}
if d.HasChange("description") {
updateMask = append(updateMask, "description")
}
if d.HasChange("effective_labels") {
updateMask = append(updateMask, "labels")
}
// updateMask is a URL parameter but not present in the schema, so ReplaceVars
// won't set it
url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")})
if err != nil {
return err
}
// err == nil indicates that the billing_project value was found
if bp, err := tpgresource.GetBillingProject(d, config); err == nil {
billingProject = bp
}
// if updateMask is empty we are not updating anything so skip the post
if len(updateMask) > 0 {
res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
Config: config,
Method: "PATCH",
Project: billingProject,
RawURL: url,
UserAgent: userAgent,
Body: obj,
Timeout: d.Timeout(schema.TimeoutUpdate),
})
if err != nil {
return fmt.Errorf("Error updating VolumeReplication %q: %s", d.Id(), err)
} else {
log.Printf("[DEBUG] Finished updating VolumeReplication %q: %#v", d.Id(), res)
}
err = NetappOperationWaitTime(
config, res, project, "Updating VolumeReplication", userAgent,
d.Timeout(schema.TimeoutUpdate))
if err != nil {
return err
}
}
// Manage stopping and resuming a mirror
var obj2 map[string]interface{}
do_change := false
var action string
var targetState string
// state transitions
// There can be a glitch if a transfer starts/ends between reading mirrorState
// and sending the action. This will be very rare. No workaround.
if d.Get("replication_enabled").(bool) == true {
switch d.Get("mirror_state").(string) {
case "STOPPED":
// replication_enabled==true, mirrorState==STOPPED -> resume
action = "resume"
targetState = "MIRRORED"
do_change = true
default:
// replication_enabled==true, mirrorState!=STOPPED -> NOOP
do_change = false
}
} else {
switch d.Get("mirror_state").(string) {
case "MIRRORED":
// replication_enabled==false, mirrorState==MIRRORED -> stop
action = "stop"
targetState = "STOPPED"
do_change = true
case "TRANSFERRING":
// replication_enabled==false, mirrorState==TRANSFERRING -> force stop
// The user needs to set force_stopping = true, otherwise they will receive an error
action = "stop"
targetState = "STOPPED"
do_change = true
case "PREPARING":
// replication_enabled==false, mirrorState==PREPARING -> stop
// This currently cannot be stopped. The user will receive the following error:
// Error code 3, message: invalid request error: "Replication in preparing state. Please wait until replication is in 'READY' STATE and try again later.".
// The user needs to wait until mirrorState=MIRRORED
action = "stop"
targetState = "STOPPED"
do_change = true
default:
// replication_enabled==false, mirrorState==STOPPED -> NOOP
do_change = false
}
if do_change == true && d.Get("force_stopping").(bool) == true {
obj2 = map[string]interface{}{
"force": true,
}
}
}
if do_change {
// We need to send STOP/RESUME API calls
rawurl, err := tpgresource.ReplaceVars(d, config, "{{NetappBasePath}}projects/{{project}}/locations/{{location}}/volumes/{{volume_name}}/replications/{{name}}:"+action)
if err != nil {
return err
}
res2, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
Config: config,
Method: "POST",
Project: billingProject,
RawURL: rawurl,
UserAgent: userAgent,
Body: obj2,
Timeout: d.Timeout(schema.TimeoutUpdate),
})
if err != nil {
return fmt.Errorf("Error stopping/resuming replication %q: %s", d.Id(), err)
}
err = NetappOperationWaitTime(
config, res2, project, "volume replication "+action, userAgent,
d.Timeout(schema.TimeoutDelete))
if err != nil {
return err
}
// If user specified to wait for mirror operations, wait to reach target state
if d.Get("wait_for_mirror").(bool) == true {
err = NetAppVolumeReplicationWaitForMirror(d, meta, targetState)
if err != nil {
return fmt.Errorf("Error waiting for volume replication to reach mirror_state==%s: %s", targetState, err)
}
}
}
return resourceNetappVolumeReplicationRead(d, meta)
}
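// resourceNetappVolumeReplicationDelete force-stops the mirror if it is not already STOPPED, deletes the replication
// and, if delete_destination_volume is true, also deletes the destination volume.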
func resourceNetappVolumeReplicationDelete(d *schema.ResourceData, meta interface{}) error {
config := meta.(*transport_tpg.Config)
userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
if err != nil {
return err
}
billingProject := ""
project, err := tpgresource.GetProject(d, config)
if err != nil {
return fmt.Errorf("Error fetching project for VolumeReplication: %s", err)
}
billingProject = project
url, err := tpgresource.ReplaceVars(d, config, "{{NetappBasePath}}projects/{{project}}/locations/{{location}}/volumes/{{volume_name}}/replications/{{name}}")
if err != nil {
return err
}
var obj map[string]interface{}
// A replication can only be deleted if mirrorState==STOPPED.
// We are about to delete the replication and need to stop the mirror first.
// FYI: Stopping a PREPARING mirror currently doesn't work. Users have to wait until
// the mirror reaches MIRRORED.
if d.Get("mirror_state") != "STOPPED" {
rawurl, err := tpgresource.ReplaceVars(d, config, "{{NetappBasePath}}projects/{{project}}/locations/{{location}}/volumes/{{volume_name}}/replications/{{name}}:stop")
if err != nil {
return err
}
reso, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
Config: config,
Method: "POST",
Project: billingProject,
RawURL: rawurl,
UserAgent: userAgent,
// We delete anyway, so let's always use force stop
Body: map[string]interface{}{
"force": true,
},
Timeout: d.Timeout(schema.TimeoutUpdate),
})
if err != nil {
return fmt.Errorf("Error stopping volume replication %q before deleting it: %s", d.Id(), err)
}
err = NetappOperationWaitTime(
config, reso, project, "Deleting volume replication", userAgent,
d.Timeout(schema.TimeoutDelete))
if err != nil {
return err
}
}
log.Printf("[DEBUG] Deleting VolumeReplication %q", d.Id())
// err == nil indicates that the billing_project value was found
if bp, err := tpgresource.GetBillingProject(d, config); err == nil {
billingProject = bp
}
res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
Config: config,
Method: "DELETE",
Project: billingProject,
RawURL: url,
UserAgent: userAgent,
Body: obj,
Timeout: d.Timeout(schema.TimeoutDelete),
})
if err != nil {
return transport_tpg.HandleNotFoundError(err, d, "VolumeReplication")
}
err = NetappOperationWaitTime(
config, res, project, "Deleting VolumeReplication", userAgent,
d.Timeout(schema.TimeoutDelete))
if err != nil {
return err
}
// A replication CREATE also creates a destination volume.
// A user can choose to delete the destination volume after deleting the replication.
if d.Get("delete_destination_volume").(bool) == true {
log.Printf("[DEBUG] delete_destination_volume is true. Deleting destination volume %v", d.Get("destination_volume"))
destination_volume := d.Get("destination_volume").(string)
del_url, err := tpgresource.ReplaceVars(d, config, "{{NetappBasePath}}"+destination_volume+"?force=true")
if err != nil {
return err
}
var obj map[string]interface{}
res_del, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
Config: config,
Method: "DELETE",
Project: billingProject,
RawURL: del_url,
UserAgent: userAgent,
Body: obj,
Timeout: d.Timeout(schema.TimeoutDelete),
})
if err != nil {
return transport_tpg.HandleNotFoundError(err, d, "Volume")
}
err = NetappOperationWaitTime(
config, res_del, project, "Deleting destination volume", userAgent,
d.Timeout(schema.TimeoutDelete))
if err != nil {
return err
}
log.Printf("[DEBUG] Finished deleting destination Volume %q: %#v", destination_volume, res_del)
}
log.Printf("[DEBUG] Finished deleting VolumeReplication %q: %#v", d.Id(), res)
return nil
}
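// resourceNetappVolumeReplicationImport parses the supported import ID formats and sets the virtual fields to their
// defaults, since their values cannot be read from the API.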
func resourceNetappVolumeReplicationImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
config := meta.(*transport_tpg.Config)
if err := tpgresource.ParseImportId([]string{
"^projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/volumes/(?P<volume_name>[^/]+)/replications/(?P<name>[^/]+)$",
"^(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<volume_name>[^/]+)/(?P<name>[^/]+)$",
"^(?P<location>[^/]+)/(?P<volume_name>[^/]+)/(?P<name>[^/]+)$",
}, d, config); err != nil {
return nil, err
}
// Replace the import id with the resource id
id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/volumes/{{volume_name}}/replications/{{name}}")
if err != nil {
return nil, fmt.Errorf("Error constructing id: %s", err)
}
d.SetId(id)
// Explicitly set virtual fields to default values on import
if err := d.Set("delete_destination_volume", false); err != nil {
return nil, fmt.Errorf("Error setting delete_destination_volume: %s", err)
}
if err := d.Set("replication_enabled", true); err != nil {
return nil, fmt.Errorf("Error setting replication_enabled: %s", err)
}
if err := d.Set("force_stopping", false); err != nil {
return nil, fmt.Errorf("Error setting force_stopping: %s", err)
}
if err := d.Set("wait_for_mirror", false); err != nil {
return nil, fmt.Errorf("Error setting wait_for_mirror: %s", err)
}
return []*schema.ResourceData{d}, nil
}
func flattenNetappVolumeReplicationState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
return v
}
func flattenNetappVolumeReplicationStateDetails(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
return v
}
func flattenNetappVolumeReplicationRole(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
return v
}
func flattenNetappVolumeReplicationReplicationSchedule(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
return v
}
func flattenNetappVolumeReplicationMirrorState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
// The actual state of replication_enabled depends on mirrorState, so let's update it.
// This is to pick up manual user STOP/RESUME operations on the replication.
if v == nil {
return v
}
if v.(string) == "STOPPED" {
if err := d.Set("replication_enabled", false); err != nil {
return fmt.Errorf("Error setting replication_enabled: %s", err)
}
} else {
if err := d.Set("replication_enabled", true); err != nil {
return fmt.Errorf("Error setting replication_enabled: %s", err)
}
}
log.Printf("[DEBUG] value of replication_state : %v", d.Get("replication_enabled"))
return v
}
func flattenNetappVolumeReplicationCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
return v
}
func flattenNetappVolumeReplicationDestinationVolume(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
return v
}
func flattenNetappVolumeReplicationTransferStats(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
if v == nil {
return nil
}
original := v.(map[string]interface{})
if len(original) == 0 {
return nil
}
transformed := make(map[string]interface{})
transformed["transfer_bytes"] =
flattenNetappVolumeReplicationTransferStatsTransferBytes(original["transferBytes"], d, config)
transformed["total_transfer_duration"] =
flattenNetappVolumeReplicationTransferStatsTotalTransferDuration(original["totalTransferDuration"], d, config)
transformed["last_transfer_bytes"] =
flattenNetappVolumeReplicationTransferStatsLastTransferBytes(original["lastTransferBytes"], d, config)
transformed["last_transfer_duration"] =
flattenNetappVolumeReplicationTransferStatsLastTransferDuration(original["lastTransferDuration"], d, config)
transformed["lag_duration"] =
flattenNetappVolumeReplicationTransferStatsLagDuration(original["lagDuration"], d, config)
transformed["update_time"] =
flattenNetappVolumeReplicationTransferStatsUpdateTime(original["updateTime"], d, config)
transformed["last_transfer_end_time"] =
flattenNetappVolumeReplicationTransferStatsLastTransferEndTime(original["lastTransferEndTime"], d, config)
transformed["last_transfer_error"] =
flattenNetappVolumeReplicationTransferStatsLastTransferError(original["lastTransferError"], d, config)
return []interface{}{transformed}
}
func flattenNetappVolumeReplicationTransferStatsTransferBytes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
return v
}
func flattenNetappVolumeReplicationTransferStatsTotalTransferDuration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
return v
}
func flattenNetappVolumeReplicationTransferStatsLastTransferBytes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
return v
}
func flattenNetappVolumeReplicationTransferStatsLastTransferDuration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
return v
}
func flattenNetappVolumeReplicationTransferStatsLagDuration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
return v
}
func flattenNetappVolumeReplicationTransferStatsUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
return v
}
func flattenNetappVolumeReplicationTransferStatsLastTransferEndTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
return v
}
func flattenNetappVolumeReplicationTransferStatsLastTransferError(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
return v
}
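// flattenNetappVolumeReplicationLabels returns only the label keys that are present in the user's configuration;
// all labels returned by the API are exposed through effective_labels instead.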
func flattenNetappVolumeReplicationLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
if v == nil {
return v
}
transformed := make(map[string]interface{})
if l, ok := d.GetOkExists("labels"); ok {
for k := range l.(map[string]interface{}) {
transformed[k] = v.(map[string]interface{})[k]
}
}
return transformed
}
func flattenNetappVolumeReplicationSourceVolume(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
return v
}
func flattenNetappVolumeReplicationHealthy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
return v
}
func flattenNetappVolumeReplicationDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
return v
}
func flattenNetappVolumeReplicationTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
if v == nil {
return v
}
transformed := make(map[string]interface{})
if l, ok := d.GetOkExists("terraform_labels"); ok {
for k := range l.(map[string]interface{}) {
transformed[k] = v.(map[string]interface{})[k]
}
}
return transformed
}
func flattenNetappVolumeReplicationEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
return v
}
func expandNetappVolumeReplicationReplicationSchedule(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
return v, nil
}
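// expandNetappVolumeReplicationDestinationVolumeParameters converts the single-element destination_volume_parameters
// block into the API request shape, omitting empty fields.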
func expandNetappVolumeReplicationDestinationVolumeParameters(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
l := v.([]interface{})
if len(l) == 0 || l[0] == nil {
return nil, nil
}
raw := l[0]
original := raw.(map[string]interface{})
transformed := make(map[string]interface{})
transformedStoragePool, err := expandNetappVolumeReplicationDestinationVolumeParametersStoragePool(original["storage_pool"], d, config)
if err != nil {
return nil, err
} else if val := reflect.ValueOf(transformedStoragePool); val.IsValid() && !tpgresource.IsEmptyValue(val) {
transformed["storagePool"] = transformedStoragePool
}
transformedVolumeId, err := expandNetappVolumeReplicationDestinationVolumeParametersVolumeId(original["volume_id"], d, config)
if err != nil {
return nil, err
} else if val := reflect.ValueOf(transformedVolumeId); val.IsValid() && !tpgresource.IsEmptyValue(val) {
transformed["volumeId"] = transformedVolumeId
}
transformedShareName, err := expandNetappVolumeReplicationDestinationVolumeParametersShareName(original["share_name"], d, config)
if err != nil {
return nil, err
} else if val := reflect.ValueOf(transformedShareName); val.IsValid() && !tpgresource.IsEmptyValue(val) {
transformed["shareName"] = transformedShareName
}
transformedDescription, err := expandNetappVolumeReplicationDestinationVolumeParametersDescription(original["description"], d, config)
if err != nil {
return nil, err
} else if val := reflect.ValueOf(transformedDescription); val.IsValid() && !tpgresource.IsEmptyValue(val) {
transformed["description"] = transformedDescription
}
return transformed, nil
}
func expandNetappVolumeReplicationDestinationVolumeParametersStoragePool(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
return v, nil
}
func expandNetappVolumeReplicationDestinationVolumeParametersVolumeId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
return v, nil
}
func expandNetappVolumeReplicationDestinationVolumeParametersShareName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
return v, nil
}
func expandNetappVolumeReplicationDestinationVolumeParametersDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
return v, nil
}
func expandNetappVolumeReplicationDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
return v, nil
}
func expandNetappVolumeReplicationEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) {
if v == nil {
return map[string]string{}, nil
}
m := make(map[string]string)
for k, val := range v.(map[string]interface{}) {
m[k] = val.(string)
}
return m, nil
}