| // Copyright (c) HashiCorp, Inc. |
| // SPDX-License-Identifier: MPL-2.0 |
| |
| // ---------------------------------------------------------------------------- |
| // |
| // *** AUTO GENERATED CODE *** Type: MMv1 *** |
| // |
| // ---------------------------------------------------------------------------- |
| // |
| // This file is automatically generated by Magic Modules and manual |
| // changes will be clobbered when the file is regenerated. |
| // |
| // Please read more about how to change this file in |
| // .github/CONTRIBUTING.md. |
| // |
| // ---------------------------------------------------------------------------- |
| |
| package containerattached |
| |
| import ( |
| "fmt" |
| "log" |
| "net/http" |
| "reflect" |
| "strings" |
| "time" |
| |
| "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" |
| "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" |
| |
| "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" |
| transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" |
| "github.com/hashicorp/terraform-provider-google-beta/google-beta/verify" |
| ) |
| |
| func suppressAttachedClustersLoggingConfigDiff(_, old, new string, d *schema.ResourceData) bool { |
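	// Suppress the diff when the old and new values are equal, or when no logging
	// components are configured, since the API returns a default (empty) logging
	// configuration that is equivalent to an omitted block.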
| if old == new { |
| return true |
| } |
| _, n := d.GetChange("logging_config.0.component_config.0.enable_components") |
| if tpgresource.IsEmptyValue(reflect.ValueOf(n)) { |
| return true |
| } |
| return false |
| } |
| |
| func ResourceContainerAttachedCluster() *schema.Resource { |
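	// A minimal configuration sketch derived from the required fields below
	// (illustrative, unvalidated values only):
	//
	//	resource "google_container_attached_cluster" "example" {
	//	  name             = "my-attached-cluster"
	//	  location         = "us-west1"
	//	  distribution     = "eks"
	//	  platform_version = "1.23.0-gke.1"
	//	  fleet {
	//	    project = "projects/123456789"
	//	  }
	//	  oidc_config {
	//	    issuer_url = "https://example.com/oidc"
	//	  }
	//	}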
| return &schema.Resource{ |
| Create: resourceContainerAttachedClusterCreate, |
| Read: resourceContainerAttachedClusterRead, |
| Update: resourceContainerAttachedClusterUpdate, |
| Delete: resourceContainerAttachedClusterDelete, |
| |
| Importer: &schema.ResourceImporter{ |
| State: resourceContainerAttachedClusterImport, |
| }, |
| |
| Timeouts: &schema.ResourceTimeout{ |
| Create: schema.DefaultTimeout(20 * time.Minute), |
| Update: schema.DefaultTimeout(20 * time.Minute), |
| Delete: schema.DefaultTimeout(20 * time.Minute), |
| }, |
| |
| CustomizeDiff: customdiff.All( |
| tpgresource.SetAnnotationsDiff, |
| tpgresource.DefaultProviderProject, |
| ), |
| |
| Schema: map[string]*schema.Schema{ |
| "distribution": { |
| Type: schema.TypeString, |
| Required: true, |
| ForceNew: true, |
				Description: `The Kubernetes distribution of the underlying attached cluster. Supported values:
"eks", "aks", "generic". The generic distribution provides the ability to register
or migrate any CNCF-conformant cluster.`,
| }, |
| "fleet": { |
| Type: schema.TypeList, |
| Required: true, |
| Description: `Fleet configuration.`, |
| MaxItems: 1, |
| Elem: &schema.Resource{ |
| Schema: map[string]*schema.Schema{ |
| "project": { |
| Type: schema.TypeString, |
| Required: true, |
| ForceNew: true, |
| ValidateFunc: verify.ValidateRegexp(`^projects/[0-9]+$`), |
							Description: `The Fleet host project where this cluster will be registered, in the format 'projects/PROJECT_NUMBER'.`,
| }, |
| "membership": { |
| Type: schema.TypeString, |
| Computed: true, |
							Description: `The name of the managed Hub Membership resource associated with this
cluster. Membership names are formatted as
projects/<project-number>/locations/global/membership/<cluster-id>.`,
| }, |
| }, |
| }, |
| }, |
| "location": { |
| Type: schema.TypeString, |
| Required: true, |
| ForceNew: true, |
				Description: `The location for the resource.`,
| }, |
| "name": { |
| Type: schema.TypeString, |
| Required: true, |
| ForceNew: true, |
| Description: `The name of this resource.`, |
| }, |
| "oidc_config": { |
| Type: schema.TypeList, |
| Required: true, |
| Description: `OIDC discovery information of the target cluster. |
| |
Kubernetes Service Account (KSA) tokens are JWT tokens signed by the cluster
API server. This field indicates how GCP services
validate KSA tokens in order to allow system workloads (such as GKE Connect
and telemetry agents) to authenticate back to GCP.

Clusters with both public and private issuer URLs are supported.
Clusters with public issuers only need to specify the 'issuer_url' field
while clusters with private issuers need to provide both
'issuer_url' and 'jwks'.`,
| MaxItems: 1, |
| Elem: &schema.Resource{ |
| Schema: map[string]*schema.Schema{ |
| "issuer_url": { |
| Type: schema.TypeString, |
| Required: true, |
| ForceNew: true, |
							Description: `A JSON Web Token (JWT) issuer URI. 'issuer' must start with 'https://'.`,
| }, |
| "jwks": { |
| Type: schema.TypeString, |
| Optional: true, |
| ForceNew: true, |
| Description: `OIDC verification keys in JWKS format (RFC 7517).`, |
| }, |
| }, |
| }, |
| }, |
| "platform_version": { |
| Type: schema.TypeString, |
| Required: true, |
| Description: `The platform version for the cluster (e.g. '1.23.0-gke.1').`, |
| }, |
| "annotations": { |
| Type: schema.TypeMap, |
| Optional: true, |
				Description: `Optional. Annotations on the cluster. This field has the same
restrictions as Kubernetes annotations. The total size of all keys and
values combined is limited to 256k. A key can have 2 segments: prefix (optional)
and name (required), separated by a slash (/). The prefix must be a DNS subdomain.
The name must be 63 characters or less, begin and end with alphanumerics,
with dashes (-), underscores (_), dots (.), and alphanumerics between.

**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.
Please refer to the field 'effective_annotations' for all of the annotations present on the resource.`,
| Elem: &schema.Schema{Type: schema.TypeString}, |
| }, |
| "authorization": { |
| Type: schema.TypeList, |
| Optional: true, |
| Description: `Configuration related to the cluster RBAC settings.`, |
| MaxItems: 1, |
| Elem: &schema.Resource{ |
| Schema: map[string]*schema.Schema{ |
| "admin_groups": { |
| Type: schema.TypeList, |
| Optional: true, |
| Description: `Groups that can perform operations as a cluster admin. A managed |
| ClusterRoleBinding will be created to grant the 'cluster-admin' ClusterRole |
| to the groups. Up to ten admin groups can be provided. |
| |
| For more info on RBAC, see |
| https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles`, |
| Elem: &schema.Schema{ |
| Type: schema.TypeString, |
| }, |
| }, |
| "admin_users": { |
| Type: schema.TypeList, |
| Optional: true, |
| Description: `Users that can perform operations as a cluster admin. A managed |
| ClusterRoleBinding will be created to grant the 'cluster-admin' ClusterRole |
| to the users. Up to ten admin users can be provided. |
| |
| For more info on RBAC, see |
| https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles`, |
| Elem: &schema.Schema{ |
| Type: schema.TypeString, |
| }, |
| }, |
| }, |
| }, |
| }, |
| "binary_authorization": { |
| Type: schema.TypeList, |
| Computed: true, |
| Optional: true, |
| Description: `Binary Authorization configuration.`, |
| MaxItems: 1, |
| Elem: &schema.Resource{ |
| Schema: map[string]*schema.Schema{ |
| "evaluation_mode": { |
| Type: schema.TypeString, |
| Optional: true, |
| ValidateFunc: verify.ValidateEnum([]string{"DISABLED", "PROJECT_SINGLETON_POLICY_ENFORCE", ""}), |
| Description: `Configure Binary Authorization evaluation mode. Possible values: ["DISABLED", "PROJECT_SINGLETON_POLICY_ENFORCE"]`, |
| }, |
| }, |
| }, |
| }, |
| "description": { |
| Type: schema.TypeString, |
| Optional: true, |
				Description: `A human-readable description of this attached cluster. Cannot be longer
than 255 UTF-8 encoded bytes.`,
| }, |
| "logging_config": { |
| Type: schema.TypeList, |
| Optional: true, |
| DiffSuppressFunc: suppressAttachedClustersLoggingConfigDiff, |
| Description: `Logging configuration.`, |
| MaxItems: 1, |
| Elem: &schema.Resource{ |
| Schema: map[string]*schema.Schema{ |
| "component_config": { |
| Type: schema.TypeList, |
| Optional: true, |
							Description: `The configuration of the logging components.`,
| MaxItems: 1, |
| Elem: &schema.Resource{ |
| Schema: map[string]*schema.Schema{ |
| "enable_components": { |
| Type: schema.TypeList, |
| Optional: true, |
| Description: `The components to be enabled. Possible values: ["SYSTEM_COMPONENTS", "WORKLOADS"]`, |
| Elem: &schema.Schema{ |
| Type: schema.TypeString, |
| ValidateFunc: verify.ValidateEnum([]string{"SYSTEM_COMPONENTS", "WORKLOADS"}), |
| }, |
| }, |
| }, |
| }, |
| }, |
| }, |
| }, |
| }, |
| "monitoring_config": { |
| Type: schema.TypeList, |
| Computed: true, |
| Optional: true, |
| Description: `Monitoring configuration.`, |
| MaxItems: 1, |
| Elem: &schema.Resource{ |
| Schema: map[string]*schema.Schema{ |
| "managed_prometheus_config": { |
| Type: schema.TypeList, |
| Optional: true, |
| Description: `Enable Google Cloud Managed Service for Prometheus in the cluster.`, |
| MaxItems: 1, |
| Elem: &schema.Resource{ |
| Schema: map[string]*schema.Schema{ |
| "enabled": { |
| Type: schema.TypeBool, |
| Optional: true, |
| Description: `Enable Managed Collection.`, |
| }, |
| }, |
| }, |
| }, |
| }, |
| }, |
| }, |
| "proxy_config": { |
| Type: schema.TypeList, |
| Optional: true, |
| Description: `Support for proxy configuration.`, |
| MaxItems: 1, |
| Elem: &schema.Resource{ |
| Schema: map[string]*schema.Schema{ |
| "kubernetes_secret": { |
| Type: schema.TypeList, |
| Optional: true, |
| Description: `The Kubernetes Secret resource that contains the HTTP(S) proxy configuration.`, |
| MaxItems: 1, |
| Elem: &schema.Resource{ |
| Schema: map[string]*schema.Schema{ |
| "name": { |
| Type: schema.TypeString, |
| Required: true, |
| Description: `Name of the kubernetes secret containing the proxy config.`, |
| }, |
| "namespace": { |
| Type: schema.TypeString, |
| Required: true, |
| Description: `Namespace of the kubernetes secret containing the proxy config.`, |
| }, |
| }, |
| }, |
| }, |
| }, |
| }, |
| }, |
| "cluster_region": { |
| Type: schema.TypeString, |
| Computed: true, |
| Description: `Output only. The region where this cluster runs. |
| |
| For EKS clusters, this is an AWS region. For AKS clusters, |
| this is an Azure region.`, |
| }, |
| "create_time": { |
| Type: schema.TypeString, |
| Computed: true, |
| Description: `Output only. The time at which this cluster was created.`, |
| }, |
| "effective_annotations": { |
| Type: schema.TypeMap, |
| Computed: true, |
				Description: `All annotations (key/value pairs) present on the resource in GCP, including the annotations configured through Terraform, other clients, and services.`,
| Elem: &schema.Schema{Type: schema.TypeString}, |
| }, |
| "errors": { |
| Type: schema.TypeList, |
| Computed: true, |
| Description: `A set of errors found in the cluster.`, |
| Elem: &schema.Resource{ |
| Schema: map[string]*schema.Schema{ |
| "message": { |
| Type: schema.TypeString, |
| Optional: true, |
| Description: `Human-friendly description of the error.`, |
| }, |
| }, |
| }, |
| }, |
| "kubernetes_version": { |
| Type: schema.TypeString, |
| Computed: true, |
| Description: `The Kubernetes version of the cluster.`, |
| }, |
| "reconciling": { |
| Type: schema.TypeBool, |
| Computed: true, |
| Description: `If set, there are currently changes in flight to the cluster.`, |
| }, |
| "state": { |
| Type: schema.TypeString, |
| Computed: true, |
| Description: `The current state of the cluster. Possible values: |
| STATE_UNSPECIFIED, PROVISIONING, RUNNING, RECONCILING, STOPPING, ERROR, |
| DEGRADED`, |
| }, |
| "uid": { |
| Type: schema.TypeString, |
| Computed: true, |
| Description: `A globally unique identifier for the cluster.`, |
| }, |
| "update_time": { |
| Type: schema.TypeString, |
| Computed: true, |
| Description: `The time at which this cluster was last updated.`, |
| }, |
| "workload_identity_config": { |
| Type: schema.TypeList, |
| Computed: true, |
| Description: `Workload Identity settings.`, |
| Elem: &schema.Resource{ |
| Schema: map[string]*schema.Schema{ |
| "identity_provider": { |
| Type: schema.TypeString, |
| Optional: true, |
							Description: `The ID of the OIDC Identity Provider (IdP) associated with
the Workload Identity Pool.`,
| }, |
| "issuer_uri": { |
| Type: schema.TypeString, |
| Optional: true, |
| Description: `The OIDC issuer URL for this cluster.`, |
| }, |
| "workload_pool": { |
| Type: schema.TypeString, |
| Optional: true, |
							Description: `The Workload Identity Pool associated with the cluster.`,
| }, |
| }, |
| }, |
| }, |
| "deletion_policy": { |
| Type: schema.TypeString, |
| Optional: true, |
| Description: `Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS`, |
| Default: "DELETE", |
| }, |
| "project": { |
| Type: schema.TypeString, |
| Optional: true, |
| Computed: true, |
| ForceNew: true, |
| }, |
| }, |
| UseJSONNumber: true, |
| } |
| } |
| |
| func resourceContainerAttachedClusterCreate(d *schema.ResourceData, meta interface{}) error { |
| config := meta.(*transport_tpg.Config) |
| userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) |
| if err != nil { |
| return err |
| } |
| |
| obj := make(map[string]interface{}) |
| nameProp, err := expandContainerAttachedClusterName(d.Get("name"), d, config) |
| if err != nil { |
| return err |
| } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { |
| obj["name"] = nameProp |
| } |
| descriptionProp, err := expandContainerAttachedClusterDescription(d.Get("description"), d, config) |
| if err != nil { |
| return err |
| } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { |
| obj["description"] = descriptionProp |
| } |
| oidcConfigProp, err := expandContainerAttachedClusterOidcConfig(d.Get("oidc_config"), d, config) |
| if err != nil { |
| return err |
| } else if v, ok := d.GetOkExists("oidc_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(oidcConfigProp)) && (ok || !reflect.DeepEqual(v, oidcConfigProp)) { |
| obj["oidcConfig"] = oidcConfigProp |
| } |
| platformVersionProp, err := expandContainerAttachedClusterPlatformVersion(d.Get("platform_version"), d, config) |
| if err != nil { |
| return err |
| } else if v, ok := d.GetOkExists("platform_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(platformVersionProp)) && (ok || !reflect.DeepEqual(v, platformVersionProp)) { |
| obj["platformVersion"] = platformVersionProp |
| } |
| distributionProp, err := expandContainerAttachedClusterDistribution(d.Get("distribution"), d, config) |
| if err != nil { |
| return err |
| } else if v, ok := d.GetOkExists("distribution"); !tpgresource.IsEmptyValue(reflect.ValueOf(distributionProp)) && (ok || !reflect.DeepEqual(v, distributionProp)) { |
| obj["distribution"] = distributionProp |
| } |
| fleetProp, err := expandContainerAttachedClusterFleet(d.Get("fleet"), d, config) |
| if err != nil { |
| return err |
| } else if v, ok := d.GetOkExists("fleet"); !tpgresource.IsEmptyValue(reflect.ValueOf(fleetProp)) && (ok || !reflect.DeepEqual(v, fleetProp)) { |
| obj["fleet"] = fleetProp |
| } |
| loggingConfigProp, err := expandContainerAttachedClusterLoggingConfig(d.Get("logging_config"), d, config) |
| if err != nil { |
| return err |
| } else if v, ok := d.GetOkExists("logging_config"); ok || !reflect.DeepEqual(v, loggingConfigProp) { |
| obj["loggingConfig"] = loggingConfigProp |
| } |
| authorizationProp, err := expandContainerAttachedClusterAuthorization(d.Get("authorization"), d, config) |
| if err != nil { |
| return err |
| } else if v, ok := d.GetOkExists("authorization"); !tpgresource.IsEmptyValue(reflect.ValueOf(authorizationProp)) && (ok || !reflect.DeepEqual(v, authorizationProp)) { |
| obj["authorization"] = authorizationProp |
| } |
| monitoringConfigProp, err := expandContainerAttachedClusterMonitoringConfig(d.Get("monitoring_config"), d, config) |
| if err != nil { |
| return err |
| } else if v, ok := d.GetOkExists("monitoring_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(monitoringConfigProp)) && (ok || !reflect.DeepEqual(v, monitoringConfigProp)) { |
| obj["monitoringConfig"] = monitoringConfigProp |
| } |
| binaryAuthorizationProp, err := expandContainerAttachedClusterBinaryAuthorization(d.Get("binary_authorization"), d, config) |
| if err != nil { |
| return err |
| } else if v, ok := d.GetOkExists("binary_authorization"); !tpgresource.IsEmptyValue(reflect.ValueOf(binaryAuthorizationProp)) && (ok || !reflect.DeepEqual(v, binaryAuthorizationProp)) { |
| obj["binaryAuthorization"] = binaryAuthorizationProp |
| } |
| proxyConfigProp, err := expandContainerAttachedClusterProxyConfig(d.Get("proxy_config"), d, config) |
| if err != nil { |
| return err |
| } else if v, ok := d.GetOkExists("proxy_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(proxyConfigProp)) && (ok || !reflect.DeepEqual(v, proxyConfigProp)) { |
| obj["proxyConfig"] = proxyConfigProp |
| } |
| annotationsProp, err := expandContainerAttachedClusterEffectiveAnnotations(d.Get("effective_annotations"), d, config) |
| if err != nil { |
| return err |
| } else if v, ok := d.GetOkExists("effective_annotations"); !tpgresource.IsEmptyValue(reflect.ValueOf(annotationsProp)) && (ok || !reflect.DeepEqual(v, annotationsProp)) { |
| obj["annotations"] = annotationsProp |
| } |
| |
| url, err := tpgresource.ReplaceVars(d, config, "{{ContainerAttachedBasePath}}projects/{{project}}/locations/{{location}}/attachedClusters?attached_cluster_id={{name}}") |
| if err != nil { |
| return err |
| } |
| |
| log.Printf("[DEBUG] Creating new Cluster: %#v", obj) |
| billingProject := "" |
| |
| project, err := tpgresource.GetProject(d, config) |
| if err != nil { |
| return fmt.Errorf("Error fetching project for Cluster: %s", err) |
| } |
| billingProject = project |
| |
| // err == nil indicates that the billing_project value was found |
| if bp, err := tpgresource.GetBillingProject(d, config); err == nil { |
| billingProject = bp |
| } |
| |
| headers := make(http.Header) |
| res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ |
| Config: config, |
| Method: "POST", |
| Project: billingProject, |
| RawURL: url, |
| UserAgent: userAgent, |
| Body: obj, |
| Timeout: d.Timeout(schema.TimeoutCreate), |
| Headers: headers, |
| }) |
| if err != nil { |
| return fmt.Errorf("Error creating Cluster: %s", err) |
| } |
| |
| // Store the ID now |
| id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/attachedClusters/{{name}}") |
| if err != nil { |
| return fmt.Errorf("Error constructing id: %s", err) |
| } |
| d.SetId(id) |
| |
| // Use the resource in the operation response to populate |
| // identity fields and d.Id() before read |
| var opRes map[string]interface{} |
| err = ContainerAttachedOperationWaitTimeWithResponse( |
| config, res, &opRes, project, "Creating Cluster", userAgent, |
| d.Timeout(schema.TimeoutCreate)) |
| if err != nil { |
		// The resource wasn't actually created
| d.SetId("") |
| |
| return fmt.Errorf("Error waiting to create Cluster: %s", err) |
| } |
| |
| if err := d.Set("name", flattenContainerAttachedClusterName(opRes["name"], d, config)); err != nil { |
| return err |
| } |
| |
| // This may have caused the ID to update - update it if so. |
| id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/attachedClusters/{{name}}") |
| if err != nil { |
| return fmt.Errorf("Error constructing id: %s", err) |
| } |
| d.SetId(id) |
| |
| log.Printf("[DEBUG] Finished creating Cluster %q: %#v", d.Id(), res) |
| |
| return resourceContainerAttachedClusterRead(d, meta) |
| } |
| |
| func resourceContainerAttachedClusterRead(d *schema.ResourceData, meta interface{}) error { |
| config := meta.(*transport_tpg.Config) |
| userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) |
| if err != nil { |
| return err |
| } |
| |
| url, err := tpgresource.ReplaceVars(d, config, "{{ContainerAttachedBasePath}}projects/{{project}}/locations/{{location}}/attachedClusters/{{name}}") |
| if err != nil { |
| return err |
| } |
| |
| billingProject := "" |
| |
| project, err := tpgresource.GetProject(d, config) |
| if err != nil { |
| return fmt.Errorf("Error fetching project for Cluster: %s", err) |
| } |
| billingProject = project |
| |
| // err == nil indicates that the billing_project value was found |
| if bp, err := tpgresource.GetBillingProject(d, config); err == nil { |
| billingProject = bp |
| } |
| |
| headers := make(http.Header) |
| res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ |
| Config: config, |
| Method: "GET", |
| Project: billingProject, |
| RawURL: url, |
| UserAgent: userAgent, |
| Headers: headers, |
| }) |
| if err != nil { |
| return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ContainerAttachedCluster %q", d.Id())) |
| } |
| |
| // Explicitly set virtual fields to default values if unset |
| if _, ok := d.GetOkExists("deletion_policy"); !ok { |
| if err := d.Set("deletion_policy", "DELETE"); err != nil { |
| return fmt.Errorf("Error setting deletion_policy: %s", err) |
| } |
| } |
| if err := d.Set("project", project); err != nil { |
| return fmt.Errorf("Error reading Cluster: %s", err) |
| } |
| |
| if err := d.Set("name", flattenContainerAttachedClusterName(res["name"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Cluster: %s", err) |
| } |
| if err := d.Set("description", flattenContainerAttachedClusterDescription(res["description"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Cluster: %s", err) |
| } |
| if err := d.Set("oidc_config", flattenContainerAttachedClusterOidcConfig(res["oidcConfig"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Cluster: %s", err) |
| } |
| if err := d.Set("platform_version", flattenContainerAttachedClusterPlatformVersion(res["platformVersion"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Cluster: %s", err) |
| } |
| if err := d.Set("distribution", flattenContainerAttachedClusterDistribution(res["distribution"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Cluster: %s", err) |
| } |
| if err := d.Set("cluster_region", flattenContainerAttachedClusterClusterRegion(res["clusterRegion"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Cluster: %s", err) |
| } |
| if err := d.Set("fleet", flattenContainerAttachedClusterFleet(res["fleet"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Cluster: %s", err) |
| } |
| if err := d.Set("state", flattenContainerAttachedClusterState(res["state"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Cluster: %s", err) |
| } |
| if err := d.Set("uid", flattenContainerAttachedClusterUid(res["uid"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Cluster: %s", err) |
| } |
| if err := d.Set("reconciling", flattenContainerAttachedClusterReconciling(res["reconciling"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Cluster: %s", err) |
| } |
| if err := d.Set("create_time", flattenContainerAttachedClusterCreateTime(res["createTime"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Cluster: %s", err) |
| } |
| if err := d.Set("update_time", flattenContainerAttachedClusterUpdateTime(res["updateTime"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Cluster: %s", err) |
| } |
| if err := d.Set("kubernetes_version", flattenContainerAttachedClusterKubernetesVersion(res["kubernetesVersion"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Cluster: %s", err) |
| } |
| if err := d.Set("annotations", flattenContainerAttachedClusterAnnotations(res["annotations"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Cluster: %s", err) |
| } |
| if err := d.Set("workload_identity_config", flattenContainerAttachedClusterWorkloadIdentityConfig(res["workloadIdentityConfig"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Cluster: %s", err) |
| } |
| if err := d.Set("logging_config", flattenContainerAttachedClusterLoggingConfig(res["loggingConfig"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Cluster: %s", err) |
| } |
| if err := d.Set("errors", flattenContainerAttachedClusterErrors(res["errors"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Cluster: %s", err) |
| } |
| if err := d.Set("authorization", flattenContainerAttachedClusterAuthorization(res["authorization"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Cluster: %s", err) |
| } |
| if err := d.Set("monitoring_config", flattenContainerAttachedClusterMonitoringConfig(res["monitoringConfig"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Cluster: %s", err) |
| } |
| if err := d.Set("binary_authorization", flattenContainerAttachedClusterBinaryAuthorization(res["binaryAuthorization"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Cluster: %s", err) |
| } |
| if err := d.Set("proxy_config", flattenContainerAttachedClusterProxyConfig(res["proxyConfig"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Cluster: %s", err) |
| } |
| if err := d.Set("effective_annotations", flattenContainerAttachedClusterEffectiveAnnotations(res["annotations"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Cluster: %s", err) |
| } |
| |
| return nil |
| } |
| |
| func resourceContainerAttachedClusterUpdate(d *schema.ResourceData, meta interface{}) error { |
| config := meta.(*transport_tpg.Config) |
| userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) |
| if err != nil { |
| return err |
| } |
| |
| billingProject := "" |
| |
| project, err := tpgresource.GetProject(d, config) |
| if err != nil { |
| return fmt.Errorf("Error fetching project for Cluster: %s", err) |
| } |
| billingProject = project |
| |
| obj := make(map[string]interface{}) |
| descriptionProp, err := expandContainerAttachedClusterDescription(d.Get("description"), d, config) |
| if err != nil { |
| return err |
| } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { |
| obj["description"] = descriptionProp |
| } |
| oidcConfigProp, err := expandContainerAttachedClusterOidcConfig(d.Get("oidc_config"), d, config) |
| if err != nil { |
| return err |
| } else if v, ok := d.GetOkExists("oidc_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, oidcConfigProp)) { |
| obj["oidcConfig"] = oidcConfigProp |
| } |
| platformVersionProp, err := expandContainerAttachedClusterPlatformVersion(d.Get("platform_version"), d, config) |
| if err != nil { |
| return err |
| } else if v, ok := d.GetOkExists("platform_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, platformVersionProp)) { |
| obj["platformVersion"] = platformVersionProp |
| } |
| fleetProp, err := expandContainerAttachedClusterFleet(d.Get("fleet"), d, config) |
| if err != nil { |
| return err |
| } else if v, ok := d.GetOkExists("fleet"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, fleetProp)) { |
| obj["fleet"] = fleetProp |
| } |
| loggingConfigProp, err := expandContainerAttachedClusterLoggingConfig(d.Get("logging_config"), d, config) |
| if err != nil { |
| return err |
| } else if v, ok := d.GetOkExists("logging_config"); ok || !reflect.DeepEqual(v, loggingConfigProp) { |
| obj["loggingConfig"] = loggingConfigProp |
| } |
| authorizationProp, err := expandContainerAttachedClusterAuthorization(d.Get("authorization"), d, config) |
| if err != nil { |
| return err |
| } else if v, ok := d.GetOkExists("authorization"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, authorizationProp)) { |
| obj["authorization"] = authorizationProp |
| } |
| monitoringConfigProp, err := expandContainerAttachedClusterMonitoringConfig(d.Get("monitoring_config"), d, config) |
| if err != nil { |
| return err |
| } else if v, ok := d.GetOkExists("monitoring_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, monitoringConfigProp)) { |
| obj["monitoringConfig"] = monitoringConfigProp |
| } |
| binaryAuthorizationProp, err := expandContainerAttachedClusterBinaryAuthorization(d.Get("binary_authorization"), d, config) |
| if err != nil { |
| return err |
| } else if v, ok := d.GetOkExists("binary_authorization"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, binaryAuthorizationProp)) { |
| obj["binaryAuthorization"] = binaryAuthorizationProp |
| } |
| proxyConfigProp, err := expandContainerAttachedClusterProxyConfig(d.Get("proxy_config"), d, config) |
| if err != nil { |
| return err |
| } else if v, ok := d.GetOkExists("proxy_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, proxyConfigProp)) { |
| obj["proxyConfig"] = proxyConfigProp |
| } |
| annotationsProp, err := expandContainerAttachedClusterEffectiveAnnotations(d.Get("effective_annotations"), d, config) |
| if err != nil { |
| return err |
| } else if v, ok := d.GetOkExists("effective_annotations"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, annotationsProp)) { |
| obj["annotations"] = annotationsProp |
| } |
| |
| url, err := tpgresource.ReplaceVars(d, config, "{{ContainerAttachedBasePath}}projects/{{project}}/locations/{{location}}/attachedClusters/{{name}}") |
| if err != nil { |
| return err |
| } |
| |
| log.Printf("[DEBUG] Updating Cluster %q: %#v", d.Id(), obj) |
| headers := make(http.Header) |
| updateMask := []string{} |
| |
| if d.HasChange("description") { |
| updateMask = append(updateMask, "description") |
| } |
| |
| if d.HasChange("oidc_config") { |
| updateMask = append(updateMask, "oidcConfig") |
| } |
| |
| if d.HasChange("platform_version") { |
| updateMask = append(updateMask, "platformVersion") |
| } |
| |
| if d.HasChange("fleet") { |
| updateMask = append(updateMask, "fleet") |
| } |
| |
| if d.HasChange("logging_config") { |
| updateMask = append(updateMask, "loggingConfig") |
| } |
| |
| if d.HasChange("authorization") { |
| updateMask = append(updateMask, "authorization") |
| } |
| |
| if d.HasChange("monitoring_config") { |
| updateMask = append(updateMask, "monitoringConfig") |
| } |
| |
| if d.HasChange("binary_authorization") { |
| updateMask = append(updateMask, "binaryAuthorization") |
| } |
| |
| if d.HasChange("proxy_config") { |
| updateMask = append(updateMask, "proxyConfig") |
| } |
| |
| if d.HasChange("effective_annotations") { |
| updateMask = append(updateMask, "annotations") |
| } |
| // updateMask is a URL parameter but not present in the schema, so ReplaceVars |
| // won't set it |
| url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) |
| if err != nil { |
| return err |
| } |
| // The generated code sets the wrong masks for the following fields. |
| newUpdateMask := []string{} |
| if d.HasChange("authorization.0.admin_users") { |
| newUpdateMask = append(newUpdateMask, "authorization.admin_users") |
| } |
| if d.HasChange("authorization.0.admin_groups") { |
| newUpdateMask = append(newUpdateMask, "authorization.admin_groups") |
| } |
| if d.HasChange("logging_config") { |
| newUpdateMask = append(newUpdateMask, "logging_config.component_config.enable_components") |
| } |
| if d.HasChange("monitoring_config") { |
| newUpdateMask = append(newUpdateMask, "monitoring_config.managed_prometheus_config.enabled") |
| } |
| if d.HasChange("binary_authorization") { |
| newUpdateMask = append(newUpdateMask, "binary_authorization.evaluation_mode") |
| } |
| if d.HasChange("proxy_config") { |
| newUpdateMask = append(newUpdateMask, "proxy_config.kubernetes_secret.name") |
| newUpdateMask = append(newUpdateMask, "proxy_config.kubernetes_secret.namespace") |
| } |
| // Pull out any other set fields from the generated mask. |
| for _, mask := range updateMask { |
| if mask == "authorization" || mask == "loggingConfig" || mask == "monitoringConfig" || mask == "binaryAuthorization" || mask == "proxyConfig" { |
| continue |
| } |
| newUpdateMask = append(newUpdateMask, mask) |
| } |
| // Overwrite the previously set mask. |
| url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(newUpdateMask, ",")}) |
| if err != nil { |
| return err |
| } |
| |
| // err == nil indicates that the billing_project value was found |
| if bp, err := tpgresource.GetBillingProject(d, config); err == nil { |
| billingProject = bp |
| } |
| |
	// If updateMask is empty, nothing is being updated, so skip the PATCH request.
| if len(updateMask) > 0 { |
| res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ |
| Config: config, |
| Method: "PATCH", |
| Project: billingProject, |
| RawURL: url, |
| UserAgent: userAgent, |
| Body: obj, |
| Timeout: d.Timeout(schema.TimeoutUpdate), |
| Headers: headers, |
| }) |
| |
| if err != nil { |
| return fmt.Errorf("Error updating Cluster %q: %s", d.Id(), err) |
| } else { |
| log.Printf("[DEBUG] Finished updating Cluster %q: %#v", d.Id(), res) |
| } |
| |
| err = ContainerAttachedOperationWaitTime( |
| config, res, project, "Updating Cluster", userAgent, |
| d.Timeout(schema.TimeoutUpdate)) |
| |
| if err != nil { |
| return err |
| } |
| } |
| |
| return resourceContainerAttachedClusterRead(d, meta) |
| } |
| |
| func resourceContainerAttachedClusterDelete(d *schema.ResourceData, meta interface{}) error { |
| config := meta.(*transport_tpg.Config) |
| userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) |
| if err != nil { |
| return err |
| } |
| |
| billingProject := "" |
| |
| project, err := tpgresource.GetProject(d, config) |
| if err != nil { |
| return fmt.Errorf("Error fetching project for Cluster: %s", err) |
| } |
| billingProject = project |
| |
| url, err := tpgresource.ReplaceVars(d, config, "{{ContainerAttachedBasePath}}projects/{{project}}/locations/{{location}}/attachedClusters/{{name}}") |
| if err != nil { |
| return err |
| } |
| |
| var obj map[string]interface{} |
| |
| // err == nil indicates that the billing_project value was found |
| if bp, err := tpgresource.GetBillingProject(d, config); err == nil { |
| billingProject = bp |
| } |
| |
| headers := make(http.Header) |
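	// When deletion_policy is DELETE_IGNORE_ERRORS, request that the API ignore
	// errors encountered while deleting the cluster.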
| if v, ok := d.GetOk("deletion_policy"); ok { |
| if v == "DELETE_IGNORE_ERRORS" { |
| url, err = transport_tpg.AddQueryParams(url, map[string]string{"ignore_errors": "true"}) |
| if err != nil { |
| return err |
| } |
| } |
| } |
| |
| log.Printf("[DEBUG] Deleting Cluster %q", d.Id()) |
| res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ |
| Config: config, |
| Method: "DELETE", |
| Project: billingProject, |
| RawURL: url, |
| UserAgent: userAgent, |
| Body: obj, |
| Timeout: d.Timeout(schema.TimeoutDelete), |
| Headers: headers, |
| }) |
| if err != nil { |
| return transport_tpg.HandleNotFoundError(err, d, "Cluster") |
| } |
| |
| err = ContainerAttachedOperationWaitTime( |
| config, res, project, "Deleting Cluster", userAgent, |
| d.Timeout(schema.TimeoutDelete)) |
| |
| if err != nil { |
| return err |
| } |
| |
| log.Printf("[DEBUG] Finished deleting Cluster %q: %#v", d.Id(), res) |
| return nil |
| } |
| |
| func resourceContainerAttachedClusterImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { |
| config := meta.(*transport_tpg.Config) |
| if err := tpgresource.ParseImportId([]string{ |
| "^projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/attachedClusters/(?P<name>[^/]+)$", |
| "^(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<name>[^/]+)$", |
| "^(?P<location>[^/]+)/(?P<name>[^/]+)$", |
| }, d, config); err != nil { |
| return nil, err |
| } |
| |
	// Replace the import id with the resource id
| id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/attachedClusters/{{name}}") |
| if err != nil { |
| return nil, fmt.Errorf("Error constructing id: %s", err) |
| } |
| d.SetId(id) |
| |
| // Explicitly set virtual fields to default values on import |
| if err := d.Set("deletion_policy", "DELETE"); err != nil { |
| return nil, fmt.Errorf("Error setting deletion_policy: %s", err) |
| } |
| |
| return []*schema.ResourceData{d}, nil |
| } |
| |
| func flattenContainerAttachedClusterName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
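	// The API returns the cluster's full resource name; keep only the trailing
	// short name in state.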
| if v == nil { |
| return v |
| } |
| return tpgresource.NameFromSelfLinkStateFunc(v) |
| } |
| |
| func flattenContainerAttachedClusterDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| return v |
| } |
| |
| func flattenContainerAttachedClusterOidcConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| if v == nil { |
| return nil |
| } |
| original := v.(map[string]interface{}) |
| if len(original) == 0 { |
| return nil |
| } |
| transformed := make(map[string]interface{}) |
| transformed["issuer_url"] = |
| flattenContainerAttachedClusterOidcConfigIssuerUrl(original["issuerUrl"], d, config) |
| transformed["jwks"] = |
| flattenContainerAttachedClusterOidcConfigJwks(original["jwks"], d, config) |
| return []interface{}{transformed} |
| } |
| func flattenContainerAttachedClusterOidcConfigIssuerUrl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| return v |
| } |
| |
| func flattenContainerAttachedClusterOidcConfigJwks(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| return v |
| } |
| |
| func flattenContainerAttachedClusterPlatformVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| return v |
| } |
| |
| func flattenContainerAttachedClusterDistribution(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| return v |
| } |
| |
| func flattenContainerAttachedClusterClusterRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| return v |
| } |
| |
| func flattenContainerAttachedClusterFleet(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| if v == nil { |
| return nil |
| } |
| original := v.(map[string]interface{}) |
| if len(original) == 0 { |
| return nil |
| } |
| transformed := make(map[string]interface{}) |
| transformed["membership"] = |
| flattenContainerAttachedClusterFleetMembership(original["membership"], d, config) |
| transformed["project"] = |
| flattenContainerAttachedClusterFleetProject(original["project"], d, config) |
| return []interface{}{transformed} |
| } |
| func flattenContainerAttachedClusterFleetMembership(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| return v |
| } |
| |
| func flattenContainerAttachedClusterFleetProject(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| return v |
| } |
| |
| func flattenContainerAttachedClusterState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| return v |
| } |
| |
| func flattenContainerAttachedClusterUid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| return v |
| } |
| |
| func flattenContainerAttachedClusterReconciling(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| return v |
| } |
| |
| func flattenContainerAttachedClusterCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| return v |
| } |
| |
| func flattenContainerAttachedClusterUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| return v |
| } |
| |
| func flattenContainerAttachedClusterKubernetesVersion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| return v |
| } |
| |
| func flattenContainerAttachedClusterAnnotations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
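	// Keep only the annotations that are present in the Terraform configuration;
	// the full server-side set is exposed separately via 'effective_annotations'.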
| if v == nil { |
| return v |
| } |
| |
| transformed := make(map[string]interface{}) |
| if l, ok := d.GetOkExists("annotations"); ok { |
| for k := range l.(map[string]interface{}) { |
| transformed[k] = v.(map[string]interface{})[k] |
| } |
| } |
| |
| return transformed |
| } |
| |
| func flattenContainerAttachedClusterWorkloadIdentityConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| if v == nil { |
| return nil |
| } |
| original := v.(map[string]interface{}) |
| if len(original) == 0 { |
| return nil |
| } |
| transformed := make(map[string]interface{}) |
| transformed["identity_provider"] = |
| flattenContainerAttachedClusterWorkloadIdentityConfigIdentityProvider(original["identityProvider"], d, config) |
| transformed["issuer_uri"] = |
| flattenContainerAttachedClusterWorkloadIdentityConfigIssuerUri(original["issuerUri"], d, config) |
| transformed["workload_pool"] = |
| flattenContainerAttachedClusterWorkloadIdentityConfigWorkloadPool(original["workloadPool"], d, config) |
| return []interface{}{transformed} |
| } |
| func flattenContainerAttachedClusterWorkloadIdentityConfigIdentityProvider(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| return v |
| } |
| |
| func flattenContainerAttachedClusterWorkloadIdentityConfigIssuerUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| return v |
| } |
| |
| func flattenContainerAttachedClusterWorkloadIdentityConfigWorkloadPool(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| return v |
| } |
| |
| func flattenContainerAttachedClusterLoggingConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| if v == nil { |
| return nil |
| } |
| original := v.(map[string]interface{}) |
| transformed := make(map[string]interface{}) |
| transformed["component_config"] = |
| flattenContainerAttachedClusterLoggingConfigComponentConfig(original["componentConfig"], d, config) |
| return []interface{}{transformed} |
| } |
| func flattenContainerAttachedClusterLoggingConfigComponentConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| if v == nil { |
| return nil |
| } |
| original := v.(map[string]interface{}) |
| transformed := make(map[string]interface{}) |
| transformed["enable_components"] = |
| flattenContainerAttachedClusterLoggingConfigComponentConfigEnableComponents(original["enableComponents"], d, config) |
| return []interface{}{transformed} |
| } |
| func flattenContainerAttachedClusterLoggingConfigComponentConfigEnableComponents(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| return v |
| } |
| |
| func flattenContainerAttachedClusterErrors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| if v == nil { |
| return v |
| } |
| l := v.([]interface{}) |
| transformed := make([]interface{}, 0, len(l)) |
| for _, raw := range l { |
| original := raw.(map[string]interface{}) |
| if len(original) < 1 { |
			// Do not include empty JSON objects coming back from the API
| continue |
| } |
| transformed = append(transformed, map[string]interface{}{ |
| "message": flattenContainerAttachedClusterErrorsMessage(original["message"], d, config), |
| }) |
| } |
| return transformed |
| } |
| func flattenContainerAttachedClusterErrorsMessage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| return v |
| } |
| |
| // The custom expander transforms input into something like this: |
| // |
| // authorization { |
| // admin_users [ |
| // { username = "user1" }, |
| // { username = "user2" } |
| // ] |
| // admin_groups [ |
| // { group = "group1" }, |
| // { group = "group2" }, |
| // ] |
| // } |
| // |
| // The custom flattener transforms input back into something like this: |
| // |
| // authorization { |
| // admin_users = [ |
| // "user1", |
| // "user2" |
| // ] |
| // admin_groups = [ |
| // "group1", |
| // "group2" |
| // ], |
| // } |
| func flattenContainerAttachedClusterAuthorization(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| if v == nil || len(v.(map[string]interface{})) == 0 { |
| return nil |
| } |
| |
| transformed := make(map[string][]string) |
| if v.(map[string]interface{})["adminUsers"] != nil { |
| orig := v.(map[string]interface{})["adminUsers"].([]interface{}) |
| transformed["admin_users"] = make([]string, len(orig)) |
| for i, u := range orig { |
| if u != nil { |
| transformed["admin_users"][i] = u.(map[string]interface{})["username"].(string) |
| } |
| } |
| } |
| if v.(map[string]interface{})["adminGroups"] != nil { |
| orig := v.(map[string]interface{})["adminGroups"].([]interface{}) |
| transformed["admin_groups"] = make([]string, len(orig)) |
| for i, u := range orig { |
| if u != nil { |
| transformed["admin_groups"][i] = u.(map[string]interface{})["group"].(string) |
| } |
| } |
| } |
| |
| return []interface{}{transformed} |
| } |
| |
| func flattenContainerAttachedClusterMonitoringConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| if v == nil { |
| return nil |
| } |
| original := v.(map[string]interface{}) |
| transformed := make(map[string]interface{}) |
| transformed["managed_prometheus_config"] = |
| flattenContainerAttachedClusterMonitoringConfigManagedPrometheusConfig(original["managedPrometheusConfig"], d, config) |
| return []interface{}{transformed} |
| } |
| func flattenContainerAttachedClusterMonitoringConfigManagedPrometheusConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| if v == nil { |
| return nil |
| } |
| original := v.(map[string]interface{}) |
| transformed := make(map[string]interface{}) |
| transformed["enabled"] = |
| flattenContainerAttachedClusterMonitoringConfigManagedPrometheusConfigEnabled(original["enabled"], d, config) |
| return []interface{}{transformed} |
| } |
| func flattenContainerAttachedClusterMonitoringConfigManagedPrometheusConfigEnabled(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| return v |
| } |
| |
| func flattenContainerAttachedClusterBinaryAuthorization(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| if v == nil { |
| return nil |
| } |
| original := v.(map[string]interface{}) |
| transformed := make(map[string]interface{}) |
| transformed["evaluation_mode"] = |
| flattenContainerAttachedClusterBinaryAuthorizationEvaluationMode(original["evaluationMode"], d, config) |
| return []interface{}{transformed} |
| } |
| func flattenContainerAttachedClusterBinaryAuthorizationEvaluationMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| return v |
| } |
| |
| func flattenContainerAttachedClusterProxyConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| if v == nil { |
| return nil |
| } |
| original := v.(map[string]interface{}) |
| if len(original) == 0 { |
| return nil |
| } |
| transformed := make(map[string]interface{}) |
| transformed["kubernetes_secret"] = |
| flattenContainerAttachedClusterProxyConfigKubernetesSecret(original["kubernetesSecret"], d, config) |
| return []interface{}{transformed} |
| } |
| func flattenContainerAttachedClusterProxyConfigKubernetesSecret(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| if v == nil { |
| return nil |
| } |
| original := v.(map[string]interface{}) |
| if len(original) == 0 { |
| return nil |
| } |
| transformed := make(map[string]interface{}) |
| transformed["name"] = |
| flattenContainerAttachedClusterProxyConfigKubernetesSecretName(original["name"], d, config) |
| transformed["namespace"] = |
| flattenContainerAttachedClusterProxyConfigKubernetesSecretNamespace(original["namespace"], d, config) |
| return []interface{}{transformed} |
| } |
| func flattenContainerAttachedClusterProxyConfigKubernetesSecretName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| return v |
| } |
| |
| func flattenContainerAttachedClusterProxyConfigKubernetesSecretNamespace(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| return v |
| } |
| |
| func flattenContainerAttachedClusterEffectiveAnnotations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| return v |
| } |
| |
| func expandContainerAttachedClusterName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| return v, nil |
| } |
| |
| func expandContainerAttachedClusterDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| return v, nil |
| } |
| |
| func expandContainerAttachedClusterOidcConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| l := v.([]interface{}) |
| if len(l) == 0 || l[0] == nil { |
| return nil, nil |
| } |
| raw := l[0] |
| original := raw.(map[string]interface{}) |
| transformed := make(map[string]interface{}) |
| |
| transformedIssuerUrl, err := expandContainerAttachedClusterOidcConfigIssuerUrl(original["issuer_url"], d, config) |
| if err != nil { |
| return nil, err |
| } else if val := reflect.ValueOf(transformedIssuerUrl); val.IsValid() && !tpgresource.IsEmptyValue(val) { |
| transformed["issuerUrl"] = transformedIssuerUrl |
| } |
| |
| transformedJwks, err := expandContainerAttachedClusterOidcConfigJwks(original["jwks"], d, config) |
| if err != nil { |
| return nil, err |
| } else if val := reflect.ValueOf(transformedJwks); val.IsValid() && !tpgresource.IsEmptyValue(val) { |
| transformed["jwks"] = transformedJwks |
| } |
| |
| return transformed, nil |
| } |
| |
| func expandContainerAttachedClusterOidcConfigIssuerUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| return v, nil |
| } |
| |
| func expandContainerAttachedClusterOidcConfigJwks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| return v, nil |
| } |
| |
| func expandContainerAttachedClusterPlatformVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| return v, nil |
| } |
| |
| func expandContainerAttachedClusterDistribution(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| return v, nil |
| } |
| |
| func expandContainerAttachedClusterFleet(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| l := v.([]interface{}) |
| if len(l) == 0 || l[0] == nil { |
| return nil, nil |
| } |
| raw := l[0] |
| original := raw.(map[string]interface{}) |
| transformed := make(map[string]interface{}) |
| |
| transformedMembership, err := expandContainerAttachedClusterFleetMembership(original["membership"], d, config) |
| if err != nil { |
| return nil, err |
| } else if val := reflect.ValueOf(transformedMembership); val.IsValid() && !tpgresource.IsEmptyValue(val) { |
| transformed["membership"] = transformedMembership |
| } |
| |
| transformedProject, err := expandContainerAttachedClusterFleetProject(original["project"], d, config) |
| if err != nil { |
| return nil, err |
| } else if val := reflect.ValueOf(transformedProject); val.IsValid() && !tpgresource.IsEmptyValue(val) { |
| transformed["project"] = transformedProject |
| } |
| |
| return transformed, nil |
| } |
| |
| func expandContainerAttachedClusterFleetMembership(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| return v, nil |
| } |
| |
| func expandContainerAttachedClusterFleetProject(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| return v, nil |
| } |
| |
| func expandContainerAttachedClusterLoggingConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
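	// Unlike most nested blocks, an empty or removed logging_config expands to an
	// empty object rather than nil, so the field is still sent to the API and can
	// clear previously enabled components.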
| l := v.([]interface{}) |
| if len(l) == 0 || l[0] == nil { |
| transformed := make(map[string]interface{}) |
| return transformed, nil |
| } |
| raw := l[0] |
| original := raw.(map[string]interface{}) |
| transformed := make(map[string]interface{}) |
| |
| transformedComponentConfig, err := expandContainerAttachedClusterLoggingConfigComponentConfig(original["component_config"], d, config) |
| if err != nil { |
| return nil, err |
| } else { |
| transformed["componentConfig"] = transformedComponentConfig |
| } |
| |
| return transformed, nil |
| } |
| |
| func expandContainerAttachedClusterLoggingConfigComponentConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| l := v.([]interface{}) |
| if len(l) == 0 { |
| return nil, nil |
| } |
| |
| if l[0] == nil { |
| transformed := make(map[string]interface{}) |
| return transformed, nil |
| } |
| raw := l[0] |
| original := raw.(map[string]interface{}) |
| transformed := make(map[string]interface{}) |
| |
| transformedEnableComponents, err := expandContainerAttachedClusterLoggingConfigComponentConfigEnableComponents(original["enable_components"], d, config) |
| if err != nil { |
| return nil, err |
| } else { |
| transformed["enableComponents"] = transformedEnableComponents |
| } |
| |
| return transformed, nil |
| } |
| |
| func expandContainerAttachedClusterLoggingConfigComponentConfigEnableComponents(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| return v, nil |
| } |
| |
| type attachedClusterUser struct { |
| Username string `json:"username"` |
| } |
| |
| type attachedClusterGroup struct { |
| Group string `json:"group"` |
| } |
| |
| // The custom expander transforms input into something like this: |
| // |
| // authorization { |
| // admin_users [ |
| // { username = "user1" }, |
| // { username = "user2" } |
| // ] |
| // admin_groups [ |
| // { group = "group1" }, |
| // { group = "group2" }, |
| // ] |
| // } |
| // |
| // The custom flattener transforms input back into something like this: |
| // |
| // authorization { |
| // admin_users = [ |
| // "user1", |
| // "user2" |
| // ] |
| // admin_groups = [ |
| // "group1", |
| // "group2" |
| // ], |
| // } |
| func expandContainerAttachedClusterAuthorization(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| l := v.([]interface{}) |
| if len(l) == 0 || l[0] == nil { |
| return nil, nil |
| } |
| raw := l[0] |
| orig := raw.(map[string]interface{})["admin_users"].([]interface{}) |
| transformed := make(map[string][]interface{}) |
| transformed["admin_users"] = make([]interface{}, len(orig)) |
| for i, u := range orig { |
| if u != nil { |
| transformed["admin_users"][i] = attachedClusterUser{Username: u.(string)} |
| } |
| } |
| orig = raw.(map[string]interface{})["admin_groups"].([]interface{}) |
| transformed["admin_groups"] = make([]interface{}, len(orig)) |
| for i, u := range orig { |
| if u != nil { |
| transformed["admin_groups"][i] = attachedClusterGroup{Group: u.(string)} |
| } |
| } |
| return transformed, nil |
| } |
| |
| func expandContainerAttachedClusterMonitoringConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| l := v.([]interface{}) |
| if len(l) == 0 { |
| return nil, nil |
| } |
| |
| if l[0] == nil { |
| transformed := make(map[string]interface{}) |
| return transformed, nil |
| } |
| raw := l[0] |
| original := raw.(map[string]interface{}) |
| transformed := make(map[string]interface{}) |
| |
| transformedManagedPrometheusConfig, err := expandContainerAttachedClusterMonitoringConfigManagedPrometheusConfig(original["managed_prometheus_config"], d, config) |
| if err != nil { |
| return nil, err |
| } else if val := reflect.ValueOf(transformedManagedPrometheusConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { |
| transformed["managedPrometheusConfig"] = transformedManagedPrometheusConfig |
| } |
| |
| return transformed, nil |
| } |
| |
| func expandContainerAttachedClusterMonitoringConfigManagedPrometheusConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| l := v.([]interface{}) |
| if len(l) == 0 { |
| return nil, nil |
| } |
| |
| if l[0] == nil { |
| transformed := make(map[string]interface{}) |
| return transformed, nil |
| } |
| raw := l[0] |
| original := raw.(map[string]interface{}) |
| transformed := make(map[string]interface{}) |
| |
| transformedEnabled, err := expandContainerAttachedClusterMonitoringConfigManagedPrometheusConfigEnabled(original["enabled"], d, config) |
| if err != nil { |
| return nil, err |
| } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { |
| transformed["enabled"] = transformedEnabled |
| } |
| |
| return transformed, nil |
| } |
| |
| func expandContainerAttachedClusterMonitoringConfigManagedPrometheusConfigEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| return v, nil |
| } |
| |
| func expandContainerAttachedClusterBinaryAuthorization(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| l := v.([]interface{}) |
| if len(l) == 0 { |
| return nil, nil |
| } |
| |
| if l[0] == nil { |
| transformed := make(map[string]interface{}) |
| return transformed, nil |
| } |
| raw := l[0] |
| original := raw.(map[string]interface{}) |
| transformed := make(map[string]interface{}) |
| |
| transformedEvaluationMode, err := expandContainerAttachedClusterBinaryAuthorizationEvaluationMode(original["evaluation_mode"], d, config) |
| if err != nil { |
| return nil, err |
| } else if val := reflect.ValueOf(transformedEvaluationMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { |
| transformed["evaluationMode"] = transformedEvaluationMode |
| } |
| |
| return transformed, nil |
| } |
| |
| func expandContainerAttachedClusterBinaryAuthorizationEvaluationMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| return v, nil |
| } |
| |
| func expandContainerAttachedClusterProxyConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| l := v.([]interface{}) |
| if len(l) == 0 || l[0] == nil { |
| return nil, nil |
| } |
| raw := l[0] |
| original := raw.(map[string]interface{}) |
| transformed := make(map[string]interface{}) |
| |
| transformedKubernetesSecret, err := expandContainerAttachedClusterProxyConfigKubernetesSecret(original["kubernetes_secret"], d, config) |
| if err != nil { |
| return nil, err |
| } else if val := reflect.ValueOf(transformedKubernetesSecret); val.IsValid() && !tpgresource.IsEmptyValue(val) { |
| transformed["kubernetesSecret"] = transformedKubernetesSecret |
| } |
| |
| return transformed, nil |
| } |
| |
| func expandContainerAttachedClusterProxyConfigKubernetesSecret(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| l := v.([]interface{}) |
| if len(l) == 0 || l[0] == nil { |
| return nil, nil |
| } |
| raw := l[0] |
| original := raw.(map[string]interface{}) |
| transformed := make(map[string]interface{}) |
| |
| transformedName, err := expandContainerAttachedClusterProxyConfigKubernetesSecretName(original["name"], d, config) |
| if err != nil { |
| return nil, err |
| } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { |
| transformed["name"] = transformedName |
| } |
| |
| transformedNamespace, err := expandContainerAttachedClusterProxyConfigKubernetesSecretNamespace(original["namespace"], d, config) |
| if err != nil { |
| return nil, err |
| } else if val := reflect.ValueOf(transformedNamespace); val.IsValid() && !tpgresource.IsEmptyValue(val) { |
| transformed["namespace"] = transformedNamespace |
| } |
| |
| return transformed, nil |
| } |
| |
| func expandContainerAttachedClusterProxyConfigKubernetesSecretName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| return v, nil |
| } |
| |
| func expandContainerAttachedClusterProxyConfigKubernetesSecretNamespace(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| return v, nil |
| } |
| |
| func expandContainerAttachedClusterEffectiveAnnotations(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { |
| if v == nil { |
| return map[string]string{}, nil |
| } |
| m := make(map[string]string) |
| for k, val := range v.(map[string]interface{}) { |
| m[k] = val.(string) |
| } |
| return m, nil |
| } |