| // Copyright (c) HashiCorp, Inc. |
| // SPDX-License-Identifier: MPL-2.0 |
| |
| // ---------------------------------------------------------------------------- |
| // |
| // *** AUTO GENERATED CODE *** Type: MMv1 *** |
| // |
| // ---------------------------------------------------------------------------- |
| // |
| // This file is automatically generated by Magic Modules and manual |
| // changes will be clobbered when the file is regenerated. |
| // |
| // Please read more about how to change this file in |
| // .github/CONTRIBUTING.md. |
| // |
| // ---------------------------------------------------------------------------- |
| |
| package bigquery |
| |
| import ( |
| "fmt" |
| "log" |
| "net/http" |
| "reflect" |
| "regexp" |
| "time" |
| |
| "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" |
| "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" |
| |
| "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" |
| transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" |
| |
| "google.golang.org/api/googleapi" |
| ) |
| |
// datasetIdRegexp is the pattern a BigQuery dataset ID must match:
// only letters, digits, and underscores.
const datasetIdRegexp = `^[0-9A-Za-z_]+$`

// datasetIdPattern is compiled once at package init so validateDatasetId
// does not recompile the expression on every call (MustCompile is safe here:
// the pattern is a constant and panics only on a programmer error).
var datasetIdPattern = regexp.MustCompile(datasetIdRegexp)

// validateDatasetId is a schema.ValidateFunc for the dataset_id attribute.
// It checks the character set and the 1,024-character length cap, returning
// one error per violated rule; v is guaranteed to be a string by the schema.
func validateDatasetId(v interface{}, k string) (ws []string, errors []error) {
	value := v.(string)
	if !datasetIdPattern.MatchString(value) {
		errors = append(errors, fmt.Errorf(
			"%q must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_)", k))
	}

	// The regexp restricts valid IDs to ASCII, so len (bytes) equals the
	// character count for any ID that passes the pattern check above.
	if len(value) > 1024 {
		errors = append(errors, fmt.Errorf(
			"%q cannot be greater than 1,024 characters", k))
	}

	return
}
| |
// validateDefaultTableExpirationMs is a schema.ValidateFunc for the
// default_table_expiration_ms attribute. BigQuery requires the default table
// lifetime to be at least one hour; v is guaranteed to be an int by the schema.
func validateDefaultTableExpirationMs(v interface{}, k string) (ws []string, errors []error) {
	// Minimum permitted expiration: 3,600,000 ms == one hour.
	const minExpirationMs = 3600000
	if ms := v.(int); ms < minExpirationMs {
		errors = append(errors, fmt.Errorf("%q cannot be shorter than 3600000 milliseconds (one hour)", k))
	}

	return
}
| |
// ResourceBigQueryDataset defines the google_bigquery_dataset resource:
// its attribute schema, CRUD entry points, import support, and timeouts.
// NOTE(review): this is Magic Modules generated output — schema changes
// belong in the upstream generator, not in this file.
func ResourceBigQueryDataset() *schema.Resource {
	return &schema.Resource{
		Create: resourceBigQueryDatasetCreate,
		Read:   resourceBigQueryDatasetRead,
		Update: resourceBigQueryDatasetUpdate,
		Delete: resourceBigQueryDatasetDelete,

		Importer: &schema.ResourceImporter{
			State: resourceBigQueryDatasetImport,
		},

		Timeouts: &schema.ResourceTimeout{
			Create: schema.DefaultTimeout(20 * time.Minute),
			Update: schema.DefaultTimeout(20 * time.Minute),
			Delete: schema.DefaultTimeout(20 * time.Minute),
		},

		// SetLabelsDiff reconciles "labels" with the computed
		// "terraform_labels"/"effective_labels" fields; DefaultProviderProject
		// fills "project" from the provider configuration when unset.
		CustomizeDiff: customdiff.All(
			tpgresource.SetLabelsDiff,
			tpgresource.DefaultProviderProject,
		),

		Schema: map[string]*schema.Schema{
			"dataset_id": {
				Type:         schema.TypeString,
				Required:     true,
				ForceNew:     true,
				ValidateFunc: validateDatasetId,
				Description: `A unique ID for this dataset, without the project name. The ID
must contain only letters (a-z, A-Z), numbers (0-9), or
underscores (_). The maximum length is 1,024 characters.`,
			},

			// Computed+Optional: the API always returns access entries (e.g.
			// default owner grants), so unconfigured values come from the server.
			"access": {
				Type:        schema.TypeSet,
				Computed:    true,
				Optional:    true,
				Description: `An array of objects that define dataset access for one or more entities.`,
				Elem:        bigqueryDatasetAccessSchema(),
				// Default schema.HashSchema is used.
			},
			"default_collation": {
				Type:     schema.TypeString,
				Computed: true,
				Optional: true,
				Description: `Defines the default collation specification of future tables created
in the dataset. If a table is created in this dataset without table-level
default collation, then the table inherits the dataset default collation,
which is applied to the string fields that do not have explicit collation
specified. A change to this field affects only tables created afterwards,
and does not alter the existing tables.

The following values are supported:
- 'und:ci': undetermined locale, case insensitive.
- '': empty string. Default to case-sensitive behavior.`,
			},
			"default_encryption_configuration": {
				Type:     schema.TypeList,
				Optional: true,
				Description: `The default encryption key for all tables in the dataset. Once this property is set,
all newly-created partitioned tables in the dataset will have encryption key set to
this value, unless table creation request (or query) overrides the key.`,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"kms_key_name": {
							Type:     schema.TypeString,
							Required: true,
							Description: `Describes the Cloud KMS encryption key that will be used to protect destination
BigQuery table. The BigQuery Service Account associated with your project requires
access to this encryption key.`,
						},
					},
				},
			},
			"default_partition_expiration_ms": {
				Type:     schema.TypeInt,
				Optional: true,
				Description: `The default partition expiration for all partitioned tables in
the dataset, in milliseconds.
Once this property is set, all newly-created partitioned tables in
the dataset will have an 'expirationMs' property in the 'timePartitioning'
settings set to this value, and changing the value will only
affect new tables, not existing ones. The storage in a partition will
have an expiration time of its partition time plus this value.
Setting this property overrides the use of 'defaultTableExpirationMs'
for partitioned tables: only one of 'defaultTableExpirationMs' and
'defaultPartitionExpirationMs' will be used for any new partitioned
table. If you provide an explicit 'timePartitioning.expirationMs' when
creating or updating a partitioned table, that value takes precedence
over the default partition expiration time indicated by this property.`,
			},
			"default_table_expiration_ms": {
				Type:         schema.TypeInt,
				Optional:     true,
				ValidateFunc: validateDefaultTableExpirationMs,
				Description: `The default lifetime of all tables in the dataset, in milliseconds.
The minimum value is 3600000 milliseconds (one hour).
Once this property is set, all newly-created tables in the dataset
will have an 'expirationTime' property set to the creation time plus
the value in this property, and changing the value will only affect
new tables, not existing ones. When the 'expirationTime' for a given
table is reached, that table will be deleted automatically.
If a table's 'expirationTime' is modified or removed before the
table expires, or if you provide an explicit 'expirationTime' when
creating a table, that value takes precedence over the default
expiration time indicated by this property.`,
			},
			"description": {
				Type:        schema.TypeString,
				Optional:    true,
				Description: `A user-friendly description of the dataset`,
			},
			// ForceNew: an external dataset reference cannot be changed on an
			// existing dataset; Terraform recreates the resource instead.
			"external_dataset_reference": {
				Type:        schema.TypeList,
				Optional:    true,
				ForceNew:    true,
				Description: `Information about the external metadata storage where the dataset is defined.`,
				MaxItems:    1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"connection": {
							Type:     schema.TypeString,
							Required: true,
							ForceNew: true,
							Description: `The connection id that is used to access the externalSource.
Format: projects/{projectId}/locations/{locationId}/connections/{connectionId}`,
						},
						"external_source": {
							Type:        schema.TypeString,
							Required:    true,
							ForceNew:    true,
							Description: `External source that backs this dataset.`,
						},
					},
				},
			},
			"friendly_name": {
				Type:        schema.TypeString,
				Optional:    true,
				Description: `A descriptive name for the dataset`,
			},
			"is_case_insensitive": {
				Type:     schema.TypeBool,
				Computed: true,
				Optional: true,
				Description: `TRUE if the dataset and its table names are case-insensitive, otherwise FALSE.
By default, this is FALSE, which means the dataset and its table names are
case-sensitive. This field does not affect routine references.`,
			},
			"labels": {
				Type:     schema.TypeMap,
				Optional: true,
				Description: `The labels associated with this dataset. You can use these to
organize and group your datasets.


**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field 'effective_labels' for all of the labels present on the resource.`,
				Elem: &schema.Schema{Type: schema.TypeString},
			},
			// CaseDiffSuppress: the API canonicalizes location casing (e.g.
			// "us" vs "US"), so case-only differences do not produce a diff.
			"location": {
				Type:             schema.TypeString,
				Optional:         true,
				ForceNew:         true,
				DiffSuppressFunc: tpgresource.CaseDiffSuppress,
				Description: `The geographic location where the dataset should reside.
See [official docs](https://cloud.google.com/bigquery/docs/dataset-locations).
There are two types of locations, regional or multi-regional. A regional
location is a specific geographic place, such as Tokyo, and a multi-regional
location is a large geographic area, such as the United States, that
contains at least two geographic places.
The default value is multi-regional location 'US'.
Changing this forces a new resource to be created.`,
				Default: "US",
			},
			"max_time_travel_hours": {
				Type:        schema.TypeString,
				Computed:    true,
				Optional:    true,
				Description: `Defines the time travel window in hours. The value can be from 48 to 168 hours (2 to 7 days).`,
			},
			"resource_tags": {
				Type:     schema.TypeMap,
				Optional: true,
				Description: `The tags attached to this table. Tag keys are globally unique. Tag key is expected to be
in the namespaced format, for example "123456789012/environment" where 123456789012 is the
ID of the parent organization or project resource for this tag key. Tag value is expected
to be the short name, for example "Production". See [Tag definitions](/iam/docs/tags-access-control#definitions)
for more details.`,
				Elem: &schema.Schema{Type: schema.TypeString},
			},
			"storage_billing_model": {
				Type:     schema.TypeString,
				Computed: true,
				Optional: true,
				Description: `Specifies the storage billing model for the dataset.
Set this flag value to LOGICAL to use logical bytes for storage billing,
or to PHYSICAL to use physical bytes instead.

LOGICAL is the default if this flag isn't specified.`,
			},
			// The remaining fields are output-only attributes populated from
			// the API response in resourceBigQueryDatasetRead.
			"creation_time": {
				Type:     schema.TypeInt,
				Computed: true,
				Description: `The time when this dataset was created, in milliseconds since the
epoch.`,
			},
			"effective_labels": {
				Type:        schema.TypeMap,
				Computed:    true,
				Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`,
				Elem:        &schema.Schema{Type: schema.TypeString},
			},
			"etag": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: `A hash of the resource.`,
			},
			"last_modified_time": {
				Type:     schema.TypeInt,
				Computed: true,
				Description: `The date when this dataset or any of its tables was last modified, in
milliseconds since the epoch.`,
			},
			"terraform_labels": {
				Type:     schema.TypeMap,
				Computed: true,
				Description: `The combination of labels configured directly on the resource
and default labels configured on the provider.`,
				Elem: &schema.Schema{Type: schema.TypeString},
			},
			// Virtual field: consumed by the provider's delete logic only,
			// never sent to the API.
			"delete_contents_on_destroy": {
				Type: schema.TypeBool,
				Optional: true,
				Description: `If set to 'true', delete all the tables in the
dataset when destroying the resource; otherwise,
destroying the resource will fail if tables are present.`,
				Default: false,
			},
			"project": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},
			"self_link": {
				Type:     schema.TypeString,
				Computed: true,
			},
		},
		// Decode numeric JSON fields as json.Number so int64 values (e.g.
		// expiration times) do not lose precision through float64.
		UseJSONNumber: true,
	}
}
| |
// bigqueryDatasetAccessSchema returns the nested schema for one element of
// the dataset's "access" set. Each entry grants a single role (or authorized
// dataset/view/routine access) to exactly one grantee type: domain, group,
// iam_member, special_group, user_by_email, dataset, view, or routine.
func bigqueryDatasetAccessSchema() *schema.Resource {
	return &schema.Resource{
		Schema: map[string]*schema.Schema{
			// Authorized dataset access: no "role" is involved; the target
			// dataset's resources (currently views) gain read access here.
			"dataset": {
				Type:        schema.TypeList,
				Optional:    true,
				Description: `Grants all resources of particular types in a particular dataset read access to the current dataset.`,
				MaxItems:    1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"dataset": {
							Type:        schema.TypeList,
							Required:    true,
							Description: `The dataset this entry applies to`,
							MaxItems:    1,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"dataset_id": {
										Type:        schema.TypeString,
										Required:    true,
										Description: `The ID of the dataset containing this table.`,
									},
									"project_id": {
										Type:        schema.TypeString,
										Required:    true,
										Description: `The ID of the project containing this table.`,
									},
								},
							},
						},
						"target_types": {
							Type:     schema.TypeList,
							Required: true,
							Description: `Which resources in the dataset this entry applies to. Currently, only views are supported,
but additional target types may be added in the future. Possible values: VIEWS`,
							Elem: &schema.Schema{
								Type: schema.TypeString,
							},
						},
					},
				},
			},
			"domain": {
				Type:     schema.TypeString,
				Optional: true,
				Description: `A domain to grant access to. Any users signed in with the
domain specified will be granted the specified access`,
			},
			"group_by_email": {
				Type:        schema.TypeString,
				Optional:    true,
				Description: `An email address of a Google Group to grant access to.`,
			},
			"iam_member": {
				Type:     schema.TypeString,
				Optional: true,
				Description: `Some other type of member that appears in the IAM Policy but isn't a user,
group, domain, or special group. For example: 'allUsers'`,
			},
			"role": {
				Type:     schema.TypeString,
				Optional: true,
				Description: `Describes the rights granted to the user specified by the other
member of the access object. Basic, predefined, and custom roles
are supported. Predefined roles that have equivalent basic roles
are swapped by the API to their basic counterparts. See
[official docs](https://cloud.google.com/bigquery/docs/access-control).`,
			},
			// Authorized routine access: like "dataset" above, grants read
			// access without a "role".
			"routine": {
				Type:     schema.TypeList,
				Optional: true,
				Description: `A routine from a different dataset to grant access to. Queries
executed against that routine will have read access to tables in
this dataset. The role field is not required when this field is
set. If that routine is updated by any user, access to the routine
needs to be granted again via an update operation.`,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"dataset_id": {
							Type:        schema.TypeString,
							Required:    true,
							Description: `The ID of the dataset containing this table.`,
						},
						"project_id": {
							Type:        schema.TypeString,
							Required:    true,
							Description: `The ID of the project containing this table.`,
						},
						"routine_id": {
							Type:     schema.TypeString,
							Required: true,
							Description: `The ID of the routine. The ID must contain only letters (a-z,
A-Z), numbers (0-9), or underscores (_). The maximum length
is 256 characters.`,
						},
					},
				},
			},
			"special_group": {
				Type:     schema.TypeString,
				Optional: true,
				Description: `A special group to grant access to. Possible values include:
* 'projectOwners': Owners of the enclosing project.
* 'projectReaders': Readers of the enclosing project.
* 'projectWriters': Writers of the enclosing project.
* 'allAuthenticatedUsers': All authenticated BigQuery users.`,
			},
			"user_by_email": {
				Type:     schema.TypeString,
				Optional: true,
				Description: `An email address of a user to grant access to. For example:
fred@example.com`,
			},
			// Authorized view access: like "dataset" above, grants read
			// access without a "role".
			"view": {
				Type:     schema.TypeList,
				Optional: true,
				Description: `A view from a different dataset to grant access to. Queries
executed against that view will have read access to tables in
this dataset. The role field is not required when this field is
set. If that view is updated by any user, access to the view
needs to be granted again via an update operation.`,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"dataset_id": {
							Type:        schema.TypeString,
							Required:    true,
							Description: `The ID of the dataset containing this table.`,
						},
						"project_id": {
							Type:        schema.TypeString,
							Required:    true,
							Description: `The ID of the project containing this table.`,
						},
						"table_id": {
							Type:     schema.TypeString,
							Required: true,
							Description: `The ID of the table. The ID must contain only letters (a-z,
A-Z), numbers (0-9), or underscores (_). The maximum length
is 1,024 characters.`,
						},
					},
				},
			},
		},
	}
}
| |
// resourceBigQueryDatasetCreate builds the Dataset API request body from the
// Terraform configuration, POSTs it to the BigQuery datasets collection,
// stores the resource ID, and delegates to Read to populate state.
func resourceBigQueryDatasetCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*transport_tpg.Config)
	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
	if err != nil {
		return err
	}

	// Generated expand pattern: each field is converted by its expander and
	// included in the request only when the expanded value is non-empty, or
	// the field is explicitly set / differs from its expanded form.
	obj := make(map[string]interface{})
	maxTimeTravelHoursProp, err := expandBigQueryDatasetMaxTimeTravelHours(d.Get("max_time_travel_hours"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("max_time_travel_hours"); !tpgresource.IsEmptyValue(reflect.ValueOf(maxTimeTravelHoursProp)) && (ok || !reflect.DeepEqual(v, maxTimeTravelHoursProp)) {
		obj["maxTimeTravelHours"] = maxTimeTravelHoursProp
	}
	accessProp, err := expandBigQueryDatasetAccess(d.Get("access"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("access"); !tpgresource.IsEmptyValue(reflect.ValueOf(accessProp)) && (ok || !reflect.DeepEqual(v, accessProp)) {
		obj["access"] = accessProp
	}
	// datasetReference is a collapsed object with no top-level attribute of
	// its own; its expander reads dataset_id (and project) from d directly.
	datasetReferenceProp, err := expandBigQueryDatasetDatasetReference(nil, d, config)
	if err != nil {
		return err
	} else if !tpgresource.IsEmptyValue(reflect.ValueOf(datasetReferenceProp)) {
		obj["datasetReference"] = datasetReferenceProp
	}
	defaultTableExpirationMsProp, err := expandBigQueryDatasetDefaultTableExpirationMs(d.Get("default_table_expiration_ms"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("default_table_expiration_ms"); !tpgresource.IsEmptyValue(reflect.ValueOf(defaultTableExpirationMsProp)) && (ok || !reflect.DeepEqual(v, defaultTableExpirationMsProp)) {
		obj["defaultTableExpirationMs"] = defaultTableExpirationMsProp
	}
	defaultPartitionExpirationMsProp, err := expandBigQueryDatasetDefaultPartitionExpirationMs(d.Get("default_partition_expiration_ms"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("default_partition_expiration_ms"); !tpgresource.IsEmptyValue(reflect.ValueOf(defaultPartitionExpirationMsProp)) && (ok || !reflect.DeepEqual(v, defaultPartitionExpirationMsProp)) {
		obj["defaultPartitionExpirationMs"] = defaultPartitionExpirationMsProp
	}
	descriptionProp, err := expandBigQueryDatasetDescription(d.Get("description"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) {
		obj["description"] = descriptionProp
	}
	externalDatasetReferenceProp, err := expandBigQueryDatasetExternalDatasetReference(d.Get("external_dataset_reference"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("external_dataset_reference"); !tpgresource.IsEmptyValue(reflect.ValueOf(externalDatasetReferenceProp)) && (ok || !reflect.DeepEqual(v, externalDatasetReferenceProp)) {
		obj["externalDatasetReference"] = externalDatasetReferenceProp
	}
	// NOTE: unlike the other fields, friendly_name has no IsEmptyValue guard,
	// so an explicitly configured empty string is still sent to the API.
	friendlyNameProp, err := expandBigQueryDatasetFriendlyName(d.Get("friendly_name"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("friendly_name"); ok || !reflect.DeepEqual(v, friendlyNameProp) {
		obj["friendlyName"] = friendlyNameProp
	}
	locationProp, err := expandBigQueryDatasetLocation(d.Get("location"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("location"); !tpgresource.IsEmptyValue(reflect.ValueOf(locationProp)) && (ok || !reflect.DeepEqual(v, locationProp)) {
		obj["location"] = locationProp
	}
	defaultEncryptionConfigurationProp, err := expandBigQueryDatasetDefaultEncryptionConfiguration(d.Get("default_encryption_configuration"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("default_encryption_configuration"); !tpgresource.IsEmptyValue(reflect.ValueOf(defaultEncryptionConfigurationProp)) && (ok || !reflect.DeepEqual(v, defaultEncryptionConfigurationProp)) {
		obj["defaultEncryptionConfiguration"] = defaultEncryptionConfigurationProp
	}
	isCaseInsensitiveProp, err := expandBigQueryDatasetIsCaseInsensitive(d.Get("is_case_insensitive"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("is_case_insensitive"); !tpgresource.IsEmptyValue(reflect.ValueOf(isCaseInsensitiveProp)) && (ok || !reflect.DeepEqual(v, isCaseInsensitiveProp)) {
		obj["isCaseInsensitive"] = isCaseInsensitiveProp
	}
	defaultCollationProp, err := expandBigQueryDatasetDefaultCollation(d.Get("default_collation"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("default_collation"); !tpgresource.IsEmptyValue(reflect.ValueOf(defaultCollationProp)) && (ok || !reflect.DeepEqual(v, defaultCollationProp)) {
		obj["defaultCollation"] = defaultCollationProp
	}
	storageBillingModelProp, err := expandBigQueryDatasetStorageBillingModel(d.Get("storage_billing_model"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("storage_billing_model"); !tpgresource.IsEmptyValue(reflect.ValueOf(storageBillingModelProp)) && (ok || !reflect.DeepEqual(v, storageBillingModelProp)) {
		obj["storageBillingModel"] = storageBillingModelProp
	}
	resourceTagsProp, err := expandBigQueryDatasetResourceTags(d.Get("resource_tags"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("resource_tags"); !tpgresource.IsEmptyValue(reflect.ValueOf(resourceTagsProp)) && (ok || !reflect.DeepEqual(v, resourceTagsProp)) {
		obj["resourceTags"] = resourceTagsProp
	}
	// The API "labels" field is populated from effective_labels (user labels
	// merged with provider default labels), not from "labels" directly.
	labelsProp, err := expandBigQueryDatasetEffectiveLabels(d.Get("effective_labels"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) {
		obj["labels"] = labelsProp
	}

	url, err := tpgresource.ReplaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets")
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] Creating new Dataset: %#v", obj)
	billingProject := ""

	project, err := tpgresource.GetProject(d, config)
	if err != nil {
		return fmt.Errorf("Error fetching project for Dataset: %s", err)
	}
	billingProject = project

	// err == nil indicates that the billing_project value was found
	if bp, err := tpgresource.GetBillingProject(d, config); err == nil {
		billingProject = bp
	}

	headers := make(http.Header)
	res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
		Config:    config,
		Method:    "POST",
		Project:   billingProject,
		RawURL:    url,
		UserAgent: userAgent,
		Body:      obj,
		Timeout:   d.Timeout(schema.TimeoutCreate),
		Headers:   headers,
	})
	if err != nil {
		return fmt.Errorf("Error creating Dataset: %s", err)
	}

	// Store the ID now
	id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/datasets/{{dataset_id}}")
	if err != nil {
		return fmt.Errorf("Error constructing id: %s", err)
	}
	d.SetId(id)

	log.Printf("[DEBUG] Finished creating Dataset %q: %#v", d.Id(), res)

	return resourceBigQueryDatasetRead(d, meta)
}
| |
| func resourceBigQueryDatasetRead(d *schema.ResourceData, meta interface{}) error { |
| config := meta.(*transport_tpg.Config) |
| userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) |
| if err != nil { |
| return err |
| } |
| |
| url, err := tpgresource.ReplaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}") |
| if err != nil { |
| return err |
| } |
| |
| billingProject := "" |
| |
| project, err := tpgresource.GetProject(d, config) |
| if err != nil { |
| return fmt.Errorf("Error fetching project for Dataset: %s", err) |
| } |
| billingProject = project |
| |
| // err == nil indicates that the billing_project value was found |
| if bp, err := tpgresource.GetBillingProject(d, config); err == nil { |
| billingProject = bp |
| } |
| |
| headers := make(http.Header) |
| res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ |
| Config: config, |
| Method: "GET", |
| Project: billingProject, |
| RawURL: url, |
| UserAgent: userAgent, |
| Headers: headers, |
| }) |
| if err != nil { |
| return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("BigQueryDataset %q", d.Id())) |
| } |
| |
| // Explicitly set virtual fields to default values if unset |
| if _, ok := d.GetOkExists("delete_contents_on_destroy"); !ok { |
| if err := d.Set("delete_contents_on_destroy", false); err != nil { |
| return fmt.Errorf("Error setting delete_contents_on_destroy: %s", err) |
| } |
| } |
| if err := d.Set("project", project); err != nil { |
| return fmt.Errorf("Error reading Dataset: %s", err) |
| } |
| |
| if err := d.Set("max_time_travel_hours", flattenBigQueryDatasetMaxTimeTravelHours(res["maxTimeTravelHours"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Dataset: %s", err) |
| } |
| if err := d.Set("access", flattenBigQueryDatasetAccess(res["access"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Dataset: %s", err) |
| } |
| if err := d.Set("creation_time", flattenBigQueryDatasetCreationTime(res["creationTime"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Dataset: %s", err) |
| } |
| // Terraform must set the top level schema field, but since this object contains collapsed properties |
| // it's difficult to know what the top level should be. Instead we just loop over the map returned from flatten. |
| if flattenedProp := flattenBigQueryDatasetDatasetReference(res["datasetReference"], d, config); flattenedProp != nil { |
| if gerr, ok := flattenedProp.(*googleapi.Error); ok { |
| return fmt.Errorf("Error reading Dataset: %s", gerr) |
| } |
| casted := flattenedProp.([]interface{})[0] |
| if casted != nil { |
| for k, v := range casted.(map[string]interface{}) { |
| if err := d.Set(k, v); err != nil { |
| return fmt.Errorf("Error setting %s: %s", k, err) |
| } |
| } |
| } |
| } |
| if err := d.Set("default_table_expiration_ms", flattenBigQueryDatasetDefaultTableExpirationMs(res["defaultTableExpirationMs"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Dataset: %s", err) |
| } |
| if err := d.Set("default_partition_expiration_ms", flattenBigQueryDatasetDefaultPartitionExpirationMs(res["defaultPartitionExpirationMs"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Dataset: %s", err) |
| } |
| if err := d.Set("description", flattenBigQueryDatasetDescription(res["description"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Dataset: %s", err) |
| } |
| if err := d.Set("etag", flattenBigQueryDatasetEtag(res["etag"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Dataset: %s", err) |
| } |
| if err := d.Set("external_dataset_reference", flattenBigQueryDatasetExternalDatasetReference(res["externalDatasetReference"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Dataset: %s", err) |
| } |
| if err := d.Set("friendly_name", flattenBigQueryDatasetFriendlyName(res["friendlyName"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Dataset: %s", err) |
| } |
| if err := d.Set("labels", flattenBigQueryDatasetLabels(res["labels"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Dataset: %s", err) |
| } |
| if err := d.Set("last_modified_time", flattenBigQueryDatasetLastModifiedTime(res["lastModifiedTime"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Dataset: %s", err) |
| } |
| if err := d.Set("location", flattenBigQueryDatasetLocation(res["location"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Dataset: %s", err) |
| } |
| if err := d.Set("default_encryption_configuration", flattenBigQueryDatasetDefaultEncryptionConfiguration(res["defaultEncryptionConfiguration"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Dataset: %s", err) |
| } |
| if err := d.Set("is_case_insensitive", flattenBigQueryDatasetIsCaseInsensitive(res["isCaseInsensitive"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Dataset: %s", err) |
| } |
| if err := d.Set("default_collation", flattenBigQueryDatasetDefaultCollation(res["defaultCollation"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Dataset: %s", err) |
| } |
| if err := d.Set("storage_billing_model", flattenBigQueryDatasetStorageBillingModel(res["storageBillingModel"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Dataset: %s", err) |
| } |
| if err := d.Set("resource_tags", flattenBigQueryDatasetResourceTags(res["resourceTags"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Dataset: %s", err) |
| } |
| if err := d.Set("terraform_labels", flattenBigQueryDatasetTerraformLabels(res["labels"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Dataset: %s", err) |
| } |
| if err := d.Set("effective_labels", flattenBigQueryDatasetEffectiveLabels(res["labels"], d, config)); err != nil { |
| return fmt.Errorf("Error reading Dataset: %s", err) |
| } |
| if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { |
| return fmt.Errorf("Error reading Dataset: %s", err) |
| } |
| |
| return nil |
| } |
| |
// resourceBigQueryDatasetUpdate pushes the complete desired state of the
// dataset to the BigQuery API with a single PUT request and then re-reads the
// resource to refresh Terraform state. Each updatable field is expanded from
// its Terraform form and included in the body only when the per-field guard
// below decides it should be sent.
func resourceBigQueryDatasetUpdate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*transport_tpg.Config)
	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
	if err != nil {
		return err
	}

	billingProject := ""

	project, err := tpgresource.GetProject(d, config)
	if err != nil {
		return fmt.Errorf("Error fetching project for Dataset: %s", err)
	}
	billingProject = project

	// Build the request body field by field. The common guard pattern is:
	// include the field when its value is non-empty AND it is either set in
	// config or differs from the expanded value.
	obj := make(map[string]interface{})
	maxTimeTravelHoursProp, err := expandBigQueryDatasetMaxTimeTravelHours(d.Get("max_time_travel_hours"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("max_time_travel_hours"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, maxTimeTravelHoursProp)) {
		obj["maxTimeTravelHours"] = maxTimeTravelHoursProp
	}
	accessProp, err := expandBigQueryDatasetAccess(d.Get("access"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("access"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, accessProp)) {
		obj["access"] = accessProp
	}
	// The dataset reference is always derived from the resource itself (nil
	// input), not from a config field.
	datasetReferenceProp, err := expandBigQueryDatasetDatasetReference(nil, d, config)
	if err != nil {
		return err
	} else if !tpgresource.IsEmptyValue(reflect.ValueOf(datasetReferenceProp)) {
		obj["datasetReference"] = datasetReferenceProp
	}
	defaultTableExpirationMsProp, err := expandBigQueryDatasetDefaultTableExpirationMs(d.Get("default_table_expiration_ms"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("default_table_expiration_ms"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultTableExpirationMsProp)) {
		obj["defaultTableExpirationMs"] = defaultTableExpirationMsProp
	}
	defaultPartitionExpirationMsProp, err := expandBigQueryDatasetDefaultPartitionExpirationMs(d.Get("default_partition_expiration_ms"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("default_partition_expiration_ms"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultPartitionExpirationMsProp)) {
		obj["defaultPartitionExpirationMs"] = defaultPartitionExpirationMsProp
	}
	descriptionProp, err := expandBigQueryDatasetDescription(d.Get("description"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) {
		obj["description"] = descriptionProp
	}
	externalDatasetReferenceProp, err := expandBigQueryDatasetExternalDatasetReference(d.Get("external_dataset_reference"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("external_dataset_reference"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, externalDatasetReferenceProp)) {
		obj["externalDatasetReference"] = externalDatasetReferenceProp
	}
	friendlyNameProp, err := expandBigQueryDatasetFriendlyName(d.Get("friendly_name"), d, config)
	if err != nil {
		return err
		// NOTE(review): unlike the other fields, friendly_name has no
		// IsEmptyValue guard, so an empty value is still sent — presumably so
		// a cleared friendly_name is unset server-side; confirm against the
		// generator config before changing.
	} else if v, ok := d.GetOkExists("friendly_name"); ok || !reflect.DeepEqual(v, friendlyNameProp) {
		obj["friendlyName"] = friendlyNameProp
	}
	locationProp, err := expandBigQueryDatasetLocation(d.Get("location"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("location"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, locationProp)) {
		obj["location"] = locationProp
	}
	defaultEncryptionConfigurationProp, err := expandBigQueryDatasetDefaultEncryptionConfiguration(d.Get("default_encryption_configuration"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("default_encryption_configuration"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultEncryptionConfigurationProp)) {
		obj["defaultEncryptionConfiguration"] = defaultEncryptionConfigurationProp
	}
	isCaseInsensitiveProp, err := expandBigQueryDatasetIsCaseInsensitive(d.Get("is_case_insensitive"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("is_case_insensitive"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, isCaseInsensitiveProp)) {
		obj["isCaseInsensitive"] = isCaseInsensitiveProp
	}
	defaultCollationProp, err := expandBigQueryDatasetDefaultCollation(d.Get("default_collation"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("default_collation"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, defaultCollationProp)) {
		obj["defaultCollation"] = defaultCollationProp
	}
	storageBillingModelProp, err := expandBigQueryDatasetStorageBillingModel(d.Get("storage_billing_model"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("storage_billing_model"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, storageBillingModelProp)) {
		obj["storageBillingModel"] = storageBillingModelProp
	}
	resourceTagsProp, err := expandBigQueryDatasetResourceTags(d.Get("resource_tags"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("resource_tags"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, resourceTagsProp)) {
		obj["resourceTags"] = resourceTagsProp
	}
	// The API's labels field is driven by effective_labels, which already
	// merges user labels with provider default labels.
	labelsProp, err := expandBigQueryDatasetEffectiveLabels(d.Get("effective_labels"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) {
		obj["labels"] = labelsProp
	}

	url, err := tpgresource.ReplaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}")
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] Updating Dataset %q: %#v", d.Id(), obj)
	headers := make(http.Header)

	// err == nil indicates that the billing_project value was found
	if bp, err := tpgresource.GetBillingProject(d, config); err == nil {
		billingProject = bp
	}

	// Full replacement update: PUT the assembled body.
	res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
		Config:    config,
		Method:    "PUT",
		Project:   billingProject,
		RawURL:    url,
		UserAgent: userAgent,
		Body:      obj,
		Timeout:   d.Timeout(schema.TimeoutUpdate),
		Headers:   headers,
	})

	if err != nil {
		return fmt.Errorf("Error updating Dataset %q: %s", d.Id(), err)
	} else {
		log.Printf("[DEBUG] Finished updating Dataset %q: %#v", d.Id(), res)
	}

	// Re-read so state reflects any server-side normalization.
	return resourceBigQueryDatasetRead(d, meta)
}
| |
// resourceBigQueryDatasetDelete deletes the dataset. Whether tables inside
// the dataset are removed as well is controlled by the Terraform-only
// delete_contents_on_destroy field, forwarded as the deleteContents query
// parameter on the DELETE URL.
func resourceBigQueryDatasetDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*transport_tpg.Config)
	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
	if err != nil {
		return err
	}

	billingProject := ""

	project, err := tpgresource.GetProject(d, config)
	if err != nil {
		return fmt.Errorf("Error fetching project for Dataset: %s", err)
	}
	billingProject = project

	url, err := tpgresource.ReplaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}?deleteContents={{delete_contents_on_destroy}}")
	if err != nil {
		return err
	}

	// DELETE requests carry no body; obj stays nil.
	var obj map[string]interface{}

	// err == nil indicates that the billing_project value was found
	if bp, err := tpgresource.GetBillingProject(d, config); err == nil {
		billingProject = bp
	}

	headers := make(http.Header)

	log.Printf("[DEBUG] Deleting Dataset %q", d.Id())
	res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
		Config:    config,
		Method:    "DELETE",
		Project:   billingProject,
		RawURL:    url,
		UserAgent: userAgent,
		Body:      obj,
		Timeout:   d.Timeout(schema.TimeoutDelete),
		Headers:   headers,
	})
	if err != nil {
		// Delegate not-found handling to the shared transport helper.
		return transport_tpg.HandleNotFoundError(err, d, "Dataset")
	}

	log.Printf("[DEBUG] Finished deleting Dataset %q: %#v", d.Id(), res)
	return nil
}
| |
| func resourceBigQueryDatasetImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { |
| config := meta.(*transport_tpg.Config) |
| if err := tpgresource.ParseImportId([]string{ |
| "^projects/(?P<project>[^/]+)/datasets/(?P<dataset_id>[^/]+)$", |
| "^(?P<project>[^/]+)/(?P<dataset_id>[^/]+)$", |
| "^(?P<dataset_id>[^/]+)$", |
| }, d, config); err != nil { |
| return nil, err |
| } |
| |
| // Replace import id for the resource id |
| id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/datasets/{{dataset_id}}") |
| if err != nil { |
| return nil, fmt.Errorf("Error constructing id: %s", err) |
| } |
| d.SetId(id) |
| |
| // Explicitly set virtual fields to default values on import |
| if err := d.Set("delete_contents_on_destroy", false); err != nil { |
| return nil, fmt.Errorf("Error setting delete_contents_on_destroy: %s", err) |
| } |
| |
| return []*schema.ResourceData{d}, nil |
| } |
| |
// flattenBigQueryDatasetMaxTimeTravelHours passes the API value through unchanged.
func flattenBigQueryDatasetMaxTimeTravelHours(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}
| |
| func flattenBigQueryDatasetAccess(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| if v == nil { |
| return v |
| } |
| l := v.([]interface{}) |
| transformed := schema.NewSet(schema.HashResource(bigqueryDatasetAccessSchema()), []interface{}{}) |
| for _, raw := range l { |
| original := raw.(map[string]interface{}) |
| if len(original) < 1 { |
| // Do not include empty json objects coming back from the api |
| continue |
| } |
| transformed.Add(map[string]interface{}{ |
| "domain": flattenBigQueryDatasetAccessDomain(original["domain"], d, config), |
| "group_by_email": flattenBigQueryDatasetAccessGroupByEmail(original["groupByEmail"], d, config), |
| "role": flattenBigQueryDatasetAccessRole(original["role"], d, config), |
| "special_group": flattenBigQueryDatasetAccessSpecialGroup(original["specialGroup"], d, config), |
| "iam_member": flattenBigQueryDatasetAccessIamMember(original["iamMember"], d, config), |
| "user_by_email": flattenBigQueryDatasetAccessUserByEmail(original["userByEmail"], d, config), |
| "view": flattenBigQueryDatasetAccessView(original["view"], d, config), |
| "dataset": flattenBigQueryDatasetAccessDataset(original["dataset"], d, config), |
| "routine": flattenBigQueryDatasetAccessRoutine(original["routine"], d, config), |
| }) |
| } |
| return transformed |
| } |
// flattenBigQueryDatasetAccessDomain passes the API value through unchanged.
func flattenBigQueryDatasetAccessDomain(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}

// flattenBigQueryDatasetAccessGroupByEmail passes the API value through unchanged.
func flattenBigQueryDatasetAccessGroupByEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}

// flattenBigQueryDatasetAccessRole passes the API value through unchanged.
func flattenBigQueryDatasetAccessRole(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}

// flattenBigQueryDatasetAccessSpecialGroup passes the API value through unchanged.
func flattenBigQueryDatasetAccessSpecialGroup(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}

// flattenBigQueryDatasetAccessIamMember passes the API value through unchanged.
func flattenBigQueryDatasetAccessIamMember(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}

// flattenBigQueryDatasetAccessUserByEmail passes the API value through unchanged.
func flattenBigQueryDatasetAccessUserByEmail(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}
| |
| func flattenBigQueryDatasetAccessView(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| if v == nil { |
| return nil |
| } |
| original := v.(map[string]interface{}) |
| if len(original) == 0 { |
| return nil |
| } |
| transformed := make(map[string]interface{}) |
| transformed["dataset_id"] = |
| flattenBigQueryDatasetAccessViewDatasetId(original["datasetId"], d, config) |
| transformed["project_id"] = |
| flattenBigQueryDatasetAccessViewProjectId(original["projectId"], d, config) |
| transformed["table_id"] = |
| flattenBigQueryDatasetAccessViewTableId(original["tableId"], d, config) |
| return []interface{}{transformed} |
| } |
// flattenBigQueryDatasetAccessViewDatasetId passes the API value through unchanged.
func flattenBigQueryDatasetAccessViewDatasetId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}

// flattenBigQueryDatasetAccessViewProjectId passes the API value through unchanged.
func flattenBigQueryDatasetAccessViewProjectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}

// flattenBigQueryDatasetAccessViewTableId passes the API value through unchanged.
func flattenBigQueryDatasetAccessViewTableId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}
| |
| func flattenBigQueryDatasetAccessDataset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| if v == nil { |
| return nil |
| } |
| original := v.(map[string]interface{}) |
| if len(original) == 0 { |
| return nil |
| } |
| transformed := make(map[string]interface{}) |
| transformed["dataset"] = |
| flattenBigQueryDatasetAccessDatasetDataset(original["dataset"], d, config) |
| transformed["target_types"] = |
| flattenBigQueryDatasetAccessDatasetTargetTypes(original["targetTypes"], d, config) |
| return []interface{}{transformed} |
| } |
| func flattenBigQueryDatasetAccessDatasetDataset(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| if v == nil { |
| return nil |
| } |
| original := v.(map[string]interface{}) |
| if len(original) == 0 { |
| return nil |
| } |
| transformed := make(map[string]interface{}) |
| transformed["dataset_id"] = |
| flattenBigQueryDatasetAccessDatasetDatasetDatasetId(original["datasetId"], d, config) |
| transformed["project_id"] = |
| flattenBigQueryDatasetAccessDatasetDatasetProjectId(original["projectId"], d, config) |
| return []interface{}{transformed} |
| } |
// flattenBigQueryDatasetAccessDatasetDatasetDatasetId passes the API value through unchanged.
func flattenBigQueryDatasetAccessDatasetDatasetDatasetId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}

// flattenBigQueryDatasetAccessDatasetDatasetProjectId passes the API value through unchanged.
func flattenBigQueryDatasetAccessDatasetDatasetProjectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}

// flattenBigQueryDatasetAccessDatasetTargetTypes passes the API value through unchanged.
func flattenBigQueryDatasetAccessDatasetTargetTypes(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}
| |
| func flattenBigQueryDatasetAccessRoutine(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| if v == nil { |
| return nil |
| } |
| original := v.(map[string]interface{}) |
| if len(original) == 0 { |
| return nil |
| } |
| transformed := make(map[string]interface{}) |
| transformed["dataset_id"] = |
| flattenBigQueryDatasetAccessRoutineDatasetId(original["datasetId"], d, config) |
| transformed["project_id"] = |
| flattenBigQueryDatasetAccessRoutineProjectId(original["projectId"], d, config) |
| transformed["routine_id"] = |
| flattenBigQueryDatasetAccessRoutineRoutineId(original["routineId"], d, config) |
| return []interface{}{transformed} |
| } |
// flattenBigQueryDatasetAccessRoutineDatasetId passes the API value through unchanged.
func flattenBigQueryDatasetAccessRoutineDatasetId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}

// flattenBigQueryDatasetAccessRoutineProjectId passes the API value through unchanged.
func flattenBigQueryDatasetAccessRoutineProjectId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}

// flattenBigQueryDatasetAccessRoutineRoutineId passes the API value through unchanged.
func flattenBigQueryDatasetAccessRoutineRoutineId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}
| |
| func flattenBigQueryDatasetCreationTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| // Handles the string fixed64 format |
| if strVal, ok := v.(string); ok { |
| if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { |
| return intVal |
| } |
| } |
| |
| // number values are represented as float64 |
| if floatVal, ok := v.(float64); ok { |
| intVal := int(floatVal) |
| return intVal |
| } |
| |
| return v // let terraform core handle it otherwise |
| } |
| |
| func flattenBigQueryDatasetDatasetReference(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| if v == nil { |
| return nil |
| } |
| original := v.(map[string]interface{}) |
| if len(original) == 0 { |
| return nil |
| } |
| transformed := make(map[string]interface{}) |
| transformed["dataset_id"] = |
| flattenBigQueryDatasetDatasetReferenceDatasetId(original["datasetId"], d, config) |
| return []interface{}{transformed} |
| } |
// flattenBigQueryDatasetDatasetReferenceDatasetId passes the API value through unchanged.
func flattenBigQueryDatasetDatasetReferenceDatasetId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}
| |
| func flattenBigQueryDatasetDefaultTableExpirationMs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| // Handles the string fixed64 format |
| if strVal, ok := v.(string); ok { |
| if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { |
| return intVal |
| } |
| } |
| |
| // number values are represented as float64 |
| if floatVal, ok := v.(float64); ok { |
| intVal := int(floatVal) |
| return intVal |
| } |
| |
| return v // let terraform core handle it otherwise |
| } |
| |
| func flattenBigQueryDatasetDefaultPartitionExpirationMs(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| // Handles the string fixed64 format |
| if strVal, ok := v.(string); ok { |
| if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { |
| return intVal |
| } |
| } |
| |
| // number values are represented as float64 |
| if floatVal, ok := v.(float64); ok { |
| intVal := int(floatVal) |
| return intVal |
| } |
| |
| return v // let terraform core handle it otherwise |
| } |
| |
// flattenBigQueryDatasetDescription passes the API value through unchanged.
func flattenBigQueryDatasetDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}

// flattenBigQueryDatasetEtag passes the API value through unchanged.
func flattenBigQueryDatasetEtag(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}
| |
| func flattenBigQueryDatasetExternalDatasetReference(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| if v == nil { |
| return nil |
| } |
| original := v.(map[string]interface{}) |
| if len(original) == 0 { |
| return nil |
| } |
| transformed := make(map[string]interface{}) |
| transformed["external_source"] = |
| flattenBigQueryDatasetExternalDatasetReferenceExternalSource(original["externalSource"], d, config) |
| transformed["connection"] = |
| flattenBigQueryDatasetExternalDatasetReferenceConnection(original["connection"], d, config) |
| return []interface{}{transformed} |
| } |
// flattenBigQueryDatasetExternalDatasetReferenceExternalSource passes the API value through unchanged.
func flattenBigQueryDatasetExternalDatasetReferenceExternalSource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}

// flattenBigQueryDatasetExternalDatasetReferenceConnection passes the API value through unchanged.
func flattenBigQueryDatasetExternalDatasetReferenceConnection(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}

// flattenBigQueryDatasetFriendlyName passes the API value through unchanged.
func flattenBigQueryDatasetFriendlyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}
| |
| func flattenBigQueryDatasetLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| if v == nil { |
| return v |
| } |
| |
| transformed := make(map[string]interface{}) |
| if l, ok := d.GetOkExists("labels"); ok { |
| for k := range l.(map[string]interface{}) { |
| transformed[k] = v.(map[string]interface{})[k] |
| } |
| } |
| |
| return transformed |
| } |
| |
| func flattenBigQueryDatasetLastModifiedTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| // Handles the string fixed64 format |
| if strVal, ok := v.(string); ok { |
| if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { |
| return intVal |
| } |
| } |
| |
| // number values are represented as float64 |
| if floatVal, ok := v.(float64); ok { |
| intVal := int(floatVal) |
| return intVal |
| } |
| |
| return v // let terraform core handle it otherwise |
| } |
| |
| // Older Datasets in BigQuery have no Location set in the API response. This may be an issue when importing |
| // datasets created before BigQuery was available in multiple zones. We can safely assume that these datasets |
| // are in the US, as this was the default at the time. |
| func flattenBigQueryDatasetLocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| if v == nil { |
| return "US" |
| } |
| return v |
| } |
| |
| func flattenBigQueryDatasetDefaultEncryptionConfiguration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| if v == nil { |
| return nil |
| } |
| original := v.(map[string]interface{}) |
| if len(original) == 0 { |
| return nil |
| } |
| transformed := make(map[string]interface{}) |
| transformed["kms_key_name"] = |
| flattenBigQueryDatasetDefaultEncryptionConfigurationKmsKeyName(original["kmsKeyName"], d, config) |
| return []interface{}{transformed} |
| } |
// flattenBigQueryDatasetDefaultEncryptionConfigurationKmsKeyName passes the API value through unchanged.
func flattenBigQueryDatasetDefaultEncryptionConfigurationKmsKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}

// flattenBigQueryDatasetIsCaseInsensitive passes the API value through unchanged.
func flattenBigQueryDatasetIsCaseInsensitive(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}

// flattenBigQueryDatasetDefaultCollation passes the API value through unchanged.
func flattenBigQueryDatasetDefaultCollation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}

// flattenBigQueryDatasetStorageBillingModel passes the API value through unchanged.
func flattenBigQueryDatasetStorageBillingModel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}

// flattenBigQueryDatasetResourceTags passes the API value through unchanged.
func flattenBigQueryDatasetResourceTags(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}
| |
| func flattenBigQueryDatasetTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { |
| if v == nil { |
| return v |
| } |
| |
| transformed := make(map[string]interface{}) |
| if l, ok := d.GetOkExists("terraform_labels"); ok { |
| for k := range l.(map[string]interface{}) { |
| transformed[k] = v.(map[string]interface{})[k] |
| } |
| } |
| |
| return transformed |
| } |
| |
// flattenBigQueryDatasetEffectiveLabels passes the API's full label map through unchanged.
func flattenBigQueryDatasetEffectiveLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}

// expandBigQueryDatasetMaxTimeTravelHours passes the configured value through unchanged.
func expandBigQueryDatasetMaxTimeTravelHours(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	return v, nil
}
| |
// expandBigQueryDatasetAccess converts the schema.Set of access entries into
// the list-of-objects form the BigQuery API expects. Each sub-field is
// expanded individually and included only when non-empty.
func expandBigQueryDatasetAccess(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	// The schema stores access as a set; the API wants an ordered list.
	v = v.(*schema.Set).List()
	l := v.([]interface{})
	req := make([]interface{}, 0, len(l))
	for _, raw := range l {
		if raw == nil {
			continue
		}
		original := raw.(map[string]interface{})
		transformed := make(map[string]interface{})

		transformedDomain, err := expandBigQueryDatasetAccessDomain(original["domain"], d, config)
		if err != nil {
			return nil, err
		} else if val := reflect.ValueOf(transformedDomain); val.IsValid() && !tpgresource.IsEmptyValue(val) {
			transformed["domain"] = transformedDomain
		}

		transformedGroupByEmail, err := expandBigQueryDatasetAccessGroupByEmail(original["group_by_email"], d, config)
		if err != nil {
			return nil, err
		} else if val := reflect.ValueOf(transformedGroupByEmail); val.IsValid() && !tpgresource.IsEmptyValue(val) {
			transformed["groupByEmail"] = transformedGroupByEmail
		}

		transformedRole, err := expandBigQueryDatasetAccessRole(original["role"], d, config)
		if err != nil {
			return nil, err
		} else if val := reflect.ValueOf(transformedRole); val.IsValid() && !tpgresource.IsEmptyValue(val) {
			transformed["role"] = transformedRole
		}

		transformedSpecialGroup, err := expandBigQueryDatasetAccessSpecialGroup(original["special_group"], d, config)
		if err != nil {
			return nil, err
		} else if val := reflect.ValueOf(transformedSpecialGroup); val.IsValid() && !tpgresource.IsEmptyValue(val) {
			transformed["specialGroup"] = transformedSpecialGroup
		}

		transformedIamMember, err := expandBigQueryDatasetAccessIamMember(original["iam_member"], d, config)
		if err != nil {
			return nil, err
		} else if val := reflect.ValueOf(transformedIamMember); val.IsValid() && !tpgresource.IsEmptyValue(val) {
			transformed["iamMember"] = transformedIamMember
		}

		transformedUserByEmail, err := expandBigQueryDatasetAccessUserByEmail(original["user_by_email"], d, config)
		if err != nil {
			return nil, err
		} else if val := reflect.ValueOf(transformedUserByEmail); val.IsValid() && !tpgresource.IsEmptyValue(val) {
			transformed["userByEmail"] = transformedUserByEmail
		}

		transformedView, err := expandBigQueryDatasetAccessView(original["view"], d, config)
		if err != nil {
			return nil, err
		} else if val := reflect.ValueOf(transformedView); val.IsValid() && !tpgresource.IsEmptyValue(val) {
			transformed["view"] = transformedView
		}

		transformedDataset, err := expandBigQueryDatasetAccessDataset(original["dataset"], d, config)
		if err != nil {
			return nil, err
		} else if val := reflect.ValueOf(transformedDataset); val.IsValid() && !tpgresource.IsEmptyValue(val) {
			transformed["dataset"] = transformedDataset
		}

		transformedRoutine, err := expandBigQueryDatasetAccessRoutine(original["routine"], d, config)
		if err != nil {
			return nil, err
		} else if val := reflect.ValueOf(transformedRoutine); val.IsValid() && !tpgresource.IsEmptyValue(val) {
			transformed["routine"] = transformedRoutine
		}

		req = append(req, transformed)
	}
	return req, nil
}
| |
// expandBigQueryDatasetAccessDomain passes the configured value through unchanged.
func expandBigQueryDatasetAccessDomain(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	return v, nil
}

// expandBigQueryDatasetAccessGroupByEmail passes the configured value through unchanged.
func expandBigQueryDatasetAccessGroupByEmail(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	return v, nil
}

// expandBigQueryDatasetAccessRole passes the configured value through unchanged.
func expandBigQueryDatasetAccessRole(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	return v, nil
}

// expandBigQueryDatasetAccessSpecialGroup passes the configured value through unchanged.
func expandBigQueryDatasetAccessSpecialGroup(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	return v, nil
}

// expandBigQueryDatasetAccessIamMember passes the configured value through unchanged.
func expandBigQueryDatasetAccessIamMember(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	return v, nil
}

// expandBigQueryDatasetAccessUserByEmail passes the configured value through unchanged.
func expandBigQueryDatasetAccessUserByEmail(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	return v, nil
}
| |
// expandBigQueryDatasetAccessView converts the schema's single-element view
// list into the API's object form; a missing or nil element expands to nil.
func expandBigQueryDatasetAccessView(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	l := v.([]interface{})
	if len(l) == 0 || l[0] == nil {
		return nil, nil
	}
	raw := l[0]
	original := raw.(map[string]interface{})
	transformed := make(map[string]interface{})

	transformedDatasetId, err := expandBigQueryDatasetAccessViewDatasetId(original["dataset_id"], d, config)
	if err != nil {
		return nil, err
	} else if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !tpgresource.IsEmptyValue(val) {
		transformed["datasetId"] = transformedDatasetId
	}

	transformedProjectId, err := expandBigQueryDatasetAccessViewProjectId(original["project_id"], d, config)
	if err != nil {
		return nil, err
	} else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) {
		transformed["projectId"] = transformedProjectId
	}

	transformedTableId, err := expandBigQueryDatasetAccessViewTableId(original["table_id"], d, config)
	if err != nil {
		return nil, err
	} else if val := reflect.ValueOf(transformedTableId); val.IsValid() && !tpgresource.IsEmptyValue(val) {
		transformed["tableId"] = transformedTableId
	}

	return transformed, nil
}
| |
// expandBigQueryDatasetAccessViewDatasetId passes the configured value through unchanged.
func expandBigQueryDatasetAccessViewDatasetId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	return v, nil
}

// expandBigQueryDatasetAccessViewProjectId passes the configured value through unchanged.
func expandBigQueryDatasetAccessViewProjectId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	return v, nil
}

// expandBigQueryDatasetAccessViewTableId passes the configured value through unchanged.
func expandBigQueryDatasetAccessViewTableId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	return v, nil
}
| |
// expandBigQueryDatasetAccessDataset converts the schema's single-element
// authorized-dataset list into the API's object form; a missing or nil
// element expands to nil.
func expandBigQueryDatasetAccessDataset(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	l := v.([]interface{})
	if len(l) == 0 || l[0] == nil {
		return nil, nil
	}
	raw := l[0]
	original := raw.(map[string]interface{})
	transformed := make(map[string]interface{})

	transformedDataset, err := expandBigQueryDatasetAccessDatasetDataset(original["dataset"], d, config)
	if err != nil {
		return nil, err
	} else if val := reflect.ValueOf(transformedDataset); val.IsValid() && !tpgresource.IsEmptyValue(val) {
		transformed["dataset"] = transformedDataset
	}

	transformedTargetTypes, err := expandBigQueryDatasetAccessDatasetTargetTypes(original["target_types"], d, config)
	if err != nil {
		return nil, err
	} else if val := reflect.ValueOf(transformedTargetTypes); val.IsValid() && !tpgresource.IsEmptyValue(val) {
		transformed["targetTypes"] = transformedTargetTypes
	}

	return transformed, nil
}
| |
// expandBigQueryDatasetAccessDatasetDataset converts the nested dataset
// reference of an authorized-dataset entry into API form; a missing or nil
// element expands to nil.
func expandBigQueryDatasetAccessDatasetDataset(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	l := v.([]interface{})
	if len(l) == 0 || l[0] == nil {
		return nil, nil
	}
	raw := l[0]
	original := raw.(map[string]interface{})
	transformed := make(map[string]interface{})

	transformedDatasetId, err := expandBigQueryDatasetAccessDatasetDatasetDatasetId(original["dataset_id"], d, config)
	if err != nil {
		return nil, err
	} else if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !tpgresource.IsEmptyValue(val) {
		transformed["datasetId"] = transformedDatasetId
	}

	transformedProjectId, err := expandBigQueryDatasetAccessDatasetDatasetProjectId(original["project_id"], d, config)
	if err != nil {
		return nil, err
	} else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) {
		transformed["projectId"] = transformedProjectId
	}

	return transformed, nil
}
| |
// expandBigQueryDatasetAccessDatasetDatasetDatasetId passes the configured value through unchanged.
func expandBigQueryDatasetAccessDatasetDatasetDatasetId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	return v, nil
}

// expandBigQueryDatasetAccessDatasetDatasetProjectId passes the configured value through unchanged.
func expandBigQueryDatasetAccessDatasetDatasetProjectId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	return v, nil
}

// expandBigQueryDatasetAccessDatasetTargetTypes passes the configured value through unchanged.
func expandBigQueryDatasetAccessDatasetTargetTypes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	return v, nil
}
| |
// expandBigQueryDatasetAccessRoutine converts the schema's single-element
// authorized-routine list into the API's object form; a missing or nil
// element expands to nil.
func expandBigQueryDatasetAccessRoutine(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	l := v.([]interface{})
	if len(l) == 0 || l[0] == nil {
		return nil, nil
	}
	raw := l[0]
	original := raw.(map[string]interface{})
	transformed := make(map[string]interface{})

	transformedDatasetId, err := expandBigQueryDatasetAccessRoutineDatasetId(original["dataset_id"], d, config)
	if err != nil {
		return nil, err
	} else if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !tpgresource.IsEmptyValue(val) {
		transformed["datasetId"] = transformedDatasetId
	}

	transformedProjectId, err := expandBigQueryDatasetAccessRoutineProjectId(original["project_id"], d, config)
	if err != nil {
		return nil, err
	} else if val := reflect.ValueOf(transformedProjectId); val.IsValid() && !tpgresource.IsEmptyValue(val) {
		transformed["projectId"] = transformedProjectId
	}

	transformedRoutineId, err := expandBigQueryDatasetAccessRoutineRoutineId(original["routine_id"], d, config)
	if err != nil {
		return nil, err
	} else if val := reflect.ValueOf(transformedRoutineId); val.IsValid() && !tpgresource.IsEmptyValue(val) {
		transformed["routineId"] = transformedRoutineId
	}

	return transformed, nil
}
| |
// expandBigQueryDatasetAccessRoutineDatasetId passes the configured value through unchanged.
func expandBigQueryDatasetAccessRoutineDatasetId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	return v, nil
}

// expandBigQueryDatasetAccessRoutineProjectId passes the configured value through unchanged.
func expandBigQueryDatasetAccessRoutineProjectId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	return v, nil
}

// expandBigQueryDatasetAccessRoutineRoutineId passes the configured value through unchanged.
func expandBigQueryDatasetAccessRoutineRoutineId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	return v, nil
}
| |
// expandBigQueryDatasetDatasetReference builds the API datasetReference
// object. Note it ignores v and always reads dataset_id directly from the
// resource data (callers pass nil).
func expandBigQueryDatasetDatasetReference(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	transformed := make(map[string]interface{})
	transformedDatasetId, err := expandBigQueryDatasetDatasetReferenceDatasetId(d.Get("dataset_id"), d, config)
	if err != nil {
		return nil, err
	} else if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !tpgresource.IsEmptyValue(val) {
		transformed["datasetId"] = transformedDatasetId
	}

	return transformed, nil
}

// expandBigQueryDatasetDatasetReferenceDatasetId passes the configured value through unchanged.
func expandBigQueryDatasetDatasetReferenceDatasetId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	return v, nil
}
| |
| func expandBigQueryDatasetDefaultTableExpirationMs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| return v, nil |
| } |
| |
| func expandBigQueryDatasetDefaultPartitionExpirationMs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| return v, nil |
| } |
| |
| func expandBigQueryDatasetDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| return v, nil |
| } |
| |
| func expandBigQueryDatasetExternalDatasetReference(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| l := v.([]interface{}) |
| if len(l) == 0 || l[0] == nil { |
| return nil, nil |
| } |
| raw := l[0] |
| original := raw.(map[string]interface{}) |
| transformed := make(map[string]interface{}) |
| |
| transformedExternalSource, err := expandBigQueryDatasetExternalDatasetReferenceExternalSource(original["external_source"], d, config) |
| if err != nil { |
| return nil, err |
| } else if val := reflect.ValueOf(transformedExternalSource); val.IsValid() && !tpgresource.IsEmptyValue(val) { |
| transformed["externalSource"] = transformedExternalSource |
| } |
| |
| transformedConnection, err := expandBigQueryDatasetExternalDatasetReferenceConnection(original["connection"], d, config) |
| if err != nil { |
| return nil, err |
| } else if val := reflect.ValueOf(transformedConnection); val.IsValid() && !tpgresource.IsEmptyValue(val) { |
| transformed["connection"] = transformedConnection |
| } |
| |
| return transformed, nil |
| } |
| |
| func expandBigQueryDatasetExternalDatasetReferenceExternalSource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| return v, nil |
| } |
| |
| func expandBigQueryDatasetExternalDatasetReferenceConnection(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| return v, nil |
| } |
| |
| func expandBigQueryDatasetFriendlyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| return v, nil |
| } |
| |
| func expandBigQueryDatasetLocation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| return v, nil |
| } |
| |
| func expandBigQueryDatasetDefaultEncryptionConfiguration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| l := v.([]interface{}) |
| if len(l) == 0 || l[0] == nil { |
| return nil, nil |
| } |
| raw := l[0] |
| original := raw.(map[string]interface{}) |
| transformed := make(map[string]interface{}) |
| |
| transformedKmsKeyName, err := expandBigQueryDatasetDefaultEncryptionConfigurationKmsKeyName(original["kms_key_name"], d, config) |
| if err != nil { |
| return nil, err |
| } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { |
| transformed["kmsKeyName"] = transformedKmsKeyName |
| } |
| |
| return transformed, nil |
| } |
| |
| func expandBigQueryDatasetDefaultEncryptionConfigurationKmsKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| return v, nil |
| } |
| |
| func expandBigQueryDatasetIsCaseInsensitive(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| return v, nil |
| } |
| |
| func expandBigQueryDatasetDefaultCollation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| return v, nil |
| } |
| |
| func expandBigQueryDatasetStorageBillingModel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { |
| return v, nil |
| } |
| |
| func expandBigQueryDatasetResourceTags(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { |
| if v == nil { |
| return map[string]string{}, nil |
| } |
| m := make(map[string]string) |
| for k, val := range v.(map[string]interface{}) { |
| m[k] = val.(string) |
| } |
| return m, nil |
| } |
| |
| func expandBigQueryDatasetEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { |
| if v == nil { |
| return map[string]string{}, nil |
| } |
| m := make(map[string]string) |
| for k, val := range v.(map[string]interface{}) { |
| m[k] = val.(string) |
| } |
| return m, nil |
| } |