---
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** Type: MMv1 ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file in
# .github/CONTRIBUTING.md.
#
# ----------------------------------------------------------------------------
subcategory: "Datastream"
description: |-
A resource representing streaming data from a source to a destination.
---
# google_datastream_stream
A resource representing streaming data from a source to a destination.
To get more information about Stream, see:
* [API documentation](https://cloud.google.com/datastream/docs/reference/rest/v1/projects.locations.streams)
* How-to Guides
* [Official Documentation](https://cloud.google.com/datastream/docs/create-a-stream)
<div class = "oics-button" style="float: right; margin: 0 0 -15px">
<a href="https://console.cloud.google.com/cloudshell/open?cloudshell_git_repo=https%3A%2F%2Fgithub.com%2Fterraform-google-modules%2Fdocs-examples.git&cloudshell_image=gcr.io%2Fcloudshell-images%2Fcloudshell%3Alatest&cloudshell_print=.%2Fmotd&cloudshell_tutorial=.%2Ftutorial.md&cloudshell_working_dir=datastream_stream_full&open_in_editor=main.tf" target="_blank">
<img alt="Open in Cloud Shell" src="//gstatic.com/cloudssh/images/open-btn.svg" style="max-height: 44px; margin: 32px auto; max-width: 100%;">
</a>
</div>
## Example Usage - Datastream Stream Full
```hcl
data "google_project" "project" {
}
resource "google_sql_database_instance" "instance" {
name = "my-instance"
database_version = "MYSQL_8_0"
region = "us-central1"
settings {
tier = "db-f1-micro"
backup_configuration {
enabled = true
binary_log_enabled = true
}
ip_configuration {
// Datastream IPs will vary by region.
authorized_networks {
value = "34.71.242.81"
}
authorized_networks {
value = "34.72.28.29"
}
authorized_networks {
value = "34.67.6.157"
}
authorized_networks {
value = "34.67.234.134"
}
authorized_networks {
value = "34.72.239.218"
}
}
}
deletion_protection = true
}
resource "google_sql_database" "db" {
instance = google_sql_database_instance.instance.name
name = "db"
}
resource "random_password" "pwd" {
length = 16
special = false
}
resource "google_sql_user" "user" {
name = "user"
instance = google_sql_database_instance.instance.name
host = "%"
password = random_password.pwd.result
}
resource "google_datastream_connection_profile" "source_connection_profile" {
display_name = "Source connection profile"
location = "us-central1"
connection_profile_id = "source-profile"
mysql_profile {
hostname = google_sql_database_instance.instance.public_ip_address
username = google_sql_user.user.name
password = google_sql_user.user.password
}
}
resource "google_storage_bucket" "bucket" {
name = "my-bucket"
location = "US"
uniform_bucket_level_access = true
}
resource "google_storage_bucket_iam_member" "viewer" {
bucket = google_storage_bucket.bucket.name
role = "roles/storage.objectViewer"
member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-datastream.iam.gserviceaccount.com"
}
resource "google_storage_bucket_iam_member" "creator" {
bucket = google_storage_bucket.bucket.name
role = "roles/storage.objectCreator"
member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-datastream.iam.gserviceaccount.com"
}
resource "google_storage_bucket_iam_member" "reader" {
bucket = google_storage_bucket.bucket.name
role = "roles/storage.legacyBucketReader"
member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-datastream.iam.gserviceaccount.com"
}
resource "google_kms_crypto_key_iam_member" "key_user" {
crypto_key_id = "kms-name"
role = "roles/cloudkms.cryptoKeyEncrypterDecrypter"
member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-datastream.iam.gserviceaccount.com"
}
resource "google_datastream_connection_profile" "destination_connection_profile" {
display_name = "Connection profile"
location = "us-central1"
connection_profile_id = "destination-profile"
gcs_profile {
bucket = google_storage_bucket.bucket.name
root_path = "/path"
}
}
resource "google_datastream_stream" "default" {
depends_on = [
google_kms_crypto_key_iam_member.key_user
]
stream_id = "my-stream"
desired_state = "NOT_STARTED"
location = "us-central1"
display_name = "my stream"
labels = {
key = "value"
}
source_config {
source_connection_profile = google_datastream_connection_profile.source_connection_profile.id
mysql_source_config {
include_objects {
mysql_databases {
database = "my-database"
mysql_tables {
table = "includedTable"
mysql_columns {
column = "includedColumn"
data_type = "VARCHAR"
collation = "utf8mb4"
primary_key = false
nullable = false
ordinal_position = 0
}
}
mysql_tables {
table = "includedTable_2"
}
}
}
exclude_objects {
mysql_databases {
database = "my-database"
mysql_tables {
table = "excludedTable"
mysql_columns {
column = "excludedColumn"
data_type = "VARCHAR"
collation = "utf8mb4"
primary_key = false
nullable = false
ordinal_position = 0
}
}
}
}
max_concurrent_cdc_tasks = 5
}
}
destination_config {
destination_connection_profile = google_datastream_connection_profile.destination_connection_profile.id
gcs_destination_config {
path = "mydata"
file_rotation_mb = 200
file_rotation_interval = "60s"
json_file_format {
schema_file_format = "NO_SCHEMA_FILE"
compression = "GZIP"
}
}
}
backfill_all {
mysql_excluded_objects {
mysql_databases {
database = "my-database"
mysql_tables {
table = "excludedTable"
mysql_columns {
column = "excludedColumn"
data_type = "VARCHAR"
collation = "utf8mb4"
primary_key = false
nullable = false
ordinal_position = 0
}
}
}
}
}
customer_managed_encryption_key = "kms-name"
}
```
## Example Usage - Datastream Stream Postgresql
```hcl
resource "google_datastream_connection_profile" "source" {
display_name = "Postgresql Source"
location = "us-central1"
connection_profile_id = "source-profile"
postgresql_profile {
hostname = "hostname"
    port     = 5432
username = "user"
password = "pass"
database = "postgres"
}
}
resource "google_datastream_connection_profile" "destination" {
display_name = "BigQuery Destination"
location = "us-central1"
connection_profile_id = "destination-profile"
bigquery_profile {}
}
resource "google_datastream_stream" "default" {
display_name = "Postgres to BigQuery"
location = "us-central1"
stream_id = "my-stream"
desired_state = "RUNNING"
source_config {
source_connection_profile = google_datastream_connection_profile.source.id
postgresql_source_config {
max_concurrent_backfill_tasks = 12
publication = "publication"
replication_slot = "replication_slot"
include_objects {
postgresql_schemas {
schema = "schema"
postgresql_tables {
table = "table"
postgresql_columns {
column = "column"
}
}
}
}
exclude_objects {
postgresql_schemas {
schema = "schema"
postgresql_tables {
table = "table"
postgresql_columns {
column = "column"
}
}
}
}
}
}
destination_config {
destination_connection_profile = google_datastream_connection_profile.destination.id
bigquery_destination_config {
data_freshness = "900s"
source_hierarchy_datasets {
dataset_template {
location = "us-central1"
}
}
}
}
backfill_all {
postgresql_excluded_objects {
postgresql_schemas {
schema = "schema"
postgresql_tables {
table = "table"
postgresql_columns {
column = "column"
}
}
}
}
}
}
```
## Example Usage - Datastream Stream Oracle
```hcl
resource "google_datastream_connection_profile" "source" {
display_name = "Oracle Source"
location = "us-central1"
connection_profile_id = "source-profile"
oracle_profile {
hostname = "hostname"
port = 1521
username = "user"
password = "pass"
database_service = "ORCL"
}
}
resource "google_datastream_connection_profile" "destination" {
display_name = "BigQuery Destination"
location = "us-central1"
connection_profile_id = "destination-profile"
bigquery_profile {}
}
resource "google_datastream_stream" "stream5" {
display_name = "Oracle to BigQuery"
location = "us-central1"
stream_id = "my-stream"
desired_state = "RUNNING"
source_config {
source_connection_profile = google_datastream_connection_profile.source.id
oracle_source_config {
max_concurrent_cdc_tasks = 8
max_concurrent_backfill_tasks = 12
include_objects {
oracle_schemas {
schema = "schema"
oracle_tables {
table = "table"
oracle_columns {
column = "column"
}
}
}
}
exclude_objects {
oracle_schemas {
schema = "schema"
oracle_tables {
table = "table"
oracle_columns {
column = "column"
}
}
}
}
drop_large_objects {}
}
}
destination_config {
destination_connection_profile = google_datastream_connection_profile.destination.id
bigquery_destination_config {
data_freshness = "900s"
source_hierarchy_datasets {
dataset_template {
location = "us-central1"
}
}
}
}
backfill_all {
oracle_excluded_objects {
oracle_schemas {
schema = "schema"
oracle_tables {
table = "table"
oracle_columns {
column = "column"
}
}
}
}
}
}
```
## Example Usage - Datastream Stream Sql Server
```hcl
resource "google_sql_database_instance" "instance" {
name = "sql-server"
database_version = "SQLSERVER_2019_STANDARD"
region = "us-central1"
root_password = "root-password"
deletion_protection = "true"
settings {
tier = "db-custom-2-4096"
ip_configuration {
// Datastream IPs will vary by region.
// https://cloud.google.com/datastream/docs/ip-allowlists-and-regions
authorized_networks {
value = "34.71.242.81"
}
authorized_networks {
value = "34.72.28.29"
}
authorized_networks {
value = "34.67.6.157"
}
authorized_networks {
value = "34.67.234.134"
}
authorized_networks {
value = "34.72.239.218"
}
}
}
}
resource "google_sql_database" "db" {
name = "db"
instance = google_sql_database_instance.instance.name
depends_on = [google_sql_user.user]
}
resource "google_sql_user" "user" {
name = "user"
instance = google_sql_database_instance.instance.name
password = "password"
}
resource "google_datastream_connection_profile" "source" {
display_name = "SQL Server Source"
location = "us-central1"
connection_profile_id = "source-profile"
sql_server_profile {
hostname = google_sql_database_instance.instance.public_ip_address
port = 1433
username = google_sql_user.user.name
password = google_sql_user.user.password
database = google_sql_database.db.name
}
}
resource "google_datastream_connection_profile" "destination" {
display_name = "BigQuery Destination"
location = "us-central1"
connection_profile_id = "destination-profile"
bigquery_profile {}
}
resource "google_datastream_stream" "default" {
display_name = "SQL Server to BigQuery"
location = "us-central1"
stream_id = "stream"
source_config {
source_connection_profile = google_datastream_connection_profile.source.id
sql_server_source_config {
include_objects {
schemas {
schema = "schema"
tables {
table = "table"
}
}
}
transaction_logs {}
}
}
destination_config {
destination_connection_profile = google_datastream_connection_profile.destination.id
bigquery_destination_config {
data_freshness = "900s"
source_hierarchy_datasets {
dataset_template {
location = "us-central1"
}
}
}
}
backfill_none {}
}
```
## Example Usage - Datastream Stream Sql Server Change Tables
```hcl
resource "google_sql_database_instance" "instance" {
name = "sql-server"
database_version = "SQLSERVER_2019_STANDARD"
region = "us-central1"
root_password = "root-password"
deletion_protection = "true"
settings {
tier = "db-custom-2-4096"
ip_configuration {
// Datastream IPs will vary by region.
// https://cloud.google.com/datastream/docs/ip-allowlists-and-regions
authorized_networks {
value = "34.71.242.81"
}
authorized_networks {
value = "34.72.28.29"
}
authorized_networks {
value = "34.67.6.157"
}
authorized_networks {
value = "34.67.234.134"
}
authorized_networks {
value = "34.72.239.218"
}
}
}
}
resource "google_sql_database" "db" {
name = "db"
instance = google_sql_database_instance.instance.name
depends_on = [google_sql_user.user]
}
resource "google_sql_user" "user" {
name = "user"
instance = google_sql_database_instance.instance.name
password = "password"
}
resource "google_datastream_connection_profile" "source" {
display_name = "SQL Server Source"
location = "us-central1"
connection_profile_id = "source-profile"
sql_server_profile {
hostname = google_sql_database_instance.instance.public_ip_address
port = 1433
username = google_sql_user.user.name
password = google_sql_user.user.password
database = google_sql_database.db.name
}
}
resource "google_datastream_connection_profile" "destination" {
display_name = "BigQuery Destination"
location = "us-central1"
connection_profile_id = "destination-profile"
bigquery_profile {}
}
resource "google_datastream_stream" "default" {
display_name = "SQL Server to BigQuery"
location = "us-central1"
stream_id = "stream"
source_config {
source_connection_profile = google_datastream_connection_profile.source.id
sql_server_source_config {
include_objects {
schemas {
schema = "schema"
tables {
table = "table"
}
}
}
change_tables {}
}
}
destination_config {
destination_connection_profile = google_datastream_connection_profile.destination.id
bigquery_destination_config {
data_freshness = "900s"
source_hierarchy_datasets {
dataset_template {
location = "us-central1"
}
}
}
}
backfill_none {}
}
```
<div class = "oics-button" style="float: right; margin: 0 0 -15px">
<a href="https://console.cloud.google.com/cloudshell/open?cloudshell_git_repo=https%3A%2F%2Fgithub.com%2Fterraform-google-modules%2Fdocs-examples.git&cloudshell_image=gcr.io%2Fcloudshell-images%2Fcloudshell%3Alatest&cloudshell_print=.%2Fmotd&cloudshell_tutorial=.%2Ftutorial.md&cloudshell_working_dir=datastream_stream_postgresql_bigquery_dataset_id&open_in_editor=main.tf" target="_blank">
<img alt="Open in Cloud Shell" src="//gstatic.com/cloudssh/images/open-btn.svg" style="max-height: 44px; margin: 32px auto; max-width: 100%;">
</a>
</div>
## Example Usage - Datastream Stream Postgresql Bigquery Dataset Id
```hcl
resource "google_bigquery_dataset" "postgres" {
dataset_id = "postgres"
friendly_name = "postgres"
description = "Database of postgres"
location = "us-central1"
}
resource "google_datastream_stream" "default" {
display_name = "postgres to bigQuery"
location = "us-central1"
stream_id = "postgres-bigquery"
source_config {
source_connection_profile = google_datastream_connection_profile.source_connection_profile.id
mysql_source_config {}
}
destination_config {
destination_connection_profile = google_datastream_connection_profile.destination_connection_profile2.id
bigquery_destination_config {
data_freshness = "900s"
single_target_dataset {
dataset_id = google_bigquery_dataset.postgres.id
}
}
}
backfill_all {
}
}
resource "google_datastream_connection_profile" "destination_connection_profile2" {
display_name = "Connection profile"
location = "us-central1"
connection_profile_id = "dest-profile"
bigquery_profile {}
}
resource "google_sql_database_instance" "instance" {
name = "instance-name"
database_version = "MYSQL_8_0"
region = "us-central1"
settings {
tier = "db-f1-micro"
backup_configuration {
enabled = true
binary_log_enabled = true
}
ip_configuration {
// Datastream IPs will vary by region.
authorized_networks {
value = "34.71.242.81"
}
authorized_networks {
value = "34.72.28.29"
}
authorized_networks {
value = "34.67.6.157"
}
authorized_networks {
value = "34.67.234.134"
}
authorized_networks {
value = "34.72.239.218"
}
}
}
deletion_protection = false
}
resource "google_sql_database" "db" {
instance = google_sql_database_instance.instance.name
name = "db"
}
resource "random_password" "pwd" {
length = 16
special = false
}
resource "google_sql_user" "user" {
name = "my-user"
instance = google_sql_database_instance.instance.name
host = "%"
password = random_password.pwd.result
}
resource "google_datastream_connection_profile" "source_connection_profile" {
display_name = "Source connection profile"
location = "us-central1"
connection_profile_id = "source-profile"
mysql_profile {
hostname = google_sql_database_instance.instance.public_ip_address
username = google_sql_user.user.name
password = google_sql_user.user.password
}
}
```
<div class = "oics-button" style="float: right; margin: 0 0 -15px">
<a href="https://console.cloud.google.com/cloudshell/open?cloudshell_git_repo=https%3A%2F%2Fgithub.com%2Fterraform-google-modules%2Fdocs-examples.git&cloudshell_image=gcr.io%2Fcloudshell-images%2Fcloudshell%3Alatest&cloudshell_print=.%2Fmotd&cloudshell_tutorial=.%2Ftutorial.md&cloudshell_working_dir=datastream_stream_bigquery&open_in_editor=main.tf" target="_blank">
<img alt="Open in Cloud Shell" src="//gstatic.com/cloudssh/images/open-btn.svg" style="max-height: 44px; margin: 32px auto; max-width: 100%;">
</a>
</div>
## Example Usage - Datastream Stream Bigquery
```hcl
data "google_project" "project" {
}
resource "google_sql_database_instance" "instance" {
name = "my-instance"
database_version = "MYSQL_8_0"
region = "us-central1"
settings {
tier = "db-f1-micro"
backup_configuration {
enabled = true
binary_log_enabled = true
}
ip_configuration {
// Datastream IPs will vary by region.
authorized_networks {
value = "34.71.242.81"
}
authorized_networks {
value = "34.72.28.29"
}
authorized_networks {
value = "34.67.6.157"
}
authorized_networks {
value = "34.67.234.134"
}
authorized_networks {
value = "34.72.239.218"
}
}
}
deletion_protection = true
}
resource "google_sql_database" "db" {
instance = google_sql_database_instance.instance.name
name = "db"
}
resource "random_password" "pwd" {
length = 16
special = false
}
resource "google_sql_user" "user" {
name = "user"
instance = google_sql_database_instance.instance.name
host = "%"
password = random_password.pwd.result
}
resource "google_datastream_connection_profile" "source_connection_profile" {
display_name = "Source connection profile"
location = "us-central1"
connection_profile_id = "source-profile"
mysql_profile {
hostname = google_sql_database_instance.instance.public_ip_address
username = google_sql_user.user.name
password = google_sql_user.user.password
}
}
data "google_bigquery_default_service_account" "bq_sa" {
}
resource "google_kms_crypto_key_iam_member" "bigquery_key_user" {
crypto_key_id = "bigquery-kms-name"
role = "roles/cloudkms.cryptoKeyEncrypterDecrypter"
member = "serviceAccount:${data.google_bigquery_default_service_account.bq_sa.email}"
}
resource "google_datastream_connection_profile" "destination_connection_profile" {
display_name = "Connection profile"
location = "us-central1"
connection_profile_id = "destination-profile"
bigquery_profile {}
}
resource "google_datastream_stream" "default" {
depends_on = [
google_kms_crypto_key_iam_member.bigquery_key_user
]
stream_id = "my-stream"
location = "us-central1"
display_name = "my stream"
source_config {
source_connection_profile = google_datastream_connection_profile.source_connection_profile.id
mysql_source_config {}
}
destination_config {
destination_connection_profile = google_datastream_connection_profile.destination_connection_profile.id
bigquery_destination_config {
source_hierarchy_datasets {
dataset_template {
location = "us-central1"
kms_key_name = "bigquery-kms-name"
}
}
}
}
backfill_none {
}
}
```
<div class = "oics-button" style="float: right; margin: 0 0 -15px">
<a href="https://console.cloud.google.com/cloudshell/open?cloudshell_git_repo=https%3A%2F%2Fgithub.com%2Fterraform-google-modules%2Fdocs-examples.git&cloudshell_image=gcr.io%2Fcloudshell-images%2Fcloudshell%3Alatest&cloudshell_print=.%2Fmotd&cloudshell_tutorial=.%2Ftutorial.md&cloudshell_working_dir=datastream_stream_bigquery_append_only&open_in_editor=main.tf" target="_blank">
<img alt="Open in Cloud Shell" src="//gstatic.com/cloudssh/images/open-btn.svg" style="max-height: 44px; margin: 32px auto; max-width: 100%;">
</a>
</div>
## Example Usage - Datastream Stream Bigquery Append Only
```hcl
data "google_project" "project" {
}
resource "google_sql_database_instance" "instance" {
name = "my-instance"
database_version = "MYSQL_8_0"
region = "us-central1"
settings {
tier = "db-f1-micro"
backup_configuration {
enabled = true
binary_log_enabled = true
}
ip_configuration {
// Datastream IPs will vary by region.
authorized_networks {
value = "34.71.242.81"
}
authorized_networks {
value = "34.72.28.29"
}
authorized_networks {
value = "34.67.6.157"
}
authorized_networks {
value = "34.67.234.134"
}
authorized_networks {
value = "34.72.239.218"
}
}
}
deletion_protection = true
}
resource "google_sql_database" "db" {
instance = google_sql_database_instance.instance.name
name = "db"
}
resource "random_password" "pwd" {
length = 16
special = false
}
resource "google_sql_user" "user" {
name = "user"
instance = google_sql_database_instance.instance.name
host = "%"
password = random_password.pwd.result
}
resource "google_datastream_connection_profile" "source_connection_profile" {
display_name = "Source connection profile"
location = "us-central1"
connection_profile_id = "source-profile"
mysql_profile {
hostname = google_sql_database_instance.instance.public_ip_address
username = google_sql_user.user.name
password = google_sql_user.user.password
}
}
resource "google_datastream_connection_profile" "destination_connection_profile" {
display_name = "Connection profile"
location = "us-central1"
connection_profile_id = "destination-profile"
bigquery_profile {}
}
resource "google_datastream_stream" "default" {
stream_id = "my-stream"
location = "us-central1"
display_name = "my stream"
source_config {
source_connection_profile = google_datastream_connection_profile.source_connection_profile.id
mysql_source_config {}
}
destination_config {
destination_connection_profile = google_datastream_connection_profile.destination_connection_profile.id
bigquery_destination_config {
source_hierarchy_datasets {
dataset_template {
location = "us-central1"
}
}
append_only {}
}
}
backfill_none {
}
}
```
## Argument Reference
The following arguments are supported:
* `display_name` -
(Required)
Display name.
* `source_config` -
(Required)
Source connection profile configuration.
Structure is [documented below](#nested_source_config).
* `destination_config` -
(Required)
Destination connection profile configuration.
Structure is [documented below](#nested_destination_config).
* `stream_id` -
(Required)
The stream identifier.
* `location` -
(Required)
The name of the location this stream is located in.
<a name="nested_source_config"></a>The `source_config` block supports:
* `source_connection_profile` -
(Required)
Source connection profile resource. Format: projects/{project}/locations/{location}/connectionProfiles/{name}
* `mysql_source_config` -
(Optional)
MySQL data source configuration.
Structure is [documented below](#nested_mysql_source_config).
* `oracle_source_config` -
(Optional)
Oracle data source configuration.
Structure is [documented below](#nested_oracle_source_config).
* `postgresql_source_config` -
(Optional)
PostgreSQL data source configuration.
Structure is [documented below](#nested_postgresql_source_config).
* `sql_server_source_config` -
(Optional)
SQL Server data source configuration.
Structure is [documented below](#nested_sql_server_source_config).
<a name="nested_mysql_source_config"></a>The `mysql_source_config` block supports:
* `include_objects` -
(Optional)
MySQL objects to retrieve from the source.
Structure is [documented below](#nested_include_objects).
* `exclude_objects` -
(Optional)
MySQL objects to exclude from the stream.
Structure is [documented below](#nested_exclude_objects).
* `max_concurrent_cdc_tasks` -
(Optional)
Maximum number of concurrent CDC tasks. The number should be non negative.
If not set (or set to 0), the system's default value will be used.
* `max_concurrent_backfill_tasks` -
(Optional)
Maximum number of concurrent backfill tasks. The number should be non negative.
If not set (or set to 0), the system's default value will be used.
<a name="nested_include_objects"></a>The `include_objects` block supports:
* `mysql_databases` -
(Required)
MySQL databases on the server
Structure is [documented below](#nested_mysql_databases).
<a name="nested_mysql_databases"></a>The `mysql_databases` block supports:
* `database` -
(Required)
Database name.
* `mysql_tables` -
(Optional)
Tables in the database.
Structure is [documented below](#nested_mysql_tables).
<a name="nested_mysql_tables"></a>The `mysql_tables` block supports:
* `table` -
(Required)
Table name.
* `mysql_columns` -
(Optional)
MySQL columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything.
Structure is [documented below](#nested_mysql_columns).
<a name="nested_mysql_columns"></a>The `mysql_columns` block supports:
* `column` -
(Optional)
Column name.
* `data_type` -
(Optional)
The MySQL data type. Full data types list can be found here:
https://dev.mysql.com/doc/refman/8.0/en/data-types.html
* `length` -
(Output)
Column length.
* `collation` -
(Optional)
Column collation.
* `primary_key` -
(Optional)
Whether or not the column represents a primary key.
* `nullable` -
(Optional)
Whether or not the column can accept a null value.
* `ordinal_position` -
(Optional)
The ordinal position of the column in the table.
<a name="nested_exclude_objects"></a>The `exclude_objects` block supports:
* `mysql_databases` -
(Required)
MySQL databases on the server
Structure is [documented below](#nested_mysql_databases).
<a name="nested_mysql_databases"></a>The `mysql_databases` block supports:
* `database` -
(Required)
Database name.
* `mysql_tables` -
(Optional)
Tables in the database.
Structure is [documented below](#nested_mysql_tables).
<a name="nested_mysql_tables"></a>The `mysql_tables` block supports:
* `table` -
(Required)
Table name.
* `mysql_columns` -
(Optional)
MySQL columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything.
Structure is [documented below](#nested_mysql_columns).
<a name="nested_mysql_columns"></a>The `mysql_columns` block supports:
* `column` -
(Optional)
Column name.
* `data_type` -
(Optional)
The MySQL data type. Full data types list can be found here:
https://dev.mysql.com/doc/refman/8.0/en/data-types.html
* `length` -
(Output)
Column length.
* `collation` -
(Optional)
Column collation.
* `primary_key` -
(Optional)
Whether or not the column represents a primary key.
* `nullable` -
(Optional)
Whether or not the column can accept a null value.
* `ordinal_position` -
(Optional)
The ordinal position of the column in the table.
<a name="nested_oracle_source_config"></a>The `oracle_source_config` block supports:
* `include_objects` -
(Optional)
Oracle objects to retrieve from the source.
Structure is [documented below](#nested_include_objects).
* `exclude_objects` -
(Optional)
Oracle objects to exclude from the stream.
Structure is [documented below](#nested_exclude_objects).
* `max_concurrent_cdc_tasks` -
(Optional)
Maximum number of concurrent CDC tasks. The number should be non negative.
If not set (or set to 0), the system's default value will be used.
* `max_concurrent_backfill_tasks` -
(Optional)
Maximum number of concurrent backfill tasks. The number should be non negative.
If not set (or set to 0), the system's default value will be used.
* `drop_large_objects` -
(Optional)
Configuration to drop large object values.
* `stream_large_objects` -
(Optional)
Configuration to stream large object values.
<a name="nested_include_objects"></a>The `include_objects` block supports:
* `oracle_schemas` -
(Required)
Oracle schemas/databases in the database server
Structure is [documented below](#nested_oracle_schemas).
<a name="nested_oracle_schemas"></a>The `oracle_schemas` block supports:
* `schema` -
(Required)
Schema name.
* `oracle_tables` -
(Optional)
Tables in the database.
Structure is [documented below](#nested_oracle_tables).
<a name="nested_oracle_tables"></a>The `oracle_tables` block supports:
* `table` -
(Required)
Table name.
* `oracle_columns` -
(Optional)
Oracle columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything.
Structure is [documented below](#nested_oracle_columns).
<a name="nested_oracle_columns"></a>The `oracle_columns` block supports:
* `column` -
(Optional)
Column name.
* `data_type` -
(Optional)
The Oracle data type. Full data types list can be found here:
https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/Data-Types.html
* `length` -
(Output)
Column length.
* `precision` -
(Output)
Column precision.
* `scale` -
(Output)
Column scale.
* `encoding` -
(Output)
Column encoding.
* `primary_key` -
(Output)
Whether or not the column represents a primary key.
* `nullable` -
(Output)
Whether or not the column can accept a null value.
* `ordinal_position` -
(Output)
The ordinal position of the column in the table.
<a name="nested_exclude_objects"></a>The `exclude_objects` block supports:
* `oracle_schemas` -
(Required)
Oracle schemas/databases in the database server
Structure is [documented below](#nested_oracle_schemas).
<a name="nested_oracle_schemas"></a>The `oracle_schemas` block supports:
* `schema` -
(Required)
Schema name.
* `oracle_tables` -
(Optional)
Tables in the database.
Structure is [documented below](#nested_oracle_tables).
<a name="nested_oracle_tables"></a>The `oracle_tables` block supports:
* `table` -
(Required)
Table name.
* `oracle_columns` -
(Optional)
Oracle columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything.
Structure is [documented below](#nested_oracle_columns).
<a name="nested_oracle_columns"></a>The `oracle_columns` block supports:
* `column` -
(Optional)
Column name.
* `data_type` -
(Optional)
The Oracle data type. Full data types list can be found here:
https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/Data-Types.html
* `length` -
(Output)
Column length.
* `precision` -
(Output)
Column precision.
* `scale` -
(Output)
Column scale.
* `encoding` -
(Output)
Column encoding.
* `primary_key` -
(Output)
Whether or not the column represents a primary key.
* `nullable` -
(Output)
Whether or not the column can accept a null value.
* `ordinal_position` -
(Output)
The ordinal position of the column in the table.
<a name="nested_postgresql_source_config"></a>The `postgresql_source_config` block supports:
* `include_objects` -
(Optional)
PostgreSQL objects to retrieve from the source.
Structure is [documented below](#nested_include_objects).
* `exclude_objects` -
(Optional)
PostgreSQL objects to exclude from the stream.
Structure is [documented below](#nested_exclude_objects).
* `replication_slot` -
(Required)
The name of the logical replication slot that's configured with
the pgoutput plugin.
* `publication` -
(Required)
The name of the publication that includes the set of all tables
that are defined in the stream's include_objects.
* `max_concurrent_backfill_tasks` -
(Optional)
Maximum number of concurrent backfill tasks. The number should be non
negative. If not set (or set to 0), the system's default value will be used.
<a name="nested_include_objects"></a>The `include_objects` block supports:
* `postgresql_schemas` -
(Required)
PostgreSQL schemas on the server
Structure is [documented below](#nested_postgresql_schemas).
<a name="nested_postgresql_schemas"></a>The `postgresql_schemas` block supports:
* `schema` -
(Required)
Schema name.
* `postgresql_tables` -
(Optional)
Tables in the schema.
Structure is [documented below](#nested_postgresql_tables).
<a name="nested_postgresql_tables"></a>The `postgresql_tables` block supports:
* `table` -
(Required)
Table name.
* `postgresql_columns` -
(Optional)
PostgreSQL columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything.
Structure is [documented below](#nested_postgresql_columns).
<a name="nested_postgresql_columns"></a>The `postgresql_columns` block supports:
* `column` -
(Optional)
Column name.
* `data_type` -
(Optional)
The PostgreSQL data type. Full data types list can be found here:
https://www.postgresql.org/docs/current/datatype.html
* `length` -
(Output)
Column length.
* `precision` -
(Output)
Column precision.
* `scale` -
(Output)
Column scale.
* `primary_key` -
(Optional)
Whether or not the column represents a primary key.
* `nullable` -
(Optional)
Whether or not the column can accept a null value.
* `ordinal_position` -
(Optional)
The ordinal position of the column in the table.
<a name="nested_exclude_objects"></a>The `exclude_objects` block supports:
* `postgresql_schemas` -
(Required)
PostgreSQL schemas on the server
Structure is [documented below](#nested_postgresql_schemas).
<a name="nested_postgresql_schemas"></a>The `postgresql_schemas` block supports:
* `schema` -
(Required)
Schema name.
* `postgresql_tables` -
(Optional)
Tables in the schema.
Structure is [documented below](#nested_postgresql_tables).
<a name="nested_postgresql_tables"></a>The `postgresql_tables` block supports:
* `table` -
(Required)
Table name.
* `postgresql_columns` -
(Optional)
PostgreSQL columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything.
Structure is [documented below](#nested_postgresql_columns).
<a name="nested_postgresql_columns"></a>The `postgresql_columns` block supports:
* `column` -
(Optional)
Column name.
* `data_type` -
(Optional)
The PostgreSQL data type. Full data types list can be found here:
https://www.postgresql.org/docs/current/datatype.html
* `length` -
(Output)
Column length.
* `precision` -
(Output)
Column precision.
* `scale` -
(Output)
Column scale.
* `primary_key` -
(Optional)
Whether or not the column represents a primary key.
* `nullable` -
(Optional)
Whether or not the column can accept a null value.
* `ordinal_position` -
(Optional)
The ordinal position of the column in the table.
<a name="nested_sql_server_source_config"></a>The `sql_server_source_config` block supports:
* `include_objects` -
(Optional)
SQL Server objects to retrieve from the source.
Structure is [documented below](#nested_include_objects).
* `exclude_objects` -
(Optional)
SQL Server objects to exclude from the stream.
Structure is [documented below](#nested_exclude_objects).
* `max_concurrent_cdc_tasks` -
(Optional)
Max concurrent CDC tasks.
* `max_concurrent_backfill_tasks` -
(Optional)
Max concurrent backfill tasks.
* `transaction_logs` -
(Optional)
CDC reader reads from transaction logs.
* `change_tables` -
(Optional)
CDC reader reads from change tables.
<a name="nested_include_objects"></a>The `include_objects` block supports:
* `schemas` -
(Required)
SQL Server schemas/databases in the database server
Structure is [documented below](#nested_schemas).
<a name="nested_schemas"></a>The `schemas` block supports:
* `schema` -
(Required)
Schema name.
* `tables` -
(Optional)
Tables in the database.
Structure is [documented below](#nested_tables).
<a name="nested_tables"></a>The `tables` block supports:
* `table` -
(Required)
Table name.
* `columns` -
(Optional)
SQL Server columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything.
Structure is [documented below](#nested_columns).
<a name="nested_columns"></a>The `columns` block supports:
* `column` -
(Optional)
Column name.
* `data_type` -
(Optional)
The SQL Server data type. Full data types list can be found here:
https://learn.microsoft.com/en-us/sql/t-sql/data-types/data-types-transact-sql?view=sql-server-ver16
* `length` -
(Output)
Column length.
* `precision` -
(Output)
Column precision.
* `scale` -
(Output)
Column scale.
* `primary_key` -
(Output)
Whether or not the column represents a primary key.
* `nullable` -
(Output)
Whether or not the column can accept a null value.
* `ordinal_position` -
(Output)
The ordinal position of the column in the table.
<a name="nested_exclude_objects"></a>The `exclude_objects` block supports:
* `schemas` -
(Required)
SQL Server schemas/databases in the database server
Structure is [documented below](#nested_schemas).
<a name="nested_schemas"></a>The `schemas` block supports:
* `schema` -
(Required)
Schema name.
* `tables` -
(Optional)
Tables in the database.
Structure is [documented below](#nested_tables).
<a name="nested_tables"></a>The `tables` block supports:
* `table` -
(Required)
Table name.
* `columns` -
(Optional)
SQL Server columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything.
Structure is [documented below](#nested_columns).
<a name="nested_columns"></a>The `columns` block supports:
* `column` -
(Optional)
Column name.
* `data_type` -
(Optional)
The SQL Server data type. Full data types list can be found here:
https://learn.microsoft.com/en-us/sql/t-sql/data-types/data-types-transact-sql?view=sql-server-ver16
* `length` -
(Output)
Column length.
* `precision` -
(Output)
Column precision.
* `scale` -
(Output)
Column scale.
* `primary_key` -
(Output)
Whether or not the column represents a primary key.
* `nullable` -
(Output)
Whether or not the column can accept a null value.
* `ordinal_position` -
(Output)
The ordinal position of the column in the table.
<a name="nested_destination_config"></a>The `destination_config` block supports:
* `destination_connection_profile` -
(Required)
Destination connection profile resource. Format: projects/{project}/locations/{location}/connectionProfiles/{name}
* `gcs_destination_config` -
(Optional)
A configuration for how data should be loaded to Cloud Storage.
Structure is [documented below](#nested_gcs_destination_config).
* `bigquery_destination_config` -
(Optional)
A configuration for how data should be loaded to Google BigQuery.
Structure is [documented below](#nested_bigquery_destination_config).
<a name="nested_gcs_destination_config"></a>The `gcs_destination_config` block supports:
* `path` -
(Optional)
Path inside the Cloud Storage bucket to write data to.
* `file_rotation_mb` -
(Optional)
The maximum file size to be saved in the bucket.
* `file_rotation_interval` -
(Optional)
The maximum duration for which new events are added before a file is closed and a new file is created.
A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". Defaults to 900s.
* `avro_file_format` -
(Optional)
AVRO file format configuration.
* `json_file_format` -
(Optional)
JSON file format configuration.
Structure is [documented below](#nested_json_file_format).
<a name="nested_json_file_format"></a>The `json_file_format` block supports:
* `schema_file_format` -
(Optional)
The schema file format to output alongside the JSON data files.
Possible values are: `NO_SCHEMA_FILE`, `AVRO_SCHEMA_FILE`.
* `compression` -
(Optional)
Compression of the loaded JSON file.
Possible values are: `NO_COMPRESSION`, `GZIP`.
<a name="nested_bigquery_destination_config"></a>The `bigquery_destination_config` block supports:
* `data_freshness` -
(Optional)
The guaranteed data freshness (in seconds) when querying tables created by the stream.
Editing this field will only affect new tables created in the future, but existing tables
will not be impacted. Lower values mean that queries will return fresher data, but may result in higher cost.
A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". Defaults to 900s.
* `single_target_dataset` -
(Optional)
A single target dataset to which all data will be streamed.
Structure is [documented below](#nested_single_target_dataset).
* `source_hierarchy_datasets` -
(Optional)
Destination datasets are created so that hierarchy of the destination data objects matches the source hierarchy.
Structure is [documented below](#nested_source_hierarchy_datasets).
* `merge` -
(Optional)
Merge mode defines that all changes to a table will be merged at the destination Google BigQuery
table. This is the default write mode. When selected, BigQuery reflects the way the data is stored
in the source database. With Merge mode, no historical record of the change events is kept.
* `append_only` -
(Optional)
AppendOnly mode defines that the stream of changes (INSERT, UPDATE-INSERT, UPDATE-DELETE and DELETE
events) to a source table will be written to the destination Google BigQuery table, retaining the
historical state of the data.
<a name="nested_single_target_dataset"></a>The `single_target_dataset` block supports:
* `dataset_id` -
(Required)
Dataset ID in the format projects/{project}/datasets/{dataset_id} or
{project}:{dataset_id}
<a name="nested_source_hierarchy_datasets"></a>The `source_hierarchy_datasets` block supports:
* `dataset_template` -
(Required)
Dataset template used for dynamic dataset creation.
Structure is [documented below](#nested_dataset_template).
<a name="nested_dataset_template"></a>The `dataset_template` block supports:
* `location` -
(Required)
The geographic location where the dataset should reside.
See https://cloud.google.com/bigquery/docs/locations for supported locations.
* `dataset_id_prefix` -
(Optional)
If supplied, every created dataset will have its name prefixed by the provided value.
The prefix and name will be separated by an underscore, i.e. `<prefix>_<dataset_name>`.
* `kms_key_name` -
(Optional)
Describes the Cloud KMS encryption key that will be used to protect destination BigQuery
table. The BigQuery Service Account associated with your project requires access to this
encryption key. i.e. projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{cryptoKey}.
See https://cloud.google.com/bigquery/docs/customer-managed-encryption for more information.
- - -
* `labels` -
(Optional)
Labels.
**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field `effective_labels` for all of the labels present on the resource.
* `backfill_all` -
(Optional)
Backfill strategy to automatically backfill the Stream's objects. Specific objects can be excluded.
Structure is [documented below](#nested_backfill_all).
* `backfill_none` -
(Optional)
Backfill strategy to disable automatic backfill for the Stream's objects.
* `customer_managed_encryption_key` -
(Optional)
A reference to a KMS encryption key. If provided, it will be used to encrypt the data. If left blank, data
will be encrypted using an internal Stream-specific encryption key provisioned through KMS.
* `create_without_validation` -
(Optional)
Create the stream without validating it.
* `project` - (Optional) The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
* `desired_state` - (Optional) Desired state of the Stream. Set this field to `RUNNING` to start the stream, and `PAUSED` to pause the stream.
Possible values: NOT_STARTED, RUNNING, PAUSED. Default: NOT_STARTED
<a name="nested_backfill_all"></a>The `backfill_all` block supports:
* `mysql_excluded_objects` -
(Optional)
MySQL data source objects to avoid backfilling.
Structure is [documented below](#nested_mysql_excluded_objects).
* `postgresql_excluded_objects` -
(Optional)
PostgreSQL data source objects to avoid backfilling.
Structure is [documented below](#nested_postgresql_excluded_objects).
* `oracle_excluded_objects` -
(Optional)
Oracle data source objects to avoid backfilling.
Structure is [documented below](#nested_oracle_excluded_objects).
* `sql_server_excluded_objects` -
(Optional)
SQL Server data source objects to avoid backfilling.
Structure is [documented below](#nested_sql_server_excluded_objects).
<a name="nested_mysql_excluded_objects"></a>The `mysql_excluded_objects` block supports:
* `mysql_databases` -
(Required)
MySQL databases on the server
Structure is [documented below](#nested_mysql_databases).
<a name="nested_mysql_databases"></a>The `mysql_databases` block supports:
* `database` -
(Required)
Database name.
* `mysql_tables` -
(Optional)
Tables in the database.
Structure is [documented below](#nested_mysql_tables).
<a name="nested_mysql_tables"></a>The `mysql_tables` block supports:
* `table` -
(Required)
Table name.
* `mysql_columns` -
(Optional)
MySQL columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything.
Structure is [documented below](#nested_mysql_columns).
<a name="nested_mysql_columns"></a>The `mysql_columns` block supports:
* `column` -
(Optional)
Column name.
* `data_type` -
(Optional)
The MySQL data type. Full data types list can be found here:
https://dev.mysql.com/doc/refman/8.0/en/data-types.html
* `length` -
(Output)
Column length.
* `collation` -
(Optional)
Column collation.
* `primary_key` -
(Optional)
Whether or not the column represents a primary key.
* `nullable` -
(Optional)
Whether or not the column can accept a null value.
* `ordinal_position` -
(Optional)
The ordinal position of the column in the table.
<a name="nested_postgresql_excluded_objects"></a>The `postgresql_excluded_objects` block supports:
* `postgresql_schemas` -
(Required)
PostgreSQL schemas on the server
Structure is [documented below](#nested_postgresql_schemas).
<a name="nested_postgresql_schemas"></a>The `postgresql_schemas` block supports:
* `schema` -
(Required)
Schema name.
* `postgresql_tables` -
(Optional)
Tables in the schema.
Structure is [documented below](#nested_postgresql_tables).
<a name="nested_postgresql_tables"></a>The `postgresql_tables` block supports:
* `table` -
(Required)
Table name.
* `postgresql_columns` -
(Optional)
PostgreSQL columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything.
Structure is [documented below](#nested_postgresql_columns).
<a name="nested_postgresql_columns"></a>The `postgresql_columns` block supports:
* `column` -
(Optional)
Column name.
* `data_type` -
(Optional)
The PostgreSQL data type. Full data types list can be found here:
https://www.postgresql.org/docs/current/datatype.html
* `length` -
(Output)
Column length.
* `precision` -
(Output)
Column precision.
* `scale` -
(Output)
Column scale.
* `primary_key` -
(Optional)
Whether or not the column represents a primary key.
* `nullable` -
(Optional)
Whether or not the column can accept a null value.
* `ordinal_position` -
(Optional)
The ordinal position of the column in the table.
<a name="nested_oracle_excluded_objects"></a>The `oracle_excluded_objects` block supports:
* `oracle_schemas` -
(Required)
Oracle schemas/databases in the database server
Structure is [documented below](#nested_oracle_schemas).
<a name="nested_oracle_schemas"></a>The `oracle_schemas` block supports:
* `schema` -
(Required)
Schema name.
* `oracle_tables` -
(Optional)
Tables in the database.
Structure is [documented below](#nested_oracle_tables).
<a name="nested_oracle_tables"></a>The `oracle_tables` block supports:
* `table` -
(Required)
Table name.
* `oracle_columns` -
(Optional)
Oracle columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything.
Structure is [documented below](#nested_oracle_columns).
<a name="nested_oracle_columns"></a>The `oracle_columns` block supports:
* `column` -
(Optional)
Column name.
* `data_type` -
(Optional)
The Oracle data type. Full data types list can be found here:
https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/Data-Types.html
* `length` -
(Output)
Column length.
* `precision` -
(Output)
Column precision.
* `scale` -
(Output)
Column scale.
* `encoding` -
(Output)
Column encoding.
* `primary_key` -
(Output)
Whether or not the column represents a primary key.
* `nullable` -
(Output)
Whether or not the column can accept a null value.
* `ordinal_position` -
(Output)
The ordinal position of the column in the table.
<a name="nested_sql_server_excluded_objects"></a>The `sql_server_excluded_objects` block supports:
* `schemas` -
(Required)
SQL Server schemas/databases in the database server
Structure is [documented below](#nested_schemas).
<a name="nested_schemas"></a>The `schemas` block supports:
* `schema` -
(Required)
Schema name.
* `tables` -
(Optional)
Tables in the database.
Structure is [documented below](#nested_tables).
<a name="nested_tables"></a>The `tables` block supports:
* `table` -
(Required)
Table name.
* `columns` -
(Optional)
SQL Server columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything.
Structure is [documented below](#nested_columns).
<a name="nested_columns"></a>The `columns` block supports:
* `column` -
(Optional)
Column name.
* `data_type` -
(Optional)
The SQL Server data type. Full data types list can be found here:
https://learn.microsoft.com/en-us/sql/t-sql/data-types/data-types-transact-sql?view=sql-server-ver16
* `length` -
(Output)
Column length.
* `precision` -
(Output)
Column precision.
* `scale` -
(Output)
Column scale.
* `primary_key` -
(Output)
Whether or not the column represents a primary key.
* `nullable` -
(Output)
Whether or not the column can accept a null value.
* `ordinal_position` -
(Output)
The ordinal position of the column in the table.
## Attributes Reference
In addition to the arguments listed above, the following computed attributes are exported:
* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/streams/{{stream_id}}`
* `name` -
The stream's name.
* `state` -
The state of the stream.
* `terraform_labels` -
The combination of labels configured directly on the resource
and default labels configured on the provider.
* `effective_labels` -
All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.
## Timeouts
This resource provides the following
[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options:
- `create` - Default is 20 minutes.
- `update` - Default is 20 minutes.
- `delete` - Default is 20 minutes.
## Import
Stream can be imported using any of these accepted formats:
* `projects/{{project}}/locations/{{location}}/streams/{{stream_id}}`
* `{{project}}/{{location}}/{{stream_id}}`
* `{{location}}/{{stream_id}}`
In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Stream using one of the formats above. For example:
```tf
import {
id = "projects/{{project}}/locations/{{location}}/streams/{{stream_id}}"
to = google_datastream_stream.default
}
```
When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), Stream can be imported using one of the formats above. For example:
```
$ terraform import google_datastream_stream.default projects/{{project}}/locations/{{location}}/streams/{{stream_id}}
$ terraform import google_datastream_stream.default {{project}}/{{location}}/{{stream_id}}
$ terraform import google_datastream_stream.default {{location}}/{{stream_id}}
```
## User Project Overrides
This resource supports [User Project Overrides](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#user_project_override).