| --- |
| # ---------------------------------------------------------------------------- |
| # |
| # *** AUTO GENERATED CODE *** Type: MMv1 *** |
| # |
| # ---------------------------------------------------------------------------- |
| # |
| # This file is automatically generated by Magic Modules and manual |
| # changes will be clobbered when the file is regenerated. |
| # |
| # Please read more about how to change this file in |
| # .github/CONTRIBUTING.md. |
| # |
| # ---------------------------------------------------------------------------- |
| subcategory: "Data loss prevention" |
| description: |- |
| A job trigger configuration. |
| --- |
| |
| # google\_data\_loss\_prevention\_job\_trigger |
| |
| A job trigger configuration. |
| |
| |
| To get more information about JobTrigger, see: |
| |
| * [API documentation](https://cloud.google.com/dlp/docs/reference/rest/v2/projects.jobTriggers) |
| * How-to Guides |
| * [Official Documentation](https://cloud.google.com/dlp/docs/creating-job-triggers) |
| |
| ## Example Usage - Dlp Job Trigger Basic |
| |
| |
| ```hcl |
| resource "google_data_loss_prevention_job_trigger" "basic" { |
| parent = "projects/my-project-name" |
| description = "Description" |
| display_name = "Displayname" |
| |
| triggers { |
| schedule { |
| recurrence_period_duration = "86400s" |
| } |
| } |
| |
| inspect_job { |
| inspect_template_name = "fake" |
| actions { |
| save_findings { |
| output_config { |
| table { |
| project_id = "project" |
| dataset_id = "dataset" |
| } |
| } |
| } |
| } |
| storage_config { |
| cloud_storage_options { |
| file_set { |
| url = "gs://mybucket/directory/" |
| } |
| } |
| } |
| } |
| } |
| ``` |
| ## Example Usage - Dlp Job Trigger Bigquery Row Limit |
| |
| |
| ```hcl |
| resource "google_data_loss_prevention_job_trigger" "bigquery_row_limit" { |
| parent = "projects/my-project-name" |
| description = "Description" |
| display_name = "Displayname" |
| |
| triggers { |
| schedule { |
| recurrence_period_duration = "86400s" |
| } |
| } |
| |
| inspect_job { |
| inspect_template_name = "fake" |
| actions { |
| save_findings { |
| output_config { |
| table { |
| project_id = "project" |
| dataset_id = "dataset" |
| } |
| } |
| } |
| } |
| storage_config { |
| big_query_options { |
| table_reference { |
| project_id = "project" |
| dataset_id = "dataset" |
| table_id = "table_to_scan" |
| } |
| |
| rows_limit = 1000 |
| sample_method = "RANDOM_START" |
| } |
| } |
| } |
| } |
| ``` |
| ## Example Usage - Dlp Job Trigger Bigquery Row Limit Percentage |
| |
| |
| ```hcl |
| resource "google_data_loss_prevention_job_trigger" "bigquery_row_limit_percentage" { |
| parent = "projects/my-project-name" |
| description = "Description" |
| display_name = "Displayname" |
| |
| triggers { |
| schedule { |
| recurrence_period_duration = "86400s" |
| } |
| } |
| |
| inspect_job { |
| inspect_template_name = "fake" |
| actions { |
| save_findings { |
| output_config { |
| table { |
| project_id = "project" |
| dataset_id = "dataset" |
| } |
| } |
| } |
| } |
| storage_config { |
| big_query_options { |
| table_reference { |
| project_id = "project" |
| dataset_id = "dataset" |
| table_id = "table_to_scan" |
| } |
| |
| rows_limit_percent = 50 |
| sample_method = "RANDOM_START" |
| } |
| } |
| } |
| } |
| ``` |
| ## Example Usage - Dlp Job Trigger Job Notification Emails |
| |
| |
| ```hcl |
| resource "google_data_loss_prevention_job_trigger" "job_notification_emails" { |
| parent = "projects/my-project-name" |
| description = "Description for the job_trigger created by terraform" |
| display_name = "TerraformDisplayName" |
| |
| triggers { |
| schedule { |
| recurrence_period_duration = "86400s" |
| } |
| } |
| |
| inspect_job { |
| inspect_template_name = "sample-inspect-template" |
| actions { |
| job_notification_emails {} |
| } |
| storage_config { |
| cloud_storage_options { |
| file_set { |
| url = "gs://mybucket/directory/" |
| } |
| } |
| } |
| } |
| } |
| ``` |
| ## Example Usage - Dlp Job Trigger Deidentify |
| |
| |
| ```hcl |
| resource "google_data_loss_prevention_job_trigger" "deidentify" { |
| parent = "projects/my-project-name" |
| description = "Description for the job_trigger created by terraform" |
| display_name = "TerraformDisplayName" |
| |
| triggers { |
| schedule { |
| recurrence_period_duration = "86400s" |
| } |
| } |
| |
| inspect_job { |
| inspect_template_name = "sample-inspect-template" |
| actions { |
| deidentify { |
| cloud_storage_output = "gs://samplebucket/dir/" |
| file_types_to_transform = ["CSV", "TSV"] |
| transformation_details_storage_config { |
| table { |
| project_id = "my-project-name" |
| dataset_id = google_bigquery_dataset.default.dataset_id |
| table_id = google_bigquery_table.default.table_id |
| } |
| } |
| transformation_config { |
| deidentify_template = "sample-deidentify-template" |
| image_redact_template = "sample-image-redact-template" |
| structured_deidentify_template = "sample-structured-deidentify-template" |
| } |
| } |
| } |
| storage_config { |
| cloud_storage_options { |
| file_set { |
| url = "gs://mybucket/directory/" |
| } |
| } |
| } |
| } |
| } |
| |
| resource "google_bigquery_dataset" "default" { |
| dataset_id = "tf_test" |
| friendly_name = "terraform-test" |
| description = "Description for the dataset created by terraform" |
| location = "US" |
| default_table_expiration_ms = 3600000 |
| |
| labels = { |
| env = "default" |
| } |
| } |
| |
| resource "google_bigquery_table" "default" { |
| dataset_id = google_bigquery_dataset.default.dataset_id |
| table_id = "tf_test" |
| deletion_protection = false |
| |
| time_partitioning { |
| type = "DAY" |
| } |
| |
| labels = { |
| env = "default" |
| } |
| |
| schema = <<EOF |
| [ |
| { |
| "name": "quantity", |
| "type": "NUMERIC", |
| "mode": "NULLABLE", |
| "description": "The quantity" |
| }, |
| { |
| "name": "name", |
| "type": "STRING", |
| "mode": "NULLABLE", |
| "description": "Name of the object" |
| } |
| ] |
| EOF |
| } |
| ``` |
| ## Example Usage - Dlp Job Trigger Hybrid |
| |
| |
| ```hcl |
| resource "google_data_loss_prevention_job_trigger" "hybrid_trigger" { |
| parent = "projects/my-project-name" |
| |
| triggers { |
| manual {} |
| } |
| |
| inspect_job { |
| inspect_template_name = "fake" |
| actions { |
| save_findings { |
| output_config { |
| table { |
| project_id = "project" |
| dataset_id = "dataset" |
| } |
| } |
| } |
| } |
| storage_config { |
| hybrid_options { |
| description = "Hybrid job trigger for data from the comments field of a table that contains customer appointment bookings" |
| required_finding_label_keys = [ |
| "appointment-bookings-comments" |
| ] |
| labels = { |
| env = "prod" |
| } |
| table_options { |
| identifying_fields { |
| name = "booking_id" |
| } |
| } |
| } |
| } |
| } |
| } |
| ``` |
| ## Example Usage - Dlp Job Trigger Inspect |
| |
| |
| ```hcl |
| resource "google_data_loss_prevention_job_trigger" "inspect" { |
| parent = "projects/my-project-name" |
| description = "Description" |
| display_name = "Displayname" |
| |
| triggers { |
| schedule { |
| recurrence_period_duration = "86400s" |
| } |
| } |
| |
| inspect_job { |
| inspect_template_name = "fake" |
| actions { |
| save_findings { |
| output_config { |
| table { |
| project_id = "project" |
| dataset_id = "dataset" |
| } |
| } |
| } |
| } |
| storage_config { |
| cloud_storage_options { |
| file_set { |
| url = "gs://mybucket/directory/" |
| } |
| } |
| } |
| inspect_config { |
| custom_info_types { |
| info_type { |
| name = "MY_CUSTOM_TYPE" |
| } |
| |
| likelihood = "UNLIKELY" |
| |
| regex { |
| pattern = "test*" |
| } |
| } |
| |
| info_types { |
| name = "EMAIL_ADDRESS" |
| } |
| |
| min_likelihood = "UNLIKELY" |
| rule_set { |
| info_types { |
| name = "EMAIL_ADDRESS" |
| } |
| rules { |
| exclusion_rule { |
| regex { |
| pattern = ".+@example.com" |
| } |
| matching_type = "MATCHING_TYPE_FULL_MATCH" |
| } |
| } |
| } |
| |
| rule_set { |
| info_types { |
| name = "MY_CUSTOM_TYPE" |
| } |
| rules { |
| hotword_rule { |
| hotword_regex { |
| pattern = "example*" |
| } |
| proximity { |
| window_before = 50 |
| } |
| likelihood_adjustment { |
| fixed_likelihood = "VERY_LIKELY" |
| } |
| } |
| } |
| } |
| |
| limits { |
| max_findings_per_item = 10 |
| max_findings_per_request = 50 |
| } |
| } |
| } |
| } |
| ``` |
| ## Example Usage - Dlp Job Trigger Publish To Stackdriver |
| |
| |
| ```hcl |
| resource "google_data_loss_prevention_job_trigger" "publish_to_stackdriver" { |
| parent = "projects/my-project-name" |
| description = "Description for the job_trigger created by terraform" |
| display_name = "TerraformDisplayName" |
| |
| triggers { |
| schedule { |
| recurrence_period_duration = "86400s" |
| } |
| } |
| |
| inspect_job { |
| inspect_template_name = "sample-inspect-template" |
| actions { |
| publish_to_stackdriver {} |
| } |
| storage_config { |
| cloud_storage_options { |
| file_set { |
| url = "gs://mybucket/directory/" |
| } |
| } |
| } |
| } |
| } |
| ``` |
| ## Example Usage - Dlp Job Trigger With Id |
| |
| |
| ```hcl |
| resource "google_data_loss_prevention_job_trigger" "with_trigger_id" { |
| parent = "projects/my-project-name" |
| description = "Starting description" |
| display_name = "display" |
| trigger_id = "id-" |
| |
| triggers { |
| schedule { |
| recurrence_period_duration = "86400s" |
| } |
| } |
| |
| inspect_job { |
| inspect_template_name = "fake" |
| actions { |
| save_findings { |
| output_config { |
| table { |
| project_id = "project" |
| dataset_id = "dataset123" |
| } |
| } |
| } |
| } |
| storage_config { |
| cloud_storage_options { |
| file_set { |
| url = "gs://mybucket/directory/" |
| } |
| } |
| } |
| } |
| } |
| ``` |
| ## Example Usage - Dlp Job Trigger Multiple Actions |
| |
| |
| ```hcl |
| resource "google_data_loss_prevention_job_trigger" "basic" { |
| parent = "projects/my-project-name" |
| description = "Description" |
| display_name = "Displayname" |
| |
| triggers { |
| schedule { |
| recurrence_period_duration = "86400s" |
| } |
| } |
| |
| inspect_job { |
| inspect_template_name = "fake" |
| |
| actions { |
| save_findings { |
| output_config { |
| table { |
| project_id = "project" |
| dataset_id = "dataset" |
| } |
| } |
| } |
| } |
| |
| actions { |
| pub_sub { |
| topic = "projects/project/topics/topic-name" |
| } |
| } |
| |
| storage_config { |
| cloud_storage_options { |
| file_set { |
| url = "gs://mybucket/directory/" |
| } |
| } |
| } |
| } |
| } |
| ``` |
| ## Example Usage - Dlp Job Trigger Cloud Storage Optional Timespan Autopopulation |
| |
| |
| ```hcl |
| resource "google_data_loss_prevention_job_trigger" "basic" { |
| parent = "projects/my-project-name" |
| description = "Description" |
| display_name = "Displayname" |
| |
| triggers { |
| schedule { |
| recurrence_period_duration = "86400s" |
| } |
| } |
| |
| inspect_job { |
| inspect_template_name = "fake" |
| actions { |
| save_findings { |
| output_config { |
| table { |
| project_id = "project" |
| dataset_id = "dataset" |
| } |
| } |
| } |
| } |
| storage_config { |
| timespan_config { |
| enable_auto_population_of_timespan_config = true |
| } |
| |
| cloud_storage_options { |
| file_set { |
| url = "gs://mybucket/directory/" |
| } |
| } |
| } |
| } |
| } |
| ``` |
| |
| ## Argument Reference |
| |
| The following arguments are supported: |
| |
| |
| * `triggers` - |
| (Required) |
| What event needs to occur for a new job to be started. |
| Structure is [documented below](#nested_triggers). |
| |
| * `parent` - |
| (Required) |
| The parent of the trigger, either in the format `projects/{{project}}` |
| or `projects/{{project}}/locations/{{location}}` |
| |
| |
| <a name="nested_triggers"></a>The `triggers` block supports: |
| |
| * `schedule` - |
| (Optional) |
| Schedule for triggered jobs |
| Structure is [documented below](#nested_schedule). |
| |
| * `manual` - |
| (Optional) |
| For use with hybrid jobs. Jobs must be manually created and finished. |
| |
| |
| <a name="nested_schedule"></a>The `schedule` block supports: |
| |
| * `recurrence_period_duration` - |
| (Optional) |
With this option, a job is started on a regular periodic basis. For example: every day (86400 seconds).
| A scheduled start time will be skipped if the previous execution has not ended when its scheduled time occurs. |
| This value must be set to a time duration greater than or equal to 1 day and can be no longer than 60 days. |
| A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". |
| |
| - - - |
| |
| |
| * `description` - |
| (Optional) |
| A description of the job trigger. |
| |
| * `display_name` - |
| (Optional) |
| User set display name of the job trigger. |
| |
| * `trigger_id` - |
| (Optional) |
The trigger id can contain uppercase and lowercase letters, numbers, hyphens, and underscores;
that is, it must match the regular expression: [a-zA-Z\d-_]+.
| The maximum length is 100 characters. Can be empty to allow the system to generate one. |
| |
| * `status` - |
| (Optional) |
| Whether the trigger is currently active. |
| Default value is `HEALTHY`. |
| Possible values are: `PAUSED`, `HEALTHY`, `CANCELLED`. |
| |
| * `inspect_job` - |
| (Optional) |
| Controls what and how to inspect for findings. |
| Structure is [documented below](#nested_inspect_job). |
| |
| |
| <a name="nested_inspect_job"></a>The `inspect_job` block supports: |
| |
| * `inspect_template_name` - |
| (Optional) |
| The name of the template to run when this job is triggered. |
| |
| * `inspect_config` - |
| (Optional) |
| The core content of the template. |
| Structure is [documented below](#nested_inspect_config). |
| |
| * `storage_config` - |
| (Required) |
| Information on where to inspect |
| Structure is [documented below](#nested_storage_config). |
| |
| * `actions` - |
| (Optional) |
| Configuration block for the actions to execute on the completion of a job. Can be specified multiple times, but only one for each type. Each action block supports fields documented below. This argument is processed in [attribute-as-blocks mode](https://www.terraform.io/docs/configuration/attr-as-blocks.html). |
| Structure is [documented below](#nested_actions). |
| |
| |
| <a name="nested_inspect_config"></a>The `inspect_config` block supports: |
| |
| * `exclude_info_types` - |
| (Optional) |
| When true, excludes type information of the findings. |
| |
| * `include_quote` - |
| (Optional) |
| When true, a contextual quote from the data that triggered a finding is included in the response. |
| |
| * `min_likelihood` - |
| (Optional) |
| Only returns findings equal or above this threshold. See https://cloud.google.com/dlp/docs/likelihood for more info |
| Default value is `POSSIBLE`. |
| Possible values are: `VERY_UNLIKELY`, `UNLIKELY`, `POSSIBLE`, `LIKELY`, `VERY_LIKELY`. |
| |
| * `limits` - |
| (Optional) |
| Configuration to control the number of findings returned. |
| Structure is [documented below](#nested_limits). |
| |
| * `info_types` - |
| (Optional) |
| Restricts what infoTypes to look for. The values must correspond to InfoType values returned by infoTypes.list |
| or listed at https://cloud.google.com/dlp/docs/infotypes-reference. |
| When no InfoTypes or CustomInfoTypes are specified in a request, the system may automatically choose what detectors to run. |
| By default this may be all types, but may change over time as detectors are updated. |
| Structure is [documented below](#nested_info_types). |
| |
| * `rule_set` - |
| (Optional) |
Set of rules to apply to the findings for this InspectConfig. Exclusion rules contained in the set are executed at the end;
other rules are executed in the order they are specified for each info type.
| Structure is [documented below](#nested_rule_set). |
| |
| * `custom_info_types` - |
| (Optional) |
| Custom info types to be used. See https://cloud.google.com/dlp/docs/creating-custom-infotypes to learn more. |
| Structure is [documented below](#nested_custom_info_types). |
| |
| |
| <a name="nested_limits"></a>The `limits` block supports: |
| |
| * `max_findings_per_item` - |
| (Optional) |
| Max number of findings that will be returned for each item scanned. The maximum returned is 2000. |
| |
| * `max_findings_per_request` - |
| (Optional) |
| Max number of findings that will be returned per request/job. The maximum returned is 2000. |
| |
| * `max_findings_per_info_type` - |
| (Optional) |
| Configuration of findings limit given for specified infoTypes. |
| Structure is [documented below](#nested_max_findings_per_info_type). |
| |
| |
| <a name="nested_max_findings_per_info_type"></a>The `max_findings_per_info_type` block supports: |
| |
| * `info_type` - |
| (Optional) |
| Type of information the findings limit applies to. Only one limit per infoType should be provided. If InfoTypeLimit does |
| not have an infoType, the DLP API applies the limit against all infoTypes that are found but not |
| specified in another InfoTypeLimit. |
| Structure is [documented below](#nested_info_type). |
| |
| * `max_findings` - |
| (Optional) |
| Max findings limit for the given infoType. |
| |
| |
| <a name="nested_info_type"></a>The `info_type` block supports: |
| |
| * `name` - |
| (Required) |
| Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed |
| at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type. |
| |
| * `version` - |
| (Optional) |
Version of the information type to use. By default, the version is set to stable.
| |
| * `sensitivity_score` - |
| (Optional) |
| Optional custom sensitivity for this InfoType. This only applies to data profiling. |
| Structure is [documented below](#nested_sensitivity_score). |
| |
| |
| <a name="nested_sensitivity_score"></a>The `sensitivity_score` block supports: |
| |
| * `score` - |
| (Required) |
| The sensitivity score applied to the resource. |
| Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. |
| |
| <a name="nested_info_types"></a>The `info_types` block supports: |
| |
| * `name` - |
| (Required) |
| Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed |
| at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type. |
| |
| * `version` - |
| (Optional) |
Version of the information type to use. By default, the version is set to stable.
| |
| * `sensitivity_score` - |
| (Optional) |
| Optional custom sensitivity for this InfoType. This only applies to data profiling. |
| Structure is [documented below](#nested_sensitivity_score). |
| |
| |
| <a name="nested_sensitivity_score"></a>The `sensitivity_score` block supports: |
| |
| * `score` - |
| (Required) |
| The sensitivity score applied to the resource. |
| Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. |
| |
| <a name="nested_rule_set"></a>The `rule_set` block supports: |
| |
| * `info_types` - |
| (Optional) |
| List of infoTypes this rule set is applied to. |
| Structure is [documented below](#nested_info_types). |
| |
| * `rules` - |
| (Required) |
| Set of rules to be applied to infoTypes. The rules are applied in order. |
| Structure is [documented below](#nested_rules). |
| |
| |
| <a name="nested_info_types"></a>The `info_types` block supports: |
| |
| * `name` - |
| (Required) |
| Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed |
| at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type. |
| |
| * `version` - |
| (Optional) |
| Version of the information type to use. By default, the version is set to stable. |
| |
| * `sensitivity_score` - |
| (Optional) |
| Optional custom sensitivity for this InfoType. This only applies to data profiling. |
| Structure is [documented below](#nested_sensitivity_score). |
| |
| |
| <a name="nested_sensitivity_score"></a>The `sensitivity_score` block supports: |
| |
| * `score` - |
| (Required) |
| The sensitivity score applied to the resource. |
| Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. |
| |
| <a name="nested_rules"></a>The `rules` block supports: |
| |
| * `hotword_rule` - |
| (Optional) |
| Hotword-based detection rule. |
| Structure is [documented below](#nested_hotword_rule). |
| |
| * `exclusion_rule` - |
| (Optional) |
| The rule that specifies conditions when findings of infoTypes specified in InspectionRuleSet are removed from results. |
| Structure is [documented below](#nested_exclusion_rule). |
| |
| |
| <a name="nested_hotword_rule"></a>The `hotword_rule` block supports: |
| |
| * `hotword_regex` - |
| (Optional) |
| Regular expression pattern defining what qualifies as a hotword. |
| Structure is [documented below](#nested_hotword_regex). |
| |
| * `proximity` - |
| (Optional) |
| Proximity of the finding within which the entire hotword must reside. The total length of the window cannot |
| exceed 1000 characters. Note that the finding itself will be included in the window, so that hotwords may be |
| used to match substrings of the finding itself. For example, the certainty of a phone number regex |
| `(\d{3}) \d{3}-\d{4}` could be adjusted upwards if the area code is known to be the local area code of a company |
| office using the hotword regex `(xxx)`, where `xxx` is the area code in question. |
| Structure is [documented below](#nested_proximity). |
| |
| * `likelihood_adjustment` - |
| (Optional) |
| Likelihood adjustment to apply to all matching findings. |
| Structure is [documented below](#nested_likelihood_adjustment). |
| |
| |
| <a name="nested_hotword_regex"></a>The `hotword_regex` block supports: |
| |
| * `pattern` - |
| (Optional) |
| Pattern defining the regular expression. Its syntax |
| (https://github.com/google/re2/wiki/Syntax) can be found under the google/re2 repository on GitHub. |
| |
| * `group_indexes` - |
| (Optional) |
| The index of the submatch to extract as findings. When not specified, |
| the entire match is returned. No more than 3 may be included. |
| |
| <a name="nested_proximity"></a>The `proximity` block supports: |
| |
| * `window_before` - |
| (Optional) |
| Number of characters before the finding to consider. Either this or window_after must be specified |
| |
| * `window_after` - |
| (Optional) |
| Number of characters after the finding to consider. Either this or window_before must be specified |
| |
| <a name="nested_likelihood_adjustment"></a>The `likelihood_adjustment` block supports: |
| |
| * `fixed_likelihood` - |
| (Optional) |
| Set the likelihood of a finding to a fixed value. Either this or relative_likelihood can be set. |
| Possible values are: `VERY_UNLIKELY`, `UNLIKELY`, `POSSIBLE`, `LIKELY`, `VERY_LIKELY`. |
| |
| * `relative_likelihood` - |
| (Optional) |
| Increase or decrease the likelihood by the specified number of levels. For example, |
| if a finding would be POSSIBLE without the detection rule and relativeLikelihood is 1, |
| then it is upgraded to LIKELY, while a value of -1 would downgrade it to UNLIKELY. |
| Likelihood may never drop below VERY_UNLIKELY or exceed VERY_LIKELY, so applying an |
| adjustment of 1 followed by an adjustment of -1 when base likelihood is VERY_LIKELY |
| will result in a final likelihood of LIKELY. Either this or fixed_likelihood can be set. |
| |
| <a name="nested_exclusion_rule"></a>The `exclusion_rule` block supports: |
| |
| * `matching_type` - |
| (Required) |
| How the rule is applied. See the documentation for more information: https://cloud.google.com/dlp/docs/reference/rest/v2/InspectConfig#MatchingType |
| Possible values are: `MATCHING_TYPE_FULL_MATCH`, `MATCHING_TYPE_PARTIAL_MATCH`, `MATCHING_TYPE_INVERSE_MATCH`. |
| |
| * `dictionary` - |
| (Optional) |
| Dictionary which defines the rule. |
| Structure is [documented below](#nested_dictionary). |
| |
| * `regex` - |
| (Optional) |
| Regular expression which defines the rule. |
| Structure is [documented below](#nested_regex). |
| |
| * `exclude_info_types` - |
| (Optional) |
| Set of infoTypes for which findings would affect this rule. |
| Structure is [documented below](#nested_exclude_info_types). |
| |
| * `exclude_by_hotword` - |
| (Optional) |
| Drop if the hotword rule is contained in the proximate context. |
| Structure is [documented below](#nested_exclude_by_hotword). |
| |
| |
| <a name="nested_dictionary"></a>The `dictionary` block supports: |
| |
| * `word_list` - |
| (Optional) |
| List of words or phrases to search for. |
| Structure is [documented below](#nested_word_list). |
| |
| * `cloud_storage_path` - |
| (Optional) |
| Newline-delimited file of words in Cloud Storage. Only a single file is accepted. |
| Structure is [documented below](#nested_cloud_storage_path). |
| |
| |
| <a name="nested_word_list"></a>The `word_list` block supports: |
| |
| * `words` - |
| (Required) |
| Words or phrases defining the dictionary. The dictionary must contain at least one |
| phrase and every phrase must contain at least 2 characters that are letters or digits. |
| |
| <a name="nested_cloud_storage_path"></a>The `cloud_storage_path` block supports: |
| |
| * `path` - |
| (Required) |
| A url representing a file or path (no wildcards) in Cloud Storage. Example: `gs://[BUCKET_NAME]/dictionary.txt` |
| |
| <a name="nested_regex"></a>The `regex` block supports: |
| |
| * `pattern` - |
| (Required) |
| Pattern defining the regular expression. |
| Its syntax (https://github.com/google/re2/wiki/Syntax) can be found under the google/re2 repository on GitHub. |
| |
| * `group_indexes` - |
| (Optional) |
| The index of the submatch to extract as findings. When not specified, the entire match is returned. No more than 3 may be included. |
| |
| <a name="nested_exclude_info_types"></a>The `exclude_info_types` block supports: |
| |
| * `info_types` - |
| (Required) |
| If a finding is matched by any of the infoType detectors listed here, the finding will be excluded from the scan results. |
| Structure is [documented below](#nested_info_types). |
| |
| |
| <a name="nested_info_types"></a>The `info_types` block supports: |
| |
| * `name` - |
| (Required) |
| Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed |
| at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type. |
| |
| * `version` - |
| (Optional) |
| Version of the information type to use. By default, the version is set to stable. |
| |
| * `sensitivity_score` - |
| (Optional) |
| Optional custom sensitivity for this InfoType. This only applies to data profiling. |
| Structure is [documented below](#nested_sensitivity_score). |
| |
| |
| <a name="nested_sensitivity_score"></a>The `sensitivity_score` block supports: |
| |
| * `score` - |
| (Required) |
| The sensitivity score applied to the resource. |
| Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. |
| |
| <a name="nested_exclude_by_hotword"></a>The `exclude_by_hotword` block supports: |
| |
| * `hotword_regex` - |
| (Optional) |
| Regular expression pattern defining what qualifies as a hotword. |
| Structure is [documented below](#nested_hotword_regex). |
| |
| * `proximity` - |
| (Optional) |
| Proximity of the finding within which the entire hotword must reside. The total length of the window cannot |
| exceed 1000 characters. Note that the finding itself will be included in the window, so that hotwords may be |
| used to match substrings of the finding itself. For example, the certainty of a phone number regex |
| `(\d{3}) \d{3}-\d{4}` could be adjusted upwards if the area code is known to be the local area code of a company |
| office using the hotword regex `(xxx)`, where `xxx` is the area code in question. |
| Structure is [documented below](#nested_proximity). |
| |
| |
| <a name="nested_hotword_regex"></a>The `hotword_regex` block supports: |
| |
| * `pattern` - |
| (Optional) |
| Pattern defining the regular expression. Its syntax |
| (https://github.com/google/re2/wiki/Syntax) can be found under the google/re2 repository on GitHub. |
| |
| * `group_indexes` - |
| (Optional) |
| The index of the submatch to extract as findings. When not specified, |
| the entire match is returned. No more than 3 may be included. |
| |
| <a name="nested_proximity"></a>The `proximity` block supports: |
| |
| * `window_before` - |
| (Optional) |
| Number of characters before the finding to consider. Either this or window_after must be specified |
| |
| * `window_after` - |
| (Optional) |
| Number of characters after the finding to consider. Either this or window_before must be specified |
| |
| <a name="nested_custom_info_types"></a>The `custom_info_types` block supports: |
| |
| * `info_type` - |
| (Required) |
CustomInfoType can either be a new infoType, or an extension of a built-in infoType, when the name matches one of the existing
infoTypes and that infoType is specified in the `info_types` field. Specifying the latter adds findings to the
ones detected by the system. If a built-in info type is not specified in the `info_types` list then the name is
treated as a custom info type.
| Structure is [documented below](#nested_info_type). |
| |
| * `likelihood` - |
| (Optional) |
| Likelihood to return for this CustomInfoType. This base value can be altered by a detection rule if the finding meets the criteria |
| specified by the rule. |
| Default value is `VERY_LIKELY`. |
| Possible values are: `VERY_UNLIKELY`, `UNLIKELY`, `POSSIBLE`, `LIKELY`, `VERY_LIKELY`. |
| |
| * `exclusion_type` - |
| (Optional) |
| If set to EXCLUSION_TYPE_EXCLUDE this infoType will not cause a finding to be returned. It still can be used for rules matching. |
| Possible values are: `EXCLUSION_TYPE_EXCLUDE`. |
| |
| * `sensitivity_score` - |
| (Optional) |
| Optional custom sensitivity for this InfoType. This only applies to data profiling. |
| Structure is [documented below](#nested_sensitivity_score). |
| |
| * `regex` - |
| (Optional) |
| Regular expression which defines the rule. |
| Structure is [documented below](#nested_regex). |
| |
| * `dictionary` - |
| (Optional) |
| Dictionary which defines the rule. |
| Structure is [documented below](#nested_dictionary). |
| |
| * `stored_type` - |
| (Optional) |
| A reference to a StoredInfoType to use with scanning. |
| Structure is [documented below](#nested_stored_type). |
| |
| * `surrogate_type` - |
| (Optional) |
| Message for detecting output from deidentification transformations that support reversing. |
| |
| |
| <a name="nested_info_type"></a>The `info_type` block supports: |
| |
| * `name` - |
| (Required) |
| Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names |
| listed at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type. |
| |
| * `version` - |
| (Optional) |
| Version of the information type to use. By default, the version is set to stable. |
| |
| * `sensitivity_score` - |
| (Optional) |
| Optional custom sensitivity for this InfoType. This only applies to data profiling. |
| Structure is [documented below](#nested_sensitivity_score). |
| |
| |
| <a name="nested_sensitivity_score"></a>The `sensitivity_score` block supports: |
| |
| * `score` - |
| (Required) |
| The sensitivity score applied to the resource. |
| Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. |
| |
| <a name="nested_sensitivity_score"></a>The `sensitivity_score` block supports: |
| |
| * `score` - |
| (Required) |
| The sensitivity score applied to the resource. |
| Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. |
| |
| <a name="nested_regex"></a>The `regex` block supports: |
| |
| * `pattern` - |
| (Required) |
| Pattern defining the regular expression. |
| Its syntax (https://github.com/google/re2/wiki/Syntax) can be found under the google/re2 repository on GitHub. |
| |
| * `group_indexes` - |
| (Optional) |
| The index of the submatch to extract as findings. When not specified, the entire match is returned. No more than 3 may be included. |
| |
| <a name="nested_dictionary"></a>The `dictionary` block supports: |
| |
| * `word_list` - |
| (Optional) |
| List of words or phrases to search for. |
| Structure is [documented below](#nested_word_list). |
| |
| * `cloud_storage_path` - |
| (Optional) |
| Newline-delimited file of words in Cloud Storage. Only a single file is accepted. |
| Structure is [documented below](#nested_cloud_storage_path). |
| |
| |
| <a name="nested_word_list"></a>The `word_list` block supports: |
| |
| * `words` - |
| (Required) |
| Words or phrases defining the dictionary. The dictionary must contain at least one |
| phrase and every phrase must contain at least 2 characters that are letters or digits. |
| |
| <a name="nested_cloud_storage_path"></a>The `cloud_storage_path` block supports: |
| |
| * `path` - |
| (Required) |
| A url representing a file or path (no wildcards) in Cloud Storage. Example: `gs://[BUCKET_NAME]/dictionary.txt` |
| |
| <a name="nested_stored_type"></a>The `stored_type` block supports: |
| |
| * `name` - |
| (Required) |
| Resource name of the requested StoredInfoType, for example `organizations/433245324/storedInfoTypes/432452342` |
| or `projects/project-id/storedInfoTypes/432452342`. |
| |
| * `create_time` - |
| (Output) |
| The creation timestamp of the requested StoredInfoType. Set by the server. |
| |
| <a name="nested_storage_config"></a>The `storage_config` block supports: |
| |
| * `timespan_config` - |
| (Optional) |
| Configuration of the timespan of the items to include in scanning |
| Structure is [documented below](#nested_timespan_config). |
| |
| * `datastore_options` - |
| (Optional) |
| Options defining a data set within Google Cloud Datastore. |
| Structure is [documented below](#nested_datastore_options). |
| |
| * `cloud_storage_options` - |
| (Optional) |
| Options defining a file or a set of files within a Google Cloud Storage bucket. |
| Structure is [documented below](#nested_cloud_storage_options). |
| |
| * `big_query_options` - |
| (Optional) |
| Options defining BigQuery table and row identifiers. |
| Structure is [documented below](#nested_big_query_options). |
| |
| * `hybrid_options` - |
| (Optional) |
| Configuration to control jobs where the content being inspected is outside of Google Cloud Platform. |
| Structure is [documented below](#nested_hybrid_options). |
| |
| |
| <a name="nested_timespan_config"></a>The `timespan_config` block supports: |
| |
| * `start_time` - |
| (Optional) |
| Exclude files, tables, or rows older than this value. If not set, no lower time limit is applied. |
| |
| * `end_time` - |
| (Optional) |
| Exclude files, tables, or rows newer than this value. If not set, no upper time limit is applied. |
| |
| * `enable_auto_population_of_timespan_config` - |
| (Optional) |
| When the job is started by a JobTrigger we will automatically figure out a valid startTime to avoid |
| scanning files that have not been modified since the last time the JobTrigger executed. This will |
| be based on the time of the execution of the last run of the JobTrigger or the timespan endTime |
| used in the last run of the JobTrigger. |
| |
| * `timestamp_field` - |
| (Optional) |
| Specification of the field containing the timestamp of scanned items. |
| Structure is [documented below](#nested_timestamp_field). |
| |
| |
| <a name="nested_timestamp_field"></a>The `timestamp_field` block supports: |
| |
| * `name` - |
| (Required) |
| Specification of the field containing the timestamp of scanned items. Used for data sources like Datastore and BigQuery. |
| For BigQuery: Required to filter out rows based on the given start and end times. If not specified and the table was |
| modified between the given start and end times, the entire table will be scanned. The valid data types of the timestamp |
| field are: INTEGER, DATE, TIMESTAMP, or DATETIME BigQuery column. |
| For Datastore: valid data types of the timestamp field are: TIMESTAMP. A Datastore entity will be scanned if the |
| timestamp property does not exist or its value is empty or invalid. |
| |
| <a name="nested_datastore_options"></a>The `datastore_options` block supports: |
| |
| * `partition_id` - |
| (Required) |
| Datastore partition ID. A partition ID identifies a grouping of entities. The grouping |
| is always by project and namespace, however the namespace ID may be empty. |
| Structure is [documented below](#nested_partition_id). |
| |
| * `kind` - |
| (Required) |
| A representation of a Datastore kind. |
| Structure is [documented below](#nested_kind). |
| |
| |
| <a name="nested_partition_id"></a>The `partition_id` block supports: |
| |
| * `project_id` - |
| (Required) |
| The ID of the project to which the entities belong. |
| |
| * `namespace_id` - |
| (Optional) |
| If not empty, the ID of the namespace to which the entities belong. |
| |
| <a name="nested_kind"></a>The `kind` block supports: |
| |
| * `name` - |
| (Required) |
| The name of the Datastore kind. |
| |
| <a name="nested_cloud_storage_options"></a>The `cloud_storage_options` block supports: |
| |
| * `file_set` - |
| (Required) |
| Set of files to scan. |
| Structure is [documented below](#nested_file_set). |
| |
| * `bytes_limit_per_file` - |
| (Optional) |
| Max number of bytes to scan from a file. If a scanned file's size is bigger than this value |
| then the rest of the bytes are omitted. |
| |
| * `bytes_limit_per_file_percent` - |
| (Optional) |
| Max percentage of bytes to scan from a file. The rest are omitted. The number of bytes scanned is rounded down. |
| Must be between 0 and 100, inclusively. Both 0 and 100 means no limit. |
| |
| * `files_limit_percent` - |
| (Optional) |
| Limits the number of files to scan to this percentage of the input FileSet. Number of files scanned is rounded down. |
| Must be between 0 and 100, inclusively. Both 0 and 100 means no limit. |
| |
| * `file_types` - |
| (Optional) |
| List of file type groups to include in the scan. If empty, all files are scanned and available data |
| format processors are applied. In addition, the binary content of the selected files is always scanned as well. |
| Images are scanned only as binary if the specified region does not support image inspection and no fileTypes were specified. |
| Each value may be one of: `BINARY_FILE`, `TEXT_FILE`, `IMAGE`, `WORD`, `PDF`, `AVRO`, `CSV`, `TSV`, `POWERPOINT`, `EXCEL`. |
| |
| * `sample_method` - |
| (Optional) |
| How to sample bytes if not all bytes are scanned. Meaningful only when used in conjunction with bytesLimitPerFile. |
| If not specified, scanning would start from the top. |
| Possible values are: `TOP`, `RANDOM_START`. |
| |
| |
| <a name="nested_file_set"></a>The `file_set` block supports: |
| |
| * `url` - |
| (Optional) |
| The Cloud Storage url of the file(s) to scan, in the format `gs://<bucket>/<path>`. Trailing wildcard |
| in the path is allowed. |
| If the url ends in a trailing slash, the bucket or directory represented by the url will be scanned |
| non-recursively (content in sub-directories will not be scanned). This means that `gs://mybucket/` is |
| equivalent to `gs://mybucket/*`, and `gs://mybucket/directory/` is equivalent to `gs://mybucket/directory/*`. |
| |
| * `regex_file_set` - |
| (Optional) |
| The regex-filtered set of files to scan. |
| Structure is [documented below](#nested_regex_file_set). |
| |
| |
| <a name="nested_regex_file_set"></a>The `regex_file_set` block supports: |
| |
| * `bucket_name` - |
| (Required) |
| The name of a Cloud Storage bucket. |
| |
| * `include_regex` - |
| (Optional) |
| A list of regular expressions matching file paths to include. All files in the bucket |
| that match at least one of these regular expressions will be included in the set of files, |
| except for those that also match an item in excludeRegex. Leaving this field empty will |
| match all files by default (this is equivalent to including .* in the list) |
| |
| * `exclude_regex` - |
| (Optional) |
| A list of regular expressions matching file paths to exclude. All files in the bucket that match at |
| least one of these regular expressions will be excluded from the scan. |
| |
| <a name="nested_big_query_options"></a>The `big_query_options` block supports: |
| |
| * `table_reference` - |
| (Required) |
| Reference to the BigQuery table to scan. |
| Structure is [documented below](#nested_table_reference). |
| |
| * `rows_limit` - |
| (Optional) |
| Max number of rows to scan. If the table has more rows than this value, the rest of the rows are omitted. |
| If not set, or if set to 0, all rows will be scanned. Only one of rowsLimit and rowsLimitPercent can be |
| specified. Cannot be used in conjunction with TimespanConfig. |
| |
| * `rows_limit_percent` - |
| (Optional) |
| Max percentage of rows to scan. The rest are omitted. The number of rows scanned is rounded down. |
| Must be between 0 and 100, inclusively. Both 0 and 100 means no limit. Defaults to 0. Only one of |
| rowsLimit and rowsLimitPercent can be specified. Cannot be used in conjunction with TimespanConfig. |
| |
| * `sample_method` - |
| (Optional) |
| How to sample rows if not all rows are scanned. Meaningful only when used in conjunction with either |
| rowsLimit or rowsLimitPercent. If not specified, rows are scanned in the order BigQuery reads them. |
| Default value is `TOP`. |
| Possible values are: `TOP`, `RANDOM_START`. |
| |
| * `identifying_fields` - |
| (Optional) |
| Specifies the BigQuery fields that will be returned with findings. |
| If not specified, no identifying fields will be returned for findings. |
| Structure is [documented below](#nested_identifying_fields). |
| |
| * `included_fields` - |
| (Optional) |
| Limit scanning only to these fields. |
| Structure is [documented below](#nested_included_fields). |
| |
| * `excluded_fields` - |
| (Optional) |
| References to fields excluded from scanning. |
| This allows you to skip inspection of entire columns which you know have no findings. |
| Structure is [documented below](#nested_excluded_fields). |
| |
| |
| <a name="nested_table_reference"></a>The `table_reference` block supports: |
| |
| * `project_id` - |
| (Required) |
| The Google Cloud Platform project ID of the project containing the table. |
| |
| * `dataset_id` - |
| (Required) |
| The dataset ID of the table. |
| |
| * `table_id` - |
| (Required) |
| The name of the table. |
| |
| <a name="nested_identifying_fields"></a>The `identifying_fields` block supports: |
| |
| * `name` - |
| (Required) |
| Name of a BigQuery field to be returned with the findings. |
| |
| <a name="nested_included_fields"></a>The `included_fields` block supports: |
| |
| * `name` - |
| (Required) |
| Name describing the field to which scanning is limited. |
| |
| <a name="nested_excluded_fields"></a>The `excluded_fields` block supports: |
| |
| * `name` - |
| (Required) |
| Name describing the field excluded from scanning. |
| |
| <a name="nested_hybrid_options"></a>The `hybrid_options` block supports: |
| |
| * `description` - |
| (Optional) |
| A short description of where the data is coming from. Will be stored once in the job. 256 max length. |
| |
| * `required_finding_label_keys` - |
| (Optional) |
| These are labels that each inspection request must include within their 'finding_labels' map. Request |
| may contain others, but any missing one of these will be rejected. |
| Label keys must be between 1 and 63 characters long and must conform to the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`. |
| No more than 10 keys can be required. |
| |
| * `table_options` - |
| (Optional) |
| If the container is a table, additional information to make findings meaningful such as the columns that are primary keys. |
| Structure is [documented below](#nested_table_options). |
| |
| * `labels` - |
| (Optional) |
| To organize findings, these labels will be added to each finding. |
| Label keys must be between 1 and 63 characters long and must conform to the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`. |
| Label values must be between 0 and 63 characters long and must conform to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`. |
| No more than 10 labels can be associated with a given finding. |
| Examples: |
| * `"environment" : "production"` |
| * `"pipeline" : "etl"` |
| |
| |
| <a name="nested_table_options"></a>The `table_options` block supports: |
| |
| * `identifying_fields` - |
| (Optional) |
| The columns that are the primary keys for table objects included in ContentItem. A copy of this |
| cell's value will be stored alongside each finding so that the finding can be traced to |
| the specific row it came from. No more than 3 may be provided. |
| Structure is [documented below](#nested_identifying_fields). |
| |
| |
| <a name="nested_identifying_fields"></a>The `identifying_fields` block supports: |
| |
| * `name` - |
| (Required) |
| Name describing the field. |
| |
| <a name="nested_actions"></a>The `actions` block supports: |
| |
| * `save_findings` - |
| (Optional) |
| If set, the detailed findings will be persisted to the specified OutputStorageConfig. Only a single instance of this action can be specified. Compatible with: Inspect, Risk |
| Structure is [documented below](#nested_save_findings). |
| |
| * `pub_sub` - |
| (Optional) |
| Publish a message into a given Pub/Sub topic when the job completes. |
| Structure is [documented below](#nested_pub_sub). |
| |
| * `publish_summary_to_cscc` - |
| (Optional) |
| Publish the result summary of a DlpJob to the Cloud Security Command Center. |
| |
| * `publish_findings_to_cloud_data_catalog` - |
| (Optional) |
| Publish findings of a DlpJob to Data Catalog. |
| |
| * `job_notification_emails` - |
| (Optional) |
| Sends an email when the job completes. The email goes to IAM project owners and technical Essential Contacts. |
| |
| * `deidentify` - |
| (Optional) |
| Create a de-identified copy of the requested table or files. |
| Structure is [documented below](#nested_deidentify). |
| |
| * `publish_to_stackdriver` - |
| (Optional) |
| Enable Stackdriver metric dlp.googleapis.com/findingCount. |
| |
| |
| <a name="nested_save_findings"></a>The `save_findings` block supports: |
| |
| * `output_config` - |
| (Required) |
| Information on where to store output |
| Structure is [documented below](#nested_output_config). |
| |
| |
| <a name="nested_output_config"></a>The `output_config` block supports: |
| |
| * `table` - |
| (Required) |
| Information on the location of the target BigQuery Table. |
| Structure is [documented below](#nested_table). |
| |
| * `output_schema` - |
| (Optional) |
| Schema used for writing the findings for Inspect jobs. This field is only used for |
| Inspect and must be unspecified for Risk jobs. Columns are derived from the Finding |
| object. If appending to an existing table, any columns from the predefined schema |
| that are missing will be added. No columns in the existing table will be deleted. |
| If unspecified, then all available columns will be used for a new table or an (existing) |
| table with no schema, and no changes will be made to an existing table that has a schema. |
| Only for use with external storage. |
| Possible values are: `BASIC_COLUMNS`, `GCS_COLUMNS`, `DATASTORE_COLUMNS`, `BIG_QUERY_COLUMNS`, `ALL_COLUMNS`. |
| |
| |
| <a name="nested_table"></a>The `table` block supports: |
| |
| * `project_id` - |
| (Required) |
| The Google Cloud Platform project ID of the project containing the table. |
| |
| * `dataset_id` - |
| (Required) |
| Dataset ID of the table. |
| |
| * `table_id` - |
| (Optional) |
| Name of the table. If it is not set, a new one will be generated for you with the following format: |
| `dlp_googleapis_yyyy_mm_dd_[dlp_job_id]`. Pacific timezone will be used for generating the date details. |
| |
| <a name="nested_pub_sub"></a>The `pub_sub` block supports: |
| |
| * `topic` - |
| (Required) |
| Cloud Pub/Sub topic to send notifications to. |
| |
| <a name="nested_deidentify"></a>The `deidentify` block supports: |
| |
| * `cloud_storage_output` - |
| (Required) |
| User settable Cloud Storage bucket and folders to store de-identified files. |
| This field must be set for cloud storage deidentification. |
| The output Cloud Storage bucket must be different from the input bucket. |
| De-identified files will overwrite files in the output path. |
| Form of: gs://bucket/folder/ or gs://bucket |
| |
| * `file_types_to_transform` - |
| (Optional) |
| List of user-specified file type groups to transform. If specified, only the files with these filetypes will be transformed. |
| If empty, all supported files will be transformed. Supported types may be automatically added over time. |
| If a file type is set in this field that isn't supported by the Deidentify action then the job will fail and will not be successfully created/started. |
| Each value may be one of: `IMAGE`, `TEXT_FILE`, `CSV`, `TSV`. |
| |
| * `transformation_config` - |
| (Optional) |
| User specified deidentify templates and configs for structured, unstructured, and image files. |
| Structure is [documented below](#nested_transformation_config). |
| |
| * `transformation_details_storage_config` - |
| (Optional) |
| Config for storing transformation details. |
| Structure is [documented below](#nested_transformation_details_storage_config). |
| |
| |
| <a name="nested_transformation_config"></a>The `transformation_config` block supports: |
| |
| * `deidentify_template` - |
| (Optional) |
| If this template is specified, it will serve as the default de-identify template. |
| |
| * `structured_deidentify_template` - |
| (Optional) |
| If this template is specified, it will serve as the de-identify template for structured content such as delimited files and tables. |
| |
| * `image_redact_template` - |
| (Optional) |
| If this template is specified, it will serve as the de-identify template for images. |
| |
| <a name="nested_transformation_details_storage_config"></a>The `transformation_details_storage_config` block supports: |
| |
| * `table` - |
| (Required) |
| The BigQuery table in which to store the output. |
| Structure is [documented below](#nested_table). |
| |
| |
| <a name="nested_table"></a>The `table` block supports: |
| |
| * `dataset_id` - |
| (Required) |
| The ID of the dataset containing this table. |
| |
| * `project_id` - |
| (Required) |
| The ID of the project containing this table. |
| |
| * `table_id` - |
| (Optional) |
| The ID of the table. The ID must contain only letters (a-z, |
| A-Z), numbers (0-9), or underscores (_). The maximum length |
| is 1,024 characters. |
| |
| ## Attributes Reference |
| |
| In addition to the arguments listed above, the following computed attributes are exported: |
| |
| * `id` - an identifier for the resource with format `{{parent}}/jobTriggers/{{name}}` |
| |
| * `name` - |
| The resource name of the job trigger. Set by the server. |
| |
| * `create_time` - |
| The creation timestamp of a JobTrigger. Set by the server. |
| |
| * `update_time` - |
| The last update timestamp of a JobTrigger. Set by the server. |
| |
| * `last_run_time` - |
| The timestamp of the last time this trigger executed. |
| |
| |
| ## Timeouts |
| |
| This resource provides the following |
| [Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: |
| |
| - `create` - Default is 20 minutes. |
| - `update` - Default is 20 minutes. |
| - `delete` - Default is 20 minutes. |
| |
| ## Import |
| |
| |
| JobTrigger can be imported using any of these accepted formats: |
| |
| * `{{parent}}/jobTriggers/{{name}}` |
| * `{{parent}}/{{name}}` |
| |
| |
| In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import JobTrigger using one of the formats above. For example: |
| |
| ```tf |
| import { |
| id = "{{parent}}/jobTriggers/{{name}}" |
| to = google_data_loss_prevention_job_trigger.default |
| } |
| ``` |
| |
| When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), JobTrigger can be imported using one of the formats above. For example: |
| |
| ``` |
| $ terraform import google_data_loss_prevention_job_trigger.default {{parent}}/jobTriggers/{{name}} |
| $ terraform import google_data_loss_prevention_job_trigger.default {{parent}}/{{name}} |
| ``` |