diff --git a/.github/workflows/tf-acc-test.yaml b/.github/workflows/tf-acc-test.yaml index 41be99f6..70501a07 100644 --- a/.github/workflows/tf-acc-test.yaml +++ b/.github/workflows/tf-acc-test.yaml @@ -22,6 +22,7 @@ jobs: TF_ACC_DNS_CUSTOM_ENDPOINT: ${{ secrets.TF_ACC_DNS_CUSTOM_ENDPOINT }} TF_ACC_LOGME_CUSTOM_ENDPOINT: ${{ secrets.TF_ACC_LOGME_CUSTOM_ENDPOINT }} TF_ACC_MARIADB_CUSTOM_ENDPOINT: ${{ secrets.TF_ACC_MARIADB_CUSTOM_ENDPOINT }} + TF_ACC_OBSERVABILITY_CUSTOM_ENDPOINT: ${{ secrets.TF_ACC_OBSERVABILITY_CUSTOM_ENDPOINT }} TF_ACC_OPENSEARCH_CUSTOM_ENDPOINT: ${{ secrets.TF_ACC_OPENSEARCH_CUSTOM_ENDPOINT }} TF_ACC_POSTGRESFLEX_CUSTOM_ENDPOINT: ${{ secrets.TF_ACC_POSTGRESFLEX_CUSTOM_ENDPOINT }} TF_ACC_POSTGRESQL_CUSTOM_ENDPOINT: ${{ secrets.TF_ACC_POSTGRESQL_CUSTOM_ENDPOINT }} diff --git a/MIGRATION.md b/MIGRATION.md index 60e0230a..22b14f09 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -128,36 +128,39 @@ resource "stackit_logme_credential" "example-credential" { ## Available resources -| Community provider | Official provider | Import available? 
| `id` format | Notes| -|-|-|-|-|-| -| stackit_argus_credential | stackit_argus_credential | :x: | | | -| stackit_argus_instance | stackit_argus_instance | :white_check_mark: | [project_id],[instance_id] | | -| stackit_argus_job | stackit_argus_scrapeconfig | :white_check_mark: | [project_id],[instance_id],[name] | | -| stackit_elasticsearch_credential | | | | Service deprecated | -| stackit_elasticsearch_instance | | | | Service deprecated | -| stackit_kubernetes_cluster | stackit_ske_cluster | :white_check_mark: | [project_id],[name] | | -| stackit_kubernetes_project | stackit_ske_project | :white_check_mark: | [project_id] | | -| stackit_load_balancer | stackit_loadbalancer | :white_check_mark: | [project_id],[name] | | -| stackit_logme_credential | stackit_logme_credential | :white_check_mark: | [project_id],[instance_id],[credential_id] | | -| stackit_logme_instance | stackit_logme_instance | :white_check_mark: | [project_id],[instance_id] | | -| stackit_mariadb_credential | stackit_mariadb_credential | :white_check_mark: | [project_id],[instance_id],[credential_id] | | -| stackit_mariadb_instance | stackit_mariadb_instance | :white_check_mark: | [project_id],[instance_id] | | -| stackit_mongodb_flex_instance | stackit_mongodbflex_instance | :white_check_mark: | [project_id],[instance_id] | | -| stackit_mongodb_flex_user | stackit_mongodbflex_user | :warning: | [project_id],[instance_id],[user_id] | `password` field will be empty | -| stackit_object_storage_bucket | stackit_objectstorage_bucket | :white_check_mark: | [project_id],[name] | | -| stackit_object_storage_credential | stackit_objectstorage_credential | :white_check_mark: | [project_id],[credentials_group_id],[credential_id] | | -| stackit_object_storage_credentials_group | stackit_objectstorage_credentials_group | :white_check_mark: | [project_id],[credentials_group_id] | | -| stackit_object_storage_project | | | | Resource deprecated | -| stackit_opensearch_credential | 
stackit_opensearch_credential | :white_check_mark: | [project_id],[credentials_group_id],[credential_id] | | -| stackit_opensearch_instance | stackit_opensearch_instance | :white_check_mark: | [project_id],[instance_id] | | -| stackit_postgres_credential | stackit_postgresql_credential | :white_check_mark: | [project_id],[credentials_group_id],[credential_id] | | -| stackit_postgres_flex_instance | stackit_postgresflex_instance | :white_check_mark: | [project_id],[instance_id] | | -| stackit_postgres_flex_user | stackit_postgresflex_user | :warning: | [project_id],[instance_id],[user_id] | `password` field will be empty | -| stackit_postgres_instance | stackit_postgresql_instance | :white_check_mark: | [project_id],[instance_id] | | -| stackit_project | stackit_resourcemanager_project | :white_check_mark: | [container_id] | | -| stackit_rabbitmq_credential | stackit_rabbitmq_credential | :white_check_mark: | [project_id],[credentials_group_id],[credential_id] | | -| stackit_rabbitmq_instance | stackit_rabbitmq_instance | :white_check_mark: | [project_id],[instance_id] | | -| stackit_redis_credential | stackit_redis_credential | :white_check_mark: | [project_id],[credentials_group_id],[credential_id] | | -| stackit_redis_instance | stackit_redis_instance | :white_check_mark: | [project_id],[instance_id] | | -| stackit_secrets_manager_instance | stackit_secretsmanager_instance | :white_check_mark: | [project_id],[instance_id] | | -| stackit_secrets_manager_user | stackit_secretsmanager_user | :warning: | [project_id],[instance_id],[user_id] | `password` field will be empty | +| Community provider | Official provider | Import available? 
| `id` format | Notes | +|------------------------------------------|-----------------------------------------|-|-|------------------------------------------------------------------| +| stackit_argus_credential | stackit_observability_credential | :x: | | Service deprecated, use stackit_observability_credential instead | +| stackit_argus_instance | stackit_observability_instance | :white_check_mark: | [project_id],[instance_id] | Service deprecated, use stackit_observability_instance instead | +| stackit_argus_job | stackit_observability_scrapeconfig | :white_check_mark: | [project_id],[instance_id],[name] | Service deprecated, use stackit_observability_scrapeconfig instead | +| stackit_elasticsearch_credential | | | | Service deprecated | +| stackit_elasticsearch_instance | | | | Service deprecated | +| stackit_kubernetes_cluster | stackit_ske_cluster | :white_check_mark: | [project_id],[name] | | +| stackit_kubernetes_project | stackit_ske_project | :white_check_mark: | [project_id] | | +| stackit_load_balancer | stackit_loadbalancer | :white_check_mark: | [project_id],[name] | | +| stackit_logme_credential | stackit_logme_credential | :white_check_mark: | [project_id],[instance_id],[credential_id] | | +| stackit_logme_instance | stackit_logme_instance | :white_check_mark: | [project_id],[instance_id] | | +| stackit_mariadb_credential | stackit_mariadb_credential | :white_check_mark: | [project_id],[instance_id],[credential_id] | | +| stackit_mariadb_instance | stackit_mariadb_instance | :white_check_mark: | [project_id],[instance_id] | | +| stackit_mongodb_flex_instance | stackit_mongodbflex_instance | :white_check_mark: | [project_id],[instance_id] | | +| stackit_mongodb_flex_user | stackit_mongodbflex_user | :warning: | [project_id],[instance_id],[user_id] | `password` field will be empty | +| stackit_object_storage_bucket | stackit_objectstorage_bucket | :white_check_mark: | [project_id],[name] | | +| stackit_object_storage_credential | 
stackit_objectstorage_credential | :white_check_mark: | [project_id],[credentials_group_id],[credential_id] | | +| stackit_object_storage_credentials_group | stackit_objectstorage_credentials_group | :white_check_mark: | [project_id],[credentials_group_id] | | +| stackit_object_storage_project | | | | Resource deprecated | +| stackit_observability_credential | stackit_observability_credential | :x: | | | +| stackit_observability_instance | stackit_observability_instance | :white_check_mark: | [project_id],[instance_id] | | +| stackit_observability_job | stackit_observability_scrapeconfig | :white_check_mark: | [project_id],[instance_id],[name] | | +| stackit_opensearch_credential | stackit_opensearch_credential | :white_check_mark: | [project_id],[credentials_group_id],[credential_id] | | +| stackit_opensearch_instance | stackit_opensearch_instance | :white_check_mark: | [project_id],[instance_id] | | +| stackit_postgres_credential | stackit_postgresql_credential | :white_check_mark: | [project_id],[credentials_group_id],[credential_id] | | +| stackit_postgres_flex_instance | stackit_postgresflex_instance | :white_check_mark: | [project_id],[instance_id] | | +| stackit_postgres_flex_user | stackit_postgresflex_user | :warning: | [project_id],[instance_id],[user_id] | `password` field will be empty | +| stackit_postgres_instance | stackit_postgresql_instance | :white_check_mark: | [project_id],[instance_id] | | +| stackit_project | stackit_resourcemanager_project | :white_check_mark: | [container_id] | | +| stackit_rabbitmq_credential | stackit_rabbitmq_credential | :white_check_mark: | [project_id],[credentials_group_id],[credential_id] | | +| stackit_rabbitmq_instance | stackit_rabbitmq_instance | :white_check_mark: | [project_id],[instance_id] | | +| stackit_redis_credential | stackit_redis_credential | :white_check_mark: | [project_id],[credentials_group_id],[credential_id] | | +| stackit_redis_instance | stackit_redis_instance | :white_check_mark: | 
[project_id],[instance_id] | | +| stackit_secrets_manager_instance | stackit_secretsmanager_instance | :white_check_mark: | [project_id],[instance_id] | | +| stackit_secrets_manager_user | stackit_secretsmanager_user | :warning: | [project_id],[instance_id],[user_id] | `password` field will be empty | diff --git a/docs/data-sources/argus_instance.md b/docs/data-sources/argus_instance.md index 4787ec31..450f481d 100644 --- a/docs/data-sources/argus_instance.md +++ b/docs/data-sources/argus_instance.md @@ -4,12 +4,15 @@ page_title: "stackit_argus_instance Data Source - stackit" subcategory: "" description: |- Argus instance data source schema. Must have a region specified in the provider configuration. + !> The stackit_argus_instance data source has been deprecated and will be removed after February 26th 2025. Please use stackit_observability_instance instead, which offers the exact same functionality. --- # stackit_argus_instance (Data Source) Argus instance data source schema. Must have a `region` specified in the provider configuration. +!> The `stackit_argus_instance` data source has been deprecated and will be removed after February 26th 2025. Please use `stackit_observability_instance` instead, which offers the exact same functionality. + ## Example Usage ```terraform @@ -37,7 +40,7 @@ data "stackit_argus_instance" "example" { - `grafana_initial_admin_user` (String) Specifies an initial Grafana admin username. - `grafana_public_read_access` (Boolean) If true, anyone can access Grafana dashboards without logging in. - `grafana_url` (String) Specifies Grafana URL. -- `id` (String) Terraform's internal data source. ID. It is structured as "`project_id`,`instance_id`". +- `id` (String) Terraform's internal data source ID. It is structured as "`project_id`,`instance_id`". - `is_updatable` (Boolean) Specifies if the instance can be updated. 
- `jaeger_traces_url` (String) - `jaeger_ui_url` (String) diff --git a/docs/data-sources/argus_scrapeconfig.md b/docs/data-sources/argus_scrapeconfig.md index 8ba26b03..f7def53d 100644 --- a/docs/data-sources/argus_scrapeconfig.md +++ b/docs/data-sources/argus_scrapeconfig.md @@ -4,12 +4,15 @@ page_title: "stackit_argus_scrapeconfig Data Source - stackit" subcategory: "" description: |- Argus scrape config data source schema. Must have a region specified in the provider configuration. + !> The stackit_argus_scrapeconfig data source has been deprecated and will be removed after February 26th 2025. Please use stackit_observability_scrapeconfig instead, which offers the exact same functionality. --- # stackit_argus_scrapeconfig (Data Source) Argus scrape config data source schema. Must have a `region` specified in the provider configuration. +!> The `stackit_argus_scrapeconfig` data source has been deprecated and will be removed after February 26th 2025. Please use `stackit_observability_scrapeconfig` instead, which offers the exact same functionality. + ## Example Usage ```terraform diff --git a/docs/data-sources/observability_instance.md b/docs/data-sources/observability_instance.md new file mode 100644 index 00000000..dd6c9e64 --- /dev/null +++ b/docs/data-sources/observability_instance.md @@ -0,0 +1,151 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_observability_instance Data Source - stackit" +subcategory: "" +description: |- + Observability instance data source schema. Must have a region specified in the provider configuration. +--- + +# stackit_observability_instance (Data Source) + +Observability instance data source schema. Must have a `region` specified in the provider configuration. 
+ +## Example Usage + +```terraform +data "stackit_observability_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} +``` + + +## Schema + +### Required + +- `instance_id` (String) The Observability instance ID. +- `project_id` (String) STACKIT project ID to which the instance is associated. + +### Read-Only + +- `acl` (Set of String) The access control list for this instance. Each entry is an IP address range that is permitted to access, in CIDR notation. +- `alert_config` (Attributes) Alert configuration for the instance. (see [below for nested schema](#nestedatt--alert_config)) +- `alerting_url` (String) Specifies Alerting URL. +- `dashboard_url` (String) Specifies Observability instance dashboard URL. +- `grafana_initial_admin_password` (String, Sensitive) Specifies an initial Grafana admin password. +- `grafana_initial_admin_user` (String) Specifies an initial Grafana admin username. +- `grafana_public_read_access` (Boolean) If true, anyone can access Grafana dashboards without logging in. +- `grafana_url` (String) Specifies Grafana URL. +- `id` (String) Terraform's internal data source ID. It is structured as "`project_id`,`instance_id`". +- `is_updatable` (Boolean) Specifies if the instance can be updated. +- `jaeger_traces_url` (String) +- `jaeger_ui_url` (String) +- `logs_push_url` (String) Specifies URL for pushing logs. +- `logs_url` (String) Specifies Logs URL. +- `metrics_push_url` (String) Specifies URL for pushing metrics. +- `metrics_retention_days` (Number) Specifies for how many days the raw metrics are kept. +- `metrics_retention_days_1h_downsampling` (Number) Specifies for how many days the 1h downsampled metrics are kept. must be less than the value of the 5m downsampling retention. Default is set to `0` (disabled). +- `metrics_retention_days_5m_downsampling` (Number) Specifies for how many days the 5m downsampled metrics are kept. 
must be less than the value of the general retention. Default is set to `0` (disabled). +- `metrics_url` (String) Specifies metrics URL. +- `name` (String) The name of the Observability instance. +- `otlp_traces_url` (String) +- `parameters` (Map of String) Additional parameters. +- `plan_id` (String) The Observability plan ID. +- `plan_name` (String) Specifies the Observability plan. E.g. `Monitoring-Medium-EU01`. +- `targets_url` (String) Specifies Targets URL. +- `zipkin_spans_url` (String) + + +### Nested Schema for `alert_config` + +Read-Only: + +- `global` (Attributes) Global configuration for the alerts. (see [below for nested schema](#nestedatt--alert_config--global)) +- `receivers` (Attributes List) List of alert receivers. (see [below for nested schema](#nestedatt--alert_config--receivers)) +- `route` (Attributes) The route for the alert. (see [below for nested schema](#nestedatt--alert_config--route)) + + +### Nested Schema for `alert_config.global` + +Read-Only: + +- `opsgenie_api_key` (String, Sensitive) The API key for OpsGenie. +- `opsgenie_api_url` (String) The host to send OpsGenie API requests to. Must be a valid URL +- `resolve_timeout` (String) The default value used by alertmanager if the alert does not include EndsAt. After this time passes, it can declare the alert as resolved if it has not been updated. This has no impact on alerts from Prometheus, as they always include EndsAt. +- `smtp_auth_identity` (String) SMTP authentication information. Must be a valid email address +- `smtp_auth_password` (String, Sensitive) SMTP Auth using LOGIN and PLAIN. +- `smtp_auth_username` (String) SMTP Auth using CRAM-MD5, LOGIN and PLAIN. If empty, Alertmanager doesn't authenticate to the SMTP server. +- `smtp_from` (String) The default SMTP From header field. Must be a valid email address +- `smtp_smart_host` (String) The default SMTP smarthost used for sending emails, including port number. 
Port number usually is 25, or 587 for SMTP over TLS (sometimes referred to as STARTTLS). + + + +### Nested Schema for `alert_config.receivers` + +Read-Only: + +- `email_configs` (Attributes List) List of email configurations. (see [below for nested schema](#nestedatt--alert_config--receivers--email_configs)) +- `name` (String) Name of the receiver. +- `opsgenie_configs` (Attributes List) List of OpsGenie configurations. (see [below for nested schema](#nestedatt--alert_config--receivers--opsgenie_configs)) +- `webhooks_configs` (Attributes List) List of Webhooks configurations. (see [below for nested schema](#nestedatt--alert_config--receivers--webhooks_configs)) + + +### Nested Schema for `alert_config.receivers.email_configs` + +Read-Only: + +- `auth_identity` (String) SMTP authentication information. Must be a valid email address +- `auth_password` (String) SMTP authentication password. +- `auth_username` (String) SMTP authentication username. +- `from` (String) The sender email address. Must be a valid email address +- `smart_host` (String) The SMTP host through which emails are sent. +- `to` (String) The email address to send notifications to. Must be a valid email address + + + +### Nested Schema for `alert_config.receivers.opsgenie_configs` + +Read-Only: + +- `api_key` (String) The API key for OpsGenie. +- `api_url` (String) The host to send OpsGenie API requests to. Must be a valid URL +- `tags` (String) Comma separated list of tags attached to the notifications. + + + +### Nested Schema for `alert_config.receivers.webhooks_configs` + +Read-Only: + +- `ms_teams` (Boolean) Microsoft Teams webhooks require special handling, set this to true if the webhook is for Microsoft Teams. +- `url` (String) The endpoint to send HTTP POST requests to. Must be a valid URL + + + + +### Nested Schema for `alert_config.route` + +Read-Only: + +- `group_by` (List of String) The labels by which incoming alerts are grouped together. 
For example, multiple alerts coming in for cluster=A and alertname=LatencyHigh would be batched into a single group. To aggregate by all possible labels use the special value '...' as the sole label name, for example: group_by: ['...']. This effectively disables aggregation entirely, passing through all alerts as-is. This is unlikely to be what you want, unless you have a very low alert volume or your upstream notification system performs its own grouping. +- `group_interval` (String) How long to wait before sending a notification about new alerts that are added to a group of alerts for which an initial notification has already been sent. (Usually ~5m or more.) +- `group_wait` (String) How long to initially wait to send a notification for a group of alerts. Allows to wait for an inhibiting alert to arrive or collect more initial alerts for the same group. (Usually ~0s to few minutes.) . +- `match` (Map of String) A set of equality matchers an alert has to fulfill to match the node. +- `match_regex` (Map of String) A set of regex-matchers an alert has to fulfill to match the node. +- `receiver` (String) The name of the receiver to route the alerts to. +- `repeat_interval` (String) How long to wait before sending a notification again if it has already been sent successfully for an alert. (Usually ~3h or more). +- `routes` (Attributes List) List of child routes. (see [below for nested schema](#nestedatt--alert_config--route--routes)) + + +### Nested Schema for `alert_config.route.routes` + +Read-Only: + +- `group_by` (List of String) The labels by which incoming alerts are grouped together. For example, multiple alerts coming in for cluster=A and alertname=LatencyHigh would be batched into a single group. To aggregate by all possible labels use the special value '...' as the sole label name, for example: group_by: ['...']. This effectively disables aggregation entirely, passing through all alerts as-is. 
This is unlikely to be what you want, unless you have a very low alert volume or your upstream notification system performs its own grouping. +- `group_interval` (String) How long to wait before sending a notification about new alerts that are added to a group of alerts for which an initial notification has already been sent. (Usually ~5m or more.) +- `group_wait` (String) How long to initially wait to send a notification for a group of alerts. Allows to wait for an inhibiting alert to arrive or collect more initial alerts for the same group. (Usually ~0s to few minutes.) +- `match` (Map of String) A set of equality matchers an alert has to fulfill to match the node. +- `match_regex` (Map of String) A set of regex-matchers an alert has to fulfill to match the node. +- `receiver` (String) The name of the receiver to route the alerts to. +- `repeat_interval` (String) How long to wait before sending a notification again if it has already been sent successfully for an alert. (Usually ~3h or more). diff --git a/docs/data-sources/observability_scrapeconfig.md b/docs/data-sources/observability_scrapeconfig.md new file mode 100644 index 00000000..77ce88dc --- /dev/null +++ b/docs/data-sources/observability_scrapeconfig.md @@ -0,0 +1,67 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_observability_scrapeconfig Data Source - stackit" +subcategory: "" +description: |- + Observability scrape config data source schema. Must have a region specified in the provider configuration. +--- + +# stackit_observability_scrapeconfig (Data Source) + +Observability scrape config data source schema. Must have a `region` specified in the provider configuration. 
+ +## Example Usage + +```terraform +data "stackit_observability_scrapeconfig" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + job_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} +``` + + +## Schema + +### Required + +- `instance_id` (String) Observability instance ID to which the scraping job is associated. +- `name` (String) Specifies the name of the scraping job +- `project_id` (String) STACKIT project ID to which the scraping job is associated. + +### Read-Only + +- `basic_auth` (Attributes) A basic authentication block. (see [below for nested schema](#nestedatt--basic_auth)) +- `id` (String) Terraform's internal data source ID. It is structured as "`project_id`,`instance_id`,`name`". +- `metrics_path` (String) Specifies the job scraping url path. +- `saml2` (Attributes) A SAML2 configuration block. (see [below for nested schema](#nestedatt--saml2)) +- `sample_limit` (Number) Specifies the scrape sample limit. +- `scheme` (String) Specifies the http scheme. +- `scrape_interval` (String) Specifies the scrape interval as duration string. +- `scrape_timeout` (String) Specifies the scrape timeout as duration string. +- `targets` (Attributes List) The targets list (specified by the static config). (see [below for nested schema](#nestedatt--targets)) + + +### Nested Schema for `basic_auth` + +Read-Only: + +- `password` (String, Sensitive) Specifies basic auth password. +- `username` (String) Specifies basic auth username. + + + +### Nested Schema for `saml2` + +Read-Only: + +- `enable_url_parameters` (Boolean) Specifies if URL parameters are enabled + + + +### Nested Schema for `targets` + +Read-Only: + +- `labels` (Map of String) Specifies labels. +- `urls` (List of String) Specifies target URLs. 
diff --git a/docs/data-sources/postgresql_credential.md b/docs/data-sources/postgresql_credential.md index 06443482..e934819e 100644 --- a/docs/data-sources/postgresql_credential.md +++ b/docs/data-sources/postgresql_credential.md @@ -4,7 +4,7 @@ page_title: "stackit_postgresql_credential Data Source - stackit" subcategory: "" description: |- PostgreSQL credential data source schema. Must have a region specified in the provider configuration. - !> The STACKIT PostgreSQL service has reached its end of support on June 30th 2024. Resources of this type have stopped working since then. Use stackit_postgresflex_user instead. For more details, check + !> The STACKIT PostgreSQL service has reached its end of support on June 30th 2024. Resources of this type have stopped working since then. Use stackit_postgresflex_user instead. For more details, check https://docs.stackit.cloud/stackit/en/bring-your-data-to-stackit-postgresql-flex-138347648.html --- # stackit_postgresql_credential (Data Source) diff --git a/docs/data-sources/postgresql_instance.md b/docs/data-sources/postgresql_instance.md index 846a9054..944cc752 100644 --- a/docs/data-sources/postgresql_instance.md +++ b/docs/data-sources/postgresql_instance.md @@ -4,7 +4,7 @@ page_title: "stackit_postgresql_instance Data Source - stackit" subcategory: "" description: |- PostgreSQL instance data source schema. Must have a region specified in the provider configuration. - !> The STACKIT PostgreSQL service has reached its end of support on June 30th 2024. Resources of this type have stopped working since then. Use stackit_postgresflex_instance instead. For more details, check + !> The STACKIT PostgreSQL service has reached its end of support on June 30th 2024. Resources of this type have stopped working since then. Use stackit_postgresflex_instance instead. 
For more details, check https://docs.stackit.cloud/stackit/en/bring-your-data-to-stackit-postgresql-flex-138347648.html --- # stackit_postgresql_instance (Data Source) diff --git a/docs/data-sources/resourcemanager_project.md b/docs/data-sources/resourcemanager_project.md index 4946912f..e02fa572 100644 --- a/docs/data-sources/resourcemanager_project.md +++ b/docs/data-sources/resourcemanager_project.md @@ -3,7 +3,7 @@ page_title: "stackit_resourcemanager_project Data Source - stackit" subcategory: "" description: |- - Resource Manager project data source schema. To identify the project, you need to provider either project_id or container_id. If you provide both, project_id will be used. + Resource Manager project data source schema. To identify the project, you need to provide either project_id or container_id. If you provide both, project_id will be used. --- # stackit_resourcemanager_project (Data Source) diff --git a/docs/guides/vault_secrets_manager.md b/docs/guides/vault_secrets_manager.md index 19f8ad60..c6e8bf39 100644 --- a/docs/guides/vault_secrets_manager.md +++ b/docs/guides/vault_secrets_manager.md @@ -51,10 +51,10 @@ This guide outlines the process of utilizing the HashiCorp Vault provider alongs } ``` -5. **Define Terraform Resource (Example: Argus Monitoring Instance)** +5. 
**Define Terraform Resource (Example: Observability Monitoring Instance)** ```hcl - resource "stackit_argus_instance" "example" { + resource "stackit_observability_instance" "example" { project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" name = "example-instance" plan_name = "Monitoring-Medium-EU01" @@ -71,7 +71,7 @@ This guide outlines the process of utilizing the HashiCorp Vault provider alongs delete_all_versions = true data_json = jsonencode( { - grafana_password = stackit_argus_instance.example.grafana_initial_admin_password, + grafana_password = stackit_observability_instance.example.grafana_initial_admin_password, other_secret = ..., } ) @@ -80,4 +80,4 @@ This guide outlines the process of utilizing the HashiCorp Vault provider alongs ## Note -This example can be adapted for various resources within the provider as well as any other Secret the user wants to set in the Secrets Manager instance. Adapting this examples means replacing the Argus Monitoring Grafana password with the appropriate value. \ No newline at end of file +This example can be adapted for various resources within the provider as well as any other Secret the user wants to set in the Secrets Manager instance. Adapting this examples means replacing the Observability Monitoring Grafana password with the appropriate value. \ No newline at end of file diff --git a/docs/index.md b/docs/index.md index a0dec609..a1dbeb43 100644 --- a/docs/index.md +++ b/docs/index.md @@ -140,7 +140,7 @@ Note: AWS specific checks must be skipped as they do not work on STACKIT. For de ### Optional -- `argus_custom_endpoint` (String) Custom endpoint for the Argus service +- `argus_custom_endpoint` (String, Deprecated) Custom endpoint for the Argus service - `authorization_custom_endpoint` (String) Custom endpoint for the Membership service - `credentials_path` (String) Path of JSON from where the credentials are read. Takes precedence over the env var `STACKIT_CREDENTIALS_PATH`. 
Default value is `~/.stackit/credentials.json`. - `dns_custom_endpoint` (String) Custom endpoint for the DNS service @@ -152,6 +152,7 @@ Note: AWS specific checks must be skipped as they do not work on STACKIT. For de - `mariadb_custom_endpoint` (String) Custom endpoint for the MariaDB service - `mongodbflex_custom_endpoint` (String) Custom endpoint for the MongoDB Flex service - `objectstorage_custom_endpoint` (String) Custom endpoint for the Object Storage service +- `observability_custom_endpoint` (String) Custom endpoint for the Observability service - `opensearch_custom_endpoint` (String) Custom endpoint for the OpenSearch service - `postgresflex_custom_endpoint` (String) Custom endpoint for the PostgresFlex service - `postgresql_custom_endpoint` (String) Custom endpoint for the PostgreSQL service diff --git a/docs/resources/argus_credential.md b/docs/resources/argus_credential.md index 6872de80..b947e3cf 100644 --- a/docs/resources/argus_credential.md +++ b/docs/resources/argus_credential.md @@ -4,12 +4,15 @@ page_title: "stackit_argus_credential Resource - stackit" subcategory: "" description: |- Argus credential resource schema. Must have a region specified in the provider configuration. + !> The stackit_argus_credential resource has been deprecated and will be removed after February 26th 2025. Please use stackit_observability_credential instead, which offers the exact same functionality. --- # stackit_argus_credential (Resource) Argus credential resource schema. Must have a `region` specified in the provider configuration. +!> The `stackit_argus_credential` resource has been deprecated and will be removed after February 26th 2025. Please use `stackit_observability_credential` instead, which offers the exact same functionality. 
+ ## Example Usage ```terraform diff --git a/docs/resources/argus_instance.md b/docs/resources/argus_instance.md index acbfb441..94ee24cd 100644 --- a/docs/resources/argus_instance.md +++ b/docs/resources/argus_instance.md @@ -4,12 +4,15 @@ page_title: "stackit_argus_instance Resource - stackit" subcategory: "" description: |- Argus instance resource schema. Must have a region specified in the provider configuration. + !> The stackit_argus_instance resource has been deprecated and will be removed after February 26th 2025. Please use stackit_observability_instance instead, which offers the exact same functionality. --- # stackit_argus_instance (Resource) Argus instance resource schema. Must have a `region` specified in the provider configuration. +!> The `stackit_argus_instance` resource has been deprecated and will be removed after February 26th 2025. Please use `stackit_observability_instance` instead, which offers the exact same functionality. + ## Example Usage ```terraform diff --git a/docs/resources/argus_scrapeconfig.md b/docs/resources/argus_scrapeconfig.md index 317ef1e9..ecdf23ed 100644 --- a/docs/resources/argus_scrapeconfig.md +++ b/docs/resources/argus_scrapeconfig.md @@ -4,12 +4,15 @@ page_title: "stackit_argus_scrapeconfig Resource - stackit" subcategory: "" description: |- Argus scrape config resource schema. Must have a region specified in the provider configuration. + !> The stackit_argus_scrapeconfig resource has been deprecated and will be removed after February 26th 2025. Please use stackit_observability_scrapeconfig instead, which offers the exact same functionality. --- # stackit_argus_scrapeconfig (Resource) Argus scrape config resource schema. Must have a `region` specified in the provider configuration. +!> The `stackit_argus_scrapeconfig` resource has been deprecated and will be removed after February 26th 2025. Please use `stackit_observability_scrapeconfig` instead, which offers the exact same functionality. 
+ ## Example Usage ```terraform diff --git a/docs/resources/loadbalancer.md b/docs/resources/loadbalancer.md index ad84605c..71b1d423 100644 --- a/docs/resources/loadbalancer.md +++ b/docs/resources/loadbalancer.md @@ -8,25 +8,23 @@ description: |- To automate the creation of load balancers, OpenStack can be used to setup the supporting infrastructure. To set up the OpenStack provider, you can create a token through the STACKIT Portal, in your project's Infrastructure API page. There, the OpenStack user domain name, username, and password are generated and can be obtained. The provider can then be configured as follows: - + ```terraform terraform { - required_providers { - (...) - openstack = { - source = "terraform-provider-openstack/openstack" - } - } + required_providers { + (...) + openstack = { + source = "terraform-provider-openstack/openstack" + } + } } - provider "openstack" { - user_domain_name = "{OpenStack user domain name}" - user_name = "{OpenStack username}" - password = "{OpenStack password}" - region = "RegionOne" - auth_url = "https://keystone.api.iaas.eu01.stackit.cloud/v3" + userdomainname = "{OpenStack user domain name}" + username = "{OpenStack username}" + password = "{OpenStack password}" + region = "RegionOne" + authurl = "https://keystone.api.iaas.eu01.stackit.cloud/v3" } - - + ``` Configuring the supporting infrastructure The example below uses OpenStack to create the network, router, a public IP address and a compute instance. --- diff --git a/docs/resources/observability_credential.md b/docs/resources/observability_credential.md new file mode 100644 index 00000000..0321c31a --- /dev/null +++ b/docs/resources/observability_credential.md @@ -0,0 +1,34 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_observability_credential Resource - stackit" +subcategory: "" +description: |- + Observability credential resource schema. Must have a region specified in the provider configuration. 
+--- + +# stackit_observability_credential (Resource) + +Observability credential resource schema. Must have a `region` specified in the provider configuration. + +## Example Usage + +```terraform +resource "stackit_observability_credential" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} +``` + + +## Schema + +### Required + +- `instance_id` (String) The Observability Instance ID the credential belongs to. +- `project_id` (String) STACKIT project ID to which the credential is associated. + +### Read-Only + +- `id` (String) Terraform's internal resource ID. It is structured as "`project_id`,`instance_id`,`username`". +- `password` (String, Sensitive) Credential password +- `username` (String) Credential username diff --git a/docs/resources/observability_instance.md b/docs/resources/observability_instance.md new file mode 100644 index 00000000..e7b4eb36 --- /dev/null +++ b/docs/resources/observability_instance.md @@ -0,0 +1,172 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_observability_instance Resource - stackit" +subcategory: "" +description: |- + Observability instance resource schema. Must have a region specified in the provider configuration. +--- + +# stackit_observability_instance (Resource) + +Observability instance resource schema. Must have a `region` specified in the provider configuration. + +## Example Usage + +```terraform +resource "stackit_observability_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + name = "example-instance" + plan_name = "Monitoring-Medium-EU01" + acl = ["1.1.1.1/32", "2.2.2.2/32"] + metrics_retention_days = 7 + metrics_retention_days_5m_downsampling = 30 + metrics_retention_days_1h_downsampling = 365 +} +``` + + +## Schema + +### Required + +- `name` (String) The name of the Observability instance. +- `plan_name` (String) Specifies the Observability plan. E.g. 
`Monitoring-Medium-EU01`. +- `project_id` (String) STACKIT project ID to which the instance is associated. + +### Optional + +- `acl` (Set of String) The access control list for this instance. Each entry is an IP address range that is permitted to access, in CIDR notation. +- `alert_config` (Attributes) Alert configuration for the instance. (see [below for nested schema](#nestedatt--alert_config)) +- `metrics_retention_days` (Number) Specifies for how many days the raw metrics are kept. +- `metrics_retention_days_1h_downsampling` (Number) Specifies for how many days the 1h downsampled metrics are kept. Must be less than the value of the 5m downsampling retention. Default is set to `0` (disabled). +- `metrics_retention_days_5m_downsampling` (Number) Specifies for how many days the 5m downsampled metrics are kept. Must be less than the value of the general retention. Default is set to `0` (disabled). +- `parameters` (Map of String) Additional parameters. + +### Read-Only + +- `alerting_url` (String) Specifies Alerting URL. +- `dashboard_url` (String) Specifies Observability instance dashboard URL. +- `grafana_initial_admin_password` (String, Sensitive) Specifies an initial Grafana admin password. +- `grafana_initial_admin_user` (String) Specifies an initial Grafana admin username. +- `grafana_public_read_access` (Boolean) If true, anyone can access Grafana dashboards without logging in. +- `grafana_url` (String) Specifies Grafana URL. +- `id` (String) Terraform's internal resource ID. It is structured as "`project_id`,`instance_id`". +- `instance_id` (String) The Observability instance ID. +- `is_updatable` (Boolean) Specifies if the instance can be updated. +- `jaeger_traces_url` (String) +- `jaeger_ui_url` (String) +- `logs_push_url` (String) Specifies URL for pushing logs. +- `logs_url` (String) Specifies Logs URL. +- `metrics_push_url` (String) Specifies URL for pushing metrics. +- `metrics_url` (String) Specifies metrics URL.
+- `otlp_traces_url` (String) +- `plan_id` (String) The Observability plan ID. +- `targets_url` (String) Specifies Targets URL. +- `zipkin_spans_url` (String) + + +### Nested Schema for `alert_config` + +Required: + +- `receivers` (Attributes List) List of alert receivers. (see [below for nested schema](#nestedatt--alert_config--receivers)) +- `route` (Attributes) Route configuration for the alerts. (see [below for nested schema](#nestedatt--alert_config--route)) + +Optional: + +- `global` (Attributes) Global configuration for the alerts. (see [below for nested schema](#nestedatt--alert_config--global)) + + +### Nested Schema for `alert_config.receivers` + +Required: + +- `name` (String) Name of the receiver. + +Optional: + +- `email_configs` (Attributes List) List of email configurations. (see [below for nested schema](#nestedatt--alert_config--receivers--email_configs)) +- `opsgenie_configs` (Attributes List) List of OpsGenie configurations. (see [below for nested schema](#nestedatt--alert_config--receivers--opsgenie_configs)) +- `webhooks_configs` (Attributes List) List of Webhooks configurations. (see [below for nested schema](#nestedatt--alert_config--receivers--webhooks_configs)) + + +### Nested Schema for `alert_config.receivers.email_configs` + +Optional: + +- `auth_identity` (String) SMTP authentication information. Must be a valid email address +- `auth_password` (String) SMTP authentication password. +- `auth_username` (String) SMTP authentication username. +- `from` (String) The sender email address. Must be a valid email address +- `smart_host` (String) The SMTP host through which emails are sent. +- `to` (String) The email address to send notifications to. Must be a valid email address + + + +### Nested Schema for `alert_config.receivers.opsgenie_configs` + +Optional: + +- `api_key` (String) The API key for OpsGenie. +- `api_url` (String) The host to send OpsGenie API requests to. 
Must be a valid URL +- `tags` (String) Comma separated list of tags attached to the notifications. + + + +### Nested Schema for `alert_config.receivers.webhooks_configs` + +Optional: + +- `ms_teams` (Boolean) Microsoft Teams webhooks require special handling, set this to true if the webhook is for Microsoft Teams. +- `url` (String) The endpoint to send HTTP POST requests to. Must be a valid URL + + + + +### Nested Schema for `alert_config.route` + +Required: + +- `receiver` (String) The name of the receiver to route the alerts to. + +Optional: + +- `group_by` (List of String) The labels by which incoming alerts are grouped together. For example, multiple alerts coming in for cluster=A and alertname=LatencyHigh would be batched into a single group. To aggregate by all possible labels use the special value '...' as the sole label name, for example: group_by: ['...']. This effectively disables aggregation entirely, passing through all alerts as-is. This is unlikely to be what you want, unless you have a very low alert volume or your upstream notification system performs its own grouping. +- `group_interval` (String) How long to wait before sending a notification about new alerts that are added to a group of alerts for which an initial notification has already been sent. (Usually ~5m or more.) +- `group_wait` (String) How long to initially wait to send a notification for a group of alerts. Allows to wait for an inhibiting alert to arrive or collect more initial alerts for the same group. (Usually ~0s to few minutes.) +- `match` (Map of String) A set of equality matchers an alert has to fulfill to match the node. +- `match_regex` (Map of String) A set of regex-matchers an alert has to fulfill to match the node. +- `repeat_interval` (String) How long to wait before sending a notification again if it has already been sent successfully for an alert. (Usually ~3h or more). +- `routes` (Attributes List) List of child routes. 
(see [below for nested schema](#nestedatt--alert_config--route--routes)) + + +### Nested Schema for `alert_config.route.routes` + +Required: + +- `receiver` (String) The name of the receiver to route the alerts to. + +Optional: + +- `group_by` (List of String) The labels by which incoming alerts are grouped together. For example, multiple alerts coming in for cluster=A and alertname=LatencyHigh would be batched into a single group. To aggregate by all possible labels use the special value '...' as the sole label name, for example: group_by: ['...']. This effectively disables aggregation entirely, passing through all alerts as-is. This is unlikely to be what you want, unless you have a very low alert volume or your upstream notification system performs its own grouping. +- `group_interval` (String) How long to wait before sending a notification about new alerts that are added to a group of alerts for which an initial notification has already been sent. (Usually ~5m or more.) +- `group_wait` (String) How long to initially wait to send a notification for a group of alerts. Allows to wait for an inhibiting alert to arrive or collect more initial alerts for the same group. (Usually ~0s to few minutes.) +- `match` (Map of String) A set of equality matchers an alert has to fulfill to match the node. +- `match_regex` (Map of String) A set of regex-matchers an alert has to fulfill to match the node. +- `repeat_interval` (String) How long to wait before sending a notification again if it has already been sent successfully for an alert. (Usually ~3h or more). + + + + +### Nested Schema for `alert_config.global` + +Optional: + +- `opsgenie_api_key` (String, Sensitive) The API key for OpsGenie. +- `opsgenie_api_url` (String) The host to send OpsGenie API requests to. Must be a valid URL +- `resolve_timeout` (String) The default value used by alertmanager if the alert does not include EndsAt. 
After this time passes, it can declare the alert as resolved if it has not been updated. This has no impact on alerts from Prometheus, as they always include EndsAt. +- `smtp_auth_identity` (String) SMTP authentication information. Must be a valid email address +- `smtp_auth_password` (String, Sensitive) SMTP Auth using LOGIN and PLAIN. +- `smtp_auth_username` (String) SMTP Auth using CRAM-MD5, LOGIN and PLAIN. If empty, Alertmanager doesn't authenticate to the SMTP server. +- `smtp_from` (String) The default SMTP From header field. Must be a valid email address +- `smtp_smart_host` (String) The default SMTP smarthost used for sending emails, including port number in format `host:port` (eg. `smtp.example.com:587`). Port number usually is 25, or 587 for SMTP over TLS (sometimes referred to as STARTTLS). diff --git a/docs/resources/observability_scrapeconfig.md b/docs/resources/observability_scrapeconfig.md new file mode 100644 index 00000000..af24708b --- /dev/null +++ b/docs/resources/observability_scrapeconfig.md @@ -0,0 +1,85 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_observability_scrapeconfig Resource - stackit" +subcategory: "" +description: |- + Observability scrape config resource schema. Must have a region specified in the provider configuration. +--- + +# stackit_observability_scrapeconfig (Resource) + +Observability scrape config resource schema. Must have a `region` specified in the provider configuration. 
+ +## Example Usage + +```terraform +resource "stackit_observability_scrapeconfig" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + name = "example-job" + metrics_path = "/my-metrics" + saml2 = { + enable_url_parameters = true + } + targets = [ + { + urls = ["url1", "urls2"] + labels = { + "url1" = "dev" + } + } + ] +} +``` + + +## Schema + +### Required + +- `instance_id` (String) Observability instance ID to which the scraping job is associated. +- `metrics_path` (String) Specifies the job scraping url path. E.g. `/metrics`. +- `name` (String) Specifies the name of the scraping job. +- `project_id` (String) STACKIT project ID to which the scraping job is associated. +- `targets` (Attributes List) The targets list (specified by the static config). (see [below for nested schema](#nestedatt--targets)) + +### Optional + +- `basic_auth` (Attributes) A basic authentication block. (see [below for nested schema](#nestedatt--basic_auth)) +- `saml2` (Attributes) A SAML2 configuration block. (see [below for nested schema](#nestedatt--saml2)) +- `sample_limit` (Number) Specifies the scrape sample limit. Upper limit depends on the service plan. Defaults to `5000`. +- `scheme` (String) Specifies the http scheme. Defaults to `https`. +- `scrape_interval` (String) Specifies the scrape interval as duration string. Defaults to `5m`. +- `scrape_timeout` (String) Specifies the scrape timeout as duration string. Defaults to `2m`. + +### Read-Only + +- `id` (String) Terraform's internal resource ID. It is structured as "`project_id`,`instance_id`,`name`". + + +### Nested Schema for `targets` + +Required: + +- `urls` (List of String) Specifies target URLs. + +Optional: + +- `labels` (Map of String) Specifies labels. + + + +### Nested Schema for `basic_auth` + +Required: + +- `password` (String, Sensitive) Specifies basic auth password. +- `username` (String) Specifies basic auth username. 
+ + + +### Nested Schema for `saml2` + +Optional: + +- `enable_url_parameters` (Boolean) Specifies if URL parameters are enabled. Defaults to `true` diff --git a/docs/resources/postgresql_credential.md b/docs/resources/postgresql_credential.md index 46da8df6..38f31539 100644 --- a/docs/resources/postgresql_credential.md +++ b/docs/resources/postgresql_credential.md @@ -4,7 +4,7 @@ page_title: "stackit_postgresql_credential Resource - stackit" subcategory: "" description: |- PostgreSQL credential resource schema. Must have a region specified in the provider configuration. - !> The STACKIT PostgreSQL service has reached its end of support on June 30th 2024. Resources of this type have stopped working since then. Use stackit_postgresflex_user instead. For more details, check + !> The STACKIT PostgreSQL service has reached its end of support on June 30th 2024. Resources of this type have stopped working since then. Use stackitpostgresflexuser instead. For more details, check https://docs.stackit.cloud/stackit/en/bring-your-data-to-stackit-postgresql-flex-138347648.html --- # stackit_postgresql_credential (Resource) diff --git a/docs/resources/postgresql_instance.md b/docs/resources/postgresql_instance.md index e9a2144c..5e6120ea 100644 --- a/docs/resources/postgresql_instance.md +++ b/docs/resources/postgresql_instance.md @@ -4,7 +4,7 @@ page_title: "stackit_postgresql_instance Resource - stackit" subcategory: "" description: |- PostgreSQL instance resource schema. Must have a region specified in the provider configuration. - !> The STACKIT PostgreSQL service has reached its end of support on June 30th 2024. Resources of this type have stopped working since then. Use stackit_postgresflex_instance instead. Check on how to backup and restore an instance from PostgreSQL to PostgreSQL Flex, then import the resource to Terraform using an "import" block () + !> The STACKIT PostgreSQL service has reached its end of support on June 30th 2024. 
Resources of this type have stopped working since then. Use stackitpostgresflexinstance instead. Check https://docs.stackit.cloud/stackit/en/bring-your-data-to-stackit-postgresql-flex-138347648.html on how to backup and restore an instance from PostgreSQL to PostgreSQL Flex, then import the resource to Terraform using an "import" block (https://developer.hashicorp.com/terraform/language/import) --- # stackit_postgresql_instance (Resource) diff --git a/examples/data-sources/stackit_observability_instance/data-source.tf b/examples/data-sources/stackit_observability_instance/data-source.tf new file mode 100644 index 00000000..9606cf85 --- /dev/null +++ b/examples/data-sources/stackit_observability_instance/data-source.tf @@ -0,0 +1,4 @@ +data "stackit_observability_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} diff --git a/examples/data-sources/stackit_observability_scrapeconfig/data-source.tf b/examples/data-sources/stackit_observability_scrapeconfig/data-source.tf new file mode 100644 index 00000000..c7c7d387 --- /dev/null +++ b/examples/data-sources/stackit_observability_scrapeconfig/data-source.tf @@ -0,0 +1,5 @@ +data "stackit_observability_scrapeconfig" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + job_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} diff --git a/examples/resources/stackit_observability_credential/resource.tf b/examples/resources/stackit_observability_credential/resource.tf new file mode 100644 index 00000000..ee03d5e3 --- /dev/null +++ b/examples/resources/stackit_observability_credential/resource.tf @@ -0,0 +1,4 @@ +resource "stackit_observability_credential" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} diff --git a/examples/resources/stackit_observability_instance/resource.tf 
b/examples/resources/stackit_observability_instance/resource.tf new file mode 100644 index 00000000..fe3677a6 --- /dev/null +++ b/examples/resources/stackit_observability_instance/resource.tf @@ -0,0 +1,9 @@ +resource "stackit_observability_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + name = "example-instance" + plan_name = "Monitoring-Medium-EU01" + acl = ["1.1.1.1/32", "2.2.2.2/32"] + metrics_retention_days = 7 + metrics_retention_days_5m_downsampling = 30 + metrics_retention_days_1h_downsampling = 365 +} diff --git a/examples/resources/stackit_observability_scrapeconfig/resource.tf b/examples/resources/stackit_observability_scrapeconfig/resource.tf new file mode 100644 index 00000000..71f20da1 --- /dev/null +++ b/examples/resources/stackit_observability_scrapeconfig/resource.tf @@ -0,0 +1,17 @@ +resource "stackit_observability_scrapeconfig" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + name = "example-job" + metrics_path = "/my-metrics" + saml2 = { + enable_url_parameters = true + } + targets = [ + { + urls = ["url1", "urls2"] + labels = { + "url1" = "dev" + } + } + ] +} diff --git a/go.mod b/go.mod index 9b43c6aa..7a914006 100644 --- a/go.mod +++ b/go.mod @@ -20,6 +20,7 @@ require ( github.com/stackitcloud/stackit-sdk-go/services/mariadb v0.17.0 github.com/stackitcloud/stackit-sdk-go/services/mongodbflex v0.14.0 github.com/stackitcloud/stackit-sdk-go/services/objectstorage v0.10.0 + github.com/stackitcloud/stackit-sdk-go/services/observability v0.1.0 github.com/stackitcloud/stackit-sdk-go/services/opensearch v0.16.0 github.com/stackitcloud/stackit-sdk-go/services/postgresflex v0.15.0 github.com/stackitcloud/stackit-sdk-go/services/postgresql v0.12.1 diff --git a/go.sum b/go.sum index d03edbef..a6d60a63 100644 --- a/go.sum +++ b/go.sum @@ -166,6 +166,8 @@ github.com/stackitcloud/stackit-sdk-go/services/mongodbflex v0.14.0 h1:FaJYVfha+ 
github.com/stackitcloud/stackit-sdk-go/services/mongodbflex v0.14.0/go.mod h1:iFerEzGmkg6R13ldFUyHUWHm0ac9cS4ftTDLhP0k/dU= github.com/stackitcloud/stackit-sdk-go/services/objectstorage v0.10.0 h1:tn1MD1nu+gYEbT3lslRI6BrapKwuvHv5Wi2Zw9uVPPc= github.com/stackitcloud/stackit-sdk-go/services/objectstorage v0.10.0/go.mod h1:dkVMJI88eJ3Xs0ZV15r4tUpgitUGJXcvrX3RL4Zq2bQ= +github.com/stackitcloud/stackit-sdk-go/services/observability v0.1.0 h1:VdxYMJqGKUvk7/l2b83Z/bB0FUYuFbELK/ipTicJ5QY= +github.com/stackitcloud/stackit-sdk-go/services/observability v0.1.0/go.mod h1:cSnBZGdtx4jnn9HEefkQHDrm8+PuS0NCWvukVfuwP/8= github.com/stackitcloud/stackit-sdk-go/services/opensearch v0.16.0 h1:EEjhfIFiC4TsaFKB4mkxz6NFz4InfVs5STmWc+oEjgQ= github.com/stackitcloud/stackit-sdk-go/services/opensearch v0.16.0/go.mod h1:ZecMIf9oYj2DGZqWh93l97WdVaRdLl+tW5Fq3YKGwBM= github.com/stackitcloud/stackit-sdk-go/services/postgresflex v0.15.0 h1:05wQYhO37Z4y8xAD+4OTYz6rYu6eJEmwMfCG4tjETEc= diff --git a/stackit/internal/core/core.go b/stackit/internal/core/core.go index b13ac3d1..d67aaab4 100644 --- a/stackit/internal/core/core.go +++ b/stackit/internal/core/core.go @@ -26,6 +26,7 @@ type ProviderData struct { MariaDBCustomEndpoint string MongoDBFlexCustomEndpoint string ObjectStorageCustomEndpoint string + ObservabilityCustomEndpoint string OpenSearchCustomEndpoint string PostgresFlexCustomEndpoint string PostgreSQLCustomEndpoint string diff --git a/stackit/internal/services/argus/credential/resource.go b/stackit/internal/services/argus/credential/resource.go index 54be3413..0056bbe6 100644 --- a/stackit/internal/services/argus/credential/resource.go +++ b/stackit/internal/services/argus/credential/resource.go @@ -87,8 +87,15 @@ func (r *credentialResource) Configure(ctx context.Context, req resource.Configu } func (r *credentialResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + descriptions := map[string]string{ + "main": "Argus credential resource schema. 
Must have a `region` specified in the provider configuration.", + "deprecation_message": "The `stackit_argus_credential` resource has been deprecated and will be removed after February 26th 2025. " + + "Please use `stackit_observability_credential` instead, which offers the exact same functionality.", + } resp.Schema = schema.Schema{ - Description: "Argus credential resource schema. Must have a `region` specified in the provider configuration.", + Description: fmt.Sprintf("%s\n%s", descriptions["main"], descriptions["deprecation_message"]), + MarkdownDescription: fmt.Sprintf("%s\n\n!> %s", descriptions["main"], descriptions["deprecation_message"]), + DeprecationMessage: descriptions["deprecation_message"], Attributes: map[string]schema.Attribute{ "id": schema.StringAttribute{ Description: "Terraform's internal resource ID. It is structured as \"`project_id`,`instance_id`,`username`\".", diff --git a/stackit/internal/services/argus/instance/datasource.go b/stackit/internal/services/argus/instance/datasource.go index 36d3809c..a63f379d 100644 --- a/stackit/internal/services/argus/instance/datasource.go +++ b/stackit/internal/services/argus/instance/datasource.go @@ -75,11 +75,18 @@ func (d *instanceDataSource) Configure(ctx context.Context, req datasource.Confi // Schema defines the schema for the data source. func (d *instanceDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + descriptions := map[string]string{ + "main": "Argus instance data source schema. Must have a `region` specified in the provider configuration.", + "deprecation_message": "The `stackit_argus_instance` data source has been deprecated and will be removed after February 26th 2025. " + + "Please use `stackit_observability_instance` instead, which offers the exact same functionality.", + } resp.Schema = schema.Schema{ - Description: "Argus instance data source schema. 
Must have a `region` specified in the provider configuration.", + Description: fmt.Sprintf("%s\n%s", descriptions["main"], descriptions["deprecation_message"]), + MarkdownDescription: fmt.Sprintf("%s\n\n!> %s", descriptions["main"], descriptions["deprecation_message"]), + DeprecationMessage: descriptions["deprecation_message"], Attributes: map[string]schema.Attribute{ "id": schema.StringAttribute{ - Description: "Terraform's internal data source. ID. It is structured as \"`project_id`,`instance_id`\".", + Description: "Terraform's internal data source ID. It is structured as \"`project_id`,`instance_id`\".", Computed: true, }, "project_id": schema.StringAttribute{ diff --git a/stackit/internal/services/argus/instance/resource.go b/stackit/internal/services/argus/instance/resource.go index 34e4833c..d0e1a247 100644 --- a/stackit/internal/services/argus/instance/resource.go +++ b/stackit/internal/services/argus/instance/resource.go @@ -375,8 +375,15 @@ func (r *instanceResource) Configure(ctx context.Context, req resource.Configure // Schema defines the schema for the resource. func (r *instanceResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + descriptions := map[string]string{ + "main": "Argus instance resource schema. Must have a `region` specified in the provider configuration.", + "deprecation_message": "The `stackit_argus_instance` resource has been deprecated and will be removed after February 26th 2025. " + + "Please use `stackit_observability_instance` instead, which offers the exact same functionality.", + } resp.Schema = schema.Schema{ - Description: "Argus instance resource schema. 
Must have a `region` specified in the provider configuration.", + Description: fmt.Sprintf("%s\n%s", descriptions["main"], descriptions["deprecation_message"]), + MarkdownDescription: fmt.Sprintf("%s\n\n!> %s", descriptions["main"], descriptions["deprecation_message"]), + DeprecationMessage: descriptions["deprecation_message"], Attributes: map[string]schema.Attribute{ "id": schema.StringAttribute{ Description: "Terraform's internal resource ID. It is structured as \"`project_id`,`instance_id`\".", diff --git a/stackit/internal/services/argus/scrapeconfig/datasource.go b/stackit/internal/services/argus/scrapeconfig/datasource.go index bc394f6e..5bdb3e89 100644 --- a/stackit/internal/services/argus/scrapeconfig/datasource.go +++ b/stackit/internal/services/argus/scrapeconfig/datasource.go @@ -76,8 +76,15 @@ func (d *scrapeConfigDataSource) Configure(ctx context.Context, req datasource.C // Schema defines the schema for the data source. func (d *scrapeConfigDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + descriptions := map[string]string{ + "main": "Argus scrape config data source schema. Must have a `region` specified in the provider configuration.", + "deprecation_message": "The `stackit_argus_scrapeconfig` data source has been deprecated and will be removed after February 26th 2025. " + + "Please use `stackit_observability_scrapeconfig` instead, which offers the exact same functionality.", + } resp.Schema = schema.Schema{ - Description: "Argus scrape config data source schema. 
Must have a `region` specified in the provider configuration.", + Description: fmt.Sprintf("%s\n%s", descriptions["main"], descriptions["deprecation_message"]), + MarkdownDescription: fmt.Sprintf("%s\n\n!> %s", descriptions["main"], descriptions["deprecation_message"]), + DeprecationMessage: descriptions["deprecation_message"], Attributes: map[string]schema.Attribute{ "id": schema.StringAttribute{ Description: "Terraform's internal data source. ID. It is structured as \"`project_id`,`instance_id`,`name`\".", diff --git a/stackit/internal/services/argus/scrapeconfig/resource.go b/stackit/internal/services/argus/scrapeconfig/resource.go index 051df8e0..64ee3162 100644 --- a/stackit/internal/services/argus/scrapeconfig/resource.go +++ b/stackit/internal/services/argus/scrapeconfig/resource.go @@ -151,8 +151,15 @@ func (r *scrapeConfigResource) Configure(ctx context.Context, req resource.Confi // Schema defines the schema for the resource. func (r *scrapeConfigResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + descriptions := map[string]string{ + "main": "Argus scrape config resource schema. Must have a `region` specified in the provider configuration.", + "deprecation_message": "The `stackit_argus_scrapeconfig` resource has been deprecated and will be removed after February 26th 2025. " + + "Please use `stackit_observability_scrapeconfig` instead, which offers the exact same functionality.", + } resp.Schema = schema.Schema{ - Description: "Argus scrape config resource schema. Must have a `region` specified in the provider configuration.", + Description: fmt.Sprintf("%s\n%s", descriptions["main"], descriptions["deprecation_message"]), + MarkdownDescription: fmt.Sprintf("%s\n\n!> %s", descriptions["main"], descriptions["deprecation_message"]), + DeprecationMessage: descriptions["deprecation_message"], Attributes: map[string]schema.Attribute{ "id": schema.StringAttribute{ Description: "Terraform's internal resource ID. 
It is structured as \"`project_id`,`instance_id`,`name`\".", diff --git a/stackit/internal/services/observability/credential/resource.go b/stackit/internal/services/observability/credential/resource.go new file mode 100644 index 00000000..0cdab8b0 --- /dev/null +++ b/stackit/internal/services/observability/credential/resource.go @@ -0,0 +1,252 @@ +package observability + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/core/oapierror" + "github.com/stackitcloud/stackit-sdk-go/services/observability" + "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/validate" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ resource.Resource = &credentialResource{} + _ resource.ResourceWithConfigure = &credentialResource{} +) + +type Model struct { + Id types.String `tfsdk:"id"` + ProjectId types.String `tfsdk:"project_id"` + InstanceId types.String `tfsdk:"instance_id"` + Username types.String `tfsdk:"username"` + Password types.String `tfsdk:"password"` +} + +// NewCredentialResource is a helper function to simplify the provider implementation. +func NewCredentialResource() resource.Resource { + return &credentialResource{} +} + +// credentialResource is the resource implementation. 
+type credentialResource struct { + client *observability.APIClient +} + +// Metadata returns the resource type name. +func (r *credentialResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_observability_credential" +} + +// Configure adds the provider configured client to the resource. +func (r *credentialResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Expected configure type stackit.ProviderData, got %T", req.ProviderData)) + return + } + + var apiClient *observability.APIClient + var err error + if providerData.ObservabilityCustomEndpoint != "" { + apiClient, err = observability.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.ObservabilityCustomEndpoint), + ) + } else { + apiClient, err = observability.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Configuring client: %v. This is an error related to the provider configuration, not to the resource configuration", err)) + return + } + + r.client = apiClient + tflog.Info(ctx, "Observability credential client configured") +} + +func (r *credentialResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "Observability credential resource schema. 
Must have a `region` specified in the provider configuration.", + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: "Terraform's internal resource ID. It is structured as \"`project_id`,`instance_id`,`username`\".", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "project_id": schema.StringAttribute{ + Description: "STACKIT project ID to which the credential is associated.", + Required: true, + Validators: []validator.String{ + validate.UUID(), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "instance_id": schema.StringAttribute{ + Description: "The Observability Instance ID the credential belongs to.", + Required: true, + Validators: []validator.String{ + validate.UUID(), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "username": schema.StringAttribute{ + Description: "Credential username", + Computed: true, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), + }, + }, + "password": schema.StringAttribute{ + Description: "Credential password", + Computed: true, + Sensitive: true, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + }, + } +} + +// Create creates the resource and sets the initial Terraform state. +func (r *credentialResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	projectId := model.ProjectId.ValueString()
+	instanceId := model.InstanceId.ValueString()
+
+	got, err := r.client.CreateCredentials(ctx, instanceId, projectId).Execute()
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating credential", fmt.Sprintf("Calling API: %v", err))
+		return
+	}
+	err = mapFields(got.Credentials, &model)
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating credential", fmt.Sprintf("Processing API payload: %v", err))
+		return
+	}
+	diags = resp.State.Set(ctx, &model)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+	tflog.Info(ctx, "Observability credential created")
+}
+
+func mapFields(r *observability.Credentials, model *Model) error {
+	if r == nil {
+		return fmt.Errorf("response input is nil")
+	}
+	if model == nil {
+		return fmt.Errorf("model input is nil")
+	}
+	var userName string
+	if model.Username.ValueString() != "" {
+		userName = model.Username.ValueString()
+	} else if r.Username != nil {
+		userName = *r.Username
+	} else {
+		return fmt.Errorf("username is not present")
+	}
+	idParts := []string{
+		model.ProjectId.ValueString(),
+		model.InstanceId.ValueString(),
+		userName,
+	}
+	model.Id = types.StringValue(
+		strings.Join(idParts, core.Separator),
+	)
+	model.Username = types.StringPointerValue(r.Username)
+	model.Password = types.StringPointerValue(r.Password)
+	return nil
+}
+
+// Read refreshes the Terraform state with the latest data.
+func (r *credentialResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform
+	var model Model
+	diags := req.State.Get(ctx, &model)
+	resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + userName := model.Username.ValueString() + _, err := r.client.GetCredentials(ctx, instanceId, projectId, userName).Execute() + if err != nil { + oapiErr, ok := err.(*oapierror.GenericOpenAPIError) //nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped + if ok && oapiErr.StatusCode == http.StatusNotFound { + resp.State.RemoveResource(ctx) + return + } + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading credential", fmt.Sprintf("Calling API: %v", err)) + return + } + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + tflog.Info(ctx, "Observability credential read") +} + +func (r *credentialResource) Update(ctx context.Context, _ resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform + // Update shouldn't be called + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating credential", "Credential can't be updated") +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *credentialResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform + // Retrieve values from state + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + userName := model.Username.ValueString() + _, err := r.client.DeleteCredentials(ctx, instanceId, projectId, userName).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting credential", fmt.Sprintf("Calling API: %v", err)) + return + } + tflog.Info(ctx, "Observability credential deleted") +} diff --git a/stackit/internal/services/observability/credential/resource_test.go b/stackit/internal/services/observability/credential/resource_test.go new file mode 100644 index 00000000..34ace309 --- /dev/null +++ b/stackit/internal/services/observability/credential/resource_test.go @@ -0,0 +1,77 @@ +package observability + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/observability" +) + +func TestMapFields(t *testing.T) { + tests := []struct { + description string + input *observability.Credentials + expected Model + isValid bool + }{ + { + "ok", + &observability.Credentials{ + Username: utils.Ptr("username"), + Password: utils.Ptr("password"), + }, + Model{ + Id: types.StringValue("pid,iid,username"), + ProjectId: types.StringValue("pid"), + InstanceId: types.StringValue("iid"), + Username: types.StringValue("username"), + Password: types.StringValue("password"), + }, + true, + }, + { + "response_nil_fail", + nil, + Model{}, + false, + }, + { + "response_fields_nil_fail", + &observability.Credentials{ + Password: nil, + Username: nil, + }, + Model{}, + false, + }, + { + "no_resource_id", + &observability.Credentials{}, + Model{}, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + state := &Model{ + ProjectId: tt.expected.ProjectId, + InstanceId: tt.expected.InstanceId, + } + err := 
mapFields(tt.input, state) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(state, &tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} diff --git a/stackit/internal/services/observability/instance/datasource.go b/stackit/internal/services/observability/instance/datasource.go new file mode 100644 index 00000000..b2d64534 --- /dev/null +++ b/stackit/internal/services/observability/instance/datasource.go @@ -0,0 +1,435 @@ +package observability + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/core/oapierror" + "github.com/stackitcloud/stackit-sdk-go/services/observability" + "github.com/stackitcloud/stackit-sdk-go/services/observability/wait" + "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/validate" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &instanceDataSource{} +) + +// NewInstanceDataSource is a helper function to simplify the provider implementation. +func NewInstanceDataSource() datasource.DataSource { + return &instanceDataSource{} +} + +// instanceDataSource is the data source implementation. +type instanceDataSource struct { + client *observability.APIClient +} + +// Metadata returns the data source type name. 
+func (d *instanceDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_observability_instance" +} + +func (d *instanceDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + var apiClient *observability.APIClient + var err error + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Expected configure type stackit.ProviderData, got %T", req.ProviderData)) + return + } + + if providerData.ObservabilityCustomEndpoint != "" { + apiClient, err = observability.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.ObservabilityCustomEndpoint), + ) + } else { + apiClient, err = observability.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Configuring client: %v. This is an error related to the provider configuration, not to the data source configuration", err)) + return + } + d.client = apiClient + tflog.Info(ctx, "Observability instance client configured") +} + +// Schema defines the schema for the data source. +func (d *instanceDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "Observability instance data source schema. Must have a `region` specified in the provider configuration.", + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: "Terraform's internal data source. ID. 
It is structured as \"`project_id`,`instance_id`\".", + Computed: true, + }, + "project_id": schema.StringAttribute{ + Description: "STACKIT project ID to which the instance is associated.", + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "instance_id": schema.StringAttribute{ + Description: "The Observability instance ID.", + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "name": schema.StringAttribute{ + Description: "The name of the Observability instance.", + Computed: true, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), + stringvalidator.LengthAtMost(300), + }, + }, + "plan_name": schema.StringAttribute{ + Description: "Specifies the Observability plan. E.g. `Monitoring-Medium-EU01`.", + Computed: true, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), + stringvalidator.LengthAtMost(200), + }, + }, + "plan_id": schema.StringAttribute{ + Description: "The Observability plan ID.", + Computed: true, + Validators: []validator.String{ + validate.UUID(), + }, + }, + "parameters": schema.MapAttribute{ + Description: "Additional parameters.", + Computed: true, + ElementType: types.StringType, + }, + "dashboard_url": schema.StringAttribute{ + Description: "Specifies Observability instance dashboard URL.", + Computed: true, + }, + "is_updatable": schema.BoolAttribute{ + Description: "Specifies if the instance can be updated.", + Computed: true, + }, + "grafana_public_read_access": schema.BoolAttribute{ + Description: "If true, anyone can access Grafana dashboards without logging in.", + Computed: true, + }, + "grafana_url": schema.StringAttribute{ + Description: "Specifies Grafana URL.", + Computed: true, + }, + "grafana_initial_admin_user": schema.StringAttribute{ + Description: "Specifies an initial Grafana admin username.", + Computed: true, + }, + "grafana_initial_admin_password": schema.StringAttribute{ 
+ Description: "Specifies an initial Grafana admin password.", + Computed: true, + Sensitive: true, + }, + "metrics_retention_days": schema.Int64Attribute{ + Description: "Specifies for how many days the raw metrics are kept.", + Computed: true, + }, + "metrics_retention_days_5m_downsampling": schema.Int64Attribute{ + Description: "Specifies for how many days the 5m downsampled metrics are kept. must be less than the value of the general retention. Default is set to `0` (disabled).", + Computed: true, + }, + "metrics_retention_days_1h_downsampling": schema.Int64Attribute{ + Description: "Specifies for how many days the 1h downsampled metrics are kept. must be less than the value of the 5m downsampling retention. Default is set to `0` (disabled).", + Computed: true, + }, + "metrics_url": schema.StringAttribute{ + Description: "Specifies metrics URL.", + Computed: true, + }, + "metrics_push_url": schema.StringAttribute{ + Description: "Specifies URL for pushing metrics.", + Computed: true, + }, + "targets_url": schema.StringAttribute{ + Description: "Specifies Targets URL.", + Computed: true, + }, + "alerting_url": schema.StringAttribute{ + Description: "Specifies Alerting URL.", + Computed: true, + }, + "logs_url": schema.StringAttribute{ + Description: "Specifies Logs URL.", + Computed: true, + }, + "logs_push_url": schema.StringAttribute{ + Description: "Specifies URL for pushing logs.", + Computed: true, + }, + "jaeger_traces_url": schema.StringAttribute{ + Computed: true, + }, + "jaeger_ui_url": schema.StringAttribute{ + Computed: true, + }, + "otlp_traces_url": schema.StringAttribute{ + Computed: true, + }, + "zipkin_spans_url": schema.StringAttribute{ + Computed: true, + }, + "acl": schema.SetAttribute{ + Description: "The access control list for this instance. 
Each entry is an IP address range that is permitted to access, in CIDR notation.", + ElementType: types.StringType, + Computed: true, + }, + "alert_config": schema.SingleNestedAttribute{ + Description: "Alert configuration for the instance.", + Computed: true, + Attributes: map[string]schema.Attribute{ + "receivers": schema.ListNestedAttribute{ + Description: "List of alert receivers.", + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Description: "Name of the receiver.", + Computed: true, + }, + "email_configs": schema.ListNestedAttribute{ + Description: "List of email configurations.", + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "auth_identity": schema.StringAttribute{ + Description: "SMTP authentication information. Must be a valid email address", + Computed: true, + }, + "auth_password": schema.StringAttribute{ + Description: "SMTP authentication password.", + Computed: true, + }, + "auth_username": schema.StringAttribute{ + Description: "SMTP authentication username.", + Computed: true, + }, + "from": schema.StringAttribute{ + Description: "The sender email address. Must be a valid email address", + Computed: true, + }, + "smart_host": schema.StringAttribute{ + Description: "The SMTP host through which emails are sent.", + Computed: true, + }, + "to": schema.StringAttribute{ + Description: "The email address to send notifications to. Must be a valid email address", + Computed: true, + }, + }, + }, + }, + "opsgenie_configs": schema.ListNestedAttribute{ + Description: "List of OpsGenie configurations.", + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "api_key": schema.StringAttribute{ + Description: "The API key for OpsGenie.", + Computed: true, + }, + "api_url": schema.StringAttribute{ + Description: "The host to send OpsGenie API requests to. 
Must be a valid URL", + Computed: true, + }, + "tags": schema.StringAttribute{ + Description: "Comma separated list of tags attached to the notifications.", + Computed: true, + }, + }, + }, + }, + "webhooks_configs": schema.ListNestedAttribute{ + Description: "List of Webhooks configurations.", + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "url": schema.StringAttribute{ + Description: "The endpoint to send HTTP POST requests to. Must be a valid URL", + Computed: true, + }, + "ms_teams": schema.BoolAttribute{ + Description: "Microsoft Teams webhooks require special handling, set this to true if the webhook is for Microsoft Teams.", + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "route": schema.SingleNestedAttribute{ + Description: "The route for the alert.", + Computed: true, + Attributes: map[string]schema.Attribute{ + "group_by": schema.ListAttribute{ + Description: "The labels by which incoming alerts are grouped together. For example, multiple alerts coming in for cluster=A and alertname=LatencyHigh would be batched into a single group. To aggregate by all possible labels use the special value '...' as the sole label name, for example: group_by: ['...']. This effectively disables aggregation entirely, passing through all alerts as-is. This is unlikely to be what you want, unless you have a very low alert volume or your upstream notification system performs its own grouping.", + Computed: true, + ElementType: types.StringType, + }, + "group_interval": schema.StringAttribute{ + Description: "How long to wait before sending a notification about new alerts that are added to a group of alerts for which an initial notification has already been sent. (Usually ~5m or more.)", + Computed: true, + }, + "group_wait": schema.StringAttribute{ + Description: "How long to initially wait to send a notification for a group of alerts. 
Allows to wait for an inhibiting alert to arrive or collect more initial alerts for the same group. (Usually ~0s to few minutes.) .", + Computed: true, + }, + "match": schema.MapAttribute{ + Description: "A set of equality matchers an alert has to fulfill to match the node.", + Computed: true, + ElementType: types.StringType, + }, + "match_regex": schema.MapAttribute{ + Description: "A set of regex-matchers an alert has to fulfill to match the node.", + Computed: true, + ElementType: types.StringType, + }, + "receiver": schema.StringAttribute{ + Description: "The name of the receiver to route the alerts to.", + Computed: true, + }, + "repeat_interval": schema.StringAttribute{ + Description: "How long to wait before sending a notification again if it has already been sent successfully for an alert. (Usually ~3h or more).", + Computed: true, + }, + "routes": getDatasourceRouteNestedObject(), + }, + }, + "global": schema.SingleNestedAttribute{ + Description: "Global configuration for the alerts.", + Computed: true, + Attributes: map[string]schema.Attribute{ + "opsgenie_api_key": schema.StringAttribute{ + Description: "The API key for OpsGenie.", + Computed: true, + Sensitive: true, + }, + "opsgenie_api_url": schema.StringAttribute{ + Description: "The host to send OpsGenie API requests to. Must be a valid URL", + Computed: true, + }, + "resolve_timeout": schema.StringAttribute{ + Description: "The default value used by alertmanager if the alert does not include EndsAt. After this time passes, it can declare the alert as resolved if it has not been updated. This has no impact on alerts from Prometheus, as they always include EndsAt.", + Computed: true, + }, + "smtp_auth_identity": schema.StringAttribute{ + Description: "SMTP authentication information. 
Must be a valid email address", + Computed: true, + }, + "smtp_auth_password": schema.StringAttribute{ + Description: "SMTP Auth using LOGIN and PLAIN.", + Computed: true, + Sensitive: true, + }, + "smtp_auth_username": schema.StringAttribute{ + Description: "SMTP Auth using CRAM-MD5, LOGIN and PLAIN. If empty, Alertmanager doesn't authenticate to the SMTP server.", + Computed: true, + }, + "smtp_from": schema.StringAttribute{ + Description: "The default SMTP From header field. Must be a valid email address", + Computed: true, + }, + "smtp_smart_host": schema.StringAttribute{ + Description: "The default SMTP smarthost used for sending emails, including port number. Port number usually is 25, or 587 for SMTP over TLS (sometimes referred to as STARTTLS).", + Computed: true, + }, + }, + }, + }, + }, + }, + } +} + +// Read refreshes the Terraform state with the latest data. +func (d *instanceDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.Config.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+	if resp.Diagnostics.HasError() {
+		return
+	}
+	projectId := model.ProjectId.ValueString()
+	instanceId := model.InstanceId.ValueString()
+	instanceResp, err := d.client.GetInstance(ctx, instanceId, projectId).Execute()
+	if err != nil {
+		oapiErr, ok := err.(*oapierror.GenericOpenAPIError) //nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped
+		if ok && oapiErr.StatusCode == http.StatusNotFound {
+			resp.State.RemoveResource(ctx)
+		}
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Calling API: %v", err))
+		return
+	}
+	if instanceResp != nil && instanceResp.Status != nil && *instanceResp.Status == wait.DeleteSuccess {
+		resp.State.RemoveResource(ctx)
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", "Instance was deleted successfully")
+		return
+	}
+
+	aclListResp, err := d.client.ListACL(ctx, instanceId, projectId).Execute()
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Calling API to list ACL data: %v", err))
+		return
+	}
+
+	alertConfigResp, err := d.client.GetAlertConfigs(ctx, instanceId, projectId).Execute()
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Calling API to get alert config: %v", err))
+		return
+	}
+
+	// Map response body to schema
+	err = mapFields(ctx, instanceResp, &model)
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Processing API payload: %v", err))
+		return
+	}
+	err = mapACLField(aclListResp, &model)
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Processing API response for the ACL: %v", err))
+		return
+	}
+	err = mapAlertConfigField(ctx, alertConfigResp, &model)
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Processing API response 
for the alert config: %v", err)) + return + } + + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + tflog.Info(ctx, "Observability instance read") +} diff --git a/stackit/internal/services/observability/instance/resource.go b/stackit/internal/services/observability/instance/resource.go new file mode 100644 index 00000000..ebf763dc --- /dev/null +++ b/stackit/internal/services/observability/instance/resource.go @@ -0,0 +1,2176 @@ +package observability + +import ( + "context" + "fmt" + "net/http" + "strconv" + "strings" + + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/boolplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/mapplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/core/oapierror" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/observability" + "github.com/stackitcloud/stackit-sdk-go/services/observability/wait" + 
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/conversion" + "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/validate" +) + +// Currently, due to incorrect types in the API, the maximum recursion level for child routes is set to 1. +// Once this is fixed, the value should be set to 10. +const childRouteMaxRecursionLevel = 1 + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ resource.Resource = &instanceResource{} + _ resource.ResourceWithConfigure = &instanceResource{} + _ resource.ResourceWithImportState = &instanceResource{} +) + +type Model struct { + Id types.String `tfsdk:"id"` // needed by TF + ProjectId types.String `tfsdk:"project_id"` + InstanceId types.String `tfsdk:"instance_id"` + Name types.String `tfsdk:"name"` + PlanName types.String `tfsdk:"plan_name"` + PlanId types.String `tfsdk:"plan_id"` + Parameters types.Map `tfsdk:"parameters"` + DashboardURL types.String `tfsdk:"dashboard_url"` + IsUpdatable types.Bool `tfsdk:"is_updatable"` + GrafanaURL types.String `tfsdk:"grafana_url"` + GrafanaPublicReadAccess types.Bool `tfsdk:"grafana_public_read_access"` + GrafanaInitialAdminPassword types.String `tfsdk:"grafana_initial_admin_password"` + GrafanaInitialAdminUser types.String `tfsdk:"grafana_initial_admin_user"` + MetricsRetentionDays types.Int64 `tfsdk:"metrics_retention_days"` + MetricsRetentionDays5mDownsampling types.Int64 `tfsdk:"metrics_retention_days_5m_downsampling"` + MetricsRetentionDays1hDownsampling types.Int64 `tfsdk:"metrics_retention_days_1h_downsampling"` + MetricsURL types.String `tfsdk:"metrics_url"` + MetricsPushURL types.String `tfsdk:"metrics_push_url"` + TargetsURL types.String `tfsdk:"targets_url"` + AlertingURL types.String `tfsdk:"alerting_url"` + LogsURL types.String `tfsdk:"logs_url"` + LogsPushURL types.String `tfsdk:"logs_push_url"` + JaegerTracesURL types.String 
`tfsdk:"jaeger_traces_url"` + JaegerUIURL types.String `tfsdk:"jaeger_ui_url"` + OtlpTracesURL types.String `tfsdk:"otlp_traces_url"` + ZipkinSpansURL types.String `tfsdk:"zipkin_spans_url"` + ACL types.Set `tfsdk:"acl"` + AlertConfig types.Object `tfsdk:"alert_config"` +} + +// Struct corresponding to Model.AlertConfig +type alertConfigModel struct { + GlobalConfiguration types.Object `tfsdk:"global"` + Receivers types.List `tfsdk:"receivers"` + Route types.Object `tfsdk:"route"` +} + +var alertConfigTypes = map[string]attr.Type{ + "receivers": types.ListType{ElemType: types.ObjectType{AttrTypes: receiversTypes}}, + "route": types.ObjectType{AttrTypes: routeTypes}, + "global": types.ObjectType{AttrTypes: globalConfigurationTypes}, +} + +// Struct corresponding to Model.AlertConfig.global +type globalConfigurationModel struct { + OpsgenieApiKey types.String `tfsdk:"opsgenie_api_key"` + OpsgenieApiUrl types.String `tfsdk:"opsgenie_api_url"` + ResolveTimeout types.String `tfsdk:"resolve_timeout"` + SmtpAuthIdentity types.String `tfsdk:"smtp_auth_identity"` + SmtpAuthPassword types.String `tfsdk:"smtp_auth_password"` + SmtpAuthUsername types.String `tfsdk:"smtp_auth_username"` + SmtpFrom types.String `tfsdk:"smtp_from"` + SmtpSmartHost types.String `tfsdk:"smtp_smart_host"` +} + +var globalConfigurationTypes = map[string]attr.Type{ + "opsgenie_api_key": types.StringType, + "opsgenie_api_url": types.StringType, + "resolve_timeout": types.StringType, + "smtp_auth_identity": types.StringType, + "smtp_auth_password": types.StringType, + "smtp_auth_username": types.StringType, + "smtp_from": types.StringType, + "smtp_smart_host": types.StringType, +} + +// Struct corresponding to Model.AlertConfig.route +type routeModel struct { + GroupBy types.List `tfsdk:"group_by"` + GroupInterval types.String `tfsdk:"group_interval"` + GroupWait types.String `tfsdk:"group_wait"` + Match types.Map `tfsdk:"match"` + MatchRegex types.Map `tfsdk:"match_regex"` + Receiver types.String 
`tfsdk:"receiver"` + RepeatInterval types.String `tfsdk:"repeat_interval"` + Routes types.List `tfsdk:"routes"` +} + +// Struct corresponding to Model.AlertConfig.route but without the recursive routes field +// This is used to map the last level of recursion of the routes field +type routeModelNoRoutes struct { + GroupBy types.List `tfsdk:"group_by"` + GroupInterval types.String `tfsdk:"group_interval"` + GroupWait types.String `tfsdk:"group_wait"` + Match types.Map `tfsdk:"match"` + MatchRegex types.Map `tfsdk:"match_regex"` + Receiver types.String `tfsdk:"receiver"` + RepeatInterval types.String `tfsdk:"repeat_interval"` +} + +var routeTypes = map[string]attr.Type{ + "group_by": types.ListType{ElemType: types.StringType}, + "group_interval": types.StringType, + "group_wait": types.StringType, + "match": types.MapType{ElemType: types.StringType}, + "match_regex": types.MapType{ElemType: types.StringType}, + "receiver": types.StringType, + "repeat_interval": types.StringType, + "routes": types.ListType{ElemType: getRouteListType()}, +} + +// Struct corresponding to Model.AlertConfig.receivers +type receiversModel struct { + Name types.String `tfsdk:"name"` + EmailConfigs types.List `tfsdk:"email_configs"` + OpsGenieConfigs types.List `tfsdk:"opsgenie_configs"` + WebHooksConfigs types.List `tfsdk:"webhooks_configs"` +} + +var receiversTypes = map[string]attr.Type{ + "name": types.StringType, + "email_configs": types.ListType{ElemType: types.ObjectType{AttrTypes: emailConfigsTypes}}, + "opsgenie_configs": types.ListType{ElemType: types.ObjectType{AttrTypes: opsgenieConfigsTypes}}, + "webhooks_configs": types.ListType{ElemType: types.ObjectType{AttrTypes: webHooksConfigsTypes}}, +} + +// Struct corresponding to Model.AlertConfig.receivers.emailConfigs +type emailConfigsModel struct { + AuthIdentity types.String `tfsdk:"auth_identity"` + AuthPassword types.String `tfsdk:"auth_password"` + AuthUsername types.String `tfsdk:"auth_username"` + From types.String 
`tfsdk:"from"` + Smarthost types.String `tfsdk:"smart_host"` + To types.String `tfsdk:"to"` +} + +var emailConfigsTypes = map[string]attr.Type{ + "auth_identity": types.StringType, + "auth_password": types.StringType, + "auth_username": types.StringType, + "from": types.StringType, + "smart_host": types.StringType, + "to": types.StringType, +} + +// Struct corresponding to Model.AlertConfig.receivers.opsGenieConfigs +type opsgenieConfigsModel struct { + ApiKey types.String `tfsdk:"api_key"` + ApiUrl types.String `tfsdk:"api_url"` + Tags types.String `tfsdk:"tags"` +} + +var opsgenieConfigsTypes = map[string]attr.Type{ + "api_key": types.StringType, + "api_url": types.StringType, + "tags": types.StringType, +} + +// Struct corresponding to Model.AlertConfig.receivers.webHooksConfigs +type webHooksConfigsModel struct { + Url types.String `tfsdk:"url"` + MsTeams types.Bool `tfsdk:"ms_teams"` +} + +var webHooksConfigsTypes = map[string]attr.Type{ + "url": types.StringType, + "ms_teams": types.BoolType, +} + +var routeDescriptions = map[string]string{ + "group_by": "The labels by which incoming alerts are grouped together. For example, multiple alerts coming in for cluster=A and alertname=LatencyHigh would be batched into a single group. To aggregate by all possible labels use the special value '...' as the sole label name, for example: group_by: ['...']. This effectively disables aggregation entirely, passing through all alerts as-is. This is unlikely to be what you want, unless you have a very low alert volume or your upstream notification system performs its own grouping.", + "group_interval": "How long to wait before sending a notification about new alerts that are added to a group of alerts for which an initial notification has already been sent. (Usually ~5m or more.)", + "group_wait": "How long to initially wait to send a notification for a group of alerts. Allows to wait for an inhibiting alert to arrive or collect more initial alerts for the same group. 
(Usually ~0s to few minutes.)", + "match": "A set of equality matchers an alert has to fulfill to match the node.", + "match_regex": "A set of regex-matchers an alert has to fulfill to match the node.", + "receiver": "The name of the receiver to route the alerts to.", + "repeat_interval": "How long to wait before sending a notification again if it has already been sent successfully for an alert. (Usually ~3h or more).", + "routes": "List of child routes.", +} + +// getRouteListType is a helper function to return the route list attribute type. +func getRouteListType() types.ObjectType { + return getRouteListTypeAux(1, childRouteMaxRecursionLevel) +} + +// getRouteListTypeAux returns the type of the route list attribute with the given level of child routes recursion. +// The level is used to determine the current depth of the nested object. +// The limit is used to determine the maximum depth of the nested object. +// The level should be lower or equal to the limit, if higher, the function will produce a stack overflow. 
+func getRouteListTypeAux(level, limit int) types.ObjectType { + attributeTypes := map[string]attr.Type{ + "group_by": types.ListType{ElemType: types.StringType}, + "group_interval": types.StringType, + "group_wait": types.StringType, + "match": types.MapType{ElemType: types.StringType}, + "match_regex": types.MapType{ElemType: types.StringType}, + "receiver": types.StringType, + "repeat_interval": types.StringType, + } + + if level != limit { + attributeTypes["routes"] = types.ListType{ElemType: getRouteListTypeAux(level+1, limit)} + } + + return types.ObjectType{AttrTypes: attributeTypes} +} + +func getRouteNestedObject() schema.ListNestedAttribute { + return getRouteNestedObjectAux(false, 1, childRouteMaxRecursionLevel) +} + +func getDatasourceRouteNestedObject() schema.ListNestedAttribute { + return getRouteNestedObjectAux(true, 1, childRouteMaxRecursionLevel) +} + +// getRouteNestedObjectAux returns the nested object for the route attribute with the given level of child routes recursion. +// The isDatasource is used to determine if the route is used in a datasource schema or not. If it is a datasource, all fields are computed. +// The level is used to determine the current depth of the nested object. +// The limit is used to determine the maximum depth of the nested object. +// The level should be lower or equal to the limit, if higher, the function will produce a stack overflow. 
+func getRouteNestedObjectAux(isDatasource bool, level, limit int) schema.ListNestedAttribute { + attributesMap := map[string]schema.Attribute{ + "group_by": schema.ListAttribute{ + Description: routeDescriptions["group_by"], + Optional: !isDatasource, + Computed: isDatasource, + ElementType: types.StringType, + }, + "group_interval": schema.StringAttribute{ + Description: routeDescriptions["group_interval"], + Optional: !isDatasource, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "group_wait": schema.StringAttribute{ + Description: routeDescriptions["group_wait"], + Optional: !isDatasource, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "match": schema.MapAttribute{ + Description: routeDescriptions["match"], + Optional: !isDatasource, + Computed: isDatasource, + ElementType: types.StringType, + }, + "match_regex": schema.MapAttribute{ + Description: routeDescriptions["match_regex"], + Optional: !isDatasource, + Computed: isDatasource, + ElementType: types.StringType, + }, + "receiver": schema.StringAttribute{ + Description: routeDescriptions["receiver"], + Required: !isDatasource, + Computed: isDatasource, + }, + "repeat_interval": schema.StringAttribute{ + Description: routeDescriptions["repeat_interval"], + Optional: !isDatasource, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + } + + if level != limit { + attributesMap["routes"] = getRouteNestedObjectAux(isDatasource, level+1, limit) + } + + return schema.ListNestedAttribute{ + Description: routeDescriptions["routes"], + Optional: !isDatasource, + Computed: isDatasource, + Validators: []validator.List{ + listvalidator.SizeAtLeast(1), + }, + NestedObject: schema.NestedAttributeObject{ + Attributes: attributesMap, + }, + } +} + +// NewInstanceResource is a helper function to simplify the provider implementation. 
+func NewInstanceResource() resource.Resource { + return &instanceResource{} +} + +// instanceResource is the resource implementation. +type instanceResource struct { + client *observability.APIClient +} + +// Metadata returns the resource type name. +func (r *instanceResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_observability_instance" +} + +// Configure adds the provider configured client to the resource. +func (r *instanceResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Expected configure type stackit.ProviderData, got %T", req.ProviderData)) + return + } + + var apiClient *observability.APIClient + var err error + if providerData.ObservabilityCustomEndpoint != "" { + apiClient, err = observability.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.ObservabilityCustomEndpoint), + ) + } else { + apiClient, err = observability.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Configuring client: %v. This is an error related to the provider configuration, not to the resource configuration", err)) + return + } + + r.client = apiClient + tflog.Info(ctx, "Observability instance client configured") +} + +// Schema defines the schema for the resource. 
+func (r *instanceResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "Observability instance resource schema. Must have a `region` specified in the provider configuration.", + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: "Terraform's internal resource ID. It is structured as \"`project_id`,`instance_id`\".", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "project_id": schema.StringAttribute{ + Description: "STACKIT project ID to which the instance is associated.", + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "instance_id": schema.StringAttribute{ + Description: "The Observability instance ID.", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "name": schema.StringAttribute{ + Description: "The name of the Observability instance.", + Required: true, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), + stringvalidator.LengthAtMost(200), + }, + }, + "plan_name": schema.StringAttribute{ + Description: "Specifies the Observability plan. E.g. 
`Monitoring-Medium-EU01`.", + Required: true, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), + stringvalidator.LengthAtMost(200), + }, + }, + "plan_id": schema.StringAttribute{ + Description: "The Observability plan ID.", + Computed: true, + Validators: []validator.String{ + validate.UUID(), + }, + }, + "parameters": schema.MapAttribute{ + Description: "Additional parameters.", + Optional: true, + Computed: true, + ElementType: types.StringType, + PlanModifiers: []planmodifier.Map{ + mapplanmodifier.UseStateForUnknown(), + }, + }, + "dashboard_url": schema.StringAttribute{ + Description: "Specifies Observability instance dashboard URL.", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "is_updatable": schema.BoolAttribute{ + Description: "Specifies if the instance can be updated.", + Computed: true, + PlanModifiers: []planmodifier.Bool{ + boolplanmodifier.UseStateForUnknown(), + }, + }, + "grafana_public_read_access": schema.BoolAttribute{ + Description: "If true, anyone can access Grafana dashboards without logging in.", + Computed: true, + PlanModifiers: []planmodifier.Bool{ + boolplanmodifier.UseStateForUnknown(), + }, + }, + "grafana_url": schema.StringAttribute{ + Description: "Specifies Grafana URL.", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "grafana_initial_admin_user": schema.StringAttribute{ + Description: "Specifies an initial Grafana admin username.", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "grafana_initial_admin_password": schema.StringAttribute{ + Description: "Specifies an initial Grafana admin password.", + Computed: true, + Sensitive: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "metrics_retention_days": schema.Int64Attribute{ + Description: "Specifies for how many 
days the raw metrics are kept.", + Optional: true, + Computed: true, + }, + "metrics_retention_days_5m_downsampling": schema.Int64Attribute{ + Description: "Specifies for how many days the 5m downsampled metrics are kept. must be less than the value of the general retention. Default is set to `0` (disabled).", + Optional: true, + Computed: true, + }, + "metrics_retention_days_1h_downsampling": schema.Int64Attribute{ + Description: "Specifies for how many days the 1h downsampled metrics are kept. must be less than the value of the 5m downsampling retention. Default is set to `0` (disabled).", + Optional: true, + Computed: true, + }, + "metrics_url": schema.StringAttribute{ + Description: "Specifies metrics URL.", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "metrics_push_url": schema.StringAttribute{ + Description: "Specifies URL for pushing metrics.", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "targets_url": schema.StringAttribute{ + Description: "Specifies Targets URL.", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "alerting_url": schema.StringAttribute{ + Description: "Specifies Alerting URL.", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "logs_url": schema.StringAttribute{ + Description: "Specifies Logs URL.", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "logs_push_url": schema.StringAttribute{ + Description: "Specifies URL for pushing logs.", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "jaeger_traces_url": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "jaeger_ui_url": 
schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "otlp_traces_url": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "zipkin_spans_url": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "acl": schema.SetAttribute{ + Description: "The access control list for this instance. Each entry is an IP address range that is permitted to access, in CIDR notation.", + ElementType: types.StringType, + Optional: true, + Validators: []validator.Set{ + setvalidator.ValueStringsAre( + validate.CIDR(), + ), + }, + }, + "alert_config": schema.SingleNestedAttribute{ + Description: "Alert configuration for the instance.", + Optional: true, + Attributes: map[string]schema.Attribute{ + "receivers": schema.ListNestedAttribute{ + Description: "List of alert receivers.", + Required: true, + Validators: []validator.List{ + listvalidator.SizeAtLeast(1), + }, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Description: "Name of the receiver.", + Required: true, + }, + "email_configs": schema.ListNestedAttribute{ + Description: "List of email configurations.", + Optional: true, + Validators: []validator.List{ + listvalidator.SizeAtLeast(1), + }, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "auth_identity": schema.StringAttribute{ + Description: "SMTP authentication information. 
Must be a valid email address", + Optional: true, + }, + "auth_password": schema.StringAttribute{ + Description: "SMTP authentication password.", + Optional: true, + }, + "auth_username": schema.StringAttribute{ + Description: "SMTP authentication username.", + Optional: true, + }, + "from": schema.StringAttribute{ + Description: "The sender email address. Must be a valid email address", + Optional: true, + }, + "smart_host": schema.StringAttribute{ + Description: "The SMTP host through which emails are sent.", + Optional: true, + }, + "to": schema.StringAttribute{ + Description: "The email address to send notifications to. Must be a valid email address", + Optional: true, + }, + }, + }, + }, + "opsgenie_configs": schema.ListNestedAttribute{ + Description: "List of OpsGenie configurations.", + Optional: true, + Validators: []validator.List{ + listvalidator.SizeAtLeast(1), + }, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "api_key": schema.StringAttribute{ + Description: "The API key for OpsGenie.", + Optional: true, + }, + "api_url": schema.StringAttribute{ + Description: "The host to send OpsGenie API requests to. Must be a valid URL", + Optional: true, + }, + "tags": schema.StringAttribute{ + Description: "Comma separated list of tags attached to the notifications.", + Optional: true, + }, + }, + }, + }, + "webhooks_configs": schema.ListNestedAttribute{ + Description: "List of Webhooks configurations.", + Optional: true, + Validators: []validator.List{ + listvalidator.SizeAtLeast(1), + }, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "url": schema.StringAttribute{ + Description: "The endpoint to send HTTP POST requests to. 
Must be a valid URL", + Optional: true, + }, + "ms_teams": schema.BoolAttribute{ + Description: "Microsoft Teams webhooks require special handling, set this to true if the webhook is for Microsoft Teams.", + Optional: true, + }, + }, + }, + }, + }, + }, + }, + "route": schema.SingleNestedAttribute{ + Description: "Route configuration for the alerts.", + Required: true, + Attributes: map[string]schema.Attribute{ + "group_by": schema.ListAttribute{ + Description: routeDescriptions["group_by"], + Optional: true, + ElementType: types.StringType, + }, + "group_interval": schema.StringAttribute{ + Description: routeDescriptions["group_interval"], + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "group_wait": schema.StringAttribute{ + Description: routeDescriptions["group_wait"], + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "match": schema.MapAttribute{ + Description: routeDescriptions["match"], + Optional: true, + ElementType: types.StringType, + }, + "match_regex": schema.MapAttribute{ + Description: routeDescriptions["match_regex"], + Optional: true, + ElementType: types.StringType, + }, + "receiver": schema.StringAttribute{ + Description: routeDescriptions["receiver"], + Required: true, + }, + "repeat_interval": schema.StringAttribute{ + Description: routeDescriptions["repeat_interval"], + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "routes": getRouteNestedObject(), + }, + }, + "global": schema.SingleNestedAttribute{ + Description: "Global configuration for the alerts.", + Optional: true, + Computed: true, + Attributes: map[string]schema.Attribute{ + "opsgenie_api_key": schema.StringAttribute{ + Description: "The API key for OpsGenie.", + Optional: true, + Sensitive: true, + }, + "opsgenie_api_url": 
schema.StringAttribute{ + Description: "The host to send OpsGenie API requests to. Must be a valid URL", + Optional: true, + }, + "resolve_timeout": schema.StringAttribute{ + Description: "The default value used by alertmanager if the alert does not include EndsAt. After this time passes, it can declare the alert as resolved if it has not been updated. This has no impact on alerts from Prometheus, as they always include EndsAt.", + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "smtp_auth_identity": schema.StringAttribute{ + Description: "SMTP authentication information. Must be a valid email address", + Optional: true, + }, + "smtp_auth_password": schema.StringAttribute{ + Description: "SMTP Auth using LOGIN and PLAIN.", + Optional: true, + Sensitive: true, + }, + "smtp_auth_username": schema.StringAttribute{ + Description: "SMTP Auth using CRAM-MD5, LOGIN and PLAIN. If empty, Alertmanager doesn't authenticate to the SMTP server.", + Optional: true, + }, + "smtp_from": schema.StringAttribute{ + Description: "The default SMTP From header field. Must be a valid email address", + Optional: true, + Computed: true, + }, + "smtp_smart_host": schema.StringAttribute{ + Description: "The default SMTP smarthost used for sending emails, including port number in format `host:port` (eg. `smtp.example.com:587`). Port number usually is 25, or 587 for SMTP over TLS (sometimes referred to as STARTTLS).", + Optional: true, + }, + }, + }, + }, + }, + }, + } +} + +// Create creates the resource and sets the initial Terraform state. +func (r *instanceResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform + // Retrieve values from plan + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + acl := []string{} + if !(model.ACL.IsNull() || model.ACL.IsUnknown()) { + diags = model.ACL.ElementsAs(ctx, &acl, false) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + + metricsRetentionDays := conversion.Int64ValueToPointer(model.MetricsRetentionDays) + metricsRetentionDays5mDownsampling := conversion.Int64ValueToPointer(model.MetricsRetentionDays5mDownsampling) + metricsRetentionDays1hDownsampling := conversion.Int64ValueToPointer(model.MetricsRetentionDays1hDownsampling) + + alertConfig := alertConfigModel{} + if !(model.AlertConfig.IsNull() || model.AlertConfig.IsUnknown()) { + diags = model.AlertConfig.As(ctx, &alertConfig, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + + projectId := model.ProjectId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + + err := r.loadPlanId(ctx, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Loading service plan: %v", err)) + return + } + // Generate API request body from model + createPayload, err := toCreatePayload(&model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Creating API payload: %v", err)) + return + } + createResp, err := r.client.CreateInstance(ctx, projectId).CreateInstancePayload(*createPayload).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Calling API: %v", err)) + return + } + instanceId := createResp.InstanceId + ctx = tflog.SetField(ctx, "instance_id", instanceId) + waitResp, err := wait.CreateInstanceWaitHandler(ctx, r.client, *instanceId, projectId).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Instance creation waiting: %v", err)) + return + } + + // Map 
response body to schema + err = mapFields(ctx, waitResp, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Processing API payload: %v", err)) + return + } + + // Set state to instance populated data + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Create ACL + err = updateACL(ctx, projectId, *instanceId, acl, r.client) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Creating ACL: %v", err)) + return + } + aclList, err := r.client.ListACL(ctx, *instanceId, projectId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Calling API to list ACL data: %v", err)) + return + } + + // Map response body to schema + err = mapACLField(aclList, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Processing API response for the ACL: %v", err)) + return + } + + // Set state to fully populated data + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + // If any of the metrics retention days are set, set the metrics retention policy + if metricsRetentionDays != nil || metricsRetentionDays5mDownsampling != nil || metricsRetentionDays1hDownsampling != nil { + // Need to get the metrics retention policy because update endpoint is a PUT and we need to send all fields + metricsResp, err := r.client.GetMetricsStorageRetentionExecute(ctx, *instanceId, projectId) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Getting metrics retention policy: %v", err)) + return + } + + metricsRetentionPayload, err := toUpdateMetricsStorageRetentionPayload(metricsRetentionDays, metricsRetentionDays5mDownsampling, metricsRetentionDays1hDownsampling, metricsResp) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Building metrics retention policy payload: %v", err)) + return + } + + _, err = r.client.UpdateMetricsStorageRetention(ctx, *instanceId, projectId).UpdateMetricsStorageRetentionPayload(*metricsRetentionPayload).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Setting metrics retention policy: %v", err)) + return + } + } + + // Get metrics retention policy after update + metricsResp, err := r.client.GetMetricsStorageRetentionExecute(ctx, *instanceId, projectId) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Getting metrics retention policy: %v", err)) + return + } + // Map response body to schema + err = mapMetricsRetentionField(metricsResp, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Processing API response for the metrics retention: %v", err)) + return + } + + // Set state to fully populated data + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + // Alert Config + if model.AlertConfig.IsUnknown() || model.AlertConfig.IsNull() { + alertConfig, err = getMockAlertConfig(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Getting mock alert config: %v", err)) + return + } + } + + alertConfigPayload, err := toUpdateAlertConfigPayload(ctx, &alertConfig) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Building alert config payload: %v", err)) + return + } + + if alertConfigPayload != nil { + _, err = r.client.UpdateAlertConfigs(ctx, *instanceId, projectId).UpdateAlertConfigsPayload(*alertConfigPayload).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Setting alert config: %v", err)) + return + } + } + + // Get alert config after update + alertConfigResp, err := r.client.GetAlertConfigs(ctx, *instanceId, projectId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Getting alert config: %v", err)) + return + } + + // Map response body to schema + err = mapAlertConfigField(ctx, alertConfigResp, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Processing API response for the alert config: %v", err)) + return + } + + // Set state to fully populated data + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + tflog.Info(ctx, "Observability instance created") +} + +// Read refreshes the Terraform state with the latest data. +func (r *instanceResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + instanceResp, err := r.client.GetInstance(ctx, instanceId, projectId).Execute() + if err != nil { + oapiErr, ok := err.(*oapierror.GenericOpenAPIError) //nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped + if ok && oapiErr.StatusCode == http.StatusNotFound { + resp.State.RemoveResource(ctx) + return + } + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Calling API: %v", err)) + return + } + if instanceResp != nil && instanceResp.Status != nil && *instanceResp.Status == wait.DeleteSuccess { + resp.State.RemoveResource(ctx) + return + } + + aclListResp, err := r.client.ListACL(ctx, instanceId, projectId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Calling API for ACL data: %v", err)) + return + } + + metricsRetentionResp, err := r.client.GetMetricsStorageRetention(ctx, instanceId, projectId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Calling API to get metrics retention: %v", err)) + return + } + + alertConfigResp, err := r.client.GetAlertConfigs(ctx, instanceId, projectId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Calling API to get alert config: %v", err)) + return + } + + // Map response body to schema + err = mapFields(ctx, instanceResp, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Processing API payload: %v", err)) + return + } + + // Map response body to schema + err = mapACLField(aclListResp, &model) + if err != nil { + 
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Processing API response for the ACL: %v", err)) + return + } + + // Map response body to schema + err = mapMetricsRetentionField(metricsRetentionResp, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Processing API response for the metrics retention: %v", err)) + return + } + + // Map response body to schema + err = mapAlertConfigField(ctx, alertConfigResp, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Processing API response for the alert config: %v", err)) + return + } + + // Set state to fully populated data + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + tflog.Info(ctx, "Observability instance read") +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *instanceResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform + // Retrieve values from plan + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + + acl := []string{} + if !(model.ACL.IsNull() || model.ACL.IsUnknown()) { + diags = model.ACL.ElementsAs(ctx, &acl, false) + resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() { + return + } + } + + metricsRetentionDays := conversion.Int64ValueToPointer(model.MetricsRetentionDays) + metricsRetentionDays5mDownsampling := conversion.Int64ValueToPointer(model.MetricsRetentionDays5mDownsampling) + metricsRetentionDays1hDownsampling := conversion.Int64ValueToPointer(model.MetricsRetentionDays1hDownsampling) + + alertConfig := alertConfigModel{} + if !(model.AlertConfig.IsNull() || model.AlertConfig.IsUnknown()) { + diags = model.AlertConfig.As(ctx, &alertConfig, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + + err := r.loadPlanId(ctx, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Loading service plan: %v", err)) + return + } + + // Generate API request body from model + payload, err := toUpdatePayload(&model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Creating API payload: %v", err)) + return + } + // Update existing instance + _, err = r.client.UpdateInstance(ctx, instanceId, projectId).UpdateInstancePayload(*payload).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Calling API: %v", err)) + return + } + waitResp, err := wait.UpdateInstanceWaitHandler(ctx, r.client, instanceId, projectId).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Instance update waiting: %v", err)) + return + } + + err = mapFields(ctx, waitResp, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Processing API payload: %v", err)) + return + } + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + // Update ACL + err = updateACL(ctx, projectId, instanceId, acl, r.client) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Updating ACL: %v", err)) + return + } + aclList, err := r.client.ListACL(ctx, instanceId, projectId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Calling API to list ACL data: %v", err)) + return + } + + // Map response body to schema + err = mapACLField(aclList, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Processing API response for the ACL: %v", err)) + return + } + + // Set state to ACL populated data + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // If any of the metrics retention days are set, set the metrics retention policy + if metricsRetentionDays != nil || metricsRetentionDays5mDownsampling != nil || metricsRetentionDays1hDownsampling != nil { + // Need to get the metrics retention policy because update endpoint is a PUT and we need to send all fields + metricsResp, err := r.client.GetMetricsStorageRetentionExecute(ctx, instanceId, projectId) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Getting metrics retention policy: %v", err)) + return + } + + metricsRetentionPayload, err := toUpdateMetricsStorageRetentionPayload(metricsRetentionDays, metricsRetentionDays5mDownsampling, metricsRetentionDays1hDownsampling, metricsResp) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Building metrics retention policy payload: %v", err)) + return + } + _, err = r.client.UpdateMetricsStorageRetention(ctx, instanceId, projectId).UpdateMetricsStorageRetentionPayload(*metricsRetentionPayload).Execute() + if err != nil { + 
core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Setting metrics retention policy: %v", err)) + return + } + } + + // Get metrics retention policy after update + metricsResp, err := r.client.GetMetricsStorageRetentionExecute(ctx, instanceId, projectId) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Getting metrics retention policy: %v", err)) + return + } + + // Map response body to schema + err = mapMetricsRetentionField(metricsResp, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Processing API response for the metrics retention: %v", err)) + return + } + // Set state to fully populated data + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Alert Config + if model.AlertConfig.IsUnknown() || model.AlertConfig.IsNull() { + alertConfig, err = getMockAlertConfig(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Getting mock alert config: %v", err)) + return + } + } + + alertConfigPayload, err := toUpdateAlertConfigPayload(ctx, &alertConfig) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Building alert config payload: %v", err)) + return + } + + if alertConfigPayload != nil { + _, err = r.client.UpdateAlertConfigs(ctx, instanceId, projectId).UpdateAlertConfigsPayload(*alertConfigPayload).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Setting alert config: %v", err)) + return + } + } + + // Get updated alert config + alertConfigResp, err := r.client.GetAlertConfigs(ctx, instanceId, projectId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Calling API to get alert config: %v", err)) + return + } 
+ + // Map response body to schema + err = mapAlertConfigField(ctx, alertConfigResp, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Processing API response for the alert config: %v", err)) + return + } + + // Set state to fully populated data + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + tflog.Info(ctx, "Observability instance updated") +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *instanceResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform + // Retrieve values from state + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + + // Delete existing instance + _, err := r.client.DeleteInstance(ctx, instanceId, projectId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting instance", fmt.Sprintf("Calling API: %v", err)) + return + } + _, err = wait.DeleteInstanceWaitHandler(ctx, r.client, instanceId, projectId).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting instance", fmt.Sprintf("Instance deletion waiting: %v", err)) + return + } + + tflog.Info(ctx, "Observability instance deleted") +} + +// ImportState imports a resource into the Terraform state on success. 
+// The expected format of the resource import identifier is: project_id,instance_id +func (r *instanceResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + idParts := strings.Split(req.ID, core.Separator) + + if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { + core.LogAndAddError(ctx, &resp.Diagnostics, + "Error importing instance", + fmt.Sprintf("Expected import identifier with format: [project_id],[instance_id] Got: %q", req.ID), + ) + return + } + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), idParts[1])...) + tflog.Info(ctx, "Observability instance state imported") +} + +func mapFields(ctx context.Context, r *observability.GetInstanceResponse, model *Model) error { + if r == nil { + return fmt.Errorf("response input is nil") + } + if model == nil { + return fmt.Errorf("model input is nil") + } + var instanceId string + if model.InstanceId.ValueString() != "" { + instanceId = model.InstanceId.ValueString() + } else if r.Id != nil { + instanceId = *r.Id + } else { + return fmt.Errorf("instance id not present") + } + + idParts := []string{ + model.ProjectId.ValueString(), + instanceId, + } + model.Id = types.StringValue( + strings.Join(idParts, core.Separator), + ) + model.InstanceId = types.StringValue(instanceId) + model.PlanName = types.StringPointerValue(r.PlanName) + model.PlanId = types.StringPointerValue(r.PlanId) + model.Name = types.StringPointerValue(r.Name) + + ps := r.Parameters + if ps == nil { + model.Parameters = types.MapNull(types.StringType) + } else { + params := make(map[string]attr.Value, len(*ps)) + for k, v := range *ps { + params[k] = types.StringValue(v) + } + res, diags := types.MapValueFrom(ctx, types.StringType, params) + if diags.HasError() { + return fmt.Errorf("parameter mapping %s", diags.Errors()) + } + model.Parameters = res + 
} + + model.IsUpdatable = types.BoolPointerValue(r.IsUpdatable) + model.DashboardURL = types.StringPointerValue(r.DashboardUrl) + if r.Instance != nil { + i := *r.Instance + model.GrafanaURL = types.StringPointerValue(i.GrafanaUrl) + model.GrafanaPublicReadAccess = types.BoolPointerValue(i.GrafanaPublicReadAccess) + model.GrafanaInitialAdminPassword = types.StringPointerValue(i.GrafanaAdminPassword) + model.GrafanaInitialAdminUser = types.StringPointerValue(i.GrafanaAdminUser) + model.MetricsRetentionDays = types.Int64Value(int64(*i.MetricsRetentionTimeRaw)) + model.MetricsRetentionDays5mDownsampling = types.Int64Value(int64(*i.MetricsRetentionTime5m)) + model.MetricsRetentionDays1hDownsampling = types.Int64Value(int64(*i.MetricsRetentionTime1h)) + model.MetricsURL = types.StringPointerValue(i.MetricsUrl) + model.MetricsPushURL = types.StringPointerValue(i.PushMetricsUrl) + model.TargetsURL = types.StringPointerValue(i.TargetsUrl) + model.AlertingURL = types.StringPointerValue(i.AlertingUrl) + model.LogsURL = types.StringPointerValue(i.LogsUrl) + model.LogsPushURL = types.StringPointerValue(i.LogsPushUrl) + model.JaegerTracesURL = types.StringPointerValue(i.JaegerTracesUrl) + model.JaegerUIURL = types.StringPointerValue(i.JaegerUiUrl) + model.OtlpTracesURL = types.StringPointerValue(i.OtlpTracesUrl) + model.ZipkinSpansURL = types.StringPointerValue(i.ZipkinSpansUrl) + } + + return nil +} + +func mapACLField(aclList *observability.ListACLResponse, model *Model) error { + if aclList == nil { + return fmt.Errorf("mapping ACL: nil API response") + } + + if aclList.Acl == nil || len(*aclList.Acl) == 0 { + if !(model.ACL.IsNull() || model.ACL.IsUnknown() || model.ACL.Equal(types.SetValueMust(types.StringType, []attr.Value{}))) { + model.ACL = types.SetNull(types.StringType) + } + return nil + } + + acl := []attr.Value{} + for _, cidr := range *aclList.Acl { + acl = append(acl, types.StringValue(cidr)) + } + aclTF, diags := types.SetValue(types.StringType, acl) + if 
diags.HasError() { + return fmt.Errorf("mapping ACL: %w", core.DiagsToError(diags)) + } + model.ACL = aclTF + return nil +} + +func mapMetricsRetentionField(r *observability.GetMetricsStorageRetentionResponse, model *Model) error { + if r == nil { + return fmt.Errorf("response input is nil") + } + if model == nil { + return fmt.Errorf("model input is nil") + } + + if r.MetricsRetentionTimeRaw == nil || r.MetricsRetentionTime5m == nil || r.MetricsRetentionTime1h == nil { + return fmt.Errorf("metrics retention time is nil") + } + + stripedMetricsRetentionDays := strings.TrimSuffix(*r.MetricsRetentionTimeRaw, "d") + metricsRetentionDays, err := strconv.ParseInt(stripedMetricsRetentionDays, 10, 64) + if err != nil { + return fmt.Errorf("parsing metrics retention days: %w", err) + } + model.MetricsRetentionDays = types.Int64Value(metricsRetentionDays) + + stripedMetricsRetentionDays5m := strings.TrimSuffix(*r.MetricsRetentionTime5m, "d") + metricsRetentionDays5m, err := strconv.ParseInt(stripedMetricsRetentionDays5m, 10, 64) + if err != nil { + return fmt.Errorf("parsing metrics retention days 5m: %w", err) + } + model.MetricsRetentionDays5mDownsampling = types.Int64Value(metricsRetentionDays5m) + + stripedMetricsRetentionDays1h := strings.TrimSuffix(*r.MetricsRetentionTime1h, "d") + metricsRetentionDays1h, err := strconv.ParseInt(stripedMetricsRetentionDays1h, 10, 64) + if err != nil { + return fmt.Errorf("parsing metrics retention days 1h: %w", err) + } + model.MetricsRetentionDays1hDownsampling = types.Int64Value(metricsRetentionDays1h) + + return nil +} + +func mapAlertConfigField(ctx context.Context, resp *observability.GetAlertConfigsResponse, model *Model) error { + if resp == nil || resp.Data == nil { + model.AlertConfig = types.ObjectNull(alertConfigTypes) + return nil + } + + if model == nil { + return fmt.Errorf("nil model") + } + + var alertConfigTF *alertConfigModel + if !(model.AlertConfig.IsNull() || model.AlertConfig.IsUnknown()) { + alertConfigTF = 
&alertConfigModel{} + diags := model.AlertConfig.As(ctx, &alertConfigTF, basetypes.ObjectAsOptions{}) + if diags.HasError() { + return fmt.Errorf("mapping alert config: %w", core.DiagsToError(diags)) + } + } + + respReceivers := resp.Data.Receivers + respRoute := resp.Data.Route + respGlobalConfigs := resp.Data.Global + + receiversList, err := mapReceiversToAttributes(ctx, respReceivers) + if err != nil { + return fmt.Errorf("mapping alert config receivers: %w", err) + } + + route, err := mapRouteToAttributes(ctx, respRoute) + if err != nil { + return fmt.Errorf("mapping alert config route: %w", err) + } + + var globalConfigModel *globalConfigurationModel + if alertConfigTF != nil && !alertConfigTF.GlobalConfiguration.IsNull() && !alertConfigTF.GlobalConfiguration.IsUnknown() { + globalConfigModel = &globalConfigurationModel{} + diags := alertConfigTF.GlobalConfiguration.As(ctx, globalConfigModel, basetypes.ObjectAsOptions{}) + if diags.HasError() { + return fmt.Errorf("mapping alert config: %w", core.DiagsToError(diags)) + } + } + + globalConfig, err := mapGlobalConfigToAttributes(respGlobalConfigs, globalConfigModel) + if err != nil { + return fmt.Errorf("mapping alert config global config: %w", err) + } + + alertConfig, diags := types.ObjectValue(alertConfigTypes, map[string]attr.Value{ + "receivers": receiversList, + "route": route, + "global": globalConfig, + }) + if diags.HasError() { + return fmt.Errorf("converting alert config to TF type: %w", core.DiagsToError(diags)) + } + + // Check if the alert config is equal to the mock alert config + // This is done because the Alert Config cannot be removed from the instance, but can be unset by the user in the Terraform configuration + // If the alert config is equal to the mock alert config, we will map the Alert Config to an empty object in the Terraform state + // This is done to avoid inconsistent applies or non-empty plans after applying + mockAlertConfig, err := getMockAlertConfig(ctx) + if err != nil { + 
return fmt.Errorf("getting mock alert config: %w", err) + } + modelMockAlertConfig, diags := types.ObjectValueFrom(ctx, alertConfigTypes, mockAlertConfig) + if diags.HasError() { + return fmt.Errorf("converting mock alert config to TF type: %w", core.DiagsToError(diags)) + } + if alertConfig.Equal(modelMockAlertConfig) { + alertConfig = types.ObjectNull(alertConfigTypes) + } + + model.AlertConfig = alertConfig + return nil +} + +// getMockAlertConfig returns a default alert config to be set in the instance if the alert config is unset in the Terraform configuration +// +// This is done because the Alert Config cannot be removed from the instance, but can be unset by the user in the Terraform configuration. +// So, we set the Alert Config in the instance to our mock configuration and +// map the Alert Config to an empty object in the Terraform state if it matches the mock alert config +func getMockAlertConfig(ctx context.Context) (alertConfigModel, error) { + mockEmailConfig, diags := types.ObjectValue(emailConfigsTypes, map[string]attr.Value{ + "to": types.StringValue("123@gmail.com"), + "smart_host": types.StringValue("smtp.gmail.com:587"), + "from": types.StringValue("xxxx@gmail.com"), + "auth_username": types.StringValue("xxxx@gmail.com"), + "auth_password": types.StringValue("xxxxxxxxx"), + "auth_identity": types.StringValue("xxxx@gmail.com"), + }) + if diags.HasError() { + return alertConfigModel{}, fmt.Errorf("mapping email config: %w", core.DiagsToError(diags)) + } + + mockEmailConfigs, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: emailConfigsTypes}, []attr.Value{ + mockEmailConfig, + }) + if diags.HasError() { + return alertConfigModel{}, fmt.Errorf("mapping email configs: %w", core.DiagsToError(diags)) + } + + mockReceiver, diags := types.ObjectValue(receiversTypes, map[string]attr.Value{ + "name": types.StringValue("email-me"), + "email_configs": mockEmailConfigs, + "opsgenie_configs": types.ListNull(types.ObjectType{AttrTypes: 
opsgenieConfigsTypes}), + "webhooks_configs": types.ListNull(types.ObjectType{AttrTypes: webHooksConfigsTypes}), + }) + if diags.HasError() { + return alertConfigModel{}, fmt.Errorf("mapping receiver: %w", core.DiagsToError(diags)) + } + + mockReceivers, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: receiversTypes}, []attr.Value{ + mockReceiver, + }) + if diags.HasError() { + return alertConfigModel{}, fmt.Errorf("mapping receivers: %w", core.DiagsToError(diags)) + } + + mockGroupByList, diags := types.ListValueFrom(ctx, types.StringType, []attr.Value{ + types.StringValue("job"), + }) + if diags.HasError() { + return alertConfigModel{}, fmt.Errorf("mapping group by list: %w", core.DiagsToError(diags)) + } + + mockRoute, diags := types.ObjectValue(routeTypes, map[string]attr.Value{ + "receiver": types.StringValue("email-me"), + "group_by": mockGroupByList, + "group_wait": types.StringValue("30s"), + "group_interval": types.StringValue("5m"), + "repeat_interval": types.StringValue("4h"), + "match": types.MapNull(types.StringType), + "match_regex": types.MapNull(types.StringType), + "routes": types.ListNull(getRouteListType()), + }) + if diags.HasError() { + return alertConfigModel{}, fmt.Errorf("mapping route: %w", core.DiagsToError(diags)) + } + + mockGlobalConfig, diags := types.ObjectValue(globalConfigurationTypes, map[string]attr.Value{ + "opsgenie_api_key": types.StringNull(), + "opsgenie_api_url": types.StringNull(), + "resolve_timeout": types.StringValue("5m"), + "smtp_auth_identity": types.StringNull(), + "smtp_auth_password": types.StringNull(), + "smtp_auth_username": types.StringNull(), + "smtp_from": types.StringValue("observability@observability.stackit.cloud"), + "smtp_smart_host": types.StringNull(), + }) + if diags.HasError() { + return alertConfigModel{}, fmt.Errorf("mapping global config: %w", core.DiagsToError(diags)) + } + + return alertConfigModel{ + Receivers: mockReceivers, + Route: mockRoute, + GlobalConfiguration: 
mockGlobalConfig, + }, nil +} + +func mapGlobalConfigToAttributes(respGlobalConfigs *observability.Global, globalConfigsTF *globalConfigurationModel) (basetypes.ObjectValue, error) { + if respGlobalConfigs == nil { + return types.ObjectNull(globalConfigurationTypes), nil + } + + // This bypass is needed because these values are not returned in the API GET response + smtpSmartHost := respGlobalConfigs.SmtpSmarthost + smtpAuthIdentity := respGlobalConfigs.SmtpAuthIdentity + smtpAuthPassword := respGlobalConfigs.SmtpAuthPassword + smtpAuthUsername := respGlobalConfigs.SmtpAuthUsername + if globalConfigsTF != nil { + if respGlobalConfigs.SmtpSmarthost == nil && + !globalConfigsTF.SmtpSmartHost.IsNull() && !globalConfigsTF.SmtpSmartHost.IsUnknown() { + smtpSmartHost = utils.Ptr(globalConfigsTF.SmtpSmartHost.ValueString()) + } + if respGlobalConfigs.SmtpAuthIdentity == nil && + !globalConfigsTF.SmtpAuthIdentity.IsNull() && !globalConfigsTF.SmtpAuthIdentity.IsUnknown() { + smtpAuthIdentity = utils.Ptr(globalConfigsTF.SmtpAuthIdentity.ValueString()) + } + if respGlobalConfigs.SmtpAuthPassword == nil && + !globalConfigsTF.SmtpAuthPassword.IsNull() && !globalConfigsTF.SmtpAuthPassword.IsUnknown() { + smtpAuthPassword = utils.Ptr(globalConfigsTF.SmtpAuthPassword.ValueString()) + } + if respGlobalConfigs.SmtpAuthUsername == nil && + !globalConfigsTF.SmtpAuthUsername.IsNull() && !globalConfigsTF.SmtpAuthUsername.IsUnknown() { + smtpAuthUsername = utils.Ptr(globalConfigsTF.SmtpAuthUsername.ValueString()) + } + } + + globalConfigObject, diags := types.ObjectValue(globalConfigurationTypes, map[string]attr.Value{ + "opsgenie_api_key": types.StringPointerValue(respGlobalConfigs.OpsgenieApiKey), + "opsgenie_api_url": types.StringPointerValue(respGlobalConfigs.OpsgenieApiUrl), + "resolve_timeout": types.StringPointerValue(respGlobalConfigs.ResolveTimeout), + "smtp_from": types.StringPointerValue(respGlobalConfigs.SmtpFrom), + "smtp_auth_identity": 
types.StringPointerValue(smtpAuthIdentity), + "smtp_auth_password": types.StringPointerValue(smtpAuthPassword), + "smtp_auth_username": types.StringPointerValue(smtpAuthUsername), + "smtp_smart_host": types.StringPointerValue(smtpSmartHost), + }) + if diags.HasError() { + return types.ObjectNull(globalConfigurationTypes), fmt.Errorf("mapping global config: %w", core.DiagsToError(diags)) + } + + return globalConfigObject, nil +} + +func mapReceiversToAttributes(ctx context.Context, respReceivers *[]observability.Receivers) (basetypes.ListValue, error) { + if respReceivers == nil { + return types.ListNull(types.ObjectType{AttrTypes: receiversTypes}), nil + } + receiversList := []attr.Value{} + emptyList, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: receiversTypes}, []attr.Value{}) + if diags.HasError() { + // Should not happen + return emptyList, fmt.Errorf("mapping empty list: %w", core.DiagsToError(diags)) + } + + if len(*respReceivers) == 0 { + return emptyList, nil + } + + for i := range *respReceivers { + receiver := (*respReceivers)[i] + + emailConfigList := []attr.Value{} + if receiver.EmailConfigs != nil { + for _, emailConfig := range *receiver.EmailConfigs { + emailConfigMap := map[string]attr.Value{ + "auth_identity": types.StringPointerValue(emailConfig.AuthIdentity), + "auth_password": types.StringPointerValue(emailConfig.AuthPassword), + "auth_username": types.StringPointerValue(emailConfig.AuthUsername), + "from": types.StringPointerValue(emailConfig.From), + "smart_host": types.StringPointerValue(emailConfig.Smarthost), + "to": types.StringPointerValue(emailConfig.To), + } + emailConfigModel, diags := types.ObjectValue(emailConfigsTypes, emailConfigMap) + if diags.HasError() { + return emptyList, fmt.Errorf("mapping email config: %w", core.DiagsToError(diags)) + } + emailConfigList = append(emailConfigList, emailConfigModel) + } + } + + opsgenieConfigList := []attr.Value{} + if receiver.OpsgenieConfigs != nil { + for _, opsgenieConfig 
:= range *receiver.OpsgenieConfigs { + opsGenieConfigMap := map[string]attr.Value{ + "api_key": types.StringPointerValue(opsgenieConfig.ApiKey), + "api_url": types.StringPointerValue(opsgenieConfig.ApiUrl), + "tags": types.StringPointerValue(opsgenieConfig.Tags), + } + opsGenieConfigModel, diags := types.ObjectValue(opsgenieConfigsTypes, opsGenieConfigMap) + if diags.HasError() { + return emptyList, fmt.Errorf("mapping opsgenie config: %w", core.DiagsToError(diags)) + } + opsgenieConfigList = append(opsgenieConfigList, opsGenieConfigModel) + } + } + + webhooksConfigList := []attr.Value{} + if receiver.WebHookConfigs != nil { + for _, webhookConfig := range *receiver.WebHookConfigs { + webHookConfigsMap := map[string]attr.Value{ + "url": types.StringPointerValue(webhookConfig.Url), + "ms_teams": types.BoolPointerValue(webhookConfig.MsTeams), + } + webHookConfigsModel, diags := types.ObjectValue(webHooksConfigsTypes, webHookConfigsMap) + if diags.HasError() { + return emptyList, fmt.Errorf("mapping webhooks config: %w", core.DiagsToError(diags)) + } + webhooksConfigList = append(webhooksConfigList, webHookConfigsModel) + } + } + + if receiver.Name == nil { + return emptyList, fmt.Errorf("receiver name is nil") + } + + var emailConfigs basetypes.ListValue + if len(emailConfigList) == 0 { + emailConfigs = types.ListNull(types.ObjectType{AttrTypes: emailConfigsTypes}) + } else { + emailConfigs, diags = types.ListValueFrom(ctx, types.ObjectType{AttrTypes: emailConfigsTypes}, emailConfigList) + if diags.HasError() { + return emptyList, fmt.Errorf("mapping email configs: %w", core.DiagsToError(diags)) + } + } + + var opsGenieConfigs basetypes.ListValue + if len(opsgenieConfigList) == 0 { + opsGenieConfigs = types.ListNull(types.ObjectType{AttrTypes: opsgenieConfigsTypes}) + } else { + opsGenieConfigs, diags = types.ListValueFrom(ctx, types.ObjectType{AttrTypes: opsgenieConfigsTypes}, opsgenieConfigList) + if diags.HasError() { + return emptyList, fmt.Errorf("mapping 
opsgenie configs: %w", core.DiagsToError(diags)) + } + } + + var webHooksConfigs basetypes.ListValue + if len(webhooksConfigList) == 0 { + webHooksConfigs = types.ListNull(types.ObjectType{AttrTypes: webHooksConfigsTypes}) + } else { + webHooksConfigs, diags = types.ListValueFrom(ctx, types.ObjectType{AttrTypes: webHooksConfigsTypes}, webhooksConfigList) + if diags.HasError() { + return emptyList, fmt.Errorf("mapping webhooks configs: %w", core.DiagsToError(diags)) + } + } + + receiverMap := map[string]attr.Value{ + "name": types.StringPointerValue(receiver.Name), + "email_configs": emailConfigs, + "opsgenie_configs": opsGenieConfigs, + "webhooks_configs": webHooksConfigs, + } + + receiversModel, diags := types.ObjectValue(receiversTypes, receiverMap) + if diags.HasError() { + return emptyList, fmt.Errorf("mapping receiver: %w", core.DiagsToError(diags)) + } + + receiversList = append(receiversList, receiversModel) + } + + returnReceiversList, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: receiversTypes}, receiversList) + if diags.HasError() { + return emptyList, fmt.Errorf("mapping receivers list: %w", core.DiagsToError(diags)) + } + return returnReceiversList, nil +} + +func mapRouteToAttributes(ctx context.Context, route *observability.Route) (attr.Value, error) { + if route == nil { + return types.ObjectNull(routeTypes), nil + } + + groupByModel, diags := types.ListValueFrom(ctx, types.StringType, route.GroupBy) + if diags.HasError() { + return types.ObjectNull(routeTypes), fmt.Errorf("mapping group by: %w", core.DiagsToError(diags)) + } + + matchModel, diags := types.MapValueFrom(ctx, types.StringType, route.Match) + if diags.HasError() { + return types.ObjectNull(routeTypes), fmt.Errorf("mapping match: %w", core.DiagsToError(diags)) + } + + matchRegexModel, diags := types.MapValueFrom(ctx, types.StringType, route.MatchRe) + if diags.HasError() { + return types.ObjectNull(routeTypes), fmt.Errorf("mapping match regex: %w", 
core.DiagsToError(diags)) + } + + childRoutes, err := mapChildRoutesToAttributes(ctx, route.Routes) + if err != nil { + return types.ObjectNull(routeTypes), fmt.Errorf("mapping child routes: %w", err) + } + + routeMap := map[string]attr.Value{ + "group_by": groupByModel, + "group_interval": types.StringPointerValue(route.GroupInterval), + "group_wait": types.StringPointerValue(route.GroupWait), + "match": matchModel, + "match_regex": matchRegexModel, + "receiver": types.StringPointerValue(route.Receiver), + "repeat_interval": types.StringPointerValue(route.RepeatInterval), + "routes": childRoutes, + } + + routeModel, diags := types.ObjectValue(routeTypes, routeMap) + if diags.HasError() { + return types.ObjectNull(routeTypes), fmt.Errorf("converting route to TF types: %w", core.DiagsToError(diags)) + } + + return routeModel, nil +} + +// mapChildRoutesToAttributes maps the child routes to the Terraform attributes +// This should be a recursive function to handle nested child routes +// However, the API does not currently have the correct type for the child routes +// In the future, the current implementation should be the final case of the recursive function +func mapChildRoutesToAttributes(ctx context.Context, routes *[]observability.RouteSerializer) (basetypes.ListValue, error) { + nullList := types.ListNull(getRouteListType()) + if routes == nil { + return nullList, nil + } + + routesList := []attr.Value{} + for _, route := range *routes { + groupByModel, diags := types.ListValueFrom(ctx, types.StringType, route.GroupBy) + if diags.HasError() { + return nullList, fmt.Errorf("mapping group by: %w", core.DiagsToError(diags)) + } + + matchModel, diags := types.MapValueFrom(ctx, types.StringType, route.Match) + if diags.HasError() { + return nullList, fmt.Errorf("mapping match: %w", core.DiagsToError(diags)) + } + + matchRegexModel, diags := types.MapValueFrom(ctx, types.StringType, route.MatchRe) + if diags.HasError() { + return nullList, fmt.Errorf("mapping match 
regex: %w", core.DiagsToError(diags)) + } + + routeMap := map[string]attr.Value{ + "group_by": groupByModel, + "group_interval": types.StringPointerValue(route.GroupInterval), + "group_wait": types.StringPointerValue(route.GroupWait), + "match": matchModel, + "match_regex": matchRegexModel, + "receiver": types.StringPointerValue(route.Receiver), + "repeat_interval": types.StringPointerValue(route.RepeatInterval), + } + + routeModel, diags := types.ObjectValue(getRouteListType().AttrTypes, routeMap) + if diags.HasError() { + return types.ListNull(getRouteListType()), fmt.Errorf("converting child route to TF types: %w", core.DiagsToError(diags)) + } + + routesList = append(routesList, routeModel) + } + + returnRoutesList, diags := types.ListValueFrom(ctx, getRouteListType(), routesList) + if diags.HasError() { + return nullList, fmt.Errorf("mapping child routes list: %w", core.DiagsToError(diags)) + } + return returnRoutesList, nil +} + +func toCreatePayload(model *Model) (*observability.CreateInstancePayload, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + elements := model.Parameters.Elements() + pa := make(map[string]interface{}, len(elements)) + for k := range elements { + pa[k] = elements[k].String() + } + return &observability.CreateInstancePayload{ + Name: conversion.StringValueToPointer(model.Name), + PlanId: conversion.StringValueToPointer(model.PlanId), + Parameter: &pa, + }, nil +} + +func toUpdateMetricsStorageRetentionPayload(retentionDaysRaw, retentionDays5m, retentionDays1h *int64, resp *observability.GetMetricsStorageRetentionResponse) (*observability.UpdateMetricsStorageRetentionPayload, error) { + var retentionTimeRaw string + var retentionTime5m string + var retentionTime1h string + + if resp == nil || resp.MetricsRetentionTimeRaw == nil || resp.MetricsRetentionTime5m == nil || resp.MetricsRetentionTime1h == nil { + return nil, fmt.Errorf("nil response") + } + + if retentionDaysRaw == nil { + retentionTimeRaw = 
*resp.MetricsRetentionTimeRaw + } else { + retentionTimeRaw = fmt.Sprintf("%dd", *retentionDaysRaw) + } + + if retentionDays5m == nil { + retentionTime5m = *resp.MetricsRetentionTime5m + } else { + retentionTime5m = fmt.Sprintf("%dd", *retentionDays5m) + } + + if retentionDays1h == nil { + retentionTime1h = *resp.MetricsRetentionTime1h + } else { + retentionTime1h = fmt.Sprintf("%dd", *retentionDays1h) + } + + return &observability.UpdateMetricsStorageRetentionPayload{ + MetricsRetentionTimeRaw: &retentionTimeRaw, + MetricsRetentionTime5m: &retentionTime5m, + MetricsRetentionTime1h: &retentionTime1h, + }, nil +} + +func updateACL(ctx context.Context, projectId, instanceId string, acl []string, client *observability.APIClient) error { + payload := observability.UpdateACLPayload{ + Acl: utils.Ptr(acl), + } + + _, err := client.UpdateACL(ctx, instanceId, projectId).UpdateACLPayload(payload).Execute() + if err != nil { + return fmt.Errorf("updating ACL: %w", err) + } + + return nil +} + +func toUpdatePayload(model *Model) (*observability.UpdateInstancePayload, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + elements := model.Parameters.Elements() + pa := make(map[string]interface{}, len(elements)) + for k, v := range elements { + pa[k] = v.String() + } + return &observability.UpdateInstancePayload{ + Name: conversion.StringValueToPointer(model.Name), + PlanId: conversion.StringValueToPointer(model.PlanId), + Parameter: &pa, + }, nil +} + +func toUpdateAlertConfigPayload(ctx context.Context, model *alertConfigModel) (*observability.UpdateAlertConfigsPayload, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + if model.Receivers.IsNull() || model.Receivers.IsUnknown() { + return nil, fmt.Errorf("receivers in the model are null or unknown") + } + + if model.Route.IsNull() || model.Route.IsUnknown() { + return nil, fmt.Errorf("route in the model is null or unknown") + } + + var err error + + payload := 
observability.UpdateAlertConfigsPayload{} + + payload.Receivers, err = toReceiverPayload(ctx, model) + if err != nil { + return nil, fmt.Errorf("mapping receivers: %w", err) + } + + routeTF := routeModel{} + diags := model.Route.As(ctx, &routeTF, basetypes.ObjectAsOptions{}) + if diags.HasError() { + return nil, fmt.Errorf("mapping route: %w", core.DiagsToError(diags)) + } + + payload.Route, err = toRoutePayload(ctx, &routeTF) + if err != nil { + return nil, fmt.Errorf("mapping route: %w", err) + } + + if !model.GlobalConfiguration.IsNull() && !model.GlobalConfiguration.IsUnknown() { + payload.Global, err = toGlobalConfigPayload(ctx, model) + if err != nil { + return nil, fmt.Errorf("mapping global: %w", err) + } + } + + return &payload, nil +} + +func toReceiverPayload(ctx context.Context, model *alertConfigModel) (*[]observability.UpdateAlertConfigsPayloadReceiversInner, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + receiversModel := []receiversModel{} + diags := model.Receivers.ElementsAs(ctx, &receiversModel, false) + if diags.HasError() { + return nil, fmt.Errorf("mapping receivers: %w", core.DiagsToError(diags)) + } + + receivers := []observability.UpdateAlertConfigsPayloadReceiversInner{} + + for i := range receiversModel { + receiver := receiversModel[i] + receiverPayload := observability.UpdateAlertConfigsPayloadReceiversInner{ + Name: conversion.StringValueToPointer(receiver.Name), + } + + if !receiver.EmailConfigs.IsNull() && !receiver.EmailConfigs.IsUnknown() { + emailConfigs := []emailConfigsModel{} + diags := receiver.EmailConfigs.ElementsAs(ctx, &emailConfigs, false) + if diags.HasError() { + return nil, fmt.Errorf("mapping email configs: %w", core.DiagsToError(diags)) + } + payloadEmailConfigs := []observability.CreateAlertConfigReceiverPayloadEmailConfigsInner{} + for i := range emailConfigs { + emailConfig := emailConfigs[i] + payloadEmailConfigs = append(payloadEmailConfigs, 
observability.CreateAlertConfigReceiverPayloadEmailConfigsInner{ + AuthIdentity: conversion.StringValueToPointer(emailConfig.AuthIdentity), + AuthPassword: conversion.StringValueToPointer(emailConfig.AuthPassword), + AuthUsername: conversion.StringValueToPointer(emailConfig.AuthUsername), + From: conversion.StringValueToPointer(emailConfig.From), + Smarthost: conversion.StringValueToPointer(emailConfig.Smarthost), + To: conversion.StringValueToPointer(emailConfig.To), + }) + } + receiverPayload.EmailConfigs = &payloadEmailConfigs + } + + if !receiver.OpsGenieConfigs.IsNull() && !receiver.OpsGenieConfigs.IsUnknown() { + opsgenieConfigs := []opsgenieConfigsModel{} + diags := receiver.OpsGenieConfigs.ElementsAs(ctx, &opsgenieConfigs, false) + if diags.HasError() { + return nil, fmt.Errorf("mapping opsgenie configs: %w", core.DiagsToError(diags)) + } + payloadOpsGenieConfigs := []observability.CreateAlertConfigReceiverPayloadOpsgenieConfigsInner{} + for i := range opsgenieConfigs { + opsgenieConfig := opsgenieConfigs[i] + payloadOpsGenieConfigs = append(payloadOpsGenieConfigs, observability.CreateAlertConfigReceiverPayloadOpsgenieConfigsInner{ + ApiKey: conversion.StringValueToPointer(opsgenieConfig.ApiKey), + ApiUrl: conversion.StringValueToPointer(opsgenieConfig.ApiUrl), + Tags: conversion.StringValueToPointer(opsgenieConfig.Tags), + }) + } + receiverPayload.OpsgenieConfigs = &payloadOpsGenieConfigs + } + + if !receiver.WebHooksConfigs.IsNull() && !receiver.WebHooksConfigs.IsUnknown() { + receiverWebHooksConfigs := []webHooksConfigsModel{} + diags := receiver.WebHooksConfigs.ElementsAs(ctx, &receiverWebHooksConfigs, false) + if diags.HasError() { + return nil, fmt.Errorf("mapping webhooks configs: %w", core.DiagsToError(diags)) + } + payloadWebHooksConfigs := []observability.CreateAlertConfigReceiverPayloadWebHookConfigsInner{} + for i := range receiverWebHooksConfigs { + webHooksConfig := receiverWebHooksConfigs[i] + payloadWebHooksConfigs = 
append(payloadWebHooksConfigs, observability.CreateAlertConfigReceiverPayloadWebHookConfigsInner{ + Url: conversion.StringValueToPointer(webHooksConfig.Url), + MsTeams: conversion.BoolValueToPointer(webHooksConfig.MsTeams), + }) + } + receiverPayload.WebHookConfigs = &payloadWebHooksConfigs + } + + receivers = append(receivers, receiverPayload) + } + return &receivers, nil +} + +func toRoutePayload(ctx context.Context, routeTF *routeModel) (*observability.UpdateAlertConfigsPayloadRoute, error) { + if routeTF == nil { + return nil, fmt.Errorf("nil route model") + } + + var groupByPayload *[]string + var matchPayload *map[string]interface{} + var matchRegexPayload *map[string]interface{} + var childRoutesPayload *[]observability.CreateAlertConfigRoutePayloadRoutesInner + + if !routeTF.GroupBy.IsNull() && !routeTF.GroupBy.IsUnknown() { + groupByPayload = &[]string{} + diags := routeTF.GroupBy.ElementsAs(ctx, groupByPayload, false) + if diags.HasError() { + return nil, fmt.Errorf("mapping group by: %w", core.DiagsToError(diags)) + } + } + + if !routeTF.Match.IsNull() && !routeTF.Match.IsUnknown() { + matchMap, err := conversion.ToStringInterfaceMap(ctx, routeTF.Match) + if err != nil { + return nil, fmt.Errorf("mapping match: %w", err) + } + matchPayload = &matchMap + } + + if !routeTF.MatchRegex.IsNull() && !routeTF.MatchRegex.IsUnknown() { + matchRegexMap, err := conversion.ToStringInterfaceMap(ctx, routeTF.MatchRegex) + if err != nil { + return nil, fmt.Errorf("mapping match regex: %w", err) + } + matchRegexPayload = &matchRegexMap + } + + if !routeTF.Routes.IsNull() && !routeTF.Routes.IsUnknown() { + childRoutes := []routeModel{} + diags := routeTF.Routes.ElementsAs(ctx, &childRoutes, false) + if diags.HasError() { + // If there is an error, we will try to map the child routes as if they are the last child routes + // This is done because the last child routes in the recursion have a different structure (don't have the `routes` fields) + // and need to be unpacked 
to a different struct (routeModelNoRoutes) + lastChildRoutes := []routeModelNoRoutes{} + diags = routeTF.Routes.ElementsAs(ctx, &lastChildRoutes, true) + if diags.HasError() { + return nil, fmt.Errorf("mapping child routes: %w", core.DiagsToError(diags)) + } + for i := range lastChildRoutes { + childRoute := routeModel{ + GroupBy: lastChildRoutes[i].GroupBy, + GroupInterval: lastChildRoutes[i].GroupInterval, + GroupWait: lastChildRoutes[i].GroupWait, + Match: lastChildRoutes[i].Match, + MatchRegex: lastChildRoutes[i].MatchRegex, + Receiver: lastChildRoutes[i].Receiver, + RepeatInterval: lastChildRoutes[i].RepeatInterval, + Routes: types.ListNull(getRouteListType()), + } + childRoutes = append(childRoutes, childRoute) + } + } + + childRoutesList := []observability.CreateAlertConfigRoutePayloadRoutesInner{} + for i := range childRoutes { + childRoute := childRoutes[i] + childRoutePayload, err := toRoutePayload(ctx, &childRoute) + if err != nil { + return nil, fmt.Errorf("mapping child route: %w", err) + } + childRoutesList = append(childRoutesList, *toChildRoutePayload(childRoutePayload)) + } + + childRoutesPayload = &childRoutesList + } + + return &observability.UpdateAlertConfigsPayloadRoute{ + GroupBy: groupByPayload, + GroupInterval: conversion.StringValueToPointer(routeTF.GroupInterval), + GroupWait: conversion.StringValueToPointer(routeTF.GroupWait), + Match: matchPayload, + MatchRe: matchRegexPayload, + Receiver: conversion.StringValueToPointer(routeTF.Receiver), + RepeatInterval: conversion.StringValueToPointer(routeTF.RepeatInterval), + Routes: childRoutesPayload, + }, nil +} + +func toChildRoutePayload(in *observability.UpdateAlertConfigsPayloadRoute) *observability.CreateAlertConfigRoutePayloadRoutesInner { + if in == nil { + return nil + } + return &observability.CreateAlertConfigRoutePayloadRoutesInner{ + GroupBy: in.GroupBy, + GroupInterval: in.GroupInterval, + GroupWait: in.GroupWait, + Match: in.Match, + MatchRe: in.MatchRe, + Receiver: in.Receiver, + 
RepeatInterval: in.RepeatInterval, + // Routes not currently supported + } +} + +func toGlobalConfigPayload(ctx context.Context, model *alertConfigModel) (*observability.UpdateAlertConfigsPayloadGlobal, error) { + globalConfigModel := globalConfigurationModel{} + diags := model.GlobalConfiguration.As(ctx, &globalConfigModel, basetypes.ObjectAsOptions{}) + if diags.HasError() { + return nil, fmt.Errorf("mapping global configuration: %w", core.DiagsToError(diags)) + } + + return &observability.UpdateAlertConfigsPayloadGlobal{ + OpsgenieApiKey: conversion.StringValueToPointer(globalConfigModel.OpsgenieApiKey), + OpsgenieApiUrl: conversion.StringValueToPointer(globalConfigModel.OpsgenieApiUrl), + ResolveTimeout: conversion.StringValueToPointer(globalConfigModel.ResolveTimeout), + SmtpAuthIdentity: conversion.StringValueToPointer(globalConfigModel.SmtpAuthIdentity), + SmtpAuthPassword: conversion.StringValueToPointer(globalConfigModel.SmtpAuthPassword), + SmtpAuthUsername: conversion.StringValueToPointer(globalConfigModel.SmtpAuthUsername), + SmtpFrom: conversion.StringValueToPointer(globalConfigModel.SmtpFrom), + SmtpSmarthost: conversion.StringValueToPointer(globalConfigModel.SmtpSmartHost), + }, nil +} + +func (r *instanceResource) loadPlanId(ctx context.Context, model *Model) error { + projectId := model.ProjectId.ValueString() + res, err := r.client.ListPlans(ctx, projectId).Execute() + if err != nil { + return err + } + + planName := model.PlanName.ValueString() + avl := "" + plans := *res.Plans + for i := range plans { + p := plans[i] + if p.Name == nil { + continue + } + if strings.EqualFold(*p.Name, planName) && p.PlanId != nil { + model.PlanId = types.StringPointerValue(p.PlanId) + break + } + avl = fmt.Sprintf("%s\n- %s", avl, *p.Name) + } + if model.PlanId.ValueString() == "" { + return fmt.Errorf("couldn't find plan_name '%s', available names are: %s", planName, avl) + } + return nil +} diff --git 
a/stackit/internal/services/observability/instance/resource_test.go b/stackit/internal/services/observability/instance/resource_test.go new file mode 100644 index 00000000..0c6a3aa5 --- /dev/null +++ b/stackit/internal/services/observability/instance/resource_test.go @@ -0,0 +1,1562 @@ +package observability + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/observability" +) + +func fixtureEmailConfigsModel() basetypes.ListValue { + return types.ListValueMust(types.ObjectType{AttrTypes: emailConfigsTypes}, []attr.Value{ + types.ObjectValueMust(emailConfigsTypes, map[string]attr.Value{ + "auth_identity": types.StringValue("identity"), + "auth_password": types.StringValue("password"), + "auth_username": types.StringValue("username"), + "from": types.StringValue("notification@example.com"), + "smart_host": types.StringValue("smtp.example.com"), + "to": types.StringValue("me@example.com"), + }), + }) +} + +func fixtureOpsGenieConfigsModel() basetypes.ListValue { + return types.ListValueMust(types.ObjectType{AttrTypes: opsgenieConfigsTypes}, []attr.Value{ + types.ObjectValueMust(opsgenieConfigsTypes, map[string]attr.Value{ + "api_key": types.StringValue("key"), + "tags": types.StringValue("tag"), + "api_url": types.StringValue("ops.example.com"), + }), + }) +} + +func 
fixtureWebHooksConfigsModel() basetypes.ListValue { + return types.ListValueMust(types.ObjectType{AttrTypes: webHooksConfigsTypes}, []attr.Value{ + types.ObjectValueMust(webHooksConfigsTypes, map[string]attr.Value{ + "url": types.StringValue("http://example.com"), + "ms_teams": types.BoolValue(true), + }), + }) +} + +func fixtureReceiverModel(emailConfigs, opsGenieConfigs, webHooksConfigs basetypes.ListValue) basetypes.ObjectValue { + return types.ObjectValueMust(receiversTypes, map[string]attr.Value{ + "name": types.StringValue("name"), + "email_configs": emailConfigs, + "opsgenie_configs": opsGenieConfigs, + "webhooks_configs": webHooksConfigs, + }) +} + +func fixtureRouteModel() basetypes.ObjectValue { + return types.ObjectValueMust(routeTypes, map[string]attr.Value{ + "group_by": types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("label1"), + types.StringValue("label2"), + }), + "group_interval": types.StringValue("1m"), + "group_wait": types.StringValue("1m"), + "match": types.MapValueMust(types.StringType, map[string]attr.Value{"key": types.StringValue("value")}), + "match_regex": types.MapValueMust(types.StringType, map[string]attr.Value{"key": types.StringValue("value")}), + "receiver": types.StringValue("name"), + "repeat_interval": types.StringValue("1m"), + // "routes": types.ListNull(getRouteListType()), + "routes": types.ListValueMust(getRouteListType(), []attr.Value{ + types.ObjectValueMust(getRouteListType().AttrTypes, map[string]attr.Value{ + "group_by": types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("label1"), + types.StringValue("label2"), + }), + "group_interval": types.StringValue("1m"), + "group_wait": types.StringValue("1m"), + "match": types.MapValueMust(types.StringType, map[string]attr.Value{"key": types.StringValue("value")}), + "match_regex": types.MapValueMust(types.StringType, map[string]attr.Value{"key": types.StringValue("value")}), + "receiver": types.StringValue("name"), + 
"repeat_interval": types.StringValue("1m"), + }), + }), + }) +} + +func fixtureNullRouteModel() basetypes.ObjectValue { + return types.ObjectValueMust(routeTypes, map[string]attr.Value{ + "group_by": types.ListNull(types.StringType), + "group_interval": types.StringNull(), + "group_wait": types.StringNull(), + "match": types.MapNull(types.StringType), + "match_regex": types.MapNull(types.StringType), + "receiver": types.StringNull(), + "repeat_interval": types.StringNull(), + "routes": types.ListNull(getRouteListType()), + }) +} + +func fixtureGlobalConfigModel() basetypes.ObjectValue { + return types.ObjectValueMust(globalConfigurationTypes, map[string]attr.Value{ + "opsgenie_api_key": types.StringValue("key"), + "opsgenie_api_url": types.StringValue("ops.example.com"), + "resolve_timeout": types.StringValue("1m"), + "smtp_auth_identity": types.StringValue("identity"), + "smtp_auth_username": types.StringValue("username"), + "smtp_auth_password": types.StringValue("password"), + "smtp_from": types.StringValue("me@example.com"), + "smtp_smart_host": types.StringValue("smtp.example.com:25"), + }) +} + +func fixtureNullGlobalConfigModel() basetypes.ObjectValue { + return types.ObjectValueMust(globalConfigurationTypes, map[string]attr.Value{ + "opsgenie_api_key": types.StringNull(), + "opsgenie_api_url": types.StringNull(), + "resolve_timeout": types.StringNull(), + "smtp_auth_identity": types.StringNull(), + "smtp_auth_username": types.StringNull(), + "smtp_auth_password": types.StringNull(), + "smtp_from": types.StringNull(), + "smtp_smart_host": types.StringNull(), + }) +} + +func fixtureEmailConfigsPayload() observability.CreateAlertConfigReceiverPayloadEmailConfigsInner { + return observability.CreateAlertConfigReceiverPayloadEmailConfigsInner{ + AuthIdentity: utils.Ptr("identity"), + AuthPassword: utils.Ptr("password"), + AuthUsername: utils.Ptr("username"), + From: utils.Ptr("notification@example.com"), + Smarthost: utils.Ptr("smtp.example.com"), + To: 
utils.Ptr("me@example.com"), + } +} + +func fixtureOpsGenieConfigsPayload() observability.CreateAlertConfigReceiverPayloadOpsgenieConfigsInner { + return observability.CreateAlertConfigReceiverPayloadOpsgenieConfigsInner{ + ApiKey: utils.Ptr("key"), + Tags: utils.Ptr("tag"), + ApiUrl: utils.Ptr("ops.example.com"), + } +} + +func fixtureWebHooksConfigsPayload() observability.CreateAlertConfigReceiverPayloadWebHookConfigsInner { + return observability.CreateAlertConfigReceiverPayloadWebHookConfigsInner{ + Url: utils.Ptr("http://example.com"), + MsTeams: utils.Ptr(true), + } +} + +func fixtureReceiverPayload(emailConfigs *[]observability.CreateAlertConfigReceiverPayloadEmailConfigsInner, opsGenieConfigs *[]observability.CreateAlertConfigReceiverPayloadOpsgenieConfigsInner, webHooksConfigs *[]observability.CreateAlertConfigReceiverPayloadWebHookConfigsInner) observability.UpdateAlertConfigsPayloadReceiversInner { + return observability.UpdateAlertConfigsPayloadReceiversInner{ + EmailConfigs: emailConfigs, + Name: utils.Ptr("name"), + OpsgenieConfigs: opsGenieConfigs, + WebHookConfigs: webHooksConfigs, + } +} + +func fixtureRoutePayload() *observability.UpdateAlertConfigsPayloadRoute { + return &observability.UpdateAlertConfigsPayloadRoute{ + GroupBy: utils.Ptr([]string{"label1", "label2"}), + GroupInterval: utils.Ptr("1m"), + GroupWait: utils.Ptr("1m"), + Match: &map[string]interface{}{"key": "value"}, + MatchRe: &map[string]interface{}{"key": "value"}, + Receiver: utils.Ptr("name"), + RepeatInterval: utils.Ptr("1m"), + Routes: &[]observability.CreateAlertConfigRoutePayloadRoutesInner{ + { + GroupBy: utils.Ptr([]string{"label1", "label2"}), + GroupInterval: utils.Ptr("1m"), + GroupWait: utils.Ptr("1m"), + Match: &map[string]interface{}{"key": "value"}, + MatchRe: &map[string]interface{}{"key": "value"}, + Receiver: utils.Ptr("name"), + RepeatInterval: utils.Ptr("1m"), + }, + }, + } +} + +func fixtureGlobalConfigPayload() *observability.UpdateAlertConfigsPayloadGlobal { 
+ return &observability.UpdateAlertConfigsPayloadGlobal{ + OpsgenieApiKey: utils.Ptr("key"), + OpsgenieApiUrl: utils.Ptr("ops.example.com"), + ResolveTimeout: utils.Ptr("1m"), + SmtpAuthIdentity: utils.Ptr("identity"), + SmtpAuthUsername: utils.Ptr("username"), + SmtpAuthPassword: utils.Ptr("password"), + SmtpFrom: utils.Ptr("me@example.com"), + SmtpSmarthost: utils.Ptr("smtp.example.com:25"), + } +} + +func fixtureReceiverResponse(emailConfigs *[]observability.EmailConfig, opsGenieConfigs *[]observability.OpsgenieConfig, webhookConfigs *[]observability.WebHook) observability.Receivers { + return observability.Receivers{ + Name: utils.Ptr("name"), + EmailConfigs: emailConfigs, + OpsgenieConfigs: opsGenieConfigs, + WebHookConfigs: webhookConfigs, + } +} + +func fixtureEmailConfigsResponse() observability.EmailConfig { + return observability.EmailConfig{ + AuthIdentity: utils.Ptr("identity"), + AuthPassword: utils.Ptr("password"), + AuthUsername: utils.Ptr("username"), + From: utils.Ptr("notification@example.com"), + Smarthost: utils.Ptr("smtp.example.com"), + To: utils.Ptr("me@example.com"), + } +} + +func fixtureOpsGenieConfigsResponse() observability.OpsgenieConfig { + return observability.OpsgenieConfig{ + ApiKey: utils.Ptr("key"), + Tags: utils.Ptr("tag"), + ApiUrl: utils.Ptr("ops.example.com"), + } +} + +func fixtureWebHooksConfigsResponse() observability.WebHook { + return observability.WebHook{ + Url: utils.Ptr("http://example.com"), + MsTeams: utils.Ptr(true), + } +} + +func fixtureRouteResponse() *observability.Route { + return &observability.Route{ + GroupBy: utils.Ptr([]string{"label1", "label2"}), + GroupInterval: utils.Ptr("1m"), + GroupWait: utils.Ptr("1m"), + Match: &map[string]string{"key": "value"}, + MatchRe: &map[string]string{"key": "value"}, + Receiver: utils.Ptr("name"), + RepeatInterval: utils.Ptr("1m"), + Routes: &[]observability.RouteSerializer{ + { + GroupBy: utils.Ptr([]string{"label1", "label2"}), + GroupInterval: utils.Ptr("1m"), + 
GroupWait: utils.Ptr("1m"), + Match: &map[string]string{"key": "value"}, + MatchRe: &map[string]string{"key": "value"}, + Receiver: utils.Ptr("name"), + RepeatInterval: utils.Ptr("1m"), + }, + }, + } +} + +func fixtureGlobalConfigResponse() *observability.Global { + return &observability.Global{ + OpsgenieApiKey: utils.Ptr("key"), + OpsgenieApiUrl: utils.Ptr("ops.example.com"), + ResolveTimeout: utils.Ptr("1m"), + SmtpAuthIdentity: utils.Ptr("identity"), + SmtpAuthUsername: utils.Ptr("username"), + SmtpAuthPassword: utils.Ptr("password"), + SmtpFrom: utils.Ptr("me@example.com"), + SmtpSmarthost: utils.Ptr("smtp.example.com:25"), + } +} + +func fixtureRouteAttributeSchema(route *schema.ListNestedAttribute, isDatasource bool) map[string]schema.Attribute { + attributeMap := map[string]schema.Attribute{ + "group_by": schema.ListAttribute{ + Description: routeDescriptions["group_by"], + Optional: !isDatasource, + Computed: isDatasource, + ElementType: types.StringType, + }, + "group_interval": schema.StringAttribute{ + Description: routeDescriptions["group_interval"], + Optional: !isDatasource, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "group_wait": schema.StringAttribute{ + Description: routeDescriptions["group_wait"], + Optional: !isDatasource, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "match": schema.MapAttribute{ + Description: routeDescriptions["match"], + Optional: !isDatasource, + Computed: isDatasource, + ElementType: types.StringType, + }, + "match_regex": schema.MapAttribute{ + Description: routeDescriptions["match_regex"], + Optional: !isDatasource, + Computed: isDatasource, + ElementType: types.StringType, + }, + "receiver": schema.StringAttribute{ + Description: routeDescriptions["receiver"], + Required: !isDatasource, + Computed: isDatasource, + }, + "repeat_interval": schema.StringAttribute{ + Description: 
routeDescriptions["repeat_interval"], + Optional: !isDatasource, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + } + if route != nil { + attributeMap["routes"] = *route + } + return attributeMap +} + +func TestMapFields(t *testing.T) { + tests := []struct { + description string + instanceResp *observability.GetInstanceResponse + listACLResp *observability.ListACLResponse + getMetricsRetentionResp *observability.GetMetricsStorageRetentionResponse + expected Model + isValid bool + }{ + { + "default_ok", + &observability.GetInstanceResponse{ + Id: utils.Ptr("iid"), + }, + &observability.ListACLResponse{}, + &observability.GetMetricsStorageRetentionResponse{ + MetricsRetentionTimeRaw: utils.Ptr("60d"), + MetricsRetentionTime1h: utils.Ptr("30d"), + MetricsRetentionTime5m: utils.Ptr("7d"), + }, + Model{ + Id: types.StringValue("pid,iid"), + ProjectId: types.StringValue("pid"), + InstanceId: types.StringValue("iid"), + PlanId: types.StringNull(), + PlanName: types.StringNull(), + Name: types.StringNull(), + Parameters: types.MapNull(types.StringType), + ACL: types.SetNull(types.StringType), + MetricsRetentionDays: types.Int64Value(60), + MetricsRetentionDays1hDownsampling: types.Int64Value(30), + MetricsRetentionDays5mDownsampling: types.Int64Value(7), + }, + true, + }, + { + "values_ok", + &observability.GetInstanceResponse{ + Id: utils.Ptr("iid"), + Name: utils.Ptr("name"), + PlanName: utils.Ptr("plan1"), + PlanId: utils.Ptr("planId"), + Parameters: &map[string]string{"key": "value"}, + Instance: &observability.InstanceSensitiveData{ + MetricsRetentionTimeRaw: utils.Ptr(int64(60)), + MetricsRetentionTime1h: utils.Ptr(int64(30)), + MetricsRetentionTime5m: utils.Ptr(int64(7)), + }, + }, + &observability.ListACLResponse{ + Acl: &[]string{ + "1.1.1.1/32", + }, + Message: utils.Ptr("message"), + }, + &observability.GetMetricsStorageRetentionResponse{ + MetricsRetentionTimeRaw: utils.Ptr("60d"), + 
MetricsRetentionTime1h: utils.Ptr("30d"), + MetricsRetentionTime5m: utils.Ptr("7d"), + }, + Model{ + Id: types.StringValue("pid,iid"), + ProjectId: types.StringValue("pid"), + Name: types.StringValue("name"), + InstanceId: types.StringValue("iid"), + PlanId: types.StringValue("planId"), + PlanName: types.StringValue("plan1"), + Parameters: toTerraformStringMapMust(context.Background(), map[string]string{"key": "value"}), + ACL: types.SetValueMust(types.StringType, []attr.Value{ + types.StringValue("1.1.1.1/32"), + }), + MetricsRetentionDays: types.Int64Value(60), + MetricsRetentionDays1hDownsampling: types.Int64Value(30), + MetricsRetentionDays5mDownsampling: types.Int64Value(7), + }, + true, + }, + { + "values_ok_multiple_acls", + &observability.GetInstanceResponse{ + Id: utils.Ptr("iid"), + Name: utils.Ptr("name"), + PlanName: utils.Ptr("plan1"), + PlanId: utils.Ptr("planId"), + Parameters: &map[string]string{"key": "value"}, + }, + &observability.ListACLResponse{ + Acl: &[]string{ + "1.1.1.1/32", + "8.8.8.8/32", + }, + Message: utils.Ptr("message"), + }, + &observability.GetMetricsStorageRetentionResponse{ + MetricsRetentionTimeRaw: utils.Ptr("60d"), + MetricsRetentionTime1h: utils.Ptr("30d"), + MetricsRetentionTime5m: utils.Ptr("7d"), + }, + Model{ + Id: types.StringValue("pid,iid"), + ProjectId: types.StringValue("pid"), + Name: types.StringValue("name"), + InstanceId: types.StringValue("iid"), + PlanId: types.StringValue("planId"), + PlanName: types.StringValue("plan1"), + Parameters: toTerraformStringMapMust(context.Background(), map[string]string{"key": "value"}), + ACL: types.SetValueMust(types.StringType, []attr.Value{ + types.StringValue("1.1.1.1/32"), + types.StringValue("8.8.8.8/32"), + }), + MetricsRetentionDays: types.Int64Value(60), + MetricsRetentionDays1hDownsampling: types.Int64Value(30), + MetricsRetentionDays5mDownsampling: types.Int64Value(7), + }, + true, + }, + { + "nullable_fields_ok", + &observability.GetInstanceResponse{ + Id: 
utils.Ptr("iid"), + Name: nil, + }, + &observability.ListACLResponse{ + Acl: &[]string{}, + Message: nil, + }, + &observability.GetMetricsStorageRetentionResponse{ + MetricsRetentionTimeRaw: utils.Ptr("60d"), + MetricsRetentionTime1h: utils.Ptr("30d"), + MetricsRetentionTime5m: utils.Ptr("7d"), + }, + Model{ + Id: types.StringValue("pid,iid"), + ProjectId: types.StringValue("pid"), + InstanceId: types.StringValue("iid"), + PlanId: types.StringNull(), + PlanName: types.StringNull(), + Name: types.StringNull(), + Parameters: types.MapNull(types.StringType), + ACL: types.SetNull(types.StringType), + MetricsRetentionDays: types.Int64Value(60), + MetricsRetentionDays1hDownsampling: types.Int64Value(30), + MetricsRetentionDays5mDownsampling: types.Int64Value(7), + }, + true, + }, + { + "response_nil_fail", + nil, + nil, + nil, + Model{}, + false, + }, + { + "no_resource_id", + &observability.GetInstanceResponse{}, + nil, + nil, + Model{}, + false, + }, + { + "empty metrics retention", + &observability.GetInstanceResponse{ + Id: utils.Ptr("iid"), + Name: nil, + }, + &observability.ListACLResponse{ + Acl: &[]string{}, + Message: nil, + }, + &observability.GetMetricsStorageRetentionResponse{}, + Model{}, + false, + }, + { + "nil metrics retention", + &observability.GetInstanceResponse{ + Id: utils.Ptr("iid"), + Name: nil, + }, + &observability.ListACLResponse{ + Acl: &[]string{}, + Message: nil, + }, + nil, + Model{}, + false, + }, + { + "update metrics retention", + &observability.GetInstanceResponse{ + Id: utils.Ptr("iid"), + Name: utils.Ptr("name"), + PlanName: utils.Ptr("plan1"), + PlanId: utils.Ptr("planId"), + Parameters: &map[string]string{"key": "value"}, + Instance: &observability.InstanceSensitiveData{ + MetricsRetentionTimeRaw: utils.Ptr(int64(30)), + MetricsRetentionTime1h: utils.Ptr(int64(15)), + MetricsRetentionTime5m: utils.Ptr(int64(10)), + }, + }, + &observability.ListACLResponse{ + Acl: &[]string{ + "1.1.1.1/32", + }, + Message: utils.Ptr("message"), + }, 
+ &observability.GetMetricsStorageRetentionResponse{ + MetricsRetentionTimeRaw: utils.Ptr("60d"), + MetricsRetentionTime1h: utils.Ptr("30d"), + MetricsRetentionTime5m: utils.Ptr("7d"), + }, + Model{ + Id: types.StringValue("pid,iid"), + ProjectId: types.StringValue("pid"), + Name: types.StringValue("name"), + InstanceId: types.StringValue("iid"), + PlanId: types.StringValue("planId"), + PlanName: types.StringValue("plan1"), + Parameters: toTerraformStringMapMust(context.Background(), map[string]string{"key": "value"}), + ACL: types.SetValueMust(types.StringType, []attr.Value{ + types.StringValue("1.1.1.1/32"), + }), + MetricsRetentionDays: types.Int64Value(60), + MetricsRetentionDays1hDownsampling: types.Int64Value(30), + MetricsRetentionDays5mDownsampling: types.Int64Value(7), + }, + true, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + state := &Model{ + ProjectId: tt.expected.ProjectId, + ACL: types.SetNull(types.StringType), + } + err := mapFields(context.Background(), tt.instanceResp, state) + aclErr := mapACLField(tt.listACLResp, state) + metricsErr := mapMetricsRetentionField(tt.getMetricsRetentionResp, state) + if !tt.isValid && err == nil && aclErr == nil && metricsErr == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && (err != nil || aclErr != nil || metricsErr != nil) { + t.Fatalf("Should not have failed: %v", err) + } + + if tt.isValid { + diff := cmp.Diff(state, &tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func TestMapAlertConfigField(t *testing.T) { + tests := []struct { + description string + alertConfigResp *observability.GetAlertConfigsResponse + expected Model + isValid bool + }{ + { + description: "basic_ok", + alertConfigResp: &observability.GetAlertConfigsResponse{ + Data: &observability.Alert{ + Receivers: &[]observability.Receivers{ + fixtureReceiverResponse( + &[]observability.EmailConfig{ + fixtureEmailConfigsResponse(), + }, + 
&[]observability.OpsgenieConfig{ + fixtureOpsGenieConfigsResponse(), + }, + &[]observability.WebHook{ + fixtureWebHooksConfigsResponse(), + }, + ), + }, + Route: fixtureRouteResponse(), + Global: fixtureGlobalConfigResponse(), + }, + }, + expected: Model{ + ACL: types.SetNull(types.StringType), + Parameters: types.MapNull(types.StringType), + AlertConfig: types.ObjectValueMust(alertConfigTypes, map[string]attr.Value{ + "receivers": types.ListValueMust(types.ObjectType{AttrTypes: receiversTypes}, []attr.Value{ + fixtureReceiverModel( + fixtureEmailConfigsModel(), + fixtureOpsGenieConfigsModel(), + fixtureWebHooksConfigsModel(), + ), + }), + "route": fixtureRouteModel(), + "global": fixtureGlobalConfigModel(), + }), + }, + isValid: true, + }, + { + description: "receivers only emailconfigs", + alertConfigResp: &observability.GetAlertConfigsResponse{ + Data: &observability.Alert{ + Receivers: &[]observability.Receivers{ + fixtureReceiverResponse( + &[]observability.EmailConfig{ + fixtureEmailConfigsResponse(), + }, + nil, + nil, + ), + }, + Route: fixtureRouteResponse(), + }, + }, + expected: Model{ + ACL: types.SetNull(types.StringType), + Parameters: types.MapNull(types.StringType), + AlertConfig: types.ObjectValueMust(alertConfigTypes, map[string]attr.Value{ + "receivers": types.ListValueMust(types.ObjectType{AttrTypes: receiversTypes}, []attr.Value{ + fixtureReceiverModel( + fixtureEmailConfigsModel(), + types.ListNull(types.ObjectType{AttrTypes: opsgenieConfigsTypes}), + types.ListNull(types.ObjectType{AttrTypes: webHooksConfigsTypes}), + ), + }), + "route": fixtureRouteModel(), + "global": types.ObjectNull(globalConfigurationTypes), + }), + }, + isValid: true, + }, + { + description: "receivers only opsgenieconfigs", + alertConfigResp: &observability.GetAlertConfigsResponse{ + Data: &observability.Alert{ + Receivers: &[]observability.Receivers{ + fixtureReceiverResponse( + nil, + &[]observability.OpsgenieConfig{ + fixtureOpsGenieConfigsResponse(), + }, + nil, + 
), + }, + Route: fixtureRouteResponse(), + }, + }, + expected: Model{ + ACL: types.SetNull(types.StringType), + Parameters: types.MapNull(types.StringType), + AlertConfig: types.ObjectValueMust(alertConfigTypes, map[string]attr.Value{ + "receivers": types.ListValueMust(types.ObjectType{AttrTypes: receiversTypes}, []attr.Value{ + fixtureReceiverModel( + types.ListNull(types.ObjectType{AttrTypes: emailConfigsTypes}), + fixtureOpsGenieConfigsModel(), + types.ListNull(types.ObjectType{AttrTypes: webHooksConfigsTypes}), + ), + }), + "route": fixtureRouteModel(), + "global": types.ObjectNull(globalConfigurationTypes), + }), + }, + isValid: true, + }, + { + description: "receivers only webhooksconfigs", + alertConfigResp: &observability.GetAlertConfigsResponse{ + Data: &observability.Alert{ + Receivers: &[]observability.Receivers{ + fixtureReceiverResponse( + nil, + nil, + &[]observability.WebHook{ + fixtureWebHooksConfigsResponse(), + }, + ), + }, + Route: fixtureRouteResponse(), + }, + }, + expected: Model{ + ACL: types.SetNull(types.StringType), + Parameters: types.MapNull(types.StringType), + AlertConfig: types.ObjectValueMust(alertConfigTypes, map[string]attr.Value{ + "receivers": types.ListValueMust(types.ObjectType{AttrTypes: receiversTypes}, []attr.Value{ + fixtureReceiverModel( + types.ListNull(types.ObjectType{AttrTypes: emailConfigsTypes}), + types.ListNull(types.ObjectType{AttrTypes: opsgenieConfigsTypes}), + fixtureWebHooksConfigsModel(), + ), + }), + "route": fixtureRouteModel(), + "global": types.ObjectNull(globalConfigurationTypes), + }), + }, + isValid: true, + }, + { + description: "no receivers, no routes", + alertConfigResp: &observability.GetAlertConfigsResponse{ + Data: &observability.Alert{ + Receivers: &[]observability.Receivers{}, + Route: &observability.Route{}, + }, + }, + expected: Model{ + ACL: types.SetNull(types.StringType), + Parameters: types.MapNull(types.StringType), + AlertConfig: types.ObjectValueMust(alertConfigTypes, 
map[string]attr.Value{ + "receivers": types.ListValueMust(types.ObjectType{AttrTypes: receiversTypes}, []attr.Value{}), + "route": fixtureNullRouteModel(), + "global": types.ObjectNull(globalConfigurationTypes), + }), + }, + isValid: true, + }, + { + description: "no receivers, default routes", + alertConfigResp: &observability.GetAlertConfigsResponse{ + Data: &observability.Alert{ + Receivers: &[]observability.Receivers{}, + Route: fixtureRouteResponse(), + }, + }, + expected: Model{ + ACL: types.SetNull(types.StringType), + Parameters: types.MapNull(types.StringType), + AlertConfig: types.ObjectValueMust(alertConfigTypes, map[string]attr.Value{ + "receivers": types.ListValueMust(types.ObjectType{AttrTypes: receiversTypes}, []attr.Value{}), + "route": fixtureRouteModel(), + "global": types.ObjectNull(globalConfigurationTypes), + }), + }, + isValid: true, + }, + { + description: "default receivers, no routes", + alertConfigResp: &observability.GetAlertConfigsResponse{ + Data: &observability.Alert{ + Receivers: &[]observability.Receivers{ + fixtureReceiverResponse( + &[]observability.EmailConfig{ + fixtureEmailConfigsResponse(), + }, + &[]observability.OpsgenieConfig{ + fixtureOpsGenieConfigsResponse(), + }, + &[]observability.WebHook{ + fixtureWebHooksConfigsResponse(), + }, + ), + }, + Route: &observability.Route{}, + }, + }, + expected: Model{ + ACL: types.SetNull(types.StringType), + Parameters: types.MapNull(types.StringType), + AlertConfig: types.ObjectValueMust(alertConfigTypes, map[string]attr.Value{ + "receivers": types.ListValueMust(types.ObjectType{AttrTypes: receiversTypes}, []attr.Value{ + fixtureReceiverModel( + fixtureEmailConfigsModel(), + fixtureOpsGenieConfigsModel(), + fixtureWebHooksConfigsModel(), + ), + }), + "route": fixtureNullRouteModel(), + "global": types.ObjectNull(globalConfigurationTypes), + }), + }, + isValid: true, + }, + { + description: "nil receivers", + alertConfigResp: &observability.GetAlertConfigsResponse{ + Data: 
&observability.Alert{ + Receivers: nil, + Route: fixtureRouteResponse(), + }, + }, + expected: Model{ + ACL: types.SetNull(types.StringType), + Parameters: types.MapNull(types.StringType), + AlertConfig: types.ObjectValueMust(alertConfigTypes, map[string]attr.Value{ + "receivers": types.ListNull(types.ObjectType{AttrTypes: receiversTypes}), + "route": fixtureRouteModel(), + "global": types.ObjectNull(globalConfigurationTypes), + }), + }, + isValid: true, + }, + { + description: "nil route", + alertConfigResp: &observability.GetAlertConfigsResponse{ + Data: &observability.Alert{ + Receivers: &[]observability.Receivers{ + fixtureReceiverResponse( + &[]observability.EmailConfig{ + fixtureEmailConfigsResponse(), + }, + &[]observability.OpsgenieConfig{ + fixtureOpsGenieConfigsResponse(), + }, + &[]observability.WebHook{ + fixtureWebHooksConfigsResponse(), + }, + ), + }, + Route: nil, + }, + }, + expected: Model{ + ACL: types.SetNull(types.StringType), + Parameters: types.MapNull(types.StringType), + AlertConfig: types.ObjectValueMust(alertConfigTypes, map[string]attr.Value{ + "receivers": types.ListValueMust(types.ObjectType{AttrTypes: receiversTypes}, []attr.Value{ + fixtureReceiverModel( + fixtureEmailConfigsModel(), + fixtureOpsGenieConfigsModel(), + fixtureWebHooksConfigsModel(), + ), + }), + "route": types.ObjectNull(routeTypes), + "global": types.ObjectNull(globalConfigurationTypes), + }), + }, + isValid: true, + }, + { + description: "empty global options", + alertConfigResp: &observability.GetAlertConfigsResponse{ + Data: &observability.Alert{ + Receivers: &[]observability.Receivers{ + fixtureReceiverResponse( + &[]observability.EmailConfig{ + fixtureEmailConfigsResponse(), + }, + &[]observability.OpsgenieConfig{ + fixtureOpsGenieConfigsResponse(), + }, + &[]observability.WebHook{ + fixtureWebHooksConfigsResponse(), + }, + ), + }, + Route: fixtureRouteResponse(), + Global: &observability.Global{}, + }, + }, + expected: Model{ + ACL: 
types.SetNull(types.StringType), + Parameters: types.MapNull(types.StringType), + AlertConfig: types.ObjectValueMust(alertConfigTypes, map[string]attr.Value{ + "receivers": types.ListValueMust(types.ObjectType{AttrTypes: receiversTypes}, []attr.Value{ + fixtureReceiverModel( + fixtureEmailConfigsModel(), + fixtureOpsGenieConfigsModel(), + fixtureWebHooksConfigsModel(), + ), + }), + "route": fixtureRouteModel(), + "global": fixtureNullGlobalConfigModel(), + }), + }, + isValid: true, + }, + { + description: "nil resp", + alertConfigResp: nil, + expected: Model{ + ACL: types.SetNull(types.StringType), + Parameters: types.MapNull(types.StringType), + AlertConfig: types.ObjectNull(receiversTypes), + }, + isValid: true, + }, + } + + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + state := &Model{ + ProjectId: tt.expected.ProjectId, + ACL: types.SetNull(types.StringType), + Parameters: types.MapNull(types.StringType), + } + err := mapAlertConfigField(context.Background(), tt.alertConfigResp, state) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + + if tt.isValid { + diff := cmp.Diff(state.AlertConfig, tt.expected.AlertConfig) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func TestToCreatePayload(t *testing.T) { + tests := []struct { + description string + input *Model + expected *observability.CreateInstancePayload + isValid bool + }{ + { + "basic_ok", + &Model{ + PlanId: types.StringValue("planId"), + }, + &observability.CreateInstancePayload{ + Name: nil, + PlanId: utils.Ptr("planId"), + Parameter: &map[string]interface{}{}, + }, + true, + }, + { + "ok", + &Model{ + Name: types.StringValue("Name"), + PlanId: types.StringValue("planId"), + Parameters: makeTestMap(t), + }, + &observability.CreateInstancePayload{ + Name: utils.Ptr("Name"), + PlanId: utils.Ptr("planId"), + Parameter: 
&map[string]interface{}{"key": `"value"`}, + }, + true, + }, + { + "nil_model", + nil, + nil, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + output, err := toCreatePayload(tt.input) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(output, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func TestToPayloadUpdate(t *testing.T) { + tests := []struct { + description string + input *Model + expected *observability.UpdateInstancePayload + isValid bool + }{ + { + "basic_ok", + &Model{ + PlanId: types.StringValue("planId"), + }, + &observability.UpdateInstancePayload{ + Name: nil, + PlanId: utils.Ptr("planId"), + Parameter: &map[string]any{}, + }, + true, + }, + { + "ok", + &Model{ + Name: types.StringValue("Name"), + PlanId: types.StringValue("planId"), + Parameters: makeTestMap(t), + }, + &observability.UpdateInstancePayload{ + Name: utils.Ptr("Name"), + PlanId: utils.Ptr("planId"), + Parameter: &map[string]any{"key": `"value"`}, + }, + true, + }, + { + "nil_model", + nil, + nil, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + output, err := toUpdatePayload(tt.input) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(output, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func TestToUpdateMetricsStorageRetentionPayload(t *testing.T) { + tests := []struct { + description string + retentionDaysRaw *int64 + retentionDays1h *int64 + retentionDays5m *int64 + getMetricsResp *observability.GetMetricsStorageRetentionResponse + expected *observability.UpdateMetricsStorageRetentionPayload + isValid bool + }{ + { + 
"basic_ok", + utils.Ptr(int64(120)), + utils.Ptr(int64(60)), + utils.Ptr(int64(14)), + &observability.GetMetricsStorageRetentionResponse{ + MetricsRetentionTimeRaw: utils.Ptr("60d"), + MetricsRetentionTime1h: utils.Ptr("30d"), + MetricsRetentionTime5m: utils.Ptr("7d"), + }, + &observability.UpdateMetricsStorageRetentionPayload{ + MetricsRetentionTimeRaw: utils.Ptr("120d"), + MetricsRetentionTime1h: utils.Ptr("60d"), + MetricsRetentionTime5m: utils.Ptr("14d"), + }, + true, + }, + { + "only_raw_given", + utils.Ptr(int64(120)), + nil, + nil, + &observability.GetMetricsStorageRetentionResponse{ + MetricsRetentionTimeRaw: utils.Ptr("60d"), + MetricsRetentionTime1h: utils.Ptr("30d"), + MetricsRetentionTime5m: utils.Ptr("7d"), + }, + &observability.UpdateMetricsStorageRetentionPayload{ + MetricsRetentionTimeRaw: utils.Ptr("120d"), + MetricsRetentionTime1h: utils.Ptr("30d"), + MetricsRetentionTime5m: utils.Ptr("7d"), + }, + true, + }, + { + "only_1h_given", + nil, + utils.Ptr(int64(60)), + nil, + &observability.GetMetricsStorageRetentionResponse{ + MetricsRetentionTimeRaw: utils.Ptr("60d"), + MetricsRetentionTime1h: utils.Ptr("30d"), + MetricsRetentionTime5m: utils.Ptr("7d"), + }, + &observability.UpdateMetricsStorageRetentionPayload{ + MetricsRetentionTimeRaw: utils.Ptr("60d"), + MetricsRetentionTime1h: utils.Ptr("60d"), + MetricsRetentionTime5m: utils.Ptr("7d"), + }, + true, + }, + { + "only_5m_given", + nil, + nil, + utils.Ptr(int64(14)), + &observability.GetMetricsStorageRetentionResponse{ + MetricsRetentionTimeRaw: utils.Ptr("60d"), + MetricsRetentionTime1h: utils.Ptr("30d"), + MetricsRetentionTime5m: utils.Ptr("7d"), + }, + &observability.UpdateMetricsStorageRetentionPayload{ + MetricsRetentionTimeRaw: utils.Ptr("60d"), + MetricsRetentionTime1h: utils.Ptr("30d"), + MetricsRetentionTime5m: utils.Ptr("14d"), + }, + true, + }, + { + "none_given", + nil, + nil, + nil, + &observability.GetMetricsStorageRetentionResponse{ + MetricsRetentionTimeRaw: utils.Ptr("60d"), + 
MetricsRetentionTime1h: utils.Ptr("30d"), + MetricsRetentionTime5m: utils.Ptr("7d"), + }, + &observability.UpdateMetricsStorageRetentionPayload{ + MetricsRetentionTimeRaw: utils.Ptr("60d"), + MetricsRetentionTime1h: utils.Ptr("30d"), + MetricsRetentionTime5m: utils.Ptr("7d"), + }, + true, + }, + { + "nil_response", + nil, + nil, + nil, + nil, + nil, + false, + }, + { + "empty_response", + nil, + nil, + nil, + &observability.GetMetricsStorageRetentionResponse{}, + nil, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + output, err := toUpdateMetricsStorageRetentionPayload(tt.retentionDaysRaw, tt.retentionDays5m, tt.retentionDays1h, tt.getMetricsResp) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(output, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func TestToUpdateAlertConfigPayload(t *testing.T) { + tests := []struct { + description string + input alertConfigModel + expected *observability.UpdateAlertConfigsPayload + isValid bool + }{ + { + description: "base", + input: alertConfigModel{ + Receivers: types.ListValueMust(types.ObjectType{AttrTypes: receiversTypes}, []attr.Value{ + fixtureReceiverModel( + fixtureEmailConfigsModel(), + fixtureOpsGenieConfigsModel(), + fixtureWebHooksConfigsModel(), + ), + }), + Route: fixtureRouteModel(), + GlobalConfiguration: fixtureGlobalConfigModel(), + }, + expected: &observability.UpdateAlertConfigsPayload{ + Receivers: &[]observability.UpdateAlertConfigsPayloadReceiversInner{ + fixtureReceiverPayload( + &[]observability.CreateAlertConfigReceiverPayloadEmailConfigsInner{fixtureEmailConfigsPayload()}, + &[]observability.CreateAlertConfigReceiverPayloadOpsgenieConfigsInner{fixtureOpsGenieConfigsPayload()}, + 
&[]observability.CreateAlertConfigReceiverPayloadWebHookConfigsInner{fixtureWebHooksConfigsPayload()}, + ), + }, + Route: fixtureRoutePayload(), + Global: fixtureGlobalConfigPayload(), + }, + isValid: true, + }, + { + description: "receivers only emailconfigs", + input: alertConfigModel{ + Receivers: types.ListValueMust(types.ObjectType{AttrTypes: receiversTypes}, []attr.Value{ + fixtureReceiverModel( + fixtureEmailConfigsModel(), + types.ListNull(types.ObjectType{AttrTypes: opsgenieConfigsTypes}), + types.ListNull(types.ObjectType{AttrTypes: webHooksConfigsTypes}), + ), + }), + Route: fixtureRouteModel(), + }, + expected: &observability.UpdateAlertConfigsPayload{ + Receivers: &[]observability.UpdateAlertConfigsPayloadReceiversInner{ + fixtureReceiverPayload( + &[]observability.CreateAlertConfigReceiverPayloadEmailConfigsInner{fixtureEmailConfigsPayload()}, + nil, + nil, + ), + }, + Route: fixtureRoutePayload(), + }, + isValid: true, + }, + { + description: "receivers only opsgenieconfigs", + input: alertConfigModel{ + Receivers: types.ListValueMust(types.ObjectType{AttrTypes: receiversTypes}, []attr.Value{ + fixtureReceiverModel( + types.ListNull(types.ObjectType{AttrTypes: emailConfigsTypes}), + fixtureOpsGenieConfigsModel(), + types.ListNull(types.ObjectType{AttrTypes: webHooksConfigsTypes}), + ), + }), + Route: fixtureRouteModel(), + }, + expected: &observability.UpdateAlertConfigsPayload{ + Receivers: &[]observability.UpdateAlertConfigsPayloadReceiversInner{ + fixtureReceiverPayload( + nil, + &[]observability.CreateAlertConfigReceiverPayloadOpsgenieConfigsInner{fixtureOpsGenieConfigsPayload()}, + nil, + ), + }, + Route: fixtureRoutePayload(), + }, + isValid: true, + }, + { + description: "multiple receivers", + input: alertConfigModel{ + Receivers: types.ListValueMust(types.ObjectType{AttrTypes: receiversTypes}, []attr.Value{ + fixtureReceiverModel( + fixtureEmailConfigsModel(), + fixtureOpsGenieConfigsModel(), + fixtureWebHooksConfigsModel(), + ), + 
fixtureReceiverModel( + fixtureEmailConfigsModel(), + fixtureOpsGenieConfigsModel(), + fixtureWebHooksConfigsModel(), + ), + }), + Route: fixtureRouteModel(), + }, + expected: &observability.UpdateAlertConfigsPayload{ + Receivers: &[]observability.UpdateAlertConfigsPayloadReceiversInner{ + fixtureReceiverPayload( + &[]observability.CreateAlertConfigReceiverPayloadEmailConfigsInner{fixtureEmailConfigsPayload()}, + &[]observability.CreateAlertConfigReceiverPayloadOpsgenieConfigsInner{fixtureOpsGenieConfigsPayload()}, + &[]observability.CreateAlertConfigReceiverPayloadWebHookConfigsInner{fixtureWebHooksConfigsPayload()}, + ), + fixtureReceiverPayload( + &[]observability.CreateAlertConfigReceiverPayloadEmailConfigsInner{fixtureEmailConfigsPayload()}, + &[]observability.CreateAlertConfigReceiverPayloadOpsgenieConfigsInner{fixtureOpsGenieConfigsPayload()}, + &[]observability.CreateAlertConfigReceiverPayloadWebHookConfigsInner{fixtureWebHooksConfigsPayload()}, + ), + }, + Route: fixtureRoutePayload(), + }, + isValid: true, + }, + { + description: "empty global options", + input: alertConfigModel{ + Receivers: types.ListValueMust(types.ObjectType{AttrTypes: receiversTypes}, []attr.Value{ + fixtureReceiverModel( + fixtureEmailConfigsModel(), + fixtureOpsGenieConfigsModel(), + fixtureWebHooksConfigsModel(), + ), + }), + Route: fixtureRouteModel(), + GlobalConfiguration: fixtureNullGlobalConfigModel(), + }, + expected: &observability.UpdateAlertConfigsPayload{ + Receivers: &[]observability.UpdateAlertConfigsPayloadReceiversInner{ + fixtureReceiverPayload( + &[]observability.CreateAlertConfigReceiverPayloadEmailConfigsInner{fixtureEmailConfigsPayload()}, + &[]observability.CreateAlertConfigReceiverPayloadOpsgenieConfigsInner{fixtureOpsGenieConfigsPayload()}, + &[]observability.CreateAlertConfigReceiverPayloadWebHookConfigsInner{fixtureWebHooksConfigsPayload()}, + ), + }, + Route: fixtureRoutePayload(), + Global: &observability.UpdateAlertConfigsPayloadGlobal{}, + }, + isValid: 
true, + }, + { + description: "empty alert config", + input: alertConfigModel{}, + isValid: false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + output, err := toUpdateAlertConfigPayload(context.Background(), &tt.input) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(output, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func TestGetRouteNestedObjectAux(t *testing.T) { + tests := []struct { + description string + startingLevel int + recursionLimit int + isDatasource bool + expected schema.ListNestedAttribute + }{ + { + "no recursion, resource", + 1, + 1, + false, + schema.ListNestedAttribute{ + Description: routeDescriptions["routes"], + Optional: true, + Validators: []validator.List{ + listvalidator.SizeAtLeast(1), + }, + NestedObject: schema.NestedAttributeObject{ + Attributes: fixtureRouteAttributeSchema(nil, false), + }, + }, + }, + { + "recursion 1, resource", + 1, + 2, + false, + schema.ListNestedAttribute{ + Description: routeDescriptions["routes"], + Optional: true, + Validators: []validator.List{ + listvalidator.SizeAtLeast(1), + }, + NestedObject: schema.NestedAttributeObject{ + Attributes: fixtureRouteAttributeSchema( + &schema.ListNestedAttribute{ + Description: routeDescriptions["routes"], + Optional: true, + Validators: []validator.List{ + listvalidator.SizeAtLeast(1), + }, + NestedObject: schema.NestedAttributeObject{ + Attributes: fixtureRouteAttributeSchema(nil, false), + }, + }, + false, + ), + }, + }, + }, + { + "no recursion,datasource", + 1, + 1, + true, + schema.ListNestedAttribute{ + Description: routeDescriptions["routes"], + Computed: true, + Validators: []validator.List{ + listvalidator.SizeAtLeast(1), + }, + NestedObject: schema.NestedAttributeObject{ + Attributes: fixtureRouteAttributeSchema(nil, true), + }, + 
}, + }, + { + "recursion 1, datasource", + 1, + 2, + true, + schema.ListNestedAttribute{ + Description: routeDescriptions["routes"], + Computed: true, + Validators: []validator.List{ + listvalidator.SizeAtLeast(1), + }, + NestedObject: schema.NestedAttributeObject{ + Attributes: fixtureRouteAttributeSchema( + &schema.ListNestedAttribute{ + Description: routeDescriptions["routes"], + Computed: true, + Validators: []validator.List{ + listvalidator.SizeAtLeast(1), + }, + NestedObject: schema.NestedAttributeObject{ + Attributes: fixtureRouteAttributeSchema(nil, true), + }, + }, + true, + ), + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + output := getRouteNestedObjectAux(tt.isDatasource, tt.startingLevel, tt.recursionLimit) + diff := cmp.Diff(output, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + }) + } +} + +func TestGetRouteListTypeAux(t *testing.T) { + tests := []struct { + description string + startingLevel int + recursionLimit int + expected types.ObjectType + }{ + { + "no recursion", + 1, + 1, + types.ObjectType{ + AttrTypes: map[string]attr.Type{ + "group_by": types.ListType{ElemType: types.StringType}, + "group_interval": types.StringType, + "group_wait": types.StringType, + "match": types.MapType{ElemType: types.StringType}, + "match_regex": types.MapType{ElemType: types.StringType}, + "receiver": types.StringType, + "repeat_interval": types.StringType, + }, + }, + }, + { + "recursion 1", + 1, + 2, + types.ObjectType{ + AttrTypes: map[string]attr.Type{ + "group_by": types.ListType{ElemType: types.StringType}, + "group_interval": types.StringType, + "group_wait": types.StringType, + "match": types.MapType{ElemType: types.StringType}, + "match_regex": types.MapType{ElemType: types.StringType}, + "receiver": types.StringType, + "repeat_interval": types.StringType, + "routes": types.ListType{ElemType: types.ObjectType{AttrTypes: map[string]attr.Type{ + "group_by": 
types.ListType{ElemType: types.StringType}, + "group_interval": types.StringType, + "group_wait": types.StringType, + "match": types.MapType{ElemType: types.StringType}, + "match_regex": types.MapType{ElemType: types.StringType}, + "receiver": types.StringType, + "repeat_interval": types.StringType, + }}}, + }, + }, + }, + { + "recursion 2", + 2, + 2, + types.ObjectType{ + AttrTypes: map[string]attr.Type{ + "group_by": types.ListType{ElemType: types.StringType}, + "group_interval": types.StringType, + "group_wait": types.StringType, + "match": types.MapType{ElemType: types.StringType}, + "match_regex": types.MapType{ElemType: types.StringType}, + "receiver": types.StringType, + "repeat_interval": types.StringType, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + output := getRouteListTypeAux(tt.startingLevel, tt.recursionLimit) + diff := cmp.Diff(output, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + }) + } +} + +func makeTestMap(t *testing.T) basetypes.MapValue { + p := make(map[string]attr.Value, 1) + p["key"] = types.StringValue("value") + params, diag := types.MapValueFrom(context.Background(), types.StringType, p) + if diag.HasError() { + t.Fail() + } + return params +} + +// ToTerraformStringMapMust Silently ignores the error +func toTerraformStringMapMust(ctx context.Context, m map[string]string) basetypes.MapValue { + labels := make(map[string]attr.Value, len(m)) + for l, v := range m { + stringValue := types.StringValue(v) + labels[l] = stringValue + } + res, diags := types.MapValueFrom(ctx, types.StringType, m) + if diags.HasError() { + return types.MapNull(types.StringType) + } + return res +} diff --git a/stackit/internal/services/observability/observability_acc_test.go b/stackit/internal/services/observability/observability_acc_test.go new file mode 100644 index 00000000..c025b831 --- /dev/null +++ b/stackit/internal/services/observability/observability_acc_test.go @@ 
-0,0 +1,896 @@ +package observability_test + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/observability" + "github.com/stackitcloud/stackit-sdk-go/services/observability/wait" + "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/testutil" +) + +var instanceResource = map[string]string{ + "project_id": testutil.ProjectId, + "name": testutil.ResourceNameWithDateTime("observability"), + "plan_name": "Monitoring-Basic-EU01", + "new_plan_name": "Monitoring-Medium-EU01", + "acl-0": "1.2.3.4/32", + "acl-1": "111.222.111.222/32", + "acl-1-updated": "111.222.111.125/32", + "metrics_retention_days": "60", + "metrics_retention_days_5m_downsampling": "30", + "metrics_retention_days_1h_downsampling": "15", +} + +var scrapeConfigResource = map[string]string{ + "project_id": testutil.ProjectId, + "name": fmt.Sprintf("scrapeconfig-%s", acctest.RandStringFromCharSet(7, acctest.CharSetAlphaNum)), + "urls": fmt.Sprintf(`{urls = ["www.%s.de","%s.de"]}`, acctest.RandStringFromCharSet(15, acctest.CharSetAlphaNum), acctest.RandStringFromCharSet(15, acctest.CharSetAlphaNum)), + "metrics_path": "/metrics", + "scheme": "https", + "scrape_interval": "4m", // non-default + "sample_limit": "7", // non-default + "saml2_enable_url_parameters": "false", +} + +var credentialResource = map[string]string{ + "project_id": testutil.ProjectId, +} + +func buildAlertConfigReceivers(hasOpsGenie, hasEmail, hasWebhook bool) string { + if !hasOpsGenie && !hasEmail && !hasWebhook { + return "" + } + + receivers := "[" + + if hasOpsGenie { + receivers += ` + { + 
name = "OpsGenieReceiverInfo" + opsgenie_configs = [ + { + tags = "iam,observability-alert" + api_key = "example-api-key" + } + ] + }, +` + } + + if hasEmail { + receivers += ` + { + name = "EmailReceiverInfo" + email_configs = [ + { + to = "me@example.com" + }, + ] + }, +` + } + + if hasWebhook { + receivers += ` + { + name = "WebhookReceiverInfo" + webhooks_configs = [ + { + url = "https://example.com" + ms_teams = true + }, + ] + }, +` + } + + return receivers + "]" +} + +func buildAlertConfigRoute(childRoutes bool) string { + route := `{ + receiver = "OpsGenieReceiverInfo" + group_by = ["alertname"] + group_interval = "10m" + group_wait = "1m" + repeat_interval = "1h"` + + if childRoutes { + route += ` + routes = [ + { + match = { + severity = "critical" + } + receiver = "OpsGenieReceiverInfo" + }, + { + match = { + severity = "warning" + } + receiver = "WebhookReceiverInfo" + } + ]` + } + + return route + "\n}" +} + +func buildAlertConfigGlobal(includeEmailOptions bool) string { + defaultOptions := `{ + resolve_timeout = "5m" + opsgenie_api_key = "example-api-key" + opsgenie_api_url = "https://api.eu.opsgenie.com"` + + if !includeEmailOptions { + return defaultOptions + "\n}" + } + return defaultOptions + ` + smtp_smart_host = "smtp.example.com:587" + smtp_from = "me@example.com" +}` +} + +func buildAlertConfig(receivers, route, global string) *string { + if receivers == "" && route == "" && global == "" { + return nil + } + returnStr := fmt.Sprintf(` + alert_config = { + receivers = %s, + route = %s, + global = %s + } + `, receivers, route, global) + return &returnStr +} + +func instanceResourceConfig(acl, metricsRetentionDays, metricsRetentionDays1hDownsampling, metricsRetentionDays5mDownsampling, alertConfig *string, instanceName, planName string) string { + var aclStr string + var metricsRetentionDaysStr string + var metricsRetentionDays1hDownsamplingStr string + var metricsRetentionDays5mDownsamplingStr string + var alertConfigStr string + + if acl != nil 
{ + aclStr = fmt.Sprintf("acl = %s", *acl) + } + + if metricsRetentionDays != nil { + metricsRetentionDaysStr = fmt.Sprintf("metrics_retention_days = %s", *metricsRetentionDays) + } + + if metricsRetentionDays1hDownsampling != nil { + metricsRetentionDays1hDownsamplingStr = fmt.Sprintf("metrics_retention_days_1h_downsampling = %s", *metricsRetentionDays1hDownsampling) + } + + if metricsRetentionDays5mDownsampling != nil { + metricsRetentionDays5mDownsamplingStr = fmt.Sprintf("metrics_retention_days_5m_downsampling = %s", *metricsRetentionDays5mDownsampling) + } + + if alertConfig != nil { + alertConfigStr = *alertConfig + } + + optionalsStr := strings.Join([]string{aclStr, metricsRetentionDaysStr, metricsRetentionDays1hDownsamplingStr, metricsRetentionDays5mDownsamplingStr, alertConfigStr}, "\n") + + return fmt.Sprintf(` + resource "stackit_observability_instance" "instance" { + project_id = "%s" + name = "%s" + plan_name = "%s" + %s + } + `, + instanceResource["project_id"], + instanceName, + planName, + optionalsStr, + ) +} + +func scrapeConfigResourceConfig(target, saml2EnableUrlParameters string) string { + return fmt.Sprintf( + `resource "stackit_observability_scrapeconfig" "scrapeconfig" { + project_id = stackit_observability_instance.instance.project_id + instance_id = stackit_observability_instance.instance.instance_id + name = "%s" + metrics_path = "%s" + targets = [%s] + scrape_interval = "%s" + sample_limit = %s + saml2 = { + enable_url_parameters = %s + } + }`, + scrapeConfigResource["name"], + scrapeConfigResource["metrics_path"], + target, + scrapeConfigResource["scrape_interval"], + scrapeConfigResource["sample_limit"], + saml2EnableUrlParameters, + ) +} + +func credentialResourceConfig() string { + return `resource "stackit_observability_credential" "credential" { + project_id = stackit_observability_instance.instance.project_id + instance_id = stackit_observability_instance.instance.instance_id + }` +} + +func resourceConfig(acl, 
metricsRetentionDays, metricsRetentionDays1hDownsampling, metricsRetentionDays5mDownsampling, alertConfig *string, instanceName, planName, target, saml2EnableUrlParameters string) string { + return fmt.Sprintf("%s\n\n%s\n\n%s\n\n%s", + testutil.ObservabilityProviderConfig(), + instanceResourceConfig(acl, + metricsRetentionDays, + metricsRetentionDays1hDownsampling, + metricsRetentionDays5mDownsampling, + alertConfig, + instanceName, + planName), + scrapeConfigResourceConfig(target, saml2EnableUrlParameters), + credentialResourceConfig(), + ) +} + +func TestAccResource(t *testing.T) { + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: testutil.TestAccProtoV6ProviderFactories, + CheckDestroy: testAccCheckObservabilityDestroy, + Steps: []resource.TestStep{ + // Creation + { + Config: resourceConfig( + utils.Ptr(fmt.Sprintf( + "[%q, %q, %q]", + instanceResource["acl-0"], + instanceResource["acl-1"], + instanceResource["acl-1"], + )), + utils.Ptr(instanceResource["metrics_retention_days"]), + utils.Ptr(instanceResource["metrics_retention_days_1h_downsampling"]), + utils.Ptr(instanceResource["metrics_retention_days_5m_downsampling"]), + buildAlertConfig(buildAlertConfigReceivers(true, false, true), buildAlertConfigRoute(false), buildAlertConfigGlobal(false)), + instanceResource["name"], + instanceResource["plan_name"], + scrapeConfigResource["urls"], + scrapeConfigResource["saml2_enable_url_parameters"], + ), + Check: resource.ComposeAggregateTestCheckFunc( + // Instance data + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "project_id", instanceResource["project_id"]), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "instance_id"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "name", instanceResource["name"]), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "plan_name", instanceResource["plan_name"]), + 
resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "dashboard_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "is_updatable"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "grafana_public_read_access"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "grafana_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "grafana_initial_admin_user"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "grafana_initial_admin_password"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "metrics_retention_days", instanceResource["metrics_retention_days"]), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "metrics_retention_days_5m_downsampling", instanceResource["metrics_retention_days_5m_downsampling"]), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "metrics_retention_days_1h_downsampling", instanceResource["metrics_retention_days_1h_downsampling"]), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "metrics_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "metrics_push_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "targets_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "alerting_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "logs_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "logs_push_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "jaeger_traces_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "jaeger_ui_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "otlp_traces_url"), + 
resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "zipkin_spans_url"), + + // Alert Config + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.receivers.#", "2"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.group_by.#", "1"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.group_by.0", "alertname"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.group_interval", "10m"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.group_wait", "1m"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.repeat_interval", "1h"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.global.resolve_timeout", "5m"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.global.opsgenie_api_key", "example-api-key"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.global.opsgenie_api_url", "https://api.eu.opsgenie.com"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.receiver", "OpsGenieReceiverInfo"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.group_by.#", "1"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.group_by.0", "alertname"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.group_interval", "10m"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.group_wait", "1m"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.repeat_interval", "1h"), + 
resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.routes.#", "0"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.receivers.0.name", "OpsGenieReceiverInfo"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.receivers.0.opsgenie_configs.#", "1"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.receivers.0.opsgenie_configs.0.tags", "iam,observability-alert"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.receivers.0.opsgenie_configs.0.api_key", + "example-api-key"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.receivers.1.name", "WebhookReceiverInfo"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.receivers.1.webhooks_configs.#", "1"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.receivers.1.webhooks_configs.0.url", "https://example.com"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.receivers.1.webhooks_configs.0.ms_teams", "true"), + + // ACL + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "acl.#", "2"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "acl.0", instanceResource["acl-0"]), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "acl.1", instanceResource["acl-1"]), + + // scrape config data + resource.TestCheckResourceAttrPair( + "stackit_observability_instance.instance", "project_id", + "stackit_observability_scrapeconfig.scrapeconfig", "project_id", + ), + resource.TestCheckResourceAttrPair( + "stackit_observability_instance.instance", "instance_id", + "stackit_observability_scrapeconfig.scrapeconfig", "instance_id", + ), + 
resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "name", scrapeConfigResource["name"]), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "targets.0.urls.#", "2"), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "metrics_path", scrapeConfigResource["metrics_path"]), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "scheme", scrapeConfigResource["scheme"]), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "scrape_interval", scrapeConfigResource["scrape_interval"]), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "sample_limit", scrapeConfigResource["sample_limit"]), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "saml2.enable_url_parameters", scrapeConfigResource["saml2_enable_url_parameters"]), + + // credentials + resource.TestCheckResourceAttr("stackit_observability_credential.credential", "project_id", credentialResource["project_id"]), + resource.TestCheckResourceAttrPair( + "stackit_observability_instance.instance", "instance_id", + "stackit_observability_credential.credential", "instance_id", + ), + resource.TestCheckResourceAttrSet("stackit_observability_credential.credential", "username"), + resource.TestCheckResourceAttrSet("stackit_observability_credential.credential", "password"), + ), + }, + // Update Alert Config with complete Receiver (email, webhook and opsgenie configs), global options and Route with child routes + { + Config: resourceConfig( + utils.Ptr(fmt.Sprintf( + "[%q, %q, %q]", + instanceResource["acl-0"], + instanceResource["acl-1"], + instanceResource["acl-1"], + )), + utils.Ptr(instanceResource["metrics_retention_days"]), + utils.Ptr(instanceResource["metrics_retention_days_1h_downsampling"]), + utils.Ptr(instanceResource["metrics_retention_days_5m_downsampling"]), + 
buildAlertConfig(buildAlertConfigReceivers(true, true, true), buildAlertConfigRoute(true), buildAlertConfigGlobal(true)), + instanceResource["name"], + instanceResource["plan_name"], + scrapeConfigResource["urls"], + scrapeConfigResource["saml2_enable_url_parameters"], + ), + Check: resource.ComposeAggregateTestCheckFunc( + // Instance data + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "project_id", instanceResource["project_id"]), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "instance_id"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "name", instanceResource["name"]), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "plan_name", instanceResource["plan_name"]), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "dashboard_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "is_updatable"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "grafana_public_read_access"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "grafana_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "grafana_initial_admin_user"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "grafana_initial_admin_password"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "metrics_retention_days", instanceResource["metrics_retention_days"]), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "metrics_retention_days_5m_downsampling", instanceResource["metrics_retention_days_5m_downsampling"]), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "metrics_retention_days_1h_downsampling", instanceResource["metrics_retention_days_1h_downsampling"]), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", 
"metrics_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "metrics_push_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "targets_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "alerting_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "logs_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "logs_push_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "jaeger_traces_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "jaeger_ui_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "otlp_traces_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "zipkin_spans_url"), + + // Alert Config + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.receivers.#", "3"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.group_by.#", "1"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.group_by.0", "alertname"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.group_interval", "10m"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.group_wait", "1m"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.repeat_interval", "1h"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.global.resolve_timeout", "5m"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.global.opsgenie_api_key", "example-api-key"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.global.opsgenie_api_url", 
"https://api.eu.opsgenie.com"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.global.smtp_smart_host", "smtp.example.com:587"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.global.smtp_from", "me@example.com"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.receiver", "OpsGenieReceiverInfo"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.group_by.#", "1"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.group_by.0", "alertname"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.group_interval", "10m"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.group_wait", "1m"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.repeat_interval", "1h"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.routes.#", "2"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.routes.0.match.severity", "critical"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.routes.0.receiver", "OpsGenieReceiverInfo"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.routes.1.match.severity", "warning"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.routes.1.receiver", "WebhookReceiverInfo"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.receivers.0.name", "OpsGenieReceiverInfo"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.receivers.0.opsgenie_configs.#", "1"), + 
resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.receivers.0.opsgenie_configs.0.tags", "iam,observability-alert"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.receivers.0.opsgenie_configs.0.api_key", + "example-api-key"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.receivers.1.name", "EmailReceiverInfo"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.receivers.1.email_configs.#", "1"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.receivers.1.email_configs.0.to", "me@example.com"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.receivers.2.name", "WebhookReceiverInfo"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.receivers.2.webhooks_configs.#", "1"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.receivers.2.webhooks_configs.0.url", "https://example.com"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.receivers.2.webhooks_configs.0.ms_teams", "true"), + + // ACL + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "acl.#", "2"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "acl.0", instanceResource["acl-0"]), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "acl.1", instanceResource["acl-1"]), + + // scrape config data + resource.TestCheckResourceAttrPair( + "stackit_observability_instance.instance", "project_id", + "stackit_observability_scrapeconfig.scrapeconfig", "project_id", + ), + resource.TestCheckResourceAttrPair( + "stackit_observability_instance.instance", "instance_id", + "stackit_observability_scrapeconfig.scrapeconfig", "instance_id", + ), + 
resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "name", scrapeConfigResource["name"]), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "targets.0.urls.#", "2"), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "metrics_path", scrapeConfigResource["metrics_path"]), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "scheme", scrapeConfigResource["scheme"]), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "scrape_interval", scrapeConfigResource["scrape_interval"]), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "sample_limit", scrapeConfigResource["sample_limit"]), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "saml2.enable_url_parameters", scrapeConfigResource["saml2_enable_url_parameters"]), + + // credentials + resource.TestCheckResourceAttr("stackit_observability_credential.credential", "project_id", credentialResource["project_id"]), + resource.TestCheckResourceAttrPair( + "stackit_observability_instance.instance", "instance_id", + "stackit_observability_credential.credential", "instance_id", + ), + resource.TestCheckResourceAttrSet("stackit_observability_credential.credential", "username"), + resource.TestCheckResourceAttrSet("stackit_observability_credential.credential", "password"), + ), + }, + // Update without ACL, partial metrics retention days and NO alert configs + { + Config: resourceConfig( + nil, + nil, + utils.Ptr(instanceResource["metrics_retention_days_1h_downsampling"]), + nil, + nil, + instanceResource["name"], + instanceResource["plan_name"], + scrapeConfigResource["urls"], + scrapeConfigResource["saml2_enable_url_parameters"], + ), + Check: resource.ComposeAggregateTestCheckFunc( + // Instance data + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "project_id", 
instanceResource["project_id"]), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "instance_id"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "name", instanceResource["name"]), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "plan_name", instanceResource["plan_name"]), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "dashboard_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "is_updatable"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "grafana_public_read_access"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "grafana_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "grafana_initial_admin_user"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "grafana_initial_admin_password"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "metrics_retention_days"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "metrics_retention_days_5m_downsampling"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "metrics_retention_days_1h_downsampling", instanceResource["metrics_retention_days_1h_downsampling"]), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "metrics_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "metrics_push_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "targets_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "alerting_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "logs_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "logs_push_url"), + 
resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "jaeger_traces_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "jaeger_ui_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "otlp_traces_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "zipkin_spans_url"), + + // ACL + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "acl.#", "0"), + + // scrape config data + resource.TestCheckResourceAttrPair( + "stackit_observability_instance.instance", "project_id", + "stackit_observability_scrapeconfig.scrapeconfig", "project_id", + ), + resource.TestCheckResourceAttrPair( + "stackit_observability_instance.instance", "instance_id", + "stackit_observability_scrapeconfig.scrapeconfig", "instance_id", + ), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "name", scrapeConfigResource["name"]), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "targets.0.urls.#", "2"), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "metrics_path", scrapeConfigResource["metrics_path"]), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "scheme", scrapeConfigResource["scheme"]), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "scrape_interval", scrapeConfigResource["scrape_interval"]), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "sample_limit", scrapeConfigResource["sample_limit"]), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "saml2.enable_url_parameters", scrapeConfigResource["saml2_enable_url_parameters"]), + + // credentials + resource.TestCheckResourceAttr("stackit_observability_credential.credential", "project_id", credentialResource["project_id"]), + 
resource.TestCheckResourceAttrPair( + "stackit_observability_instance.instance", "instance_id", + "stackit_observability_credential.credential", "instance_id", + ), + resource.TestCheckResourceAttrSet("stackit_observability_credential.credential", "username"), + resource.TestCheckResourceAttrSet("stackit_observability_credential.credential", "password"), + ), + }, + // Update with empty ACL, NO metrics retention days and NO alert configs + { + Config: resourceConfig( + utils.Ptr("[]"), + nil, + nil, + nil, + nil, + instanceResource["name"], + instanceResource["plan_name"], + scrapeConfigResource["urls"], + scrapeConfigResource["saml2_enable_url_parameters"], + ), + Check: resource.ComposeAggregateTestCheckFunc( + // Instance data + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "project_id", instanceResource["project_id"]), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "instance_id"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "name", instanceResource["name"]), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "plan_name", instanceResource["plan_name"]), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "dashboard_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "is_updatable"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "grafana_public_read_access"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "grafana_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "grafana_initial_admin_user"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "grafana_initial_admin_password"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "metrics_retention_days"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", 
"metrics_retention_days_5m_downsampling"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "metrics_retention_days_1h_downsampling"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "metrics_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "metrics_push_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "targets_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "alerting_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "logs_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "logs_push_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "jaeger_traces_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "jaeger_ui_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "otlp_traces_url"), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "zipkin_spans_url"), + + // ACL + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "acl.#", "0"), + + // scrape config data + resource.TestCheckResourceAttrPair( + "stackit_observability_instance.instance", "project_id", + "stackit_observability_scrapeconfig.scrapeconfig", "project_id", + ), + resource.TestCheckResourceAttrPair( + "stackit_observability_instance.instance", "instance_id", + "stackit_observability_scrapeconfig.scrapeconfig", "instance_id", + ), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "name", scrapeConfigResource["name"]), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "targets.0.urls.#", "2"), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "metrics_path", scrapeConfigResource["metrics_path"]), + 
resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "scheme", scrapeConfigResource["scheme"]), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "scrape_interval", scrapeConfigResource["scrape_interval"]), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "sample_limit", scrapeConfigResource["sample_limit"]), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "saml2.enable_url_parameters", scrapeConfigResource["saml2_enable_url_parameters"]), + + // credentials + resource.TestCheckResourceAttr("stackit_observability_credential.credential", "project_id", credentialResource["project_id"]), + resource.TestCheckResourceAttrPair( + "stackit_observability_instance.instance", "instance_id", + "stackit_observability_credential.credential", "instance_id", + ), + resource.TestCheckResourceAttrSet("stackit_observability_credential.credential", "username"), + resource.TestCheckResourceAttrSet("stackit_observability_credential.credential", "password"), + ), + }, + // Data source + { + Config: fmt.Sprintf(` + %s + + data "stackit_observability_instance" "instance" { + project_id = stackit_observability_instance.instance.project_id + instance_id = stackit_observability_instance.instance.instance_id + } + + data "stackit_observability_scrapeconfig" "scrapeconfig" { + project_id = stackit_observability_scrapeconfig.scrapeconfig.project_id + instance_id = stackit_observability_scrapeconfig.scrapeconfig.instance_id + name = stackit_observability_scrapeconfig.scrapeconfig.name + } + `, + resourceConfig( + utils.Ptr(fmt.Sprintf( + "[%q, %q]", + instanceResource["acl-0"], + instanceResource["acl-1"], + )), + utils.Ptr(instanceResource["metrics_retention_days"]), + utils.Ptr(instanceResource["metrics_retention_days_1h_downsampling"]), + utils.Ptr(instanceResource["metrics_retention_days_5m_downsampling"]), + buildAlertConfig(buildAlertConfigReceivers(true, 
false, true), buildAlertConfigRoute(true), buildAlertConfigGlobal(false)), + instanceResource["name"], + instanceResource["plan_name"], + scrapeConfigResource["urls"], + scrapeConfigResource["saml2_enable_url_parameters"], + ), + ), + Check: resource.ComposeAggregateTestCheckFunc( + // Instance data + resource.TestCheckResourceAttr("data.stackit_observability_instance.instance", "project_id", instanceResource["project_id"]), + resource.TestCheckResourceAttrSet("data.stackit_observability_instance.instance", "instance_id"), + resource.TestCheckResourceAttr("data.stackit_observability_instance.instance", "name", instanceResource["name"]), + resource.TestCheckResourceAttr("data.stackit_observability_instance.instance", "plan_name", instanceResource["plan_name"]), + resource.TestCheckResourceAttr("data.stackit_observability_instance.instance", "acl.#", "2"), + resource.TestCheckResourceAttr("data.stackit_observability_instance.instance", "acl.0", instanceResource["acl-0"]), + resource.TestCheckResourceAttr("data.stackit_observability_instance.instance", "acl.1", instanceResource["acl-1"]), + resource.TestCheckResourceAttrPair( + "stackit_observability_instance.instance", "project_id", + "data.stackit_observability_instance.instance", "project_id", + ), + resource.TestCheckResourceAttrPair( + "stackit_observability_instance.instance", "instance_id", + "data.stackit_observability_instance.instance", "instance_id", + ), + // scrape config data + resource.TestCheckResourceAttrPair( + "stackit_observability_scrapeconfig.scrapeconfig", "project_id", + "data.stackit_observability_scrapeconfig.scrapeconfig", "project_id", + ), + resource.TestCheckResourceAttrPair( + "stackit_observability_scrapeconfig.scrapeconfig", "instance_id", + "data.stackit_observability_scrapeconfig.scrapeconfig", "instance_id", + ), + resource.TestCheckResourceAttrPair( + "stackit_observability_scrapeconfig.scrapeconfig", "name", + "data.stackit_observability_scrapeconfig.scrapeconfig", "name", + ), + 
resource.TestCheckResourceAttr("data.stackit_observability_scrapeconfig.scrapeconfig", "name", scrapeConfigResource["name"]), + resource.TestCheckResourceAttr("data.stackit_observability_scrapeconfig.scrapeconfig", "targets.0.urls.#", "2"), + resource.TestCheckResourceAttr("data.stackit_observability_scrapeconfig.scrapeconfig", "metrics_path", scrapeConfigResource["metrics_path"]), + resource.TestCheckResourceAttr("data.stackit_observability_scrapeconfig.scrapeconfig", "scheme", scrapeConfigResource["scheme"]), + resource.TestCheckResourceAttr("data.stackit_observability_scrapeconfig.scrapeconfig", "scrape_interval", scrapeConfigResource["scrape_interval"]), + resource.TestCheckResourceAttr("data.stackit_observability_scrapeconfig.scrapeconfig", "sample_limit", scrapeConfigResource["sample_limit"]), + resource.TestCheckResourceAttr("data.stackit_observability_scrapeconfig.scrapeconfig", "saml2.enable_url_parameters", scrapeConfigResource["saml2_enable_url_parameters"]), + ), + }, + // Import 1 + { + ResourceName: "stackit_observability_instance.instance", + ImportStateIdFunc: func(s *terraform.State) (string, error) { + r, ok := s.RootModule().Resources["stackit_observability_instance.instance"] + if !ok { + return "", fmt.Errorf("couldn't find resource stackit_observability_instance.instance") + } + instanceId, ok := r.Primary.Attributes["instance_id"] + if !ok { + return "", fmt.Errorf("couldn't find attribute instance_id") + } + + return fmt.Sprintf("%s,%s", testutil.ProjectId, instanceId), nil + }, + ImportState: true, + ImportStateVerify: true, + }, + // Import 2 + { + ResourceName: "stackit_observability_scrapeconfig.scrapeconfig", + ImportStateIdFunc: func(s *terraform.State) (string, error) { + r, ok := s.RootModule().Resources["stackit_observability_scrapeconfig.scrapeconfig"] + if !ok { + return "", fmt.Errorf("couldn't find resource stackit_observability_scrapeconfig.scrapeconfig") + } + instanceId, ok := r.Primary.Attributes["instance_id"] + if !ok { + 
return "", fmt.Errorf("couldn't find attribute instance_id") + } + name, ok := r.Primary.Attributes["name"] + if !ok { + return "", fmt.Errorf("couldn't find attribute name") + } + return fmt.Sprintf("%s,%s,%s", testutil.ProjectId, instanceId, name), nil + }, + ImportState: true, + ImportStateVerify: true, + }, + // Update + { + Config: resourceConfig( + utils.Ptr(fmt.Sprintf( + "[%q, %q]", + instanceResource["acl-0"], + instanceResource["acl-1-updated"], + )), + utils.Ptr(instanceResource["metrics_retention_days"]), + utils.Ptr(instanceResource["metrics_retention_days_1h_downsampling"]), + utils.Ptr(instanceResource["metrics_retention_days_5m_downsampling"]), + buildAlertConfig(buildAlertConfigReceivers(true, false, true), buildAlertConfigRoute(true), buildAlertConfigGlobal(false)), + fmt.Sprintf("%s-new", instanceResource["name"]), + instanceResource["new_plan_name"], + "", + "true", + ), + Check: resource.ComposeAggregateTestCheckFunc( + // Instance + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "project_id", instanceResource["project_id"]), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "instance_id"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "name", instanceResource["name"]+"-new"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "plan_name", instanceResource["new_plan_name"]), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "acl.#", "2"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "acl.0", instanceResource["acl-0"]), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "acl.1", instanceResource["acl-1-updated"]), + + // Alert Config + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.group_by.#", "1"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.group_by.0", 
"alertname"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.group_interval", "10m"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.group_wait", "1m"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.repeat_interval", "1h"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.global.resolve_timeout", "5m"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.global.opsgenie_api_key", "example-api-key"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.global.opsgenie_api_url", "https://api.eu.opsgenie.com"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.receiver", "OpsGenieReceiverInfo"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.group_by.#", "1"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.group_by.0", "alertname"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.group_interval", "10m"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.group_wait", "1m"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.repeat_interval", "1h"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.routes.#", "2"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.routes.0.match.severity", "critical"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.routes.0.receiver", "OpsGenieReceiverInfo"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", 
"alert_config.route.routes.1.match.severity", "warning"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.route.routes.1.receiver", "WebhookReceiverInfo"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.receivers.#", "2"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.receivers.0.name", "OpsGenieReceiverInfo"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.receivers.0.opsgenie_configs.#", "1"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.receivers.0.opsgenie_configs.0.tags", "iam,observability-alert"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.receivers.0.opsgenie_configs.0.api_key", + "example-api-key"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.receivers.1.name", "WebhookReceiverInfo"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.receivers.1.webhooks_configs.#", "1"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.receivers.1.webhooks_configs.0.url", "https://example.com"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "alert_config.receivers.1.webhooks_configs.0.ms_teams", "true"), + + // Scrape Config + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "name", scrapeConfigResource["name"]), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "targets.#", "0"), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "metrics_path", scrapeConfigResource["metrics_path"]), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "scheme", scrapeConfigResource["scheme"]), + 
resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "scrape_interval", scrapeConfigResource["scrape_interval"]), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "sample_limit", scrapeConfigResource["sample_limit"]), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "saml2.%", "1"), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "saml2.enable_url_parameters", "true"), + + // Credentials + resource.TestCheckResourceAttrSet("stackit_observability_credential.credential", "username"), + resource.TestCheckResourceAttrSet("stackit_observability_credential.credential", "password"), + ), + }, + // Update and remove saml2 attribute + { + Config: fmt.Sprintf(` + %s + + resource "stackit_observability_instance" "instance" { + project_id = "%s" + name = "%s" + plan_name = "%s" + } + + resource "stackit_observability_scrapeconfig" "scrapeconfig" { + project_id = stackit_observability_instance.instance.project_id + instance_id = stackit_observability_instance.instance.instance_id + name = "%s" + targets = [%s] + scrape_interval = "%s" + sample_limit = %s + metrics_path = "%s" + saml2 = { + enable_url_parameters = false + } + } + `, + testutil.ObservabilityProviderConfig(), + instanceResource["project_id"], + instanceResource["name"], + instanceResource["new_plan_name"], + scrapeConfigResource["name"], + scrapeConfigResource["urls"], + scrapeConfigResource["scrape_interval"], + scrapeConfigResource["sample_limit"], + scrapeConfigResource["metrics_path"], + ), + Check: resource.ComposeAggregateTestCheckFunc( + // Instance + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "project_id", instanceResource["project_id"]), + resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "instance_id"), + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "name", instanceResource["name"]), + 
resource.TestCheckResourceAttr("stackit_observability_instance.instance", "plan_name", instanceResource["new_plan_name"]), + + // ACL + resource.TestCheckResourceAttr("stackit_observability_instance.instance", "acl.#", "0"), + + // Scrape Config + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "name", scrapeConfigResource["name"]), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "targets.#", "1"), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "metrics_path", scrapeConfigResource["metrics_path"]), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "scheme", scrapeConfigResource["scheme"]), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "scrape_interval", scrapeConfigResource["scrape_interval"]), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "sample_limit", scrapeConfigResource["sample_limit"]), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "saml2.%", "1"), + resource.TestCheckResourceAttr("stackit_observability_scrapeconfig.scrapeconfig", "saml2.enable_url_parameters", "false"), + ), + }, + + // Deletion is done by the framework implicitly + }, + }) +} + +func testAccCheckObservabilityDestroy(s *terraform.State) error { + ctx := context.Background() + var client *observability.APIClient + var err error + if testutil.ObservabilityCustomEndpoint == "" { + client, err = observability.NewAPIClient( + config.WithRegion("eu01"), + ) + } else { + client, err = observability.NewAPIClient( + config.WithEndpoint(testutil.ObservabilityCustomEndpoint), + ) + } + if err != nil { + return fmt.Errorf("creating client: %w", err) + } + + instancesToDestroy := []string{} + for _, rs := range s.RootModule().Resources { + if rs.Type != "stackit_observability_instance" { + continue + } + // instance terraform ID: = 
"[project_id],[instance_id],[name]" + instanceId := strings.Split(rs.Primary.ID, core.Separator)[1] + instancesToDestroy = append(instancesToDestroy, instanceId) + } + + instancesResp, err := client.ListInstances(ctx, testutil.ProjectId).Execute() + if err != nil { + return fmt.Errorf("getting instancesResp: %w", err) + } + + instances := *instancesResp.Instances + for i := range instances { + if utils.Contains(instancesToDestroy, *instances[i].Id) { + if *instances[i].Status != wait.DeleteSuccess { + _, err := client.DeleteInstanceExecute(ctx, testutil.ProjectId, *instances[i].Id) + if err != nil { + return fmt.Errorf("destroying instance %s during CheckDestroy: %w", *instances[i].Id, err) + } + _, err = wait.DeleteInstanceWaitHandler(ctx, client, testutil.ProjectId, *instances[i].Id).WaitWithContext(ctx) + if err != nil { + return fmt.Errorf("destroying instance %s during CheckDestroy: waiting for deletion %w", *instances[i].Id, err) + } + } + } + } + return nil +} diff --git a/stackit/internal/services/observability/scrapeconfig/datasource.go b/stackit/internal/services/observability/scrapeconfig/datasource.go new file mode 100644 index 00000000..a5aba2f9 --- /dev/null +++ b/stackit/internal/services/observability/scrapeconfig/datasource.go @@ -0,0 +1,236 @@ +package observability + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/mapvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + 
"github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/core/oapierror" + "github.com/stackitcloud/stackit-sdk-go/services/observability" + "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/validate" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &scrapeConfigDataSource{} +) + +// NewScrapeConfigDataSource is a helper function to simplify the provider implementation. +func NewScrapeConfigDataSource() datasource.DataSource { + return &scrapeConfigDataSource{} +} + +// scrapeConfigDataSource is the data source implementation. +type scrapeConfigDataSource struct { + client *observability.APIClient +} + +// Metadata returns the data source type name. +func (d *scrapeConfigDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_observability_scrapeconfig" +} + +func (d *scrapeConfigDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. 
+ if req.ProviderData == nil { + return + } + + var apiClient *observability.APIClient + var err error + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Expected configure type stackit.ProviderData, got %T", req.ProviderData)) + return + } + + if providerData.ObservabilityCustomEndpoint != "" { + apiClient, err = observability.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.ObservabilityCustomEndpoint), + ) + } else { + apiClient, err = observability.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Configuring client: %v. This is an error related to the provider configuration, not to the data source configuration", err)) + return + } + d.client = apiClient +} + +// Schema defines the schema for the data source. +func (d *scrapeConfigDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "Observability scrape config data source schema. Must have a `region` specified in the provider configuration.", + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: "Terraform's internal data source. ID. 
It is structured as \"`project_id`,`instance_id`,`name`\".", + Computed: true, + }, + "project_id": schema.StringAttribute{ + Description: "STACKIT project ID to which the scraping job is associated.", + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "instance_id": schema.StringAttribute{ + Description: "Observability instance ID to which the scraping job is associated.", + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "name": schema.StringAttribute{ + Description: "Specifies the name of the scraping job", + Required: true, + Validators: []validator.String{ + validate.NoSeparator(), + stringvalidator.LengthBetween(1, 200), + }, + }, + "metrics_path": schema.StringAttribute{ + Description: "Specifies the job scraping url path.", + Computed: true, + }, + + "scheme": schema.StringAttribute{ + Description: "Specifies the http scheme.", + Computed: true, + }, + + "scrape_interval": schema.StringAttribute{ + Description: "Specifies the scrape interval as duration string.", + Validators: []validator.String{ + stringvalidator.LengthBetween(2, 8), + }, + Computed: true, + }, + + "sample_limit": schema.Int64Attribute{ + Description: "Specifies the scrape sample limit.", + Computed: true, + Validators: []validator.Int64{ + int64validator.Between(1, 3000000), + }, + }, + + "scrape_timeout": schema.StringAttribute{ + Description: "Specifies the scrape timeout as duration string.", + Computed: true, + }, + "saml2": schema.SingleNestedAttribute{ + Description: "A SAML2 configuration block.", + Computed: true, + Attributes: map[string]schema.Attribute{ + "enable_url_parameters": schema.BoolAttribute{ + Description: "Specifies if URL parameters are enabled", + Computed: true, + }, + }, + }, + "basic_auth": schema.SingleNestedAttribute{ + Description: "A basic authentication block.", + Computed: true, + Attributes: map[string]schema.Attribute{ + "username": 
schema.StringAttribute{ + Description: "Specifies basic auth username.", + Computed: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 200), + }, + }, + "password": schema.StringAttribute{ + Description: "Specifies basic auth password.", + Computed: true, + Sensitive: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 200), + }, + }, + }, + }, + "targets": schema.ListNestedAttribute{ + Description: "The targets list (specified by the static config).", + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "urls": schema.ListAttribute{ + Description: "Specifies target URLs.", + Computed: true, + ElementType: types.StringType, + Validators: []validator.List{ + listvalidator.ValueStringsAre( + stringvalidator.LengthBetween(1, 500), + ), + }, + }, + "labels": schema.MapAttribute{ + Description: "Specifies labels.", + Computed: true, + ElementType: types.StringType, + Validators: []validator.Map{ + mapvalidator.SizeAtMost(10), + mapvalidator.ValueStringsAre(stringvalidator.LengthBetween(0, 200)), + mapvalidator.KeysAre(stringvalidator.LengthBetween(0, 200)), + }, + }, + }, + }, + }, + }, + } +} + +// Read refreshes the Terraform state with the latest data. +func (d *scrapeConfigDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.Config.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + scName := model.Name.ValueString() + + scResp, err := d.client.GetScrapeConfig(ctx, instanceId, scName, projectId).Execute() + if err != nil { + oapiErr, ok := err.(*oapierror.GenericOpenAPIError) //nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped + if ok && oapiErr.StatusCode == http.StatusNotFound { + resp.State.RemoveResource(ctx) + } + core.LogAndAddError(ctx, &resp.Diagnostics, "Unable to read scrape config", err.Error()) + return + } + + err = mapFields(ctx, scResp.Data, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Mapping fields", err.Error()) + return + } + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + tflog.Info(ctx, "Observability scrape config read") +} diff --git a/stackit/internal/services/observability/scrapeconfig/resource.go b/stackit/internal/services/observability/scrapeconfig/resource.go new file mode 100644 index 00000000..173b731e --- /dev/null +++ b/stackit/internal/services/observability/scrapeconfig/resource.go @@ -0,0 +1,869 @@ +package observability + +import ( + "context" + "fmt" + "net/http" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/mapvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + 
"github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64default" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/objectdefault" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/core/oapierror" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/observability" + "github.com/stackitcloud/stackit-sdk-go/services/observability/wait" + "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/conversion" + "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/validate" +) + +const ( + DefaultScheme = "https" // API default is "http" + DefaultScrapeInterval = "5m" + DefaultScrapeTimeout = "2m" + DefaultSampleLimit = int64(5000) + DefaultSAML2EnableURLParameters = true +) + +// Ensure the implementation satisfies the expected interfaces. 
+var ( + _ resource.Resource = &scrapeConfigResource{} + _ resource.ResourceWithConfigure = &scrapeConfigResource{} + _ resource.ResourceWithImportState = &scrapeConfigResource{} +) + +type Model struct { + Id types.String `tfsdk:"id"` // needed by TF + ProjectId types.String `tfsdk:"project_id"` + InstanceId types.String `tfsdk:"instance_id"` + Name types.String `tfsdk:"name"` + MetricsPath types.String `tfsdk:"metrics_path"` + Scheme types.String `tfsdk:"scheme"` + ScrapeInterval types.String `tfsdk:"scrape_interval"` + ScrapeTimeout types.String `tfsdk:"scrape_timeout"` + SampleLimit types.Int64 `tfsdk:"sample_limit"` + SAML2 types.Object `tfsdk:"saml2"` + BasicAuth types.Object `tfsdk:"basic_auth"` + Targets types.List `tfsdk:"targets"` +} + +// Struct corresponding to Model.SAML2 +type saml2Model struct { + EnableURLParameters types.Bool `tfsdk:"enable_url_parameters"` +} + +// Types corresponding to saml2Model +var saml2Types = map[string]attr.Type{ + "enable_url_parameters": types.BoolType, +} + +// Struct corresponding to Model.BasicAuth +type basicAuthModel struct { + Username types.String `tfsdk:"username"` + Password types.String `tfsdk:"password"` +} + +// Types corresponding to basicAuthModel +var basicAuthTypes = map[string]attr.Type{ + "username": types.StringType, + "password": types.StringType, +} + +// Struct corresponding to Model.Targets[i] +type targetModel struct { + URLs types.List `tfsdk:"urls"` + Labels types.Map `tfsdk:"labels"` +} + +// Types corresponding to targetModel +var targetTypes = map[string]attr.Type{ + "urls": types.ListType{ElemType: types.StringType}, + "labels": types.MapType{ElemType: types.StringType}, +} + +// NewScrapeConfigResource is a helper function to simplify the provider implementation. +func NewScrapeConfigResource() resource.Resource { + return &scrapeConfigResource{} +} + +// scrapeConfigResource is the resource implementation. 
+type scrapeConfigResource struct { + client *observability.APIClient +} + +// Metadata returns the resource type name. +func (r *scrapeConfigResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_observability_scrapeconfig" +} + +// Configure adds the provider configured client to the resource. +func (r *scrapeConfigResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Expected configure type stackit.ProviderData, got %T", req.ProviderData)) + return + } + + var apiClient *observability.APIClient + var err error + if providerData.ObservabilityCustomEndpoint != "" { + apiClient, err = observability.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.ObservabilityCustomEndpoint), + ) + } else { + apiClient, err = observability.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Configuring client: %v. This is an error related to the provider configuration, not to the resource configuration", err)) + return + } + r.client = apiClient + tflog.Info(ctx, "Observability scrape config client configured") +} + +// Schema defines the schema for the resource. +func (r *scrapeConfigResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "Observability scrape config resource schema. 
Must have a `region` specified in the provider configuration.", + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: "Terraform's internal resource ID. It is structured as \"`project_id`,`instance_id`,`name`\".", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "project_id": schema.StringAttribute{ + Description: "STACKIT project ID to which the scraping job is associated.", + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "instance_id": schema.StringAttribute{ + Description: "Observability instance ID to which the scraping job is associated.", + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "name": schema.StringAttribute{ + Description: "Specifies the name of the scraping job.", + Required: true, + Validators: []validator.String{ + validate.NoSeparator(), + stringvalidator.LengthBetween(1, 200), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "metrics_path": schema.StringAttribute{ + Description: "Specifies the job scraping url path. E.g. `/metrics`.", + Required: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 200), + }, + }, + + "scheme": schema.StringAttribute{ + Description: "Specifies the http scheme. Defaults to `https`.", + Optional: true, + Computed: true, + Default: stringdefault.StaticString(DefaultScheme), + }, + "scrape_interval": schema.StringAttribute{ + Description: "Specifies the scrape interval as duration string. 
Defaults to `5m`.", + Optional: true, + Computed: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(2, 8), + }, + Default: stringdefault.StaticString(DefaultScrapeInterval), + }, + "scrape_timeout": schema.StringAttribute{ + Description: "Specifies the scrape timeout as duration string. Defaults to `2m`.", + Optional: true, + Computed: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(2, 8), + }, + Default: stringdefault.StaticString(DefaultScrapeTimeout), + }, + "sample_limit": schema.Int64Attribute{ + Description: "Specifies the scrape sample limit. Upper limit depends on the service plan. Defaults to `5000`.", + Optional: true, + Computed: true, + Validators: []validator.Int64{ + int64validator.Between(1, 3000000), + }, + Default: int64default.StaticInt64(DefaultSampleLimit), + }, + "saml2": schema.SingleNestedAttribute{ + Description: "A SAML2 configuration block.", + Optional: true, + Computed: true, + Default: objectdefault.StaticValue( + types.ObjectValueMust( + map[string]attr.Type{ + "enable_url_parameters": types.BoolType, + }, + map[string]attr.Value{ + "enable_url_parameters": types.BoolValue(DefaultSAML2EnableURLParameters), + }, + ), + ), + Attributes: map[string]schema.Attribute{ + "enable_url_parameters": schema.BoolAttribute{ + Description: "Specifies if URL parameters are enabled. 
Defaults to `true`", + Optional: true, + Computed: true, + Default: booldefault.StaticBool(DefaultSAML2EnableURLParameters), + }, + }, + }, + "basic_auth": schema.SingleNestedAttribute{ + Description: "A basic authentication block.", + Optional: true, + Computed: true, + Attributes: map[string]schema.Attribute{ + "username": schema.StringAttribute{ + Description: "Specifies basic auth username.", + Required: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 200), + }, + }, + "password": schema.StringAttribute{ + Description: "Specifies basic auth password.", + Required: true, + Sensitive: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 200), + }, + }, + }, + }, + "targets": schema.ListNestedAttribute{ + Description: "The targets list (specified by the static config).", + Required: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "urls": schema.ListAttribute{ + Description: "Specifies target URLs.", + Required: true, + ElementType: types.StringType, + Validators: []validator.List{ + listvalidator.ValueStringsAre( + stringvalidator.LengthBetween(1, 500), + ), + }, + }, + "labels": schema.MapAttribute{ + Description: "Specifies labels.", + Optional: true, + ElementType: types.StringType, + Validators: []validator.Map{ + mapvalidator.SizeAtMost(10), + mapvalidator.ValueStringsAre(stringvalidator.LengthBetween(0, 200)), + mapvalidator.KeysAre(stringvalidator.LengthBetween(0, 200)), + }, + }, + }, + }, + }, + }, + } +} + +// Create creates the resource and sets the initial Terraform state. +func (r *scrapeConfigResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform + // Retrieve values from plan + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + scName := model.Name.ValueString() + + saml2Model := saml2Model{} + if !model.SAML2.IsNull() && !model.SAML2.IsUnknown() { + diags = model.SAML2.As(ctx, &saml2Model, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + + basicAuthModel := basicAuthModel{} + if !model.BasicAuth.IsNull() && !model.BasicAuth.IsUnknown() { + diags = model.BasicAuth.As(ctx, &basicAuthModel, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + + targetsModel := []targetModel{} + if !model.Targets.IsNull() && !model.Targets.IsUnknown() { + diags = model.Targets.ElementsAs(ctx, &targetsModel, false) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + + // Generate API request body from model + payload, err := toCreatePayload(ctx, &model, &saml2Model, &basicAuthModel, targetsModel) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating scrape config", fmt.Sprintf("Creating API payload: %v", err)) + return + } + _, err = r.client.CreateScrapeConfig(ctx, instanceId, projectId).CreateScrapeConfigPayload(*payload).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating scrape config", fmt.Sprintf("Calling API: %v", err)) + return + } + _, err = wait.CreateScrapeConfigWaitHandler(ctx, r.client, instanceId, scName, projectId).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating scrape config", fmt.Sprintf("Scrape config creation waiting: %v", err)) + return + } + got, err := r.client.GetScrapeConfig(ctx, instanceId, scName, projectId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating scrape config", fmt.Sprintf("Calling API for updated data: 
%v", err)) + return + } + err = mapFields(ctx, got.Data, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating scrape config", fmt.Sprintf("Processing API payload: %v", err)) + return + } + // Set state to fully populated data + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + tflog.Info(ctx, "Observability scrape config created") +} + +// Read refreshes the Terraform state with the latest data. +func (r *scrapeConfigResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + scName := model.Name.ValueString() + + scResp, err := r.client.GetScrapeConfig(ctx, instanceId, scName, projectId).Execute() + if err != nil { + oapiErr, ok := err.(*oapierror.GenericOpenAPIError) //nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped + if ok && oapiErr.StatusCode == http.StatusNotFound { + resp.State.RemoveResource(ctx) + return + } + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading scrape config", fmt.Sprintf("Calling API: %v", err)) + return + } + + // Map response body to schema + err = mapFields(ctx, scResp.Data, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading scrape config", fmt.Sprintf("Processing API payload: %v", err)) + return + } + // Set refreshed model + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + tflog.Info(ctx, "Observability scrape config read") +} + +// Update updates the resource and sets the updated Terraform state on success. 
+func (r *scrapeConfigResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform + // Retrieve values from plan + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + scName := model.Name.ValueString() + + saml2Model := saml2Model{} + if !model.SAML2.IsNull() && !model.SAML2.IsUnknown() { + diags = model.SAML2.As(ctx, &saml2Model, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + + basicAuthModel := basicAuthModel{} + if !model.BasicAuth.IsNull() && !model.BasicAuth.IsUnknown() { + diags = model.BasicAuth.As(ctx, &basicAuthModel, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + + targetsModel := []targetModel{} + if !model.Targets.IsNull() && !model.Targets.IsUnknown() { + diags = model.Targets.ElementsAs(ctx, &targetsModel, false) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + + // Generate API request body from model + payload, err := toUpdatePayload(ctx, &model, &saml2Model, &basicAuthModel, targetsModel) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating scrape config", fmt.Sprintf("Creating API payload: %v", err)) + return + } + _, err = r.client.UpdateScrapeConfig(ctx, instanceId, scName, projectId).UpdateScrapeConfigPayload(*payload).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating scrape config", fmt.Sprintf("Calling API: %v", err)) + return + } + // We do not have an update status provided by the observability scrape config api, so we cannot use a waiter here, hence a simple sleep is used. 
+ time.Sleep(15 * time.Second) + + // Fetch updated ScrapeConfig + scResp, err := r.client.GetScrapeConfig(ctx, instanceId, scName, projectId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating scrape config", fmt.Sprintf("Calling API for updated data: %v", err)) + return + } + err = mapFields(ctx, scResp.Data, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating scrape config", fmt.Sprintf("Processing API payload: %v", err)) + return + } + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + tflog.Info(ctx, "Observability scrape config updated") +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *scrapeConfigResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform + // Retrieve values from state + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + scName := model.Name.ValueString() + + // Delete existing ScrapeConfig + _, err := r.client.DeleteScrapeConfig(ctx, instanceId, scName, projectId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting scrape config", fmt.Sprintf("Calling API: %v", err)) + return + } + _, err = wait.DeleteScrapeConfigWaitHandler(ctx, r.client, instanceId, scName, projectId).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting scrape config", fmt.Sprintf("Scrape config deletion waiting: %v", err)) + return + } + + tflog.Info(ctx, "Observability scrape config deleted") +} + +// ImportState imports a resource into the Terraform state on success. 
+// The expected format of the resource import identifier is: project_id,instance_id,name +func (r *scrapeConfigResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + idParts := strings.Split(req.ID, core.Separator) + + if len(idParts) != 3 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" { + core.LogAndAddError(ctx, &resp.Diagnostics, + "Error importing scrape config", + fmt.Sprintf("Expected import identifier with format: [project_id],[instance_id],[name] Got: %q", req.ID), + ) + return + } + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), idParts[1])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("name"), idParts[2])...) + tflog.Info(ctx, "Observability scrape config state imported") +} + +func mapFields(ctx context.Context, sc *observability.Job, model *Model) error { + if sc == nil { + return fmt.Errorf("response input is nil") + } + if model == nil { + return fmt.Errorf("model input is nil") + } + + var scName string + if model.Name.ValueString() != "" { + scName = model.Name.ValueString() + } else if sc.JobName != nil { + scName = *sc.JobName + } else { + return fmt.Errorf("scrape config name not present") + } + + idParts := []string{ + model.ProjectId.ValueString(), + model.InstanceId.ValueString(), + scName, + } + model.Id = types.StringValue( + strings.Join(idParts, core.Separator), + ) + model.Name = types.StringValue(scName) + + model.MetricsPath = types.StringPointerValue(sc.MetricsPath) + model.Scheme = types.StringPointerValue(sc.Scheme) + model.ScrapeInterval = types.StringPointerValue(sc.ScrapeInterval) + model.ScrapeTimeout = types.StringPointerValue(sc.ScrapeTimeout) + model.SampleLimit = types.Int64PointerValue(sc.SampleLimit) + err := mapSAML2(sc, model) + if err != nil { + return fmt.Errorf("map saml2: %w", err) + } + err = 
mapBasicAuth(sc, model) + if err != nil { + return fmt.Errorf("map basic auth: %w", err) + } + err = mapTargets(ctx, sc, model) + if err != nil { + return fmt.Errorf("map targets: %w", err) + } + return nil +} + +func mapBasicAuth(sc *observability.Job, model *Model) error { + if sc.BasicAuth == nil { + model.BasicAuth = types.ObjectNull(basicAuthTypes) + return nil + } + basicAuthMap := map[string]attr.Value{ + "username": types.StringValue(*sc.BasicAuth.Username), + "password": types.StringValue(*sc.BasicAuth.Password), + } + basicAuthTF, diags := types.ObjectValue(basicAuthTypes, basicAuthMap) + if diags.HasError() { + return core.DiagsToError(diags) + } + model.BasicAuth = basicAuthTF + return nil +} + +func mapSAML2(sc *observability.Job, model *Model) error { + if (sc.Params == nil || *sc.Params == nil) && model.SAML2.IsNull() { + return nil + } + + if model.SAML2.IsNull() || model.SAML2.IsUnknown() { + model.SAML2 = types.ObjectNull(saml2Types) + } + + flag := true + if sc.Params == nil || *sc.Params == nil { + return nil + } + p := *sc.Params + if v, ok := p["saml2"]; ok { + if len(v) == 1 && v[0] == "disabled" { + flag = false + } + } + + saml2Map := map[string]attr.Value{ + "enable_url_parameters": types.BoolValue(flag), + } + saml2TF, diags := types.ObjectValue(saml2Types, saml2Map) + if diags.HasError() { + return core.DiagsToError(diags) + } + model.SAML2 = saml2TF + return nil +} + +func mapTargets(ctx context.Context, sc *observability.Job, model *Model) error { + if sc == nil || sc.StaticConfigs == nil { + model.Targets = types.ListNull(types.ObjectType{AttrTypes: targetTypes}) + return nil + } + + targetsModel := []targetModel{} + if !model.Targets.IsNull() && !model.Targets.IsUnknown() { + diags := model.Targets.ElementsAs(ctx, &targetsModel, false) + if diags.HasError() { + return core.DiagsToError(diags) + } + } + + newTargets := []attr.Value{} + for i, sc := range *sc.StaticConfigs { + nt := targetModel{} + + // Map URLs + urls := 
[]attr.Value{} + if sc.Targets != nil { + for _, v := range *sc.Targets { + urls = append(urls, types.StringValue(v)) + } + } + nt.URLs = types.ListValueMust(types.StringType, urls) + + // Map Labels + if len(model.Targets.Elements()) > i && targetsModel[i].Labels.IsNull() || sc.Labels == nil { + nt.Labels = types.MapNull(types.StringType) + } else { + newl := map[string]attr.Value{} + for k, v := range *sc.Labels { + newl[k] = types.StringValue(v) + } + nt.Labels = types.MapValueMust(types.StringType, newl) + } + + // Build target + targetMap := map[string]attr.Value{ + "urls": nt.URLs, + "labels": nt.Labels, + } + targetTF, diags := types.ObjectValue(targetTypes, targetMap) + if diags.HasError() { + return core.DiagsToError(diags) + } + + newTargets = append(newTargets, targetTF) + } + + targetsTF, diags := types.ListValue(types.ObjectType{AttrTypes: targetTypes}, newTargets) + if diags.HasError() { + return core.DiagsToError(diags) + } + + model.Targets = targetsTF + return nil +} + +func toCreatePayload(ctx context.Context, model *Model, saml2Model *saml2Model, basicAuthModel *basicAuthModel, targetsModel []targetModel) (*observability.CreateScrapeConfigPayload, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + + sc := observability.CreateScrapeConfigPayload{ + JobName: conversion.StringValueToPointer(model.Name), + MetricsPath: conversion.StringValueToPointer(model.MetricsPath), + ScrapeInterval: conversion.StringValueToPointer(model.ScrapeInterval), + ScrapeTimeout: conversion.StringValueToPointer(model.ScrapeTimeout), + // potentially lossy conversion, depending on the allowed range for sample_limit + SampleLimit: utils.Ptr(float64(model.SampleLimit.ValueInt64())), + Scheme: conversion.StringValueToPointer(model.Scheme), + } + setDefaultsCreateScrapeConfig(&sc, model, saml2Model) + + if !saml2Model.EnableURLParameters.IsNull() && !saml2Model.EnableURLParameters.IsUnknown() { + m := make(map[string]interface{}) + if sc.Params != nil { + 
m = *sc.Params + } + if saml2Model.EnableURLParameters.ValueBool() { + m["saml2"] = []string{"enabled"} + } else { + m["saml2"] = []string{"disabled"} + } + sc.Params = &m + } + + if sc.BasicAuth == nil && !basicAuthModel.Username.IsNull() && !basicAuthModel.Password.IsNull() { + sc.BasicAuth = &observability.CreateScrapeConfigPayloadBasicAuth{ + Username: conversion.StringValueToPointer(basicAuthModel.Username), + Password: conversion.StringValueToPointer(basicAuthModel.Password), + } + } + + t := make([]observability.CreateScrapeConfigPayloadStaticConfigsInner, len(targetsModel)) + for i, target := range targetsModel { + ti := observability.CreateScrapeConfigPayloadStaticConfigsInner{} + + urls := []string{} + diags := target.URLs.ElementsAs(ctx, &urls, false) + if diags.HasError() { + return nil, core.DiagsToError(diags) + } + ti.Targets = &urls + + labels := map[string]interface{}{} + for k, v := range target.Labels.Elements() { + labels[k], _ = conversion.ToString(ctx, v) + } + ti.Labels = &labels + t[i] = ti + } + sc.StaticConfigs = &t + + return &sc, nil +} + +func setDefaultsCreateScrapeConfig(sc *observability.CreateScrapeConfigPayload, model *Model, saml2Model *saml2Model) { + if sc == nil { + return + } + if model.Scheme.IsNull() || model.Scheme.IsUnknown() { + sc.Scheme = utils.Ptr(DefaultScheme) + } + if model.ScrapeInterval.IsNull() || model.ScrapeInterval.IsUnknown() { + sc.ScrapeInterval = utils.Ptr(DefaultScrapeInterval) + } + if model.ScrapeTimeout.IsNull() || model.ScrapeTimeout.IsUnknown() { + sc.ScrapeTimeout = utils.Ptr(DefaultScrapeTimeout) + } + if model.SampleLimit.IsNull() || model.SampleLimit.IsUnknown() { + sc.SampleLimit = utils.Ptr(float64(DefaultSampleLimit)) + } + // Make the API default more explicit by setting the field. 
+ if saml2Model.EnableURLParameters.IsNull() || saml2Model.EnableURLParameters.IsUnknown() { + m := map[string]interface{}{} + if sc.Params != nil { + m = *sc.Params + } + if DefaultSAML2EnableURLParameters { + m["saml2"] = []string{"enabled"} + } else { + m["saml2"] = []string{"disabled"} + } + sc.Params = &m + } +} + +func toUpdatePayload(ctx context.Context, model *Model, saml2Model *saml2Model, basicAuthModel *basicAuthModel, targetsModel []targetModel) (*observability.UpdateScrapeConfigPayload, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + + sc := observability.UpdateScrapeConfigPayload{ + MetricsPath: conversion.StringValueToPointer(model.MetricsPath), + ScrapeInterval: conversion.StringValueToPointer(model.ScrapeInterval), + ScrapeTimeout: conversion.StringValueToPointer(model.ScrapeTimeout), + // potentially lossy conversion, depending on the allowed range for sample_limit + SampleLimit: utils.Ptr(float64(model.SampleLimit.ValueInt64())), + Scheme: conversion.StringValueToPointer(model.Scheme), + } + setDefaultsUpdateScrapeConfig(&sc, model) + + if !saml2Model.EnableURLParameters.IsNull() && !saml2Model.EnableURLParameters.IsUnknown() { + m := make(map[string]interface{}) + if sc.Params != nil { + m = *sc.Params + } + if saml2Model.EnableURLParameters.ValueBool() { + m["saml2"] = []string{"enabled"} + } else { + m["saml2"] = []string{"disabled"} + } + sc.Params = &m + } + + if sc.BasicAuth == nil && !basicAuthModel.Username.IsNull() && !basicAuthModel.Password.IsNull() { + sc.BasicAuth = &observability.CreateScrapeConfigPayloadBasicAuth{ + Username: conversion.StringValueToPointer(basicAuthModel.Username), + Password: conversion.StringValueToPointer(basicAuthModel.Password), + } + } + + t := make([]observability.UpdateScrapeConfigPayloadStaticConfigsInner, len(targetsModel)) + for i, target := range targetsModel { + ti := observability.UpdateScrapeConfigPayloadStaticConfigsInner{} + + urls := []string{} + diags := 
target.URLs.ElementsAs(ctx, &urls, false) + if diags.HasError() { + return nil, core.DiagsToError(diags) + } + ti.Targets = &urls + + ls := map[string]interface{}{} + for k, v := range target.Labels.Elements() { + ls[k], _ = conversion.ToString(ctx, v) + } + ti.Labels = &ls + t[i] = ti + } + sc.StaticConfigs = &t + + return &sc, nil +} + +func setDefaultsUpdateScrapeConfig(sc *observability.UpdateScrapeConfigPayload, model *Model) { + if sc == nil { + return + } + if model.Scheme.IsNull() || model.Scheme.IsUnknown() { + sc.Scheme = utils.Ptr(DefaultScheme) + } + if model.ScrapeInterval.IsNull() || model.ScrapeInterval.IsUnknown() { + sc.ScrapeInterval = utils.Ptr(DefaultScrapeInterval) + } + if model.ScrapeTimeout.IsNull() || model.ScrapeTimeout.IsUnknown() { + sc.ScrapeTimeout = utils.Ptr(DefaultScrapeTimeout) + } + if model.SampleLimit.IsNull() || model.SampleLimit.IsUnknown() { + sc.SampleLimit = utils.Ptr(float64(DefaultSampleLimit)) + } +} diff --git a/stackit/internal/services/observability/scrapeconfig/resource_test.go b/stackit/internal/services/observability/scrapeconfig/resource_test.go new file mode 100644 index 00000000..9ce64c6e --- /dev/null +++ b/stackit/internal/services/observability/scrapeconfig/resource_test.go @@ -0,0 +1,504 @@ +package observability + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/observability" +) + +func TestMapFields(t *testing.T) { + tests := []struct { + description string + input *observability.Job + expected Model + isValid bool + }{ + { + "default_ok", + &observability.Job{ + JobName: utils.Ptr("name"), + }, + Model{ + Id: types.StringValue("pid,iid,name"), + ProjectId: types.StringValue("pid"), + InstanceId: types.StringValue("iid"), + Name: types.StringValue("name"), + 
MetricsPath: types.StringNull(), + Scheme: types.StringNull(), + ScrapeInterval: types.StringNull(), + ScrapeTimeout: types.StringNull(), + SAML2: types.ObjectNull(saml2Types), + BasicAuth: types.ObjectNull(basicAuthTypes), + Targets: types.ListNull(types.ObjectType{AttrTypes: targetTypes}), + }, + true, + }, + { + description: "values_ok", + input: &observability.Job{ + JobName: utils.Ptr("name"), + MetricsPath: utils.Ptr("/m"), + BasicAuth: &observability.BasicAuth{ + Password: utils.Ptr("p"), + Username: utils.Ptr("u"), + }, + Params: &map[string][]string{"saml2": {"disabled"}, "x": {"y", "z"}}, + Scheme: utils.Ptr("scheme"), + ScrapeInterval: utils.Ptr("1"), + ScrapeTimeout: utils.Ptr("2"), + SampleLimit: utils.Ptr(int64(17)), + StaticConfigs: &[]observability.StaticConfigs{ + { + Labels: &map[string]string{"k1": "v1"}, + Targets: &[]string{"url1"}, + }, + { + Labels: &map[string]string{"k2": "v2", "k3": "v3"}, + Targets: &[]string{"url1", "url3"}, + }, + { + Labels: nil, + Targets: &[]string{}, + }, + }, + }, + expected: Model{ + Id: types.StringValue("pid,iid,name"), + ProjectId: types.StringValue("pid"), + InstanceId: types.StringValue("iid"), + Name: types.StringValue("name"), + MetricsPath: types.StringValue("/m"), + Scheme: types.StringValue("scheme"), + ScrapeInterval: types.StringValue("1"), + ScrapeTimeout: types.StringValue("2"), + SampleLimit: types.Int64Value(17), + SAML2: types.ObjectValueMust(saml2Types, map[string]attr.Value{ + "enable_url_parameters": types.BoolValue(false), + }), + BasicAuth: types.ObjectValueMust(basicAuthTypes, map[string]attr.Value{ + "username": types.StringValue("u"), + "password": types.StringValue("p"), + }), + Targets: types.ListValueMust(types.ObjectType{AttrTypes: targetTypes}, []attr.Value{ + types.ObjectValueMust(targetTypes, map[string]attr.Value{ + "urls": types.ListValueMust(types.StringType, []attr.Value{types.StringValue("url1")}), + "labels": types.MapValueMust(types.StringType, map[string]attr.Value{ + "k1": 
types.StringValue("v1"), + }), + }), + types.ObjectValueMust(targetTypes, map[string]attr.Value{ + "urls": types.ListValueMust(types.StringType, []attr.Value{types.StringValue("url1"), types.StringValue("url3")}), + "labels": types.MapValueMust(types.StringType, map[string]attr.Value{ + "k2": types.StringValue("v2"), + "k3": types.StringValue("v3"), + }), + }), + types.ObjectValueMust(targetTypes, map[string]attr.Value{ + "urls": types.ListValueMust(types.StringType, []attr.Value{}), + "labels": types.MapNull(types.StringType), + }), + }), + }, + isValid: true, + }, + { + "response_nil_fail", + nil, + Model{}, + false, + }, + { + "no_resource_id", + &observability.Job{}, + Model{}, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + state := &Model{ + ProjectId: tt.expected.ProjectId, + InstanceId: tt.expected.InstanceId, + } + err := mapFields(context.Background(), tt.input, state) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(state, &tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func TestToCreatePayload(t *testing.T) { + tests := []struct { + description string + input *Model + inputSAML2 *saml2Model + inputBasicAuth *basicAuthModel + inputTargets []targetModel + expected *observability.CreateScrapeConfigPayload + isValid bool + }{ + { + "basic_ok", + &Model{ + MetricsPath: types.StringValue("/metrics"), + }, + &saml2Model{}, + &basicAuthModel{}, + []targetModel{}, + &observability.CreateScrapeConfigPayload{ + MetricsPath: utils.Ptr("/metrics"), + // Defaults + Scheme: utils.Ptr("https"), + ScrapeInterval: utils.Ptr("5m"), + ScrapeTimeout: utils.Ptr("2m"), + SampleLimit: utils.Ptr(float64(5000)), + StaticConfigs: &[]observability.CreateScrapeConfigPayloadStaticConfigsInner{}, + Params: &map[string]any{"saml2": 
[]string{"enabled"}}, + }, + true, + }, + { + "ok - false enable_url_parameters", + &Model{ + MetricsPath: types.StringValue("/metrics"), + Name: types.StringValue("Name"), + }, + &saml2Model{ + EnableURLParameters: types.BoolValue(false), + }, + &basicAuthModel{}, + []targetModel{}, + &observability.CreateScrapeConfigPayload{ + MetricsPath: utils.Ptr("/metrics"), + JobName: utils.Ptr("Name"), + Params: &map[string]any{"saml2": []string{"disabled"}}, + // Defaults + Scheme: utils.Ptr("https"), + ScrapeInterval: utils.Ptr("5m"), + ScrapeTimeout: utils.Ptr("2m"), + SampleLimit: utils.Ptr(float64(5000)), + StaticConfigs: &[]observability.CreateScrapeConfigPayloadStaticConfigsInner{}, + }, + true, + }, + { + "ok - true enable_url_parameters", + &Model{ + MetricsPath: types.StringValue("/metrics"), + Name: types.StringValue("Name"), + }, + &saml2Model{ + EnableURLParameters: types.BoolValue(true), + }, + &basicAuthModel{}, + []targetModel{}, + &observability.CreateScrapeConfigPayload{ + MetricsPath: utils.Ptr("/metrics"), + JobName: utils.Ptr("Name"), + Params: &map[string]any{"saml2": []string{"enabled"}}, + // Defaults + Scheme: utils.Ptr("https"), + ScrapeInterval: utils.Ptr("5m"), + ScrapeTimeout: utils.Ptr("2m"), + SampleLimit: utils.Ptr(float64(5000)), + StaticConfigs: &[]observability.CreateScrapeConfigPayloadStaticConfigsInner{}, + }, + true, + }, + { + "ok - with basic auth", + &Model{ + MetricsPath: types.StringValue("/metrics"), + Name: types.StringValue("Name"), + }, + &saml2Model{}, + &basicAuthModel{ + Username: types.StringValue("u"), + Password: types.StringValue("p"), + }, + []targetModel{}, + &observability.CreateScrapeConfigPayload{ + MetricsPath: utils.Ptr("/metrics"), + JobName: utils.Ptr("Name"), + BasicAuth: &observability.CreateScrapeConfigPayloadBasicAuth{ + Username: utils.Ptr("u"), + Password: utils.Ptr("p"), + }, + // Defaults + Scheme: utils.Ptr("https"), + ScrapeInterval: utils.Ptr("5m"), + ScrapeTimeout: utils.Ptr("2m"), + SampleLimit: 
utils.Ptr(float64(5000)), + StaticConfigs: &[]observability.CreateScrapeConfigPayloadStaticConfigsInner{}, + Params: &map[string]any{"saml2": []string{"enabled"}}, + }, + true, + }, + { + "ok - with targets", + &Model{ + MetricsPath: types.StringValue("/metrics"), + Name: types.StringValue("Name"), + }, + &saml2Model{}, + &basicAuthModel{}, + []targetModel{ + { + URLs: types.ListValueMust(types.StringType, []attr.Value{types.StringValue("url1")}), + Labels: types.MapValueMust(types.StringType, map[string]attr.Value{"k1": types.StringValue("v1")}), + }, + { + URLs: types.ListValueMust(types.StringType, []attr.Value{types.StringValue("url1"), types.StringValue("url3")}), + Labels: types.MapValueMust(types.StringType, map[string]attr.Value{"k2": types.StringValue("v2"), "k3": types.StringValue("v3")}), + }, + { + URLs: types.ListValueMust(types.StringType, []attr.Value{}), + Labels: types.MapNull(types.StringType), + }, + }, + &observability.CreateScrapeConfigPayload{ + MetricsPath: utils.Ptr("/metrics"), + JobName: utils.Ptr("Name"), + StaticConfigs: &[]observability.CreateScrapeConfigPayloadStaticConfigsInner{ + { + Targets: &[]string{"url1"}, + Labels: &map[string]interface{}{"k1": "v1"}, + }, + { + Targets: &[]string{"url1", "url3"}, + Labels: &map[string]interface{}{"k2": "v2", "k3": "v3"}, + }, + { + Targets: &[]string{}, + Labels: &map[string]interface{}{}, + }, + }, + // Defaults + Scheme: utils.Ptr("https"), + ScrapeInterval: utils.Ptr("5m"), + ScrapeTimeout: utils.Ptr("2m"), + SampleLimit: utils.Ptr(float64(5000)), + Params: &map[string]any{"saml2": []string{"enabled"}}, + }, + true, + }, + { + "nil_model", + nil, + nil, + nil, + nil, + nil, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + output, err := toCreatePayload(context.Background(), tt.input, tt.inputSAML2, tt.inputBasicAuth, tt.inputTargets) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + 
t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(output, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func TestToUpdatePayload(t *testing.T) { + tests := []struct { + description string + input *Model + inputSAML2 *saml2Model + basicAuthModel *basicAuthModel + inputTargets []targetModel + expected *observability.UpdateScrapeConfigPayload + isValid bool + }{ + { + "basic_ok", + &Model{ + MetricsPath: types.StringValue("/metrics"), + }, + &saml2Model{}, + &basicAuthModel{}, + []targetModel{}, + &observability.UpdateScrapeConfigPayload{ + MetricsPath: utils.Ptr("/metrics"), + // Defaults + Scheme: utils.Ptr("https"), + ScrapeInterval: utils.Ptr("5m"), + ScrapeTimeout: utils.Ptr("2m"), + SampleLimit: utils.Ptr(float64(5000)), + StaticConfigs: &[]observability.UpdateScrapeConfigPayloadStaticConfigsInner{}, + }, + true, + }, + { + "ok - true enable_url_parameters", + &Model{ + MetricsPath: types.StringValue("/metrics"), + Scheme: types.StringValue("http"), + }, + &saml2Model{ + EnableURLParameters: types.BoolValue(true), + }, + &basicAuthModel{}, + []targetModel{}, + &observability.UpdateScrapeConfigPayload{ + MetricsPath: utils.Ptr("/metrics"), + // Defaults + Scheme: utils.Ptr("http"), + ScrapeInterval: utils.Ptr("5m"), + ScrapeTimeout: utils.Ptr("2m"), + SampleLimit: utils.Ptr(float64(5000)), + StaticConfigs: &[]observability.UpdateScrapeConfigPayloadStaticConfigsInner{}, + Params: &map[string]any{"saml2": []string{"enabled"}}, + }, + true, + }, + { + "ok - false enable_url_parameters", + &Model{ + MetricsPath: types.StringValue("/metrics"), + Scheme: types.StringValue("http"), + }, + &saml2Model{ + EnableURLParameters: types.BoolValue(false), + }, + &basicAuthModel{}, + []targetModel{}, + &observability.UpdateScrapeConfigPayload{ + MetricsPath: utils.Ptr("/metrics"), + // Defaults + Scheme: utils.Ptr("http"), + ScrapeInterval: utils.Ptr("5m"), + ScrapeTimeout: utils.Ptr("2m"), + 
SampleLimit: utils.Ptr(float64(5000)), + StaticConfigs: &[]observability.UpdateScrapeConfigPayloadStaticConfigsInner{}, + Params: &map[string]any{"saml2": []string{"disabled"}}, + }, + true, + }, + { + "ok - with basic auth", + &Model{ + MetricsPath: types.StringValue("/metrics"), + Name: types.StringValue("Name"), + }, + &saml2Model{}, + &basicAuthModel{ + Username: types.StringValue("u"), + Password: types.StringValue("p"), + }, + []targetModel{}, + &observability.UpdateScrapeConfigPayload{ + MetricsPath: utils.Ptr("/metrics"), + BasicAuth: &observability.CreateScrapeConfigPayloadBasicAuth{ + Username: utils.Ptr("u"), + Password: utils.Ptr("p"), + }, + // Defaults + Scheme: utils.Ptr("https"), + ScrapeInterval: utils.Ptr("5m"), + ScrapeTimeout: utils.Ptr("2m"), + SampleLimit: utils.Ptr(float64(5000)), + StaticConfigs: &[]observability.UpdateScrapeConfigPayloadStaticConfigsInner{}, + }, + true, + }, + { + "ok - with targets", + &Model{ + MetricsPath: types.StringValue("/metrics"), + Name: types.StringValue("Name"), + }, + &saml2Model{}, + &basicAuthModel{}, + []targetModel{ + { + URLs: types.ListValueMust(types.StringType, []attr.Value{types.StringValue("url1")}), + Labels: types.MapValueMust(types.StringType, map[string]attr.Value{"k1": types.StringValue("v1")}), + }, + { + URLs: types.ListValueMust(types.StringType, []attr.Value{types.StringValue("url1"), types.StringValue("url3")}), + Labels: types.MapValueMust(types.StringType, map[string]attr.Value{"k2": types.StringValue("v2"), "k3": types.StringValue("v3")}), + }, + { + URLs: types.ListValueMust(types.StringType, []attr.Value{}), + Labels: types.MapNull(types.StringType), + }, + }, + &observability.UpdateScrapeConfigPayload{ + MetricsPath: utils.Ptr("/metrics"), + StaticConfigs: &[]observability.UpdateScrapeConfigPayloadStaticConfigsInner{ + { + Targets: &[]string{"url1"}, + Labels: &map[string]interface{}{"k1": "v1"}, + }, + { + Targets: &[]string{"url1", "url3"}, + Labels: &map[string]interface{}{"k2": 
"v2", "k3": "v3"}, + }, + { + Targets: &[]string{}, + Labels: &map[string]interface{}{}, + }, + }, + // Defaults + Scheme: utils.Ptr("https"), + ScrapeInterval: utils.Ptr("5m"), + ScrapeTimeout: utils.Ptr("2m"), + SampleLimit: utils.Ptr(float64(5000)), + }, + true, + }, + { + "nil_model", + nil, + nil, + nil, + nil, + nil, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + output, err := toUpdatePayload(context.Background(), tt.input, tt.inputSAML2, tt.basicAuthModel, tt.inputTargets) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(output, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} diff --git a/stackit/internal/testutil/testutil.go b/stackit/internal/testutil/testutil.go index b6e35f0d..ed04fc37 100644 --- a/stackit/internal/testutil/testutil.go +++ b/stackit/internal/testutil/testutil.go @@ -52,6 +52,7 @@ var ( AuthorizationCustomEndpoint = os.Getenv("TF_ACC_authorization_custom_endpoint") MongoDBFlexCustomEndpoint = os.Getenv("TF_ACC_MONGODBFLEX_CUSTOM_ENDPOINT") OpenSearchCustomEndpoint = os.Getenv("TF_ACC_OPENSEARCH_CUSTOM_ENDPOINT") + ObservabilityCustomEndpoint = os.Getenv("TF_ACC_OBSERVABILITY_CUSTOM_ENDPOINT") ObjectStorageCustomEndpoint = os.Getenv("TF_ACC_OBJECTSTORAGE_CUSTOM_ENDPOINT") PostgreSQLCustomEndpoint = os.Getenv("TF_ACC_POSTGRESQL_CUSTOM_ENDPOINT") PostgresFlexCustomEndpoint = os.Getenv("TF_ACC_POSTGRESFLEX_CUSTOM_ENDPOINT") @@ -87,6 +88,22 @@ func ArgusProviderConfig() string { ) } +// Provider config helper functions + +func ObservabilityProviderConfig() string { + if ObservabilityCustomEndpoint == "" { + return `provider "stackit" { + region = "eu01" + }` + } + return fmt.Sprintf(` + provider "stackit" { + observability_custom_endpoint = "%s" + }`, + ObservabilityCustomEndpoint, + ) +} + func 
DnsProviderConfig() string { if DnsCustomEndpoint == "" { return `provider "stackit" {}` diff --git a/stackit/provider.go b/stackit/provider.go index 7d49c400..a862b644 100644 --- a/stackit/provider.go +++ b/stackit/provider.go @@ -29,6 +29,9 @@ import ( objectStorageBucket "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/objectstorage/bucket" objecStorageCredential "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/objectstorage/credential" objecStorageCredentialsGroup "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/objectstorage/credentialsgroup" + observabilityCredential "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/observability/credential" + observabilityInstance "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/observability/instance" + observabilityScrapeConfig "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/observability/scrapeconfig" openSearchCredential "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/opensearch/credential" openSearchInstance "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/opensearch/instance" postgresFlexDatabase "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/postgresflex/database" @@ -101,6 +104,7 @@ type providerModel struct { MariaDBCustomEndpoint types.String `tfsdk:"mariadb_custom_endpoint"` AuthorizationCustomEndpoint types.String `tfsdk:"authorization_custom_endpoint"` ObjectStorageCustomEndpoint types.String `tfsdk:"objectstorage_custom_endpoint"` + ObservabilityCustomEndpoint types.String `tfsdk:"observability_custom_endpoint"` OpenSearchCustomEndpoint types.String `tfsdk:"opensearch_custom_endpoint"` RedisCustomEndpoint types.String `tfsdk:"redis_custom_endpoint"` SecretsManagerCustomEndpoint types.String `tfsdk:"secretsmanager_custom_endpoint"` @@ -135,6 +139,7 @@ 
func (p *Provider) Schema(_ context.Context, _ provider.SchemaRequest, resp *pro "mariadb_custom_endpoint": "Custom endpoint for the MariaDB service", "authorization_custom_endpoint": "Custom endpoint for the Membership service", "objectstorage_custom_endpoint": "Custom endpoint for the Object Storage service", + "observability_custom_endpoint": "Custom endpoint for the Observability service", "opensearch_custom_endpoint": "Custom endpoint for the OpenSearch service", "postgresql_custom_endpoint": "Custom endpoint for the PostgreSQL service", "postgresflex_custom_endpoint": "Custom endpoint for the PostgresFlex service", @@ -185,8 +190,9 @@ func (p *Provider) Schema(_ context.Context, _ provider.SchemaRequest, resp *pro Description: descriptions["region"], }, "argus_custom_endpoint": schema.StringAttribute{ - Optional: true, - Description: descriptions["argus_custom_endpoint"], + Optional: true, + Description: descriptions["argus_custom_endpoint"], + DeprecationMessage: "Argus service has been deprecated and integration will be removed after February 26th 2025. 
Please use `observability_custom_endpoint` and `observability` resources instead, which offer the exact same functionality.", }, "dns_custom_endpoint": schema.StringAttribute{ Optional: true, @@ -232,6 +238,10 @@ func (p *Provider) Schema(_ context.Context, _ provider.SchemaRequest, resp *pro Optional: true, Description: descriptions["objectstorage_custom_endpoint"], }, + "observability_custom_endpoint": schema.StringAttribute{ + Optional: true, + Description: descriptions["observability_custom_endpoint"], + }, "opensearch_custom_endpoint": schema.StringAttribute{ Optional: true, Description: descriptions["opensearch_custom_endpoint"], @@ -355,6 +365,9 @@ func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest, if !(providerConfig.ObjectStorageCustomEndpoint.IsUnknown() || providerConfig.ObjectStorageCustomEndpoint.IsNull()) { providerData.ObjectStorageCustomEndpoint = providerConfig.ObjectStorageCustomEndpoint.ValueString() } + if !(providerConfig.ObservabilityCustomEndpoint.IsUnknown() || providerConfig.ObservabilityCustomEndpoint.IsNull()) { + providerData.ObservabilityCustomEndpoint = providerConfig.ObservabilityCustomEndpoint.ValueString() + } if !(providerConfig.OpenSearchCustomEndpoint.IsUnknown() || providerConfig.OpenSearchCustomEndpoint.IsNull()) { providerData.OpenSearchCustomEndpoint = providerConfig.OpenSearchCustomEndpoint.ValueString() } @@ -415,6 +428,8 @@ func (p *Provider) DataSources(_ context.Context) []func() datasource.DataSource objectStorageBucket.NewBucketDataSource, objecStorageCredentialsGroup.NewCredentialsGroupDataSource, objecStorageCredential.NewCredentialDataSource, + observabilityInstance.NewInstanceDataSource, + observabilityScrapeConfig.NewScrapeConfigDataSource, openSearchInstance.NewInstanceDataSource, openSearchCredential.NewCredentialDataSource, postgresFlexDatabase.NewDatabaseDataSource, @@ -461,6 +476,9 @@ func (p *Provider) Resources(_ context.Context) []func() resource.Resource { 
objectStorageBucket.NewBucketResource, objecStorageCredentialsGroup.NewCredentialsGroupResource, objecStorageCredential.NewCredentialResource, + observabilityCredential.NewCredentialResource, + observabilityInstance.NewInstanceResource, + observabilityScrapeConfig.NewScrapeConfigResource, openSearchInstance.NewInstanceResource, openSearchCredential.NewCredentialResource, postgresFlexDatabase.NewDatabaseResource, diff --git a/templates/guides/vault_secrets_manager.md.tmpl b/templates/guides/vault_secrets_manager.md.tmpl index 19f8ad60..c6e8bf39 100644 --- a/templates/guides/vault_secrets_manager.md.tmpl +++ b/templates/guides/vault_secrets_manager.md.tmpl @@ -51,10 +51,10 @@ This guide outlines the process of utilizing the HashiCorp Vault provider alongs } ``` -5. **Define Terraform Resource (Example: Argus Monitoring Instance)** +5. **Define Terraform Resource (Example: Observability Monitoring Instance)** ```hcl - resource "stackit_argus_instance" "example" { + resource "stackit_observability_instance" "example" { project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" name = "example-instance" plan_name = "Monitoring-Medium-EU01" @@ -71,7 +71,7 @@ This guide outlines the process of utilizing the HashiCorp Vault provider alongs delete_all_versions = true data_json = jsonencode( { - grafana_password = stackit_argus_instance.example.grafana_initial_admin_password, + grafana_password = stackit_observability_instance.example.grafana_initial_admin_password, other_secret = ..., } ) @@ -80,4 +80,4 @@ This guide outlines the process of utilizing the HashiCorp Vault provider alongs ## Note -This example can be adapted for various resources within the provider as well as any other Secret the user wants to set in the Secrets Manager instance. Adapting this examples means replacing the Argus Monitoring Grafana password with the appropriate value. 
\ No newline at end of file +This example can be adapted for various resources within the provider as well as any other Secret the user wants to set in the Secrets Manager instance. Adapting this example means replacing the Observability Monitoring Grafana password with the appropriate value. \ No newline at end of file