From e4c8a6fbf4d508ad3dedc82aeb171b03451a9320 Mon Sep 17 00:00:00 2001 From: vicentepinto98 Date: Thu, 7 Sep 2023 11:34:45 +0100 Subject: [PATCH] Initial commit --- .github/actions/build/action.yaml | 15 + .github/workflows/ci.yaml | 22 + .github/workflows/tf-acc-test.yaml | 33 + .gitignore | 10 + CONTRIBUTION.md | 2 + LICENSE.md | 201 +++ Makefile | 36 + README.md | 35 + docs/data-sources/argus_instance.md | 55 + docs/data-sources/argus_scrapeconfig.md | 66 + docs/data-sources/dns_record_set.md | 42 + docs/data-sources/dns_zone.md | 51 + docs/data-sources/logme_credentials.md | 42 + docs/data-sources/logme_instance.md | 49 + docs/data-sources/mariadb_credentials.md | 42 + docs/data-sources/mariadb_instance.md | 49 + docs/data-sources/opensearch_credentials.md | 42 + docs/data-sources/opensearch_instance.md | 49 + docs/data-sources/postgresflex_instance.md | 58 + docs/data-sources/postgresflex_user.md | 39 + docs/data-sources/postgresql_credentials.md | 42 + docs/data-sources/postgresql_instance.md | 54 + docs/data-sources/rabbitmq_credentials.md | 42 + docs/data-sources/rabbitmq_instance.md | 49 + docs/data-sources/redis_credentials.md | 42 + docs/data-sources/redis_instance.md | 49 + docs/data-sources/resourcemanager_project.md | 34 + docs/data-sources/ske_cluster.md | 119 ++ docs/data-sources/ske_project.md | 30 + docs/index.md | 40 + docs/resources/argus_credential.md | 27 + docs/resources/argus_instance.md | 59 + docs/resources/argus_scrapeconfig.md | 84 ++ docs/resources/dns_record_set.md | 48 + docs/resources/dns_zone.md | 60 + docs/resources/logme_credentials.md | 34 + docs/resources/logme_instance.md | 57 + docs/resources/mariadb_credentials.md | 41 + docs/resources/mariadb_instance.md | 57 + docs/resources/opensearch_credentials.md | 41 + docs/resources/opensearch_instance.md | 57 + docs/resources/postgresflex_instance.md | 73 + docs/resources/postgresflex_user.md | 40 + docs/resources/postgresql_credentials.md | 41 + docs/resources/postgresql_instance.md 
| 62 + docs/resources/rabbitmq_credentials.md | 41 + docs/resources/rabbitmq_instance.md | 57 + docs/resources/redis_credentials.md | 41 + docs/resources/redis_instance.md | 57 + docs/resources/resourcemanager_project.md | 43 + docs/resources/ske_cluster.md | 152 +++ docs/resources/ske_project.md | 30 + .../stackit_argus_instance/data-source.tf | 4 + .../stackit_argus_scrapeconfig/data-source.tf | 5 + .../stackit_dns_record_set/data-source.tf | 5 + .../stackit_dns_zone/data-source.tf | 4 + .../stackit_logme_credentials/data-source.tf | 5 + .../stackit_logme_instance/data-source.tf | 4 + .../data-source.tf | 5 + .../stackit_mariadb_instance/data-source.tf | 4 + .../data-source.tf | 5 + .../data-source.tf | 4 + .../data-source.tf | 4 + .../stackit_postgresflex_user/data-source.tf | 5 + .../data-source.tf | 5 + .../data-source.tf | 4 + .../data-source.tf | 5 + .../stackit_rabbitmq_instance/data-source.tf | 4 + .../stackit_redis_credentials/data-source.tf | 5 + .../stackit_redis_instance/data-source.tf | 4 + .../data-source.tf | 4 + .../stackit_ske_cluster/data-source.tf | 4 + .../stackit_ske_project/data-source.tf | 3 + examples/provider/provider.tf | 3 + .../stackit_argus_instance/resource.tf | 5 + .../stackit_argus_scrapeconfig/resource.tf | 17 + .../stackit_dns_record_set/resource.tf | 8 + .../resources/stackit_dns_zone/resource.tf | 10 + .../stackit_logme_credentials /resource.tf | 4 + .../stackit_logme_instance/resource.tf | 9 + .../stackit_mariadb_credentials/resource.tf | 4 + .../stackit_mariadb_instance/resource.tf | 9 + .../resource.tf | 4 + .../stackit_opensearch_instance/resource.tf | 9 + .../stackit_postgresflex_instance/resource.tf | 16 + .../stackit_postgresflex_user/resource.tf | 6 + .../resource.tf | 4 + .../stackit_postgresql_instance/resource.tf | 9 + .../stackit_rabbitmq_credentials/resource.tf | 4 + .../stackit_rabbitmq_instance/resource.tf | 9 + .../stackit_redis_credentials/resource.tf | 4 + .../stackit_redis_instance/resource.tf | 9 + 
.../resource.tf | 9 + .../resources/stackit_ske_cluster/resource.tf | 21 + .../resources/stackit_ske_project/resource.tf | 3 + go.mod | 82 ++ go.sum | 222 ++++ golang-ci.yaml | 100 ++ main.go | 23 + scripts/lint-golangci-lint.sh | 18 + scripts/project.sh | 23 + scripts/tfplugindocs.sh | 17 + stackit/conversion/conversion.go | 75 ++ stackit/core/core.go | 56 + stackit/provider.go | 295 +++++ stackit/services/argus/argus_acc_test.go | 353 +++++ stackit/services/argus/credential/resource.go | 236 ++++ .../argus/credential/resource_test.go | 77 ++ stackit/services/argus/instance/datasource.go | 229 ++++ stackit/services/argus/instance/resource.go | 558 ++++++++ .../services/argus/instance/resource_test.go | 250 ++++ .../services/argus/scrapeconfig/datasource.go | 221 ++++ .../services/argus/scrapeconfig/resource.go | 676 ++++++++++ .../argus/scrapeconfig/resource_test.go | 272 ++++ stackit/services/dns/dns_acc_test.go | 557 ++++++++ stackit/services/dns/recordset/datasource.go | 174 +++ stackit/services/dns/recordset/resource.go | 497 +++++++ .../services/dns/recordset/resource_test.go | 307 +++++ stackit/services/dns/zone/datasource.go | 211 +++ stackit/services/dns/zone/resource.go | 608 +++++++++ stackit/services/dns/zone/resource_test.go | 351 +++++ .../services/logme/credentials/datasource.go | 178 +++ .../services/logme/credentials/resource.go | 371 ++++++ .../logme/credentials/resource_test.go | 156 +++ stackit/services/logme/instance/datasource.go | 181 +++ stackit/services/logme/instance/resource.go | 642 +++++++++ .../services/logme/instance/resource_test.go | 304 +++++ stackit/services/logme/logme_acc_test.go | 241 ++++ .../mariadb/credentials/datasource.go | 178 +++ .../services/mariadb/credentials/resource.go | 371 ++++++ .../mariadb/credentials/resource_test.go | 156 +++ .../services/mariadb/instance/datasource.go | 181 +++ stackit/services/mariadb/instance/resource.go | 624 +++++++++ .../mariadb/instance/resource_test.go | 304 +++++ 
stackit/services/mariadb/mariadb_acc_test.go | 241 ++++ .../opensearch/credentials/datasource.go | 178 +++ .../opensearch/credentials/resource.go | 371 ++++++ .../opensearch/credentials/resource_test.go | 156 +++ .../opensearch/instance/datasource.go | 181 +++ .../services/opensearch/instance/resource.go | 623 +++++++++ .../opensearch/instance/resource_test.go | 304 +++++ .../opensearch/opensearch_acc_test.go | 263 ++++ .../postgresflex/instance/datasource.go | 205 +++ .../postgresflex/instance/resource.go | 703 ++++++++++ .../postgresflex/instance/resource_test.go | 509 +++++++ .../postgresflex/postgresflex_acc_test.go | 324 +++++ .../services/postgresflex/user/datasource.go | 168 +++ .../services/postgresflex/user/resource.go | 431 ++++++ .../postgresflex/user/resource_test.go | 359 +++++ .../postgresql/credentials/datasource.go | 178 +++ .../postgresql/credentials/resource.go | 371 ++++++ .../postgresql/credentials/resource_test.go | 156 +++ .../postgresql/instance/datasource.go | 198 +++ .../services/postgresql/instance/resource.go | 704 ++++++++++ .../postgresql/instance/resource_test.go | 435 ++++++ .../postgresql/postgresql_acc_test.go | 252 ++++ .../rabbitmq/credentials/datasource.go | 178 +++ .../services/rabbitmq/credentials/resource.go | 371 ++++++ .../rabbitmq/credentials/resource_test.go | 156 +++ .../services/rabbitmq/instance/datasource.go | 181 +++ .../services/rabbitmq/instance/resource.go | 637 +++++++++ .../rabbitmq/instance/resource_test.go | 304 +++++ .../services/rabbitmq/rabbitmq_acc_test.go | 286 ++++ .../services/redis/credentials/datasource.go | 178 +++ .../services/redis/credentials/resource.go | 371 ++++++ .../redis/credentials/resource_test.go | 156 +++ stackit/services/redis/instance/datasource.go | 181 +++ stackit/services/redis/instance/resource.go | 634 +++++++++ .../services/redis/instance/resource_test.go | 304 +++++ stackit/services/redis/redis_acc_test.go | 286 ++++ .../resourcemanager/project/datasource.go | 216 +++ 
.../resourcemanager/project/resource.go | 434 ++++++ .../resourcemanager/project/resource_test.go | 278 ++++ .../resourcemanager_acc_test.go | 170 +++ stackit/services/ske/cluster/datasource.go | 319 +++++ stackit/services/ske/cluster/resource.go | 1170 +++++++++++++++++ stackit/services/ske/cluster/resource_test.go | 635 +++++++++ stackit/services/ske/project/datasource.go | 115 ++ stackit/services/ske/project/resource.go | 210 +++ stackit/services/ske/ske_acc_test.go | 541 ++++++++ stackit/testutil/sdk_credentials_invalid.json | 1 + stackit/testutil/sdk_credentials_valid.json | 3 + stackit/testutil/testutil.go | 268 ++++ stackit/validate/validate.go | 85 ++ stackit/validate/validate_test.go | 210 +++ website/docs/index.html.markdown | 29 + 186 files changed, 29501 insertions(+) create mode 100644 .github/actions/build/action.yaml create mode 100644 .github/workflows/ci.yaml create mode 100644 .github/workflows/tf-acc-test.yaml create mode 100644 .gitignore create mode 100644 CONTRIBUTION.md create mode 100644 LICENSE.md create mode 100644 Makefile create mode 100644 README.md create mode 100644 docs/data-sources/argus_instance.md create mode 100644 docs/data-sources/argus_scrapeconfig.md create mode 100644 docs/data-sources/dns_record_set.md create mode 100644 docs/data-sources/dns_zone.md create mode 100644 docs/data-sources/logme_credentials.md create mode 100644 docs/data-sources/logme_instance.md create mode 100644 docs/data-sources/mariadb_credentials.md create mode 100644 docs/data-sources/mariadb_instance.md create mode 100644 docs/data-sources/opensearch_credentials.md create mode 100644 docs/data-sources/opensearch_instance.md create mode 100644 docs/data-sources/postgresflex_instance.md create mode 100644 docs/data-sources/postgresflex_user.md create mode 100644 docs/data-sources/postgresql_credentials.md create mode 100644 docs/data-sources/postgresql_instance.md create mode 100644 docs/data-sources/rabbitmq_credentials.md create mode 100644 
docs/data-sources/rabbitmq_instance.md create mode 100644 docs/data-sources/redis_credentials.md create mode 100644 docs/data-sources/redis_instance.md create mode 100644 docs/data-sources/resourcemanager_project.md create mode 100644 docs/data-sources/ske_cluster.md create mode 100644 docs/data-sources/ske_project.md create mode 100644 docs/index.md create mode 100644 docs/resources/argus_credential.md create mode 100644 docs/resources/argus_instance.md create mode 100644 docs/resources/argus_scrapeconfig.md create mode 100644 docs/resources/dns_record_set.md create mode 100644 docs/resources/dns_zone.md create mode 100644 docs/resources/logme_credentials.md create mode 100644 docs/resources/logme_instance.md create mode 100644 docs/resources/mariadb_credentials.md create mode 100644 docs/resources/mariadb_instance.md create mode 100644 docs/resources/opensearch_credentials.md create mode 100644 docs/resources/opensearch_instance.md create mode 100644 docs/resources/postgresflex_instance.md create mode 100644 docs/resources/postgresflex_user.md create mode 100644 docs/resources/postgresql_credentials.md create mode 100644 docs/resources/postgresql_instance.md create mode 100644 docs/resources/rabbitmq_credentials.md create mode 100644 docs/resources/rabbitmq_instance.md create mode 100644 docs/resources/redis_credentials.md create mode 100644 docs/resources/redis_instance.md create mode 100644 docs/resources/resourcemanager_project.md create mode 100644 docs/resources/ske_cluster.md create mode 100644 docs/resources/ske_project.md create mode 100644 examples/data-sources/stackit_argus_instance/data-source.tf create mode 100644 examples/data-sources/stackit_argus_scrapeconfig/data-source.tf create mode 100644 examples/data-sources/stackit_dns_record_set/data-source.tf create mode 100644 examples/data-sources/stackit_dns_zone/data-source.tf create mode 100644 examples/data-sources/stackit_logme_credentials/data-source.tf create mode 100644 
examples/data-sources/stackit_logme_instance/data-source.tf create mode 100644 examples/data-sources/stackit_mariadb_credentials/data-source.tf create mode 100644 examples/data-sources/stackit_mariadb_instance/data-source.tf create mode 100644 examples/data-sources/stackit_opensearch_credentials/data-source.tf create mode 100644 examples/data-sources/stackit_opensearch_instance/data-source.tf create mode 100644 examples/data-sources/stackit_postgresflex_instance/data-source.tf create mode 100644 examples/data-sources/stackit_postgresflex_user/data-source.tf create mode 100644 examples/data-sources/stackit_postgresql_credentials/data-source.tf create mode 100644 examples/data-sources/stackit_postgresql_instance/data-source.tf create mode 100644 examples/data-sources/stackit_rabbitmq_credentials/data-source.tf create mode 100644 examples/data-sources/stackit_rabbitmq_instance/data-source.tf create mode 100644 examples/data-sources/stackit_redis_credentials/data-source.tf create mode 100644 examples/data-sources/stackit_redis_instance/data-source.tf create mode 100644 examples/data-sources/stackit_resourcemanager_project/data-source.tf create mode 100644 examples/data-sources/stackit_ske_cluster/data-source.tf create mode 100644 examples/data-sources/stackit_ske_project/data-source.tf create mode 100644 examples/provider/provider.tf create mode 100644 examples/resources/stackit_argus_instance/resource.tf create mode 100644 examples/resources/stackit_argus_scrapeconfig/resource.tf create mode 100644 examples/resources/stackit_dns_record_set/resource.tf create mode 100644 examples/resources/stackit_dns_zone/resource.tf create mode 100644 examples/resources/stackit_logme_credentials /resource.tf create mode 100644 examples/resources/stackit_logme_instance/resource.tf create mode 100644 examples/resources/stackit_mariadb_credentials/resource.tf create mode 100644 examples/resources/stackit_mariadb_instance/resource.tf create mode 100644 
examples/resources/stackit_opensearch_credentials/resource.tf create mode 100644 examples/resources/stackit_opensearch_instance/resource.tf create mode 100644 examples/resources/stackit_postgresflex_instance/resource.tf create mode 100644 examples/resources/stackit_postgresflex_user/resource.tf create mode 100644 examples/resources/stackit_postgresql_credentials/resource.tf create mode 100644 examples/resources/stackit_postgresql_instance/resource.tf create mode 100644 examples/resources/stackit_rabbitmq_credentials/resource.tf create mode 100644 examples/resources/stackit_rabbitmq_instance/resource.tf create mode 100644 examples/resources/stackit_redis_credentials/resource.tf create mode 100644 examples/resources/stackit_redis_instance/resource.tf create mode 100644 examples/resources/stackit_resourcemanager_project/resource.tf create mode 100644 examples/resources/stackit_ske_cluster/resource.tf create mode 100644 examples/resources/stackit_ske_project/resource.tf create mode 100644 go.mod create mode 100644 go.sum create mode 100644 golang-ci.yaml create mode 100644 main.go create mode 100755 scripts/lint-golangci-lint.sh create mode 100755 scripts/project.sh create mode 100755 scripts/tfplugindocs.sh create mode 100644 stackit/conversion/conversion.go create mode 100644 stackit/core/core.go create mode 100644 stackit/provider.go create mode 100644 stackit/services/argus/argus_acc_test.go create mode 100644 stackit/services/argus/credential/resource.go create mode 100644 stackit/services/argus/credential/resource_test.go create mode 100644 stackit/services/argus/instance/datasource.go create mode 100644 stackit/services/argus/instance/resource.go create mode 100644 stackit/services/argus/instance/resource_test.go create mode 100644 stackit/services/argus/scrapeconfig/datasource.go create mode 100644 stackit/services/argus/scrapeconfig/resource.go create mode 100644 stackit/services/argus/scrapeconfig/resource_test.go create mode 100644 
stackit/services/dns/dns_acc_test.go create mode 100644 stackit/services/dns/recordset/datasource.go create mode 100644 stackit/services/dns/recordset/resource.go create mode 100644 stackit/services/dns/recordset/resource_test.go create mode 100644 stackit/services/dns/zone/datasource.go create mode 100644 stackit/services/dns/zone/resource.go create mode 100644 stackit/services/dns/zone/resource_test.go create mode 100644 stackit/services/logme/credentials/datasource.go create mode 100644 stackit/services/logme/credentials/resource.go create mode 100644 stackit/services/logme/credentials/resource_test.go create mode 100644 stackit/services/logme/instance/datasource.go create mode 100644 stackit/services/logme/instance/resource.go create mode 100644 stackit/services/logme/instance/resource_test.go create mode 100644 stackit/services/logme/logme_acc_test.go create mode 100644 stackit/services/mariadb/credentials/datasource.go create mode 100644 stackit/services/mariadb/credentials/resource.go create mode 100644 stackit/services/mariadb/credentials/resource_test.go create mode 100644 stackit/services/mariadb/instance/datasource.go create mode 100644 stackit/services/mariadb/instance/resource.go create mode 100644 stackit/services/mariadb/instance/resource_test.go create mode 100644 stackit/services/mariadb/mariadb_acc_test.go create mode 100644 stackit/services/opensearch/credentials/datasource.go create mode 100644 stackit/services/opensearch/credentials/resource.go create mode 100644 stackit/services/opensearch/credentials/resource_test.go create mode 100644 stackit/services/opensearch/instance/datasource.go create mode 100644 stackit/services/opensearch/instance/resource.go create mode 100644 stackit/services/opensearch/instance/resource_test.go create mode 100644 stackit/services/opensearch/opensearch_acc_test.go create mode 100644 stackit/services/postgresflex/instance/datasource.go create mode 100644 stackit/services/postgresflex/instance/resource.go create 
mode 100644 stackit/services/postgresflex/instance/resource_test.go create mode 100644 stackit/services/postgresflex/postgresflex_acc_test.go create mode 100644 stackit/services/postgresflex/user/datasource.go create mode 100644 stackit/services/postgresflex/user/resource.go create mode 100644 stackit/services/postgresflex/user/resource_test.go create mode 100644 stackit/services/postgresql/credentials/datasource.go create mode 100644 stackit/services/postgresql/credentials/resource.go create mode 100644 stackit/services/postgresql/credentials/resource_test.go create mode 100644 stackit/services/postgresql/instance/datasource.go create mode 100644 stackit/services/postgresql/instance/resource.go create mode 100644 stackit/services/postgresql/instance/resource_test.go create mode 100644 stackit/services/postgresql/postgresql_acc_test.go create mode 100644 stackit/services/rabbitmq/credentials/datasource.go create mode 100644 stackit/services/rabbitmq/credentials/resource.go create mode 100644 stackit/services/rabbitmq/credentials/resource_test.go create mode 100644 stackit/services/rabbitmq/instance/datasource.go create mode 100644 stackit/services/rabbitmq/instance/resource.go create mode 100644 stackit/services/rabbitmq/instance/resource_test.go create mode 100644 stackit/services/rabbitmq/rabbitmq_acc_test.go create mode 100644 stackit/services/redis/credentials/datasource.go create mode 100644 stackit/services/redis/credentials/resource.go create mode 100644 stackit/services/redis/credentials/resource_test.go create mode 100644 stackit/services/redis/instance/datasource.go create mode 100644 stackit/services/redis/instance/resource.go create mode 100644 stackit/services/redis/instance/resource_test.go create mode 100644 stackit/services/redis/redis_acc_test.go create mode 100644 stackit/services/resourcemanager/project/datasource.go create mode 100644 stackit/services/resourcemanager/project/resource.go create mode 100644 
stackit/services/resourcemanager/project/resource_test.go create mode 100644 stackit/services/resourcemanager/resourcemanager_acc_test.go create mode 100644 stackit/services/ske/cluster/datasource.go create mode 100644 stackit/services/ske/cluster/resource.go create mode 100644 stackit/services/ske/cluster/resource_test.go create mode 100644 stackit/services/ske/project/datasource.go create mode 100644 stackit/services/ske/project/resource.go create mode 100644 stackit/services/ske/ske_acc_test.go create mode 100644 stackit/testutil/sdk_credentials_invalid.json create mode 100644 stackit/testutil/sdk_credentials_valid.json create mode 100644 stackit/testutil/testutil.go create mode 100644 stackit/validate/validate.go create mode 100644 stackit/validate/validate_test.go create mode 100644 website/docs/index.html.markdown diff --git a/.github/actions/build/action.yaml b/.github/actions/build/action.yaml new file mode 100644 index 00000000..ae8ebf3a --- /dev/null +++ b/.github/actions/build/action.yaml @@ -0,0 +1,15 @@ +name: Build +inputs: + go-version: + description: "Go version to install" + required: true +runs: + using: "composite" + steps: + - name: Install Go ${{ inputs.go-version }} + uses: actions/setup-go@v4 + with: + go-version: ${{ inputs.go-version }} + - name: Install project tools and dependencies + shell: bash + run: make project-tools \ No newline at end of file diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml new file mode 100644 index 00000000..a6902a65 --- /dev/null +++ b/.github/workflows/ci.yaml @@ -0,0 +1,22 @@ +name: CI Workflow + +on: [pull_request, workflow_dispatch] + +env: + GO_VERSION: '1.20' + +jobs: + main: + name: Main + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + - name: Build + uses: ./.github/actions/build + with: + go-version: ${{ env.GO_VERSION }} + - name: Lint + run: make lint + - name: Test + run: make test \ No newline at end of file diff --git 
a/.github/workflows/tf-acc-test.yaml b/.github/workflows/tf-acc-test.yaml new file mode 100644 index 00000000..c419276f --- /dev/null +++ b/.github/workflows/tf-acc-test.yaml @@ -0,0 +1,33 @@ +name: TF Acceptance Tests + +on: + push: + branches: + - master + workflow_dispatch: + +jobs: + main: + name: Main + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + - name: Install project tools and dependencies + run: make project-tools + - name: Run tests + run: make test-acceptance-tf TF_ACC_PROJECT_ID=${{ secrets.TF_ACC_PROJECT_ID }} + env: + TF_ACC_ARGUS_CUSTOM_ENDPOINT: ${{ secrets.TF_ACC_ARGUS_CUSTOM_ENDPOINT }} + TF_ACC_DNS_CUSTOM_ENDPOINT: ${{ secrets.TF_ACC_DNS_CUSTOM_ENDPOINT }} + TF_ACC_LOGME_CUSTOM_ENDPOINT: ${{ secrets.TF_ACC_LOGME_CUSTOM_ENDPOINT }} + TF_ACC_MARIADB_CUSTOM_ENDPOINT: ${{ secrets.TF_ACC_MARIADB_CUSTOM_ENDPOINT }} + TF_ACC_OPENSEARCH_CUSTOM_ENDPOINT: ${{ secrets.TF_ACC_OPENSEARCH_CUSTOM_ENDPOINT }} + TF_ACC_POSTGRESFLEX_CUSTOM_ENDPOINT: ${{ secrets.TF_ACC_POSTGRESFLEX_CUSTOM_ENDPOINT }} + TF_ACC_POSTGRESQL_CUSTOM_ENDPOINT: ${{ secrets.TF_ACC_POSTGRESQL_CUSTOM_ENDPOINT }} + TF_ACC_RABBITMQ_CUSTOM_ENDPOINT: ${{ secrets.TF_ACC_RABBITMQ_CUSTOM_ENDPOINT }} + TF_ACC_REDIS_CUSTOM_ENDPOINT: ${{ secrets.TF_ACC_REDIS_CUSTOM_ENDPOINT }} + TF_ACC_RESOURCEMANAGER_CUSTOM_ENDPOINT: ${{ secrets.TF_ACC_RESOURCEMANAGER_CUSTOM_ENDPOINT }} + TF_ACC_SERVICE_ACCOUNT_TOKEN: ${{ secrets.TF_ACC_SERVICE_ACCOUNT_TOKEN }} + TF_ACC_TEST_PROJECT_SERVICE_ACCOUNT_EMAIL: ${{ secrets.TF_ACC_TEST_PROJECT_SERVICE_ACCOUNT_EMAIL }} + TF_ACC_TEST_PROJECT_SERVICE_ACCOUNT_TOKEN: ${{ secrets.TF_ACC_TEST_PROJECT_SERVICE_ACCOUNT_TOKEN }} \ No newline at end of file diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..de462cea --- /dev/null +++ b/.gitignore @@ -0,0 +1,10 @@ +# Binaries +bin/ + +## IDE +*.idea/ +*.vscode/ + +# Terraform +**/.terraform +**/terraform.tfstate** diff --git a/CONTRIBUTION.md b/CONTRIBUTION.md new file mode 
100644 index 00000000..00138aa6 --- /dev/null +++ b/CONTRIBUTION.md @@ -0,0 +1,2 @@ +## Contribute +Your contribution is welcome! Please create a pull request (PR). The STACKIT Developer Tools team will review it. A more detailed contribution guideline is planned to come. \ No newline at end of file diff --git a/LICENSE.md b/LICENSE.md new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/LICENSE.md @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..5820843d --- /dev/null +++ b/Makefile @@ -0,0 +1,36 @@ +ROOT_DIR ?= $(shell git rev-parse --show-toplevel) +SCRIPTS_BASE ?= $(ROOT_DIR)/scripts + +# SETUP AND TOOL INITIALIZATION TASKS +project-help: + @$(SCRIPTS_BASE)/project.sh help + +project-tools: + @$(SCRIPTS_BASE)/project.sh tools + +# LINT +lint-golangci-lint: + @echo "Linting with golangci-lint" + @$(SCRIPTS_BASE)/lint-golangci-lint.sh + +lint-tf: + @echo "Linting examples" + @terraform fmt -check -diff -recursive examples + +lint: lint-golangci-lint lint-tf + +# DOCUMENTATION GENERATION +generate-docs: + @echo "Generating documentation with tfplugindocs" + @$(SCRIPTS_BASE)/tfplugindocs.sh + +# TEST +test: + @echo "Running tests for the terraform provider" + @cd $(ROOT_DIR)/stackit && go test ./... -count=1 && cd $(ROOT_DIR) + +test-acceptance-tf: + @if [ -z $(TF_ACC_PROJECT_ID) ]; then echo "Input TF_ACC_PROJECT_ID missing"; exit 1; fi + @echo "Running acceptance tests for the terraform provider" + @cd $(ROOT_DIR)/stackit && TF_ACC=1 TF_ACC_PROJECT_ID=$(TF_ACC_PROJECT_ID) go test ./... -count=1 -timeout=30m && cd $(ROOT_DIR) + diff --git a/README.md b/README.md new file mode 100644 index 00000000..ac801096 --- /dev/null +++ b/README.md @@ -0,0 +1,35 @@ +# Introduction + +This project is the official Terraform provider for STACKIT. 
+ +# Getting Started + +Check one of the examples in the [examples](examples/) folder. + +# Authentication + +Currently, only the *token flow* is supported. The Terraform provider will first try to find a token in the `STACKIT_SERVICE_ACCOUNT_TOKEN` env var. If not present, it will check the credentials file located in the path defined by the `STACKIT_CREDENTIALS_PATH` env var, if specified, or in `$HOME/.stackit/credentials.json` as a fallback. If the token is found, all the requests are authenticated using that token. + +## Acceptance tests + +Terraform acceptance tests are run using the command `make test-acceptance-tf`. For all services, +- The env var `TF_ACC_PROJECT_ID` must be set with the ID of the STACKIT test project to test it. +- Authentication is set as usual. +- Optionally, the env var `TF_ACC_XXXXXX_CUSTOM_ENDPOINT` (where `XXXXXX` is the uppercase name of the service) can be set to use endpoints other than the default value. + +Additionally, for the Resource Manager service, +- A service account with permissions to create and delete projects is required. +- The env var `TF_ACC_TEST_PROJECT_SERVICE_ACCOUNT_EMAIL` must be set as the email of the service account. +- The env var `TF_ACC_TEST_PROJECT_SERVICE_ACCOUNT_TOKEN` must be set as a valid token of the service account. Can also be set in the credentials file used by authentication (see [Authentication](#authentication) for more details). +- The env var `TF_ACC_PROJECT_ID` is ignored. + +**WARNING:** Acceptance tests will create real resources, which may incur costs. + +## Reporting issues +If you encounter any issues or have suggestions for improvements, please open an issue in the repository. + +## Contribute +Your contribution is welcome! Please create a pull request (PR). The STACKIT Developer Tools team will review it. A more detailed contribution guideline is planned.
+ +## License +Apache 2.0 \ No newline at end of file diff --git a/docs/data-sources/argus_instance.md b/docs/data-sources/argus_instance.md new file mode 100644 index 00000000..7eab13df --- /dev/null +++ b/docs/data-sources/argus_instance.md @@ -0,0 +1,55 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_argus_instance Data Source - stackit" +subcategory: "" +description: |- + +--- + +# stackit_argus_instance (Data Source) + + + +## Example Usage + +```terraform +data "stackit_argus_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} +``` + + +## Schema + +### Required + +- `instance_id` (String) The Argus instance ID. +- `project_id` (String) STACKIT project ID to which the instance is associated. + +### Read-Only + +- `alerting_url` (String) Specifies Alerting URL. +- `dashboard_url` (String) Specifies Argus instance dashboard URL. +- `grafana_initial_admin_password` (String, Sensitive) Specifies an initial Grafana admin password. +- `grafana_initial_admin_user` (String) Specifies an initial Grafana admin username. +- `grafana_public_read_access` (Boolean) If true, anyone can access Grafana dashboards without logging in. +- `grafana_url` (String) Specifies Grafana URL. +- `id` (String) Terraform's internal resource ID. +- `is_updatable` (Boolean) Specifies if the instance can be updated. +- `jaeger_traces_url` (String) +- `jaeger_ui_url` (String) +- `logs_push_url` (String) Specifies URL for pushing logs. +- `logs_url` (String) Specifies Logs URL. +- `metrics_push_url` (String) Specifies URL for pushing metrics. +- `metrics_retention_days` (Number) Specifies for how many days the raw metrics are kept. +- `metrics_retention_days_1h_downsampling` (Number) Specifies for how many days the 1h downsampled metrics are kept. must be less than the value of the 5m downsampling retention. Default is set to `0` (disabled). 
+- `metrics_retention_days_5m_downsampling` (Number) Specifies for how many days the 5m downsampled metrics are kept. must be less than the value of the general retention. Default is set to `0` (disabled). +- `metrics_url` (String) Specifies metrics URL. +- `name` (String) The name of the Argus instance. +- `otlp_traces_url` (String) +- `parameters` (Map of String) Additional parameters. +- `plan_id` (String) The Argus plan ID. +- `plan_name` (String) Specifies the Argus plan. E.g. `Monitoring-Medium-EU01`. +- `targets_url` (String) Specifies Targets URL. +- `zipkin_spans_url` (String) diff --git a/docs/data-sources/argus_scrapeconfig.md b/docs/data-sources/argus_scrapeconfig.md new file mode 100644 index 00000000..db9e0455 --- /dev/null +++ b/docs/data-sources/argus_scrapeconfig.md @@ -0,0 +1,66 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_argus_scrapeconfig Data Source - stackit" +subcategory: "" +description: |- + +--- + +# stackit_argus_scrapeconfig (Data Source) + + + +## Example Usage + +```terraform +data "stackit_argus_scrapeconfig" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + job_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} +``` + + +## Schema + +### Required + +- `instance_id` (String) Argus instance ID to which the scraping job is associated. +- `name` (String) Specifies the name of the scraping job +- `project_id` (String) STACKIT project ID to which the scraping job is associated. + +### Read-Only + +- `basic_auth` (Attributes) A basic authentication block. (see [below for nested schema](#nestedatt--basic_auth)) +- `id` (String) Terraform's internal resource ID. +- `metrics_path` (String) Specifies the job scraping url path. +- `saml2` (Attributes) A SAML2 configuration block (see [below for nested schema](#nestedatt--saml2)) +- `scheme` (String) Specifies the http scheme. 
+ +- `scrape_interval` (String) Specifies the scrape interval as duration string. +- `scrape_timeout` (String) Specifies the scrape timeout as duration string. +- `targets` (Attributes List) The targets list (specified by the static config). (see [below for nested schema](#nestedatt--targets)) + + +### Nested Schema for `basic_auth` + +Read-Only: + +- `password` (String, Sensitive) Specifies basic auth password. +- `username` (String) Specifies basic auth username. + + + +### Nested Schema for `saml2` + +Read-Only: + +- `enable_url_parameters` (Boolean) Are URL parameters enabled? + + + +### Nested Schema for `targets` + +Read-Only: + +- `labels` (Map of String) Specifies labels. +- `urls` (List of String) Specifies target URLs. diff --git a/docs/data-sources/dns_record_set.md b/docs/data-sources/dns_record_set.md new file mode 100644 index 00000000..3474f04b --- /dev/null +++ b/docs/data-sources/dns_record_set.md @@ -0,0 +1,42 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_dns_record_set Data Source - stackit" +subcategory: "" +description: |- + DNS Record Set Resource schema. +--- + +# stackit_dns_record_set (Data Source) + +DNS Record Set Resource schema. + +## Example Usage + +```terraform +data "stackit_dns_record_set" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + zone_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + record_set_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} +``` + + +## Schema + +### Required + +- `project_id` (String) STACKIT project ID to which the dns record set is associated. +- `record_set_id` (String) The rr set id. +- `zone_id` (String) The zone ID to which the dns record set is associated. + +### Read-Only + +- `active` (Boolean) Specifies if the record set is active or not. +- `comment` (String) Comment. +- `error` (String) Error shows error in case create/update/delete failed. +- `id` (String) Terraform's internal resource ID.
+- `name` (String) Name of the record which should be a valid domain according to rfc1035 Section 2.3.4. E.g. `example.com` +- `records` (List of String) Records. +- `state` (String) Record set state. +- `ttl` (Number) Time to live. E.g. 3600 +- `type` (String) The record set type. E.g. `A` or `CNAME` diff --git a/docs/data-sources/dns_zone.md b/docs/data-sources/dns_zone.md new file mode 100644 index 00000000..cf58ab5e --- /dev/null +++ b/docs/data-sources/dns_zone.md @@ -0,0 +1,51 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_dns_zone Data Source - stackit" +subcategory: "" +description: |- + DNS Zone resource schema. +--- + +# stackit_dns_zone (Data Source) + +DNS Zone resource schema. + +## Example Usage + +```terraform +data "stackit_dns_zone" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + zone_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} +``` + + +## Schema + +### Required + +- `project_id` (String) STACKIT project ID to which the dns zone is associated. +- `zone_id` (String) The zone ID. + +### Read-Only + +- `acl` (String) The access control list. +- `active` (Boolean) +- `contact_email` (String) A contact e-mail for the zone. +- `default_ttl` (Number) Default time to live. +- `description` (String) Description of the zone. +- `dns_name` (String) The zone name. E.g. `example.com` +- `expire_time` (Number) Expire time. +- `id` (String) Terraform's internal resource ID. +- `is_reverse_zone` (Boolean) Specifies, if the zone is a reverse zone or not. +- `name` (String) The user given name of the zone. +- `negative_cache` (Number) Negative caching. +- `primaries` (List of String) Primary name server for secondary zone. +- `primary_name_server` (String) Primary name server. FQDN. +- `record_count` (Number) Record count how many records are in the zone. +- `refresh_time` (Number) Refresh time. +- `retry_time` (Number) Retry time. +- `serial_number` (Number) Serial number. 
+- `state` (String) Zone state. +- `type` (String) Zone type. +- `visibility` (String) Visibility of the zone. diff --git a/docs/data-sources/logme_credentials.md b/docs/data-sources/logme_credentials.md new file mode 100644 index 00000000..182c83bd --- /dev/null +++ b/docs/data-sources/logme_credentials.md @@ -0,0 +1,42 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_logme_credentials Data Source - stackit" +subcategory: "" +description: |- + LogMe credentials data source schema. +--- + +# stackit_logme_credentials (Data Source) + +LogMe credentials data source schema. + +## Example Usage + +```terraform +data "stackit_logme_credentials" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + credentials_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} +``` + + +## Schema + +### Required + +- `credentials_id` (String) The credentials ID. +- `instance_id` (String) ID of the LogMe instance. +- `project_id` (String) STACKIT project ID to which the instance is associated. + +### Read-Only + +- `host` (String) +- `hosts` (List of String) +- `http_api_uri` (String) +- `id` (String) Terraform's internal resource identifier. +- `name` (String) +- `password` (String, Sensitive) +- `port` (Number) +- `uri` (String) +- `username` (String) diff --git a/docs/data-sources/logme_instance.md b/docs/data-sources/logme_instance.md new file mode 100644 index 00000000..1bc0287a --- /dev/null +++ b/docs/data-sources/logme_instance.md @@ -0,0 +1,49 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_logme_instance Data Source - stackit" +subcategory: "" +description: |- + LogMe instance data source schema. +--- + +# stackit_logme_instance (Data Source) + +LogMe instance data source schema. 
+ +## Example Usage + +```terraform +data "stackit_logme_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} +``` + + +## Schema + +### Required + +- `instance_id` (String) ID of the LogMe instance. +- `project_id` (String) STACKIT Project ID to which the instance is associated. + +### Read-Only + +- `cf_guid` (String) +- `cf_space_guid` (String) +- `dashboard_url` (String) +- `id` (String) Terraform's internal resource identifier. +- `image_url` (String) +- `name` (String) Instance name. +- `organization_guid` (String) +- `parameters` (Attributes) (see [below for nested schema](#nestedatt--parameters)) +- `plan_id` (String) The selected plan ID. +- `plan_name` (String) The selected plan name. +- `version` (String) The service version. + + +### Nested Schema for `parameters` + +Read-Only: + +- `sgw_acl` (String) diff --git a/docs/data-sources/mariadb_credentials.md b/docs/data-sources/mariadb_credentials.md new file mode 100644 index 00000000..d8f56861 --- /dev/null +++ b/docs/data-sources/mariadb_credentials.md @@ -0,0 +1,42 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_mariadb_credentials Data Source - stackit" +subcategory: "" +description: |- + MariaDB credentials data source schema. +--- + +# stackit_mariadb_credentials (Data Source) + +MariaDB credentials data source schema. + +## Example Usage + +```terraform +data "stackit_mariadb_credentials" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + credentials_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} +``` + + +## Schema + +### Required + +- `credentials_id` (String) The credentials ID. +- `instance_id` (String) ID of the MariaDB instance. +- `project_id` (String) STACKIT project ID to which the instance is associated. 
+ +### Read-Only + +- `host` (String) +- `hosts` (List of String) +- `http_api_uri` (String) +- `id` (String) Terraform's internal resource identifier. +- `name` (String) +- `password` (String, Sensitive) +- `port` (Number) +- `uri` (String) +- `username` (String) diff --git a/docs/data-sources/mariadb_instance.md b/docs/data-sources/mariadb_instance.md new file mode 100644 index 00000000..1f42fab7 --- /dev/null +++ b/docs/data-sources/mariadb_instance.md @@ -0,0 +1,49 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_mariadb_instance Data Source - stackit" +subcategory: "" +description: |- + MariaDB instance data source schema. +--- + +# stackit_mariadb_instance (Data Source) + +MariaDB instance data source schema. + +## Example Usage + +```terraform +data "stackit_mariadb_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} +``` + + +## Schema + +### Required + +- `instance_id` (String) ID of the MariaDB instance. +- `project_id` (String) STACKIT Project ID to which the instance is associated. + +### Read-Only + +- `cf_guid` (String) +- `cf_space_guid` (String) +- `dashboard_url` (String) +- `id` (String) Terraform's internal resource identifier. +- `image_url` (String) +- `name` (String) Instance name. +- `organization_guid` (String) +- `parameters` (Attributes) (see [below for nested schema](#nestedatt--parameters)) +- `plan_id` (String) The selected plan ID. +- `plan_name` (String) The selected plan name. +- `version` (String) The service version. 
+ + +### Nested Schema for `parameters` + +Read-Only: + +- `sgw_acl` (String) diff --git a/docs/data-sources/opensearch_credentials.md b/docs/data-sources/opensearch_credentials.md new file mode 100644 index 00000000..5c1f4889 --- /dev/null +++ b/docs/data-sources/opensearch_credentials.md @@ -0,0 +1,42 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_opensearch_credentials Data Source - stackit" +subcategory: "" +description: |- + OpenSearch credentials data source schema. +--- + +# stackit_opensearch_credentials (Data Source) + +OpenSearch credentials data source schema. + +## Example Usage + +```terraform +data "stackit_opensearch_credentials" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + credentials_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} +``` + + +## Schema + +### Required + +- `credentials_id` (String) The credentials ID. +- `instance_id` (String) ID of the OpenSearch instance. +- `project_id` (String) STACKIT project ID to which the instance is associated. + +### Read-Only + +- `host` (String) +- `hosts` (List of String) +- `http_api_uri` (String) +- `id` (String) Terraform's internal resource identifier. +- `name` (String) +- `password` (String, Sensitive) +- `port` (Number) +- `uri` (String) +- `username` (String) diff --git a/docs/data-sources/opensearch_instance.md b/docs/data-sources/opensearch_instance.md new file mode 100644 index 00000000..b1690b93 --- /dev/null +++ b/docs/data-sources/opensearch_instance.md @@ -0,0 +1,49 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_opensearch_instance Data Source - stackit" +subcategory: "" +description: |- + OpenSearch instance data source schema. +--- + +# stackit_opensearch_instance (Data Source) + +OpenSearch instance data source schema. 
+ +## Example Usage + +```terraform +data "stackit_opensearch_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} +``` + + +## Schema + +### Required + +- `instance_id` (String) ID of the OpenSearch instance. +- `project_id` (String) STACKIT Project ID to which the instance is associated. + +### Read-Only + +- `cf_guid` (String) +- `cf_space_guid` (String) +- `dashboard_url` (String) +- `id` (String) Terraform's internal resource identifier. +- `image_url` (String) +- `name` (String) Instance name. +- `organization_guid` (String) +- `parameters` (Attributes) (see [below for nested schema](#nestedatt--parameters)) +- `plan_id` (String) The selected plan ID. +- `plan_name` (String) The selected plan name. +- `version` (String) The service version. + + +### Nested Schema for `parameters` + +Read-Only: + +- `sgw_acl` (String) diff --git a/docs/data-sources/postgresflex_instance.md b/docs/data-sources/postgresflex_instance.md new file mode 100644 index 00000000..8c76fa66 --- /dev/null +++ b/docs/data-sources/postgresflex_instance.md @@ -0,0 +1,58 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_postgresflex_instance Data Source - stackit" +subcategory: "" +description: |- + PostgresFlex instance data source schema. +--- + +# stackit_postgresflex_instance (Data Source) + +PostgresFlex instance data source schema. + +## Example Usage + +```terraform +data "stackit_postgresflex_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} +``` + + +## Schema + +### Required + +- `instance_id` (String) ID of the PostgresFlex instance. +- `project_id` (String) STACKIT project ID to which the instance is associated. + +### Read-Only + +- `acl` (List of String) The Access Control List (ACL) for the PostgresFlex instance. 
+- `backup_schedule` (String) +- `flavor` (Attributes) (see [below for nested schema](#nestedatt--flavor)) +- `id` (String) Terraform's internal resource ID. +- `name` (String) Instance name. +- `replicas` (Number) +- `storage` (Attributes) (see [below for nested schema](#nestedatt--storage)) +- `version` (String) + + +### Nested Schema for `flavor` + +Read-Only: + +- `cpu` (Number) +- `description` (String) +- `id` (String) +- `ram` (Number) + + + +### Nested Schema for `storage` + +Read-Only: + +- `class` (String) +- `size` (Number) diff --git a/docs/data-sources/postgresflex_user.md b/docs/data-sources/postgresflex_user.md new file mode 100644 index 00000000..27a43691 --- /dev/null +++ b/docs/data-sources/postgresflex_user.md @@ -0,0 +1,39 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_postgresflex_user Data Source - stackit" +subcategory: "" +description: |- + PostgresFlex user data source schema. +--- + +# stackit_postgresflex_user (Data Source) + +PostgresFlex user data source schema. + +## Example Usage + +```terraform +data "stackit_postgresflex_user" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + user_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} +``` + + +## Schema + +### Required + +- `instance_id` (String) ID of the PostgresFlex instance. +- `project_id` (String) STACKIT project ID to which the instance is associated. +- `user_id` (String) User ID. + +### Read-Only + +- `host` (String) +- `id` (String) Terraform's internal resource ID. 
+- `password` (String, Sensitive) +- `port` (Number) +- `roles` (Set of String) +- `username` (String) diff --git a/docs/data-sources/postgresql_credentials.md b/docs/data-sources/postgresql_credentials.md new file mode 100644 index 00000000..9a79846a --- /dev/null +++ b/docs/data-sources/postgresql_credentials.md @@ -0,0 +1,42 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_postgresql_credentials Data Source - stackit" +subcategory: "" +description: |- + PostgreSQL credentials data source schema. +--- + +# stackit_postgresql_credentials (Data Source) + +PostgreSQL credentials data source schema. + +## Example Usage + +```terraform +data "stackit_postgresql_credentials" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + credentials_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} +``` + + +## Schema + +### Required + +- `credentials_id` (String) The credentials ID. +- `instance_id` (String) ID of the PostgreSQL instance. +- `project_id` (String) STACKIT project ID to which the instance is associated. + +### Read-Only + +- `host` (String) +- `hosts` (List of String) +- `http_api_uri` (String) +- `id` (String) Terraform's internal resource identifier. +- `name` (String) +- `password` (String, Sensitive) +- `port` (Number) +- `uri` (String) +- `username` (String) diff --git a/docs/data-sources/postgresql_instance.md b/docs/data-sources/postgresql_instance.md new file mode 100644 index 00000000..0baf6131 --- /dev/null +++ b/docs/data-sources/postgresql_instance.md @@ -0,0 +1,54 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_postgresql_instance Data Source - stackit" +subcategory: "" +description: |- + PostgreSQL instance data source schema. +--- + +# stackit_postgresql_instance (Data Source) + +PostgreSQL instance data source schema. 
+ +## Example Usage + +```terraform +data "stackit_postgresql_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} +``` + + +## Schema + +### Required + +- `instance_id` (String) ID of the PostgreSQL instance. +- `project_id` (String) STACKIT Project ID to which the instance is associated. + +### Read-Only + +- `cf_guid` (String) +- `cf_space_guid` (String) +- `dashboard_url` (String) +- `id` (String) Terraform's internal resource identifier. +- `image_url` (String) +- `name` (String) Instance name. +- `organization_guid` (String) +- `parameters` (Attributes) (see [below for nested schema](#nestedatt--parameters)) +- `plan_id` (String) The selected plan ID. +- `plan_name` (String) The selected plan name. +- `version` (String) The service version. + + +### Nested Schema for `parameters` + +Read-Only: + +- `enable_monitoring` (Boolean) +- `metrics_frequency` (Number) +- `metrics_prefix` (String) +- `monitoring_instance_id` (String) +- `plugins` (List of String) +- `sgw_acl` (String) diff --git a/docs/data-sources/rabbitmq_credentials.md b/docs/data-sources/rabbitmq_credentials.md new file mode 100644 index 00000000..bdd43585 --- /dev/null +++ b/docs/data-sources/rabbitmq_credentials.md @@ -0,0 +1,42 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_rabbitmq_credentials Data Source - stackit" +subcategory: "" +description: |- + RabbitMQ credentials data source schema. +--- + +# stackit_rabbitmq_credentials (Data Source) + +RabbitMQ credentials data source schema. + +## Example Usage + +```terraform +data "stackit_rabbitmq_credentials" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + credentials_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} +``` + + +## Schema + +### Required + +- `credentials_id` (String) The credentials ID. 
+- `instance_id` (String) ID of the RabbitMQ instance. +- `project_id` (String) STACKIT project ID to which the instance is associated. + +### Read-Only + +- `host` (String) +- `hosts` (List of String) +- `http_api_uri` (String) +- `id` (String) Terraform's internal resource identifier. +- `name` (String) +- `password` (String, Sensitive) +- `port` (Number) +- `uri` (String) +- `username` (String) diff --git a/docs/data-sources/rabbitmq_instance.md b/docs/data-sources/rabbitmq_instance.md new file mode 100644 index 00000000..ad49f8e5 --- /dev/null +++ b/docs/data-sources/rabbitmq_instance.md @@ -0,0 +1,49 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_rabbitmq_instance Data Source - stackit" +subcategory: "" +description: |- + RabbitMQ instance data source schema. +--- + +# stackit_rabbitmq_instance (Data Source) + +RabbitMQ instance data source schema. + +## Example Usage + +```terraform +data "stackit_rabbitmq_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} +``` + + +## Schema + +### Required + +- `instance_id` (String) ID of the RabbitMQ instance. +- `project_id` (String) STACKIT Project ID to which the instance is associated. + +### Read-Only + +- `cf_guid` (String) +- `cf_space_guid` (String) +- `dashboard_url` (String) +- `id` (String) Terraform's internal resource identifier. +- `image_url` (String) +- `name` (String) Instance name. +- `organization_guid` (String) +- `parameters` (Attributes) (see [below for nested schema](#nestedatt--parameters)) +- `plan_id` (String) The selected plan ID. +- `plan_name` (String) The selected plan name. +- `version` (String) The service version. 
+ + +### Nested Schema for `parameters` + +Read-Only: + +- `sgw_acl` (String) diff --git a/docs/data-sources/redis_credentials.md b/docs/data-sources/redis_credentials.md new file mode 100644 index 00000000..650305e4 --- /dev/null +++ b/docs/data-sources/redis_credentials.md @@ -0,0 +1,42 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_redis_credentials Data Source - stackit" +subcategory: "" +description: |- + Redis credentials data source schema. +--- + +# stackit_redis_credentials (Data Source) + +Redis credentials data source schema. + +## Example Usage + +```terraform +data "stackit_redis_credentials" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + credentials_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} +``` + + +## Schema + +### Required + +- `credentials_id` (String) The credentials ID. +- `instance_id` (String) ID of the Redis instance. +- `project_id` (String) STACKIT project ID to which the instance is associated. + +### Read-Only + +- `host` (String) +- `hosts` (List of String) +- `http_api_uri` (String) +- `id` (String) Terraform's internal resource identifier. +- `name` (String) +- `password` (String, Sensitive) +- `port` (Number) +- `uri` (String) +- `username` (String) diff --git a/docs/data-sources/redis_instance.md b/docs/data-sources/redis_instance.md new file mode 100644 index 00000000..bc20bcae --- /dev/null +++ b/docs/data-sources/redis_instance.md @@ -0,0 +1,49 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_redis_instance Data Source - stackit" +subcategory: "" +description: |- + Redis instance data source schema. +--- + +# stackit_redis_instance (Data Source) + +Redis instance data source schema. 
+ +## Example Usage + +```terraform +data "stackit_redis_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} +``` + + +## Schema + +### Required + +- `instance_id` (String) ID of the Redis instance. +- `project_id` (String) STACKIT Project ID to which the instance is associated. + +### Read-Only + +- `cf_guid` (String) +- `cf_space_guid` (String) +- `dashboard_url` (String) +- `id` (String) Terraform's internal resource identifier. +- `image_url` (String) +- `name` (String) Instance name. +- `organization_guid` (String) +- `parameters` (Attributes) (see [below for nested schema](#nestedatt--parameters)) +- `plan_id` (String) The selected plan ID. +- `plan_name` (String) The selected plan name. +- `version` (String) The service version. + + +### Nested Schema for `parameters` + +Read-Only: + +- `sgw_acl` (String) diff --git a/docs/data-sources/resourcemanager_project.md b/docs/data-sources/resourcemanager_project.md new file mode 100644 index 00000000..2e2d4348 --- /dev/null +++ b/docs/data-sources/resourcemanager_project.md @@ -0,0 +1,34 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_resourcemanager_project Data Source - stackit" +subcategory: "" +description: |- + Resource Manager project data source schema. +--- + +# stackit_resourcemanager_project (Data Source) + +Resource Manager project data source schema. + +## Example Usage + +```terraform +data "stackit_resourcemanager_project" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + container_id = "example-container-abc123" +} +``` + + +## Schema + +### Required + +- `container_id` (String) Project container ID. + +### Read-Only + +- `id` (String) Terraform's internal unique identifier of the project, equivalent to the container ID +- `labels` (Map of String) Labels are key-value string pairs which can be attached to a resource container. 
A label key must match the regex [A-ZÄÜÖa-zäüöß0-9_-]{1,64}. A label value must match the regex ^$|[A-ZÄÜÖa-zäüöß0-9_-]{1,64} +- `name` (String) Project name. +- `parent_container_id` (String) Parent container ID diff --git a/docs/data-sources/ske_cluster.md b/docs/data-sources/ske_cluster.md new file mode 100644 index 00000000..e650dfd2 --- /dev/null +++ b/docs/data-sources/ske_cluster.md @@ -0,0 +1,119 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_ske_cluster Data Source - stackit" +subcategory: "" +description: |- + SKE Cluster data source schema. +--- + +# stackit_ske_cluster (Data Source) + +SKE Cluster data source schema. + +## Example Usage + +```terraform +data "stackit_ske_cluster" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + name = "example-name" +} +``` + + +## Schema + +### Required + +- `name` (String) The cluster name. +- `project_id` (String) STACKIT project ID to which the cluster is associated. + +### Read-Only + +- `allow_privileged_containers` (Boolean, Deprecated) DEPRECATED as of Kubernetes 1.25+ + Flag to specify if privileged mode for containers is enabled or not. +This should be used with care since it also disables a couple of other features like the use of some volume type (e.g. PVCs). +- `extensions` (Attributes) A single extensions block as defined below (see [below for nested schema](#nestedatt--extensions)) +- `hibernations` (Attributes List) One or more hibernation block as defined below. (see [below for nested schema](#nestedatt--hibernations)) +- `id` (String) Terraform's internal resource ID. +- `kube_config` (String, Sensitive) Kube config file used for connecting to the cluster +- `kubernetes_version` (String) Kubernetes version. +- `kubernetes_version_used` (String) Full Kubernetes version used. 
For example, if `1.22` was selected, this value may result in `1.22.15` +- `maintenance` (Attributes) A single maintenance block as defined below (see [below for nested schema](#nestedatt--maintenance)) +- `node_pools` (Attributes List) One or more `node_pool` block as defined below. (see [below for nested schema](#nestedatt--node_pools)) + + +### Nested Schema for `extensions` + +Read-Only: + +- `acl` (Attributes) Cluster access control configuration (see [below for nested schema](#nestedatt--extensions--acl)) +- `argus` (Attributes) A single argus block as defined below (see [below for nested schema](#nestedatt--extensions--argus)) + + +### Nested Schema for `extensions.acl` + +Read-Only: + +- `allowed_cidrs` (List of String) Specify a list of CIDRs to whitelist +- `enabled` (Boolean) Is ACL enabled? + + + +### Nested Schema for `extensions.argus` + +Read-Only: + +- `argus_instance_id` (String) Instance ID of argus +- `enabled` (Boolean) Flag to enable/disable argus extensions. + + + + +### Nested Schema for `hibernations` + +Read-Only: + +- `end` (String) End time of hibernation, in crontab syntax. +- `start` (String) Start time of cluster hibernation in crontab syntax. +- `timezone` (String) Timezone name corresponding to a file in the IANA Time Zone database. + + + +### Nested Schema for `maintenance` + +Read-Only: + +- `enable_kubernetes_version_updates` (Boolean) Flag to enable/disable auto-updates of the Kubernetes version. +- `enable_machine_image_version_updates` (Boolean) Flag to enable/disable auto-updates of the OS image version. +- `end` (String) Date time for maintenance window end. +- `start` (String) Date time for maintenance window start. + + + +### Nested Schema for `node_pools` + +Read-Only: + +- `availability_zones` (List of String) Specify a list of availability zones. +- `cri` (String) Specifies the container runtime. +- `labels` (Map of String) Labels to add to each node. +- `machine_type` (String) The machine type. 
+- `max_surge` (Number) The maximum number of nodes upgraded simultaneously. +- `max_unavailable` (Number) The maximum number of nodes unavailable during upgrades. +- `maximum` (Number) Maximum number of nodes in the pool. +- `minimum` (Number) Minimum number of nodes in the pool. +- `name` (String) Specifies the name of the node pool. +- `os_name` (String) The name of the OS image. +- `os_version` (String) The OS image version. +- `taints` (Attributes List) Specifies a taint list as defined below. (see [below for nested schema](#nestedatt--node_pools--taints)) +- `volume_size` (Number) The volume size in GB. +- `volume_type` (String) Specifies the volume type. + + +### Nested Schema for `node_pools.taints` + +Read-Only: + +- `effect` (String) The taint effect. +- `key` (String) Taint key to be applied to a node. +- `value` (String) Taint value corresponding to the taint key. diff --git a/docs/data-sources/ske_project.md b/docs/data-sources/ske_project.md new file mode 100644 index 00000000..f48cd109 --- /dev/null +++ b/docs/data-sources/ske_project.md @@ -0,0 +1,30 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_ske_project Data Source - stackit" +subcategory: "" +description: |- + +--- + +# stackit_ske_project (Data Source) + + + +## Example Usage + +```terraform +data "stackit_ske_project" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} +``` + + +## Schema + +### Required + +- `project_id` (String) STACKIT Project ID in which the kubernetes project is enabled. + +### Read-Only + +- `id` (String) Terraform's internal resource ID. 
diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 00000000..95d031ae --- /dev/null +++ b/docs/index.md @@ -0,0 +1,40 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit Provider" +subcategory: "" +description: |- + +--- + +# stackit Provider + + + +## Example Usage + +```terraform +provider "stackit" { + region = "eu01" +} +``` + + +## Schema + +### Optional + +- `argus_custom_endpoint` (String) Custom endpoint for the Argus service +- `credentials_path` (String) Path of JSON from where the credentials are read. Takes precedence over the env var `STACKIT_CREDENTIALS_PATH`. Default value is `~/.stackit/credentials.json`. +- `dns_custom_endpoint` (String) Custom endpoint for the DNS service +- `logme_custom_endpoint` (String) Custom endpoint for the LogMe service +- `mariadb_custom_endpoint` (String) Custom endpoint for the MariaDB service +- `opensearch_custom_endpoint` (String) Custom endpoint for the OpenSearch service +- `postgresflex_custom_endpoint` (String) Custom endpoint for the PostgresFlex service +- `postgresql_custom_endpoint` (String) Custom endpoint for the PostgreSQL service +- `rabbitmq_custom_endpoint` (String) Custom endpoint for the RabbitMQ service +- `redis_custom_endpoint` (String) Custom endpoint for the Redis service +- `region` (String) Region will be used as the default location for regional services. Not all services require a region, some are global +- `resourcemanager_custom_endpoint` (String) Custom endpoint for the Resource Manager service +- `service_account_email` (String) Service account email. It can also be set using the environment variable STACKIT_SERVICE_ACCOUNT_EMAIL +- `service_account_token` (String) Token used for authentication. If set, the token flow will be used to authenticate all operations. 
+- `ske_custom_endpoint` (String) Custom endpoint for the Kubernetes Engine (SKE) service diff --git a/docs/resources/argus_credential.md b/docs/resources/argus_credential.md new file mode 100644 index 00000000..d3d47cac --- /dev/null +++ b/docs/resources/argus_credential.md @@ -0,0 +1,27 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_argus_credential Resource - stackit" +subcategory: "" +description: |- + +--- + +# stackit_argus_credential (Resource) + + + + + + +## Schema + +### Required + +- `instance_id` (String) The Argus Instance ID the credential belongs to. +- `project_id` (String) STACKIT project ID to which the credential is associated. + +### Read-Only + +- `id` (String) Terraform's internal resource ID. +- `password` (String, Sensitive) Credential password +- `username` (String) Credential username diff --git a/docs/resources/argus_instance.md b/docs/resources/argus_instance.md new file mode 100644 index 00000000..4abec4dd --- /dev/null +++ b/docs/resources/argus_instance.md @@ -0,0 +1,59 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_argus_instance Resource - stackit" +subcategory: "" +description: |- + +--- + +# stackit_argus_instance (Resource) + + + +## Example Usage + +```terraform +resource "stackit_argus_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + name = "example-instance" + plan_name = "Monitoring-Medium-EU01" +} +``` + + +## Schema + +### Required + +- `name` (String) The name of the Argus instance. +- `plan_name` (String) Specifies the Argus plan. E.g. `Monitoring-Medium-EU01`. +- `project_id` (String) STACKIT project ID to which the instance is associated. + +### Optional + +- `parameters` (Map of String) Additional parameters. + +### Read-Only + +- `alerting_url` (String) Specifies Alerting URL. +- `dashboard_url` (String) Specifies Argus instance dashboard URL. 
+- `grafana_initial_admin_password` (String, Sensitive) Specifies an initial Grafana admin password. +- `grafana_initial_admin_user` (String) Specifies an initial Grafana admin username. +- `grafana_public_read_access` (Boolean) If true, anyone can access Grafana dashboards without logging in. +- `grafana_url` (String) Specifies Grafana URL. +- `id` (String) Terraform's internal resource ID. +- `instance_id` (String) The Argus instance ID. +- `is_updatable` (Boolean) Specifies if the instance can be updated. +- `jaeger_traces_url` (String) +- `jaeger_ui_url` (String) +- `logs_push_url` (String) Specifies URL for pushing logs. +- `logs_url` (String) Specifies Logs URL. +- `metrics_push_url` (String) Specifies URL for pushing metrics. +- `metrics_retention_days` (Number) Specifies for how many days the raw metrics are kept. +- `metrics_retention_days_1h_downsampling` (Number) Specifies for how many days the 1h downsampled metrics are kept. must be less than the value of the 5m downsampling retention. Default is set to `0` (disabled). +- `metrics_retention_days_5m_downsampling` (Number) Specifies for how many days the 5m downsampled metrics are kept. must be less than the value of the general retention. Default is set to `0` (disabled). +- `metrics_url` (String) Specifies metrics URL. +- `otlp_traces_url` (String) +- `plan_id` (String) The Argus plan ID. +- `targets_url` (String) Specifies Targets URL. 
+- `zipkin_spans_url` (String) diff --git a/docs/resources/argus_scrapeconfig.md b/docs/resources/argus_scrapeconfig.md new file mode 100644 index 00000000..82f07f83 --- /dev/null +++ b/docs/resources/argus_scrapeconfig.md @@ -0,0 +1,84 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_argus_scrapeconfig Resource - stackit" +subcategory: "" +description: |- + +--- + +# stackit_argus_scrapeconfig (Resource) + + + +## Example Usage + +```terraform +resource "stackit_argus_scrapeconfig" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + name = "example-job" + metrics_path = "/my-metrics" + saml2 = { + enable_url_parameters = true + } + targets = [ + { + urls = ["url1", "urls2"] + labels = { + "url1" = "dev" + } + } + ] +} +``` + + +## Schema + +### Required + +- `instance_id` (String) Argus instance ID to which the scraping job is associated. +- `metrics_path` (String) Specifies the job scraping url path. E.g. `/metrics`. +- `name` (String) Specifies the name of the scraping job. +- `project_id` (String) STACKIT project ID to which the scraping job is associated. +- `targets` (Attributes List) The targets list (specified by the static config). (see [below for nested schema](#nestedatt--targets)) + +### Optional + +- `basic_auth` (Attributes) A basic authentication block. (see [below for nested schema](#nestedatt--basic_auth)) +- `saml2` (Attributes) A SAML2 configuration block. (see [below for nested schema](#nestedatt--saml2)) +- `scheme` (String) Specifies the http scheme. E.g. `https`. +- `scrape_interval` (String) Specifies the scrape interval as duration string. E.g. `5m`. +- `scrape_timeout` (String) Specifies the scrape timeout as duration string. E.g.`2m`. + +### Read-Only + +- `id` (String) Terraform's internal resource ID. + + +### Nested Schema for `targets` + +Required: + +- `urls` (List of String) Specifies target URLs. 
+ +Optional: + +- `labels` (Map of String) Specifies labels. + + + +### Nested Schema for `basic_auth` + +Required: + +- `password` (String, Sensitive) Specifies basic auth password. +- `username` (String) Specifies basic auth username. + + + +### Nested Schema for `saml2` + +Optional: + +- `enable_url_parameters` (Boolean) Are URL parameters enabled? diff --git a/docs/resources/dns_record_set.md b/docs/resources/dns_record_set.md new file mode 100644 index 00000000..8ac43e9d --- /dev/null +++ b/docs/resources/dns_record_set.md @@ -0,0 +1,48 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_dns_record_set Resource - stackit" +subcategory: "" +description: |- + DNS Record Set Resource schema. +--- + +# stackit_dns_record_set (Resource) + +DNS Record Set Resource schema. + +## Example Usage + +```terraform +resource "stackit_dns_record_set" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + zone_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + name = "example-record-set.www.example-zone.com" + type = "A" + comment = "Example comment" + records = ["1.2.3.4"] +} +``` + + +## Schema + +### Required + +- `name` (String) Name of the record which should be a valid domain according to rfc1035 Section 2.3.4. E.g. `example.com` +- `project_id` (String) STACKIT project ID to which the dns record set is associated. +- `records` (List of String) Records. +- `zone_id` (String) The zone ID to which the dns record set is associated. + +### Optional + +- `active` (Boolean) Specifies if the record set is active or not. +- `comment` (String) Comment. +- `ttl` (Number) Time to live. E.g. 3600 +- `type` (String) The record set type. E.g. `A` or `CNAME` + +### Read-Only + +- `error` (String) Error shows error in case create/update/delete failed. +- `id` (String) Terraform's internal resource ID. +- `record_set_id` (String) The rr set id. +- `state` (String) Record set state. 
diff --git a/docs/resources/dns_zone.md b/docs/resources/dns_zone.md new file mode 100644 index 00000000..0d84f404 --- /dev/null +++ b/docs/resources/dns_zone.md @@ -0,0 +1,60 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_dns_zone Resource - stackit" +subcategory: "" +description: |- + DNS Zone resource schema. +--- + +# stackit_dns_zone (Resource) + +DNS Zone resource schema. + +## Example Usage + +```terraform +resource "stackit_dns_zone" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + name = "Example zone" + dns_name = "www.example-zone.com" + contact_email = "aa@bb.ccc" + type = "primary" + acl = "192.168.0.0/24" + description = "Example description" + default_ttl = 1230 +} +``` + + +## Schema + +### Required + +- `dns_name` (String) The zone name. E.g. `example.com` +- `name` (String) The user given name of the zone. +- `project_id` (String) STACKIT project ID to which the dns zone is associated. + +### Optional + +- `acl` (String) The access control list. E.g. `0.0.0.0/0,::/0` +- `active` (Boolean) +- `contact_email` (String) A contact e-mail for the zone. +- `default_ttl` (Number) Default time to live. E.g. 3600. +- `description` (String) Description of the zone. +- `expire_time` (Number) Expire time. E.g. 1209600. +- `is_reverse_zone` (Boolean) Specifies, if the zone is a reverse zone or not. +- `negative_cache` (Number) Negative caching. E.g. 60 +- `primaries` (List of String) Primary name server for secondary zone. E.g. ["1.2.3.4"] +- `refresh_time` (Number) Refresh time. E.g. 3600 +- `retry_time` (Number) Retry time. E.g. 600 +- `type` (String) Zone type. E.g. `primary` + +### Read-Only + +- `id` (String) Terraform's internal resource ID. +- `primary_name_server` (String) Primary name server. FQDN. +- `record_count` (Number) Record count how many records are in the zone. +- `serial_number` (Number) Serial number. E.g. `2022111400`. +- `state` (String) Zone state. E.g. 
`CREATE_SUCCEEDED`. +- `visibility` (String) Visibility of the zone. E.g. `public`. +- `zone_id` (String) The zone ID. diff --git a/docs/resources/logme_credentials.md b/docs/resources/logme_credentials.md new file mode 100644 index 00000000..23b7e6e4 --- /dev/null +++ b/docs/resources/logme_credentials.md @@ -0,0 +1,34 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_logme_credentials Resource - stackit" +subcategory: "" +description: |- + LogMe credentials resource schema. +--- + +# stackit_logme_credentials (Resource) + +LogMe credentials resource schema. + + + + +## Schema + +### Required + +- `instance_id` (String) ID of the LogMe instance. +- `project_id` (String) STACKIT Project ID to which the instance is associated. + +### Read-Only + +- `credentials_id` (String) The credentials ID. +- `host` (String) +- `hosts` (List of String) +- `http_api_uri` (String) +- `id` (String) Terraform's internal resource identifier. +- `name` (String) +- `password` (String, Sensitive) +- `port` (Number) +- `uri` (String) +- `username` (String) diff --git a/docs/resources/logme_instance.md b/docs/resources/logme_instance.md new file mode 100644 index 00000000..c8eea5b4 --- /dev/null +++ b/docs/resources/logme_instance.md @@ -0,0 +1,57 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_logme_instance Resource - stackit" +subcategory: "" +description: |- + LogMe instance resource schema. +--- + +# stackit_logme_instance (Resource) + +LogMe instance resource schema. + +## Example Usage + +```terraform +resource "stackit_logme_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + name = "example-instance" + version = "10" + plan_name = "example-plan-name" + parameters = { + sgw_acl = "x.x.x.x/x,y.y.y.y/y" + } +} +``` + + +## Schema + +### Required + +- `name` (String) Instance name. +- `plan_name` (String) The selected plan name. 
+- `project_id` (String) STACKIT project ID to which the instance is associated. +- `version` (String) The service version. + +### Optional + +- `parameters` (Attributes) (see [below for nested schema](#nestedatt--parameters)) + +### Read-Only + +- `cf_guid` (String) +- `cf_space_guid` (String) +- `dashboard_url` (String) +- `id` (String) Terraform's internal resource ID. +- `image_url` (String) +- `instance_id` (String) ID of the LogMe instance. +- `organization_guid` (String) +- `plan_id` (String) The selected plan ID. + + +### Nested Schema for `parameters` + +Optional: + +- `sgw_acl` (String) diff --git a/docs/resources/mariadb_credentials.md b/docs/resources/mariadb_credentials.md new file mode 100644 index 00000000..692c8bad --- /dev/null +++ b/docs/resources/mariadb_credentials.md @@ -0,0 +1,41 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_mariadb_credentials Resource - stackit" +subcategory: "" +description: |- + MariaDB credentials resource schema. +--- + +# stackit_mariadb_credentials (Resource) + +MariaDB credentials resource schema. + +## Example Usage + +```terraform +resource "stackit_mariadb_credentials" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} +``` + + +## Schema + +### Required + +- `instance_id` (String) ID of the MariaDB instance. +- `project_id` (String) STACKIT Project ID to which the instance is associated. + +### Read-Only + +- `credentials_id` (String) The credentials ID. +- `host` (String) +- `hosts` (List of String) +- `http_api_uri` (String) +- `id` (String) Terraform's internal resource identifier. 
+- `name` (String) +- `password` (String, Sensitive) +- `port` (Number) +- `uri` (String) +- `username` (String) diff --git a/docs/resources/mariadb_instance.md b/docs/resources/mariadb_instance.md new file mode 100644 index 00000000..14d082d8 --- /dev/null +++ b/docs/resources/mariadb_instance.md @@ -0,0 +1,57 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_mariadb_instance Resource - stackit" +subcategory: "" +description: |- + MariaDB instance resource schema. +--- + +# stackit_mariadb_instance (Resource) + +MariaDB instance resource schema. + +## Example Usage + +```terraform +resource "stackit_mariadb_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + name = "example-instance" + version = "10" + plan_name = "example-plan-name" + parameters = { + sgw_acl = "x.x.x.x/x,y.y.y.y/y" + } +} +``` + + +## Schema + +### Required + +- `name` (String) Instance name. +- `plan_name` (String) The selected plan name. +- `project_id` (String) STACKIT project ID to which the instance is associated. +- `version` (String) The service version. + +### Optional + +- `parameters` (Attributes) (see [below for nested schema](#nestedatt--parameters)) + +### Read-Only + +- `cf_guid` (String) +- `cf_space_guid` (String) +- `dashboard_url` (String) +- `id` (String) Terraform's internal resource ID. +- `image_url` (String) +- `instance_id` (String) ID of the MariaDB instance. +- `organization_guid` (String) +- `plan_id` (String) The selected plan ID. 
+ + +### Nested Schema for `parameters` + +Optional: + +- `sgw_acl` (String) diff --git a/docs/resources/opensearch_credentials.md b/docs/resources/opensearch_credentials.md new file mode 100644 index 00000000..47750cf1 --- /dev/null +++ b/docs/resources/opensearch_credentials.md @@ -0,0 +1,41 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_opensearch_credentials Resource - stackit" +subcategory: "" +description: |- + OpenSearch credentials resource schema. +--- + +# stackit_opensearch_credentials (Resource) + +OpenSearch credentials resource schema. + +## Example Usage + +```terraform +resource "stackit_opensearch_credentials" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} +``` + + +## Schema + +### Required + +- `instance_id` (String) ID of the OpenSearch instance. +- `project_id` (String) STACKIT Project ID to which the instance is associated. + +### Read-Only + +- `credentials_id` (String) The credentials ID. +- `host` (String) +- `hosts` (List of String) +- `http_api_uri` (String) +- `id` (String) Terraform's internal resource identifier. +- `name` (String) +- `password` (String, Sensitive) +- `port` (Number) +- `uri` (String) +- `username` (String) diff --git a/docs/resources/opensearch_instance.md b/docs/resources/opensearch_instance.md new file mode 100644 index 00000000..e30df486 --- /dev/null +++ b/docs/resources/opensearch_instance.md @@ -0,0 +1,57 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_opensearch_instance Resource - stackit" +subcategory: "" +description: |- + OpenSearch instance resource schema. +--- + +# stackit_opensearch_instance (Resource) + +OpenSearch instance resource schema. 
+ +## Example Usage + +```terraform +resource "stackit_opensearch_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + name = "example-instance" + version = "10" + plan_name = "example-plan-name" + parameters = { + sgw_acl = "x.x.x.x/x,y.y.y.y/y" + } +} +``` + + +## Schema + +### Required + +- `name` (String) Instance name. +- `plan_name` (String) The selected plan name. +- `project_id` (String) STACKIT project ID to which the instance is associated. +- `version` (String) The service version. + +### Optional + +- `parameters` (Attributes) (see [below for nested schema](#nestedatt--parameters)) + +### Read-Only + +- `cf_guid` (String) +- `cf_space_guid` (String) +- `dashboard_url` (String) +- `id` (String) Terraform's internal resource ID. +- `image_url` (String) +- `instance_id` (String) ID of the OpenSearch instance. +- `organization_guid` (String) +- `plan_id` (String) The selected plan ID. + + +### Nested Schema for `parameters` + +Optional: + +- `sgw_acl` (String) diff --git a/docs/resources/postgresflex_instance.md b/docs/resources/postgresflex_instance.md new file mode 100644 index 00000000..f6db6bdd --- /dev/null +++ b/docs/resources/postgresflex_instance.md @@ -0,0 +1,73 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_postgresflex_instance Resource - stackit" +subcategory: "" +description: |- + PostgresFlex instance resource schema. +--- + +# stackit_postgresflex_instance (Resource) + +PostgresFlex instance resource schema. 
+ +## Example Usage + +```terraform +resource "stackit_postgresflex_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + name = "example-instance" + acl = ["XXX.XXX.XXX.X/XX", "XX.XXX.XX.X/XX"] + backup_schedule = "00 00 * * *" + flavor = { + cpu = 2 + ram = 4 + } + replicas = 3 + storage = { + class = "class" + size = 5 + } + version = 14 +} +``` + + +## Schema + +### Required + +- `acl` (List of String) The Access Control List (ACL) for the PostgresFlex instance. +- `backup_schedule` (String) +- `flavor` (Attributes) (see [below for nested schema](#nestedatt--flavor)) +- `name` (String) Instance name. +- `project_id` (String) STACKIT project ID to which the instance is associated. +- `replicas` (Number) +- `storage` (Attributes) (see [below for nested schema](#nestedatt--storage)) +- `version` (String) + +### Read-Only + +- `id` (String) Terraform's internal resource ID. +- `instance_id` (String) ID of the PostgresFlex instance. + + +### Nested Schema for `flavor` + +Required: + +- `cpu` (Number) +- `ram` (Number) + +Read-Only: + +- `description` (String) +- `id` (String) + + + +### Nested Schema for `storage` + +Required: + +- `class` (String) +- `size` (Number) diff --git a/docs/resources/postgresflex_user.md b/docs/resources/postgresflex_user.md new file mode 100644 index 00000000..8f44bacb --- /dev/null +++ b/docs/resources/postgresflex_user.md @@ -0,0 +1,40 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_postgresflex_user Resource - stackit" +subcategory: "" +description: |- + PostgresFlex user resource schema. +--- + +# stackit_postgresflex_user (Resource) + +PostgresFlex user resource schema. 
+ +## Example Usage + +```terraform +resource "stackit_postgresflex_user" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + username = "username" + roles = ["role"] +} +``` + + +## Schema + +### Required + +- `instance_id` (String) ID of the PostgresFlex instance. +- `project_id` (String) STACKIT project ID to which the instance is associated. +- `roles` (Set of String) +- `username` (String) + +### Read-Only + +- `host` (String) +- `id` (String) Terraform's internal resource ID. +- `password` (String, Sensitive) +- `port` (Number) +- `user_id` (String) User ID. diff --git a/docs/resources/postgresql_credentials.md b/docs/resources/postgresql_credentials.md new file mode 100644 index 00000000..ebdfe839 --- /dev/null +++ b/docs/resources/postgresql_credentials.md @@ -0,0 +1,41 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_postgresql_credentials Resource - stackit" +subcategory: "" +description: |- + PostgreSQL credentials resource schema. +--- + +# stackit_postgresql_credentials (Resource) + +PostgreSQL credentials resource schema. + +## Example Usage + +```terraform +resource "stackit_postgresql_credentials" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} +``` + + +## Schema + +### Required + +- `instance_id` (String) ID of the PostgreSQL instance. +- `project_id` (String) STACKIT Project ID to which the instance is associated. + +### Read-Only + +- `credentials_id` (String) The credentials ID. +- `host` (String) +- `hosts` (List of String) +- `http_api_uri` (String) +- `id` (String) Terraform's internal resource identifier. 
+- `name` (String) +- `password` (String, Sensitive) +- `port` (Number) +- `uri` (String) +- `username` (String) diff --git a/docs/resources/postgresql_instance.md b/docs/resources/postgresql_instance.md new file mode 100644 index 00000000..c53d82a0 --- /dev/null +++ b/docs/resources/postgresql_instance.md @@ -0,0 +1,62 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_postgresql_instance Resource - stackit" +subcategory: "" +description: |- + PostgreSQL instance resource schema. +--- + +# stackit_postgresql_instance (Resource) + +PostgreSQL instance resource schema. + +## Example Usage + +```terraform +resource "stackit_postgresql_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + name = "example-instance" + version = "10" + plan_name = "example-plan-name" + parameters = { + sgw_acl = "x.x.x.x/x,y.y.y.y/y" + } +} +``` + + +## Schema + +### Required + +- `name` (String) Instance name. +- `plan_name` (String) The selected plan name. +- `project_id` (String) STACKIT project ID to which the instance is associated. +- `version` (String) The service version. + +### Optional + +- `parameters` (Attributes) (see [below for nested schema](#nestedatt--parameters)) + +### Read-Only + +- `cf_guid` (String) +- `cf_space_guid` (String) +- `dashboard_url` (String) +- `id` (String) Terraform's internal resource ID. +- `image_url` (String) +- `instance_id` (String) ID of the PostgreSQL instance. +- `organization_guid` (String) +- `plan_id` (String) The selected plan ID. 
+ + +### Nested Schema for `parameters` + +Optional: + +- `enable_monitoring` (Boolean) +- `metrics_frequency` (Number) +- `metrics_prefix` (String) +- `monitoring_instance_id` (String) +- `plugins` (List of String) +- `sgw_acl` (String) diff --git a/docs/resources/rabbitmq_credentials.md b/docs/resources/rabbitmq_credentials.md new file mode 100644 index 00000000..04cc9288 --- /dev/null +++ b/docs/resources/rabbitmq_credentials.md @@ -0,0 +1,41 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_rabbitmq_credentials Resource - stackit" +subcategory: "" +description: |- + RabbitMQ credentials resource schema. +--- + +# stackit_rabbitmq_credentials (Resource) + +RabbitMQ credentials resource schema. + +## Example Usage + +```terraform +resource "stackit_rabbitmq_credentials" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} +``` + + +## Schema + +### Required + +- `instance_id` (String) ID of the RabbitMQ instance. +- `project_id` (String) STACKIT Project ID to which the instance is associated. + +### Read-Only + +- `credentials_id` (String) The credentials ID. +- `host` (String) +- `hosts` (List of String) +- `http_api_uri` (String) +- `id` (String) Terraform's internal resource identifier. +- `name` (String) +- `password` (String, Sensitive) +- `port` (Number) +- `uri` (String) +- `username` (String) diff --git a/docs/resources/rabbitmq_instance.md b/docs/resources/rabbitmq_instance.md new file mode 100644 index 00000000..db3dd4bd --- /dev/null +++ b/docs/resources/rabbitmq_instance.md @@ -0,0 +1,57 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_rabbitmq_instance Resource - stackit" +subcategory: "" +description: |- + RabbitMQ instance resource schema. +--- + +# stackit_rabbitmq_instance (Resource) + +RabbitMQ instance resource schema. 
+ +## Example Usage + +```terraform +resource "stackit_rabbitmq_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + name = "example-instance" + version = "10" + plan_name = "example-plan-name" + parameters = { + sgw_acl = "x.x.x.x/x,y.y.y.y/y" + } +} +``` + + +## Schema + +### Required + +- `name` (String) Instance name. +- `plan_name` (String) The selected plan name. +- `project_id` (String) STACKIT project ID to which the instance is associated. +- `version` (String) The service version. + +### Optional + +- `parameters` (Attributes) (see [below for nested schema](#nestedatt--parameters)) + +### Read-Only + +- `cf_guid` (String) +- `cf_space_guid` (String) +- `dashboard_url` (String) +- `id` (String) Terraform's internal resource ID. +- `image_url` (String) +- `instance_id` (String) ID of the RabbitMQ instance. +- `organization_guid` (String) +- `plan_id` (String) The selected plan ID. + + +### Nested Schema for `parameters` + +Optional: + +- `sgw_acl` (String) diff --git a/docs/resources/redis_credentials.md b/docs/resources/redis_credentials.md new file mode 100644 index 00000000..bd773245 --- /dev/null +++ b/docs/resources/redis_credentials.md @@ -0,0 +1,41 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_redis_credentials Resource - stackit" +subcategory: "" +description: |- + Redis credentials resource schema. +--- + +# stackit_redis_credentials (Resource) + +Redis credentials resource schema. + +## Example Usage + +```terraform +resource "stackit_redis_credentials" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} +``` + + +## Schema + +### Required + +- `instance_id` (String) ID of the Redis instance. +- `project_id` (String) STACKIT Project ID to which the instance is associated. + +### Read-Only + +- `credentials_id` (String) The credentials ID. 
+- `host` (String) +- `hosts` (List of String) +- `http_api_uri` (String) +- `id` (String) Terraform's internal resource identifier. +- `name` (String) +- `password` (String, Sensitive) +- `port` (Number) +- `uri` (String) +- `username` (String) diff --git a/docs/resources/redis_instance.md b/docs/resources/redis_instance.md new file mode 100644 index 00000000..5b323d71 --- /dev/null +++ b/docs/resources/redis_instance.md @@ -0,0 +1,57 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_redis_instance Resource - stackit" +subcategory: "" +description: |- + Redis instance resource schema. +--- + +# stackit_redis_instance (Resource) + +Redis instance resource schema. + +## Example Usage + +```terraform +resource "stackit_redis_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + name = "example-instance" + version = "10" + plan_name = "example-plan-name" + parameters = { + sgw_acl = "x.x.x.x/x,y.y.y.y/y" + } +} +``` + + +## Schema + +### Required + +- `name` (String) Instance name. +- `plan_name` (String) The selected plan name. +- `project_id` (String) STACKIT project ID to which the instance is associated. +- `version` (String) The service version. + +### Optional + +- `parameters` (Attributes) (see [below for nested schema](#nestedatt--parameters)) + +### Read-Only + +- `cf_guid` (String) +- `cf_space_guid` (String) +- `dashboard_url` (String) +- `id` (String) Terraform's internal resource ID. +- `image_url` (String) +- `instance_id` (String) ID of the Redis instance. +- `organization_guid` (String) +- `plan_id` (String) The selected plan ID. 
+ + +### Nested Schema for `parameters` + +Optional: + +- `sgw_acl` (String) diff --git a/docs/resources/resourcemanager_project.md b/docs/resources/resourcemanager_project.md new file mode 100644 index 00000000..27bcffc2 --- /dev/null +++ b/docs/resources/resourcemanager_project.md @@ -0,0 +1,43 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_resourcemanager_project Resource - stackit" +subcategory: "" +description: |- + Resource Manager project resource schema. +--- + +# stackit_resourcemanager_project (Resource) + +Resource Manager project resource schema. + +## Example Usage + +```terraform +resource "stackit_resourcemanager_project" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + parent_container_id = "example-parent-container-abc123" + name = "example-container" + labels = { + "Label 1" = "foo" + } + owner_email = "aa@bb.ccc" +} +``` + + +## Schema + +### Required + +- `name` (String) Project name. +- `owner_email` (String) Email address of the owner of the project. This value is only considered during creation. Changing it afterwards will have no effect. +- `parent_container_id` (String) Parent container ID + +### Optional + +- `labels` (Map of String) Labels are key-value string pairs which can be attached to a resource container. A label key must match the regex [A-ZÄÜÖa-zäüöß0-9_-]{1,64}. A label value must match the regex ^$|[A-ZÄÜÖa-zäüöß0-9_-]{1,64} + +### Read-Only + +- `container_id` (String) Project container ID. Globally unique, user-friendly identifier. 
+- `id` (String) Terraform's internal unique identifier of the project, equivalent to the container ID diff --git a/docs/resources/ske_cluster.md b/docs/resources/ske_cluster.md new file mode 100644 index 00000000..9aeb0ef2 --- /dev/null +++ b/docs/resources/ske_cluster.md @@ -0,0 +1,152 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_ske_cluster Resource - stackit" +subcategory: "" +description: |- + SKE Cluster Resource schema. +--- + +# stackit_ske_cluster (Resource) + +SKE Cluster Resource schema. + +## Example Usage + +```terraform +resource "stackit_ske_cluster" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + name = "example-name" + kubernetes_version = "1.25" + node_pools = [ + { + name = "np-example" + machine_type = "b1.2" + os_version = "3510.2.5" + minimum = "2" + maximum = "3" + availability_zones = ["eu01-3"] + } + ] + maintenance = { + enable_kubernetes_version_updates = true + enable_machine_image_version_updates = true + start = "01:00:00Z" + end = "02:00:00Z" + } +} +``` + + +## Schema + +### Required + +- `kubernetes_version` (String) Kubernetes version. Must only contain major and minor version (e.g. 1.22) +- `name` (String) The cluster name. +- `node_pools` (Attributes List) One or more `node_pool` block as defined below. (see [below for nested schema](#nestedatt--node_pools)) +- `project_id` (String) STACKIT project ID to which the cluster is associated. + +### Optional + +- `allow_privileged_containers` (Boolean) Flag to specify if privileged mode for containers is enabled or not. +This should be used with care since it also disables a couple of other features like the use of some volume type (e.g. PVCs). +Deprecated as of Kubernetes 1.25 and later +- `extensions` (Attributes) A single extensions block as defined below. (see [below for nested schema](#nestedatt--extensions)) +- `hibernations` (Attributes List) One or more hibernation block as defined below. 
(see [below for nested schema](#nestedatt--hibernations)) +- `maintenance` (Attributes) A single maintenance block as defined below. (see [below for nested schema](#nestedatt--maintenance)) + +### Read-Only + +- `id` (String) Terraform's internal resource ID. +- `kube_config` (String, Sensitive) Kube config file used for connecting to the cluster +- `kubernetes_version_used` (String) Full Kubernetes version used. For example, if 1.22 was selected, this value may result in 1.22.15 + + +### Nested Schema for `node_pools` + +Required: + +- `availability_zones` (List of String) Specify a list of availability zones. E.g. `eu01-m` +- `machine_type` (String) The machine type. +- `maximum` (Number) Maximum number of nodes in the pool. +- `minimum` (Number) Minimum number of nodes in the pool. +- `name` (String) Specifies the name of the node pool. +- `os_version` (String) The OS image version. + +Optional: + +- `cri` (String) Specifies the container runtime. E.g. `containerd` +- `labels` (Map of String) Labels to add to each node. +- `max_surge` (Number) Maximum number of additional VMs that are created during an update. +- `max_unavailable` (Number) Maximum number of VMs that can be unavailable during an update. +- `os_name` (String) The name of the OS image. E.g. `flatcar`. +- `taints` (Attributes List) Specifies a taint list as defined below. (see [below for nested schema](#nestedatt--node_pools--taints)) +- `volume_size` (Number) The volume size in GB. E.g. `20` +- `volume_type` (String) Specifies the volume type. E.g. `storage_premium_perf1`. + + +### Nested Schema for `node_pools.taints` + +Required: + +- `effect` (String) The taint effect. E.g `PreferNoSchedule`. +- `key` (String) Taint key to be applied to a node. + +Optional: + +- `value` (String) Taint value corresponding to the taint key. + + + + +### Nested Schema for `extensions` + +Optional: + +- `acl` (Attributes) Cluster access control configuration. 
(see [below for nested schema](#nestedatt--extensions--acl)) +- `argus` (Attributes) A single argus block as defined below. (see [below for nested schema](#nestedatt--extensions--argus)) + + +### Nested Schema for `extensions.acl` + +Required: + +- `allowed_cidrs` (List of String) Specify a list of CIDRs to whitelist. +- `enabled` (Boolean) Is ACL enabled? + + + +### Nested Schema for `extensions.argus` + +Required: + +- `enabled` (Boolean) Flag to enable/disable Argus extensions. + +Optional: + +- `argus_instance_id` (String) Argus instance ID to choose which Argus instance is used. Required when enabled is set to `true`. + + + + +### Nested Schema for `hibernations` + +Required: + +- `end` (String) End time of hibernation in crontab syntax. E.g. `0 8 * * *` for waking up the cluster at 8am. +- `start` (String) Start time of cluster hibernation in crontab syntax. E.g. `0 18 * * *` for starting everyday at 6pm. + +Optional: + +- `timezone` (String) Timezone name corresponding to a file in the IANA Time Zone database. i.e. `Europe/Berlin`. + + + +### Nested Schema for `maintenance` + +Required: + +- `enable_kubernetes_version_updates` (Boolean) Flag to enable/disable auto-updates of the Kubernetes version. +- `enable_machine_image_version_updates` (Boolean) Flag to enable/disable auto-updates of the OS image version. +- `end` (String) Time for maintenance window end. E.g. `01:23:45Z`, `05:00:00+02:00`. +- `start` (String) Time for maintenance window start. E.g. `01:23:45Z`, `05:00:00+02:00`. 
diff --git a/docs/resources/ske_project.md b/docs/resources/ske_project.md new file mode 100644 index 00000000..438ea4d6 --- /dev/null +++ b/docs/resources/ske_project.md @@ -0,0 +1,30 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "stackit_ske_project Resource - stackit" +subcategory: "" +description: |- + +--- + +# stackit_ske_project (Resource) + + + +## Example Usage + +```terraform +resource "stackit_ske_project" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} +``` + + +## Schema + +### Required + +- `project_id` (String) STACKIT Project ID in which the kubernetes project is enabled. + +### Read-Only + +- `id` (String) Terraform's internal resource ID. diff --git a/examples/data-sources/stackit_argus_instance/data-source.tf b/examples/data-sources/stackit_argus_instance/data-source.tf new file mode 100644 index 00000000..462e0736 --- /dev/null +++ b/examples/data-sources/stackit_argus_instance/data-source.tf @@ -0,0 +1,4 @@ +data "stackit_argus_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} diff --git a/examples/data-sources/stackit_argus_scrapeconfig/data-source.tf b/examples/data-sources/stackit_argus_scrapeconfig/data-source.tf new file mode 100644 index 00000000..e78d7ae5 --- /dev/null +++ b/examples/data-sources/stackit_argus_scrapeconfig/data-source.tf @@ -0,0 +1,5 @@ +data "stackit_argus_scrapeconfig" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + job_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} diff --git a/examples/data-sources/stackit_dns_record_set/data-source.tf b/examples/data-sources/stackit_dns_record_set/data-source.tf new file mode 100644 index 00000000..ad81e4d9 --- /dev/null +++ b/examples/data-sources/stackit_dns_record_set/data-source.tf @@ -0,0 +1,5 @@ +data "stackit_dns_record_set" "example" { + project_id = 
"xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + zone_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + record_set_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} diff --git a/examples/data-sources/stackit_dns_zone/data-source.tf b/examples/data-sources/stackit_dns_zone/data-source.tf new file mode 100644 index 00000000..227e1268 --- /dev/null +++ b/examples/data-sources/stackit_dns_zone/data-source.tf @@ -0,0 +1,4 @@ +data "stackit_dns_zone" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + zone_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} diff --git a/examples/data-sources/stackit_logme_credentials/data-source.tf b/examples/data-sources/stackit_logme_credentials/data-source.tf new file mode 100644 index 00000000..4522cebf --- /dev/null +++ b/examples/data-sources/stackit_logme_credentials/data-source.tf @@ -0,0 +1,5 @@ +data "stackit_logme_credentials" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + credentials_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} diff --git a/examples/data-sources/stackit_logme_instance/data-source.tf b/examples/data-sources/stackit_logme_instance/data-source.tf new file mode 100644 index 00000000..5fb2e57f --- /dev/null +++ b/examples/data-sources/stackit_logme_instance/data-source.tf @@ -0,0 +1,4 @@ +data "stackit_logme_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} diff --git a/examples/data-sources/stackit_mariadb_credentials/data-source.tf b/examples/data-sources/stackit_mariadb_credentials/data-source.tf new file mode 100644 index 00000000..8a6c8734 --- /dev/null +++ b/examples/data-sources/stackit_mariadb_credentials/data-source.tf @@ -0,0 +1,5 @@ +data "stackit_mariadb_credentials" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + credentials_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} 
diff --git a/examples/data-sources/stackit_mariadb_instance/data-source.tf b/examples/data-sources/stackit_mariadb_instance/data-source.tf new file mode 100644 index 00000000..940c42db --- /dev/null +++ b/examples/data-sources/stackit_mariadb_instance/data-source.tf @@ -0,0 +1,4 @@ +data "stackit_mariadb_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} diff --git a/examples/data-sources/stackit_opensearch_credentials/data-source.tf b/examples/data-sources/stackit_opensearch_credentials/data-source.tf new file mode 100644 index 00000000..5d27b545 --- /dev/null +++ b/examples/data-sources/stackit_opensearch_credentials/data-source.tf @@ -0,0 +1,5 @@ +data "stackit_opensearch_credentials" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + credentials_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} diff --git a/examples/data-sources/stackit_opensearch_instance/data-source.tf b/examples/data-sources/stackit_opensearch_instance/data-source.tf new file mode 100644 index 00000000..980e3e49 --- /dev/null +++ b/examples/data-sources/stackit_opensearch_instance/data-source.tf @@ -0,0 +1,4 @@ +data "stackit_opensearch_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} diff --git a/examples/data-sources/stackit_postgresflex_instance/data-source.tf b/examples/data-sources/stackit_postgresflex_instance/data-source.tf new file mode 100644 index 00000000..c5e07e13 --- /dev/null +++ b/examples/data-sources/stackit_postgresflex_instance/data-source.tf @@ -0,0 +1,4 @@ +data "stackit_postgresflex_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} diff --git a/examples/data-sources/stackit_postgresflex_user/data-source.tf 
b/examples/data-sources/stackit_postgresflex_user/data-source.tf new file mode 100644 index 00000000..4bd9a45f --- /dev/null +++ b/examples/data-sources/stackit_postgresflex_user/data-source.tf @@ -0,0 +1,5 @@ +data "stackit_postgresflex_user" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + user_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} diff --git a/examples/data-sources/stackit_postgresql_credentials/data-source.tf b/examples/data-sources/stackit_postgresql_credentials/data-source.tf new file mode 100644 index 00000000..cf918f17 --- /dev/null +++ b/examples/data-sources/stackit_postgresql_credentials/data-source.tf @@ -0,0 +1,5 @@ +data "stackit_postgresql_credentials" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + credentials_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} diff --git a/examples/data-sources/stackit_postgresql_instance/data-source.tf b/examples/data-sources/stackit_postgresql_instance/data-source.tf new file mode 100644 index 00000000..caa5c1a7 --- /dev/null +++ b/examples/data-sources/stackit_postgresql_instance/data-source.tf @@ -0,0 +1,4 @@ +data "stackit_postgresql_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} diff --git a/examples/data-sources/stackit_rabbitmq_credentials/data-source.tf b/examples/data-sources/stackit_rabbitmq_credentials/data-source.tf new file mode 100644 index 00000000..26ccb0db --- /dev/null +++ b/examples/data-sources/stackit_rabbitmq_credentials/data-source.tf @@ -0,0 +1,5 @@ +data "stackit_rabbitmq_credentials" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + credentials_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} diff --git a/examples/data-sources/stackit_rabbitmq_instance/data-source.tf 
b/examples/data-sources/stackit_rabbitmq_instance/data-source.tf new file mode 100644 index 00000000..13ee22a1 --- /dev/null +++ b/examples/data-sources/stackit_rabbitmq_instance/data-source.tf @@ -0,0 +1,4 @@ +data "stackit_rabbitmq_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} diff --git a/examples/data-sources/stackit_redis_credentials/data-source.tf b/examples/data-sources/stackit_redis_credentials/data-source.tf new file mode 100644 index 00000000..46289780 --- /dev/null +++ b/examples/data-sources/stackit_redis_credentials/data-source.tf @@ -0,0 +1,5 @@ +data "stackit_redis_credentials" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + credentials_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} diff --git a/examples/data-sources/stackit_redis_instance/data-source.tf b/examples/data-sources/stackit_redis_instance/data-source.tf new file mode 100644 index 00000000..d0de5480 --- /dev/null +++ b/examples/data-sources/stackit_redis_instance/data-source.tf @@ -0,0 +1,4 @@ +data "stackit_redis_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} diff --git a/examples/data-sources/stackit_resourcemanager_project/data-source.tf b/examples/data-sources/stackit_resourcemanager_project/data-source.tf new file mode 100644 index 00000000..2aa4872d --- /dev/null +++ b/examples/data-sources/stackit_resourcemanager_project/data-source.tf @@ -0,0 +1,4 @@ +data "stackit_resourcemanager_project" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + container_id = "example-container-abc123" +} diff --git a/examples/data-sources/stackit_ske_cluster/data-source.tf b/examples/data-sources/stackit_ske_cluster/data-source.tf new file mode 100644 index 00000000..6da899b2 --- /dev/null +++ 
b/examples/data-sources/stackit_ske_cluster/data-source.tf @@ -0,0 +1,4 @@ +data "stackit_ske_cluster" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + name = "example-name" +} diff --git a/examples/data-sources/stackit_ske_project/data-source.tf b/examples/data-sources/stackit_ske_project/data-source.tf new file mode 100644 index 00000000..5762ff58 --- /dev/null +++ b/examples/data-sources/stackit_ske_project/data-source.tf @@ -0,0 +1,3 @@ +data "stackit_ske_project" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} diff --git a/examples/provider/provider.tf b/examples/provider/provider.tf new file mode 100644 index 00000000..61cf15e1 --- /dev/null +++ b/examples/provider/provider.tf @@ -0,0 +1,3 @@ +provider "stackit" { + region = "eu01" +} diff --git a/examples/resources/stackit_argus_instance/resource.tf b/examples/resources/stackit_argus_instance/resource.tf new file mode 100644 index 00000000..6ebd0449 --- /dev/null +++ b/examples/resources/stackit_argus_instance/resource.tf @@ -0,0 +1,5 @@ +resource "stackit_argus_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + name = "example-instance" + plan_name = "Monitoring-Medium-EU01" +} diff --git a/examples/resources/stackit_argus_scrapeconfig/resource.tf b/examples/resources/stackit_argus_scrapeconfig/resource.tf new file mode 100644 index 00000000..4be234ee --- /dev/null +++ b/examples/resources/stackit_argus_scrapeconfig/resource.tf @@ -0,0 +1,17 @@ +resource "stackit_argus_scrapeconfig" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + name = "example-job" + metrics_path = "/my-metrics" + saml2 = { + enable_url_parameters = true + } + targets = [ + { + urls = ["url1", "urls2"] + labels = { + "url1" = "dev" + } + } + ] +} diff --git a/examples/resources/stackit_dns_record_set/resource.tf b/examples/resources/stackit_dns_record_set/resource.tf new file mode 100644 index 
00000000..b9581a22 --- /dev/null +++ b/examples/resources/stackit_dns_record_set/resource.tf @@ -0,0 +1,8 @@ +resource "stackit_dns_record_set" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + zone_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + name = "example-record-set.www.example-zone.com" + type = "A" + comment = "Example comment" + records = ["1.2.3.4"] +} diff --git a/examples/resources/stackit_dns_zone/resource.tf b/examples/resources/stackit_dns_zone/resource.tf new file mode 100644 index 00000000..cfdd4db4 --- /dev/null +++ b/examples/resources/stackit_dns_zone/resource.tf @@ -0,0 +1,10 @@ +resource "stackit_dns_zone" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + name = "Example zone" + dns_name = "www.example-zone.com" + contact_email = "aa@bb.ccc" + type = "primary" + acl = "192.168.0.0/24" + description = "Example description" + default_ttl = 1230 +} diff --git a/examples/resources/stackit_logme_credentials/resource.tf b/examples/resources/stackit_logme_credentials/resource.tf new file mode 100644 index 00000000..de522cfc --- /dev/null +++ b/examples/resources/stackit_logme_credentials/resource.tf @@ -0,0 +1,4 @@ +resource "stackit_logme_credentials" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} diff --git a/examples/resources/stackit_logme_instance/resource.tf b/examples/resources/stackit_logme_instance/resource.tf new file mode 100644 index 00000000..cca2e065 --- /dev/null +++ b/examples/resources/stackit_logme_instance/resource.tf @@ -0,0 +1,9 @@ +resource "stackit_logme_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + name = "example-instance" + version = "10" + plan_name = "example-plan-name" + parameters = { + sgw_acl = "x.x.x.x/x,y.y.y.y/y" + } +} diff --git a/examples/resources/stackit_mariadb_credentials/resource.tf b/examples/resources/stackit_mariadb_credentials/resource.tf new file mode 100644 
index 00000000..11d72f0f --- /dev/null +++ b/examples/resources/stackit_mariadb_credentials/resource.tf @@ -0,0 +1,4 @@ +resource "stackit_mariadb_credentials" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} diff --git a/examples/resources/stackit_mariadb_instance/resource.tf b/examples/resources/stackit_mariadb_instance/resource.tf new file mode 100644 index 00000000..b0b363f1 --- /dev/null +++ b/examples/resources/stackit_mariadb_instance/resource.tf @@ -0,0 +1,9 @@ +resource "stackit_mariadb_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + name = "example-instance" + version = "10" + plan_name = "example-plan-name" + parameters = { + sgw_acl = "x.x.x.x/x,y.y.y.y/y" + } +} diff --git a/examples/resources/stackit_opensearch_credentials/resource.tf b/examples/resources/stackit_opensearch_credentials/resource.tf new file mode 100644 index 00000000..f1d1325a --- /dev/null +++ b/examples/resources/stackit_opensearch_credentials/resource.tf @@ -0,0 +1,4 @@ +resource "stackit_opensearch_credentials" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} diff --git a/examples/resources/stackit_opensearch_instance/resource.tf b/examples/resources/stackit_opensearch_instance/resource.tf new file mode 100644 index 00000000..83217d73 --- /dev/null +++ b/examples/resources/stackit_opensearch_instance/resource.tf @@ -0,0 +1,9 @@ +resource "stackit_opensearch_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + name = "example-instance" + version = "10" + plan_name = "example-plan-name" + parameters = { + sgw_acl = "x.x.x.x/x,y.y.y.y/y" + } +} diff --git a/examples/resources/stackit_postgresflex_instance/resource.tf b/examples/resources/stackit_postgresflex_instance/resource.tf new file mode 100644 index 00000000..61a1540d --- /dev/null +++ 
b/examples/resources/stackit_postgresflex_instance/resource.tf @@ -0,0 +1,16 @@ +resource "stackit_postgresflex_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + name = "example-instance" + acl = ["XXX.XXX.XXX.X/XX", "XX.XXX.XX.X/XX"] + backup_schedule = "00 00 * * *" + flavor = { + cpu = 2 + ram = 4 + } + replicas = 3 + storage = { + class = "class" + size = 5 + } + version = 14 +} diff --git a/examples/resources/stackit_postgresflex_user/resource.tf b/examples/resources/stackit_postgresflex_user/resource.tf new file mode 100644 index 00000000..5592411b --- /dev/null +++ b/examples/resources/stackit_postgresflex_user/resource.tf @@ -0,0 +1,6 @@ +resource "stackit_postgresflex_user" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + username = "username" + roles = ["role"] +} diff --git a/examples/resources/stackit_postgresql_credentials/resource.tf b/examples/resources/stackit_postgresql_credentials/resource.tf new file mode 100644 index 00000000..0775c2cf --- /dev/null +++ b/examples/resources/stackit_postgresql_credentials/resource.tf @@ -0,0 +1,4 @@ +resource "stackit_postgresql_credentials" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} diff --git a/examples/resources/stackit_postgresql_instance/resource.tf b/examples/resources/stackit_postgresql_instance/resource.tf new file mode 100644 index 00000000..47b887bb --- /dev/null +++ b/examples/resources/stackit_postgresql_instance/resource.tf @@ -0,0 +1,9 @@ +resource "stackit_postgresql_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + name = "example-instance" + version = "10" + plan_name = "example-plan-name" + parameters = { + sgw_acl = "x.x.x.x/x,y.y.y.y/y" + } +} diff --git a/examples/resources/stackit_rabbitmq_credentials/resource.tf b/examples/resources/stackit_rabbitmq_credentials/resource.tf new file 
mode 100644 index 00000000..922d0d4f --- /dev/null +++ b/examples/resources/stackit_rabbitmq_credentials/resource.tf @@ -0,0 +1,4 @@ +resource "stackit_rabbitmq_credentials" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} diff --git a/examples/resources/stackit_rabbitmq_instance/resource.tf b/examples/resources/stackit_rabbitmq_instance/resource.tf new file mode 100644 index 00000000..87b2a1b5 --- /dev/null +++ b/examples/resources/stackit_rabbitmq_instance/resource.tf @@ -0,0 +1,9 @@ +resource "stackit_rabbitmq_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + name = "example-instance" + version = "10" + plan_name = "example-plan-name" + parameters = { + sgw_acl = "x.x.x.x/x,y.y.y.y/y" + } +} diff --git a/examples/resources/stackit_redis_credentials/resource.tf b/examples/resources/stackit_redis_credentials/resource.tf new file mode 100644 index 00000000..0e89eb2e --- /dev/null +++ b/examples/resources/stackit_redis_credentials/resource.tf @@ -0,0 +1,4 @@ +resource "stackit_redis_credentials" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} diff --git a/examples/resources/stackit_redis_instance/resource.tf b/examples/resources/stackit_redis_instance/resource.tf new file mode 100644 index 00000000..4c4d30ae --- /dev/null +++ b/examples/resources/stackit_redis_instance/resource.tf @@ -0,0 +1,9 @@ +resource "stackit_redis_instance" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + name = "example-instance" + version = "10" + plan_name = "example-plan-name" + parameters = { + sgw_acl = "x.x.x.x/x,y.y.y.y/y" + } +} diff --git a/examples/resources/stackit_resourcemanager_project/resource.tf b/examples/resources/stackit_resourcemanager_project/resource.tf new file mode 100644 index 00000000..45649f75 --- /dev/null +++ 
b/examples/resources/stackit_resourcemanager_project/resource.tf @@ -0,0 +1,9 @@ +resource "stackit_resourcemanager_project" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + parent_container_id = "example-parent-container-abc123" + name = "example-container" + labels = { + "Label 1" = "foo" + } + owner_email = "aa@bb.ccc" +} diff --git a/examples/resources/stackit_ske_cluster/resource.tf b/examples/resources/stackit_ske_cluster/resource.tf new file mode 100644 index 00000000..0149e84a --- /dev/null +++ b/examples/resources/stackit_ske_cluster/resource.tf @@ -0,0 +1,21 @@ +resource "stackit_ske_cluster" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + name = "example-name" + kubernetes_version = "1.25" + node_pools = [ + { + name = "np-example" + machine_type = "b1.2" + os_version = "3510.2.5" + minimum = "2" + maximum = "3" + availability_zones = ["eu01-3"] + } + ] + maintenance = { + enable_kubernetes_version_updates = true + enable_machine_image_version_updates = true + start = "01:00:00Z" + end = "02:00:00Z" + } +} diff --git a/examples/resources/stackit_ske_project/resource.tf b/examples/resources/stackit_ske_project/resource.tf new file mode 100644 index 00000000..eadcca65 --- /dev/null +++ b/examples/resources/stackit_ske_project/resource.tf @@ -0,0 +1,3 @@ +resource "stackit_ske_project" "example" { + project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +} diff --git a/go.mod b/go.mod new file mode 100644 index 00000000..2f4e20db --- /dev/null +++ b/go.mod @@ -0,0 +1,82 @@ +module github.com/stackitcloud/terraform-provider-stackit + +go 1.20 + +require ( + github.com/google/go-cmp v0.5.9 + github.com/google/uuid v1.3.0 + github.com/hashicorp/terraform-plugin-framework v1.3.5 + github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 + github.com/hashicorp/terraform-plugin-go v0.18.0 + github.com/hashicorp/terraform-plugin-log v0.9.0 + github.com/hashicorp/terraform-plugin-testing v1.5.1 + 
github.com/stackitcloud/stackit-sdk-go/core v0.1.0 + github.com/stackitcloud/stackit-sdk-go/services/argus v0.1.0 + github.com/stackitcloud/stackit-sdk-go/services/dns v0.1.0 + github.com/stackitcloud/stackit-sdk-go/services/logme v0.1.0 + github.com/stackitcloud/stackit-sdk-go/services/mariadb v0.1.0 + github.com/stackitcloud/stackit-sdk-go/services/opensearch v0.1.0 + github.com/stackitcloud/stackit-sdk-go/services/postgresflex v0.1.0 + github.com/stackitcloud/stackit-sdk-go/services/postgresql v0.1.0 + github.com/stackitcloud/stackit-sdk-go/services/rabbitmq v0.1.0 + github.com/stackitcloud/stackit-sdk-go/services/redis v0.1.0 + github.com/stackitcloud/stackit-sdk-go/services/resourcemanager v0.1.0 + github.com/stackitcloud/stackit-sdk-go/services/ske v0.1.0 + golang.org/x/mod v0.12.0 +) + +require ( + github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect + github.com/agext/levenshtein v1.2.2 // indirect + github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect + github.com/cloudflare/circl v1.3.3 // indirect + github.com/fatih/color v1.13.0 // indirect + github.com/go-logr/logr v1.2.4 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/hashicorp/errwrap v1.0.0 // indirect + github.com/hashicorp/go-checkpoint v0.5.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 // indirect + github.com/hashicorp/go-hclog v1.5.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-plugin v1.4.10 // indirect + github.com/hashicorp/go-uuid v1.0.3 // indirect + github.com/hashicorp/go-version v1.6.0 // indirect + github.com/hashicorp/hc-install v0.5.2 // indirect + github.com/hashicorp/hcl/v2 v2.17.0 // indirect + github.com/hashicorp/logutils v1.0.0 // indirect + github.com/hashicorp/terraform-exec v0.18.1 // indirect + github.com/hashicorp/terraform-json v0.17.1 // indirect + 
github.com/hashicorp/terraform-plugin-sdk/v2 v2.28.0 // indirect + github.com/hashicorp/terraform-registry-address v0.2.1 // indirect + github.com/hashicorp/terraform-svchost v0.1.1 // indirect + github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect + github.com/kr/text v0.2.0 // indirect + github.com/mattn/go-colorable v0.1.12 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/go-testing-interface v1.14.1 // indirect + github.com/mitchellh/go-wordwrap v1.0.0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/oklog/run v1.0.0 // indirect + github.com/stretchr/testify v1.8.3 // indirect + github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect + github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect + github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect + github.com/zclconf/go-cty v1.13.3 // indirect + golang.org/x/crypto v0.12.0 // indirect + golang.org/x/exp v0.0.0-20230809150735-7b3493d9a819 // indirect + golang.org/x/net v0.14.0 // indirect + golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/sys v0.11.0 // indirect + golang.org/x/text v0.12.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + google.golang.org/grpc v1.56.1 // indirect + google.golang.org/protobuf v1.31.0 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + k8s.io/apimachinery v0.28.1 // indirect + k8s.io/klog/v2 v2.100.1 // indirect + k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 00000000..88e04054 --- /dev/null +++ b/go.sum @@ -0,0 +1,222 @@ +github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= +github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 
h1:wPbRQzjjwFc0ih8puEVAOFGELsn1zoIIYdxvML7mDxA= +github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8/go.mod h1:I0gYDMZ6Z5GRU7l58bNFSkPTFN6Yl12dsUlAZ8xy98g= +github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ= +github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= +github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= +github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= +github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= +github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= +github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I= +github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs= +github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= +github.com/go-git/go-billy/v5 v5.4.1 h1:Uwp5tDRkPr+l/TnbHOQzp+tmJfLceOlbVucgpTz8ix4= +github.com/go-git/go-git/v5 v5.6.1 h1:q4ZRqQl4pR/ZJHc1L5CFjGA1a10u76aV1iC+nh+bHsk= +github.com/go-logr/logr v1.2.0/go.mod 
h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= +github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU= +github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 
h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI= +github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= +github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= +github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-plugin v1.4.10 h1:xUbmA4jC6Dq163/fWcp8P3JuHilrHHMLNRxzGQJ9hNk= +github.com/hashicorp/go-plugin v1.4.10/go.mod h1:6/1TEzT0eQznvI/gV2CM29DLSkAK/e58mUWKVsPaph0= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/hc-install v0.5.2 h1:SfwMFnEXVVirpwkDuSF5kymUOhrUxrTq3udEseZdOD0= +github.com/hashicorp/hc-install v0.5.2/go.mod h1:9QISwe6newMWIfEiXpzuu1k9HAGtQYgnSH8H9T8wmoI= +github.com/hashicorp/hcl/v2 v2.17.0 h1:z1XvSUyXd1HP10U4lrLg5e0JMVz6CPaJvAgxM0KNZVY= +github.com/hashicorp/hcl/v2 v2.17.0/go.mod h1:gJyW2PTShkJqQBKpAmPO3yxMxIuoXkOF2TpqXzrQyx4= +github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/terraform-exec v0.18.1 h1:LAbfDvNQU1l0NOQlTuudjczVhHj061fNX5H8XZxHlH4= +github.com/hashicorp/terraform-exec v0.18.1/go.mod h1:58wg4IeuAJ6LVsLUeD2DWZZoc/bYi6dzhLHzxM41980= +github.com/hashicorp/terraform-json v0.17.1 h1:eMfvh/uWggKmY7Pmb3T85u86E2EQg6EQHgyRwf3RkyA= 
+github.com/hashicorp/terraform-json v0.17.1/go.mod h1:Huy6zt6euxaY9knPAFKjUITn8QxUFIe9VuSzb4zn/0o= +github.com/hashicorp/terraform-plugin-framework v1.3.5 h1:FJ6s3CVWVAxlhiF/jhy6hzs4AnPHiflsp9KgzTGl1wo= +github.com/hashicorp/terraform-plugin-framework v1.3.5/go.mod h1:2gGDpWiTI0irr9NSTLFAKlTi6KwGti3AoU19rFqU30o= +github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 h1:HOjBuMbOEzl7snOdOoUfE2Jgeto6JOjLVQ39Ls2nksc= +github.com/hashicorp/terraform-plugin-framework-validators v0.12.0/go.mod h1:jfHGE/gzjxYz6XoUwi/aYiiKrJDeutQNUtGQXkaHklg= +github.com/hashicorp/terraform-plugin-go v0.18.0 h1:IwTkOS9cOW1ehLd/rG0y+u/TGLK9y6fGoBjXVUquzpE= +github.com/hashicorp/terraform-plugin-go v0.18.0/go.mod h1:l7VK+2u5Kf2y+A+742GX0ouLut3gttudmvMgN0PA74Y= +github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0= +github.com/hashicorp/terraform-plugin-log v0.9.0/go.mod h1:rKL8egZQ/eXSyDqzLUuwUYLVdlYeamldAHSxjUFADow= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.28.0 h1:gY4SG34ANc6ZSeWEKC9hDTChY0ZiN+Myon17fSA0Xgc= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.28.0/go.mod h1:deXEw/iJXtJxNV9d1c/OVJrvL7Zh0a++v7rzokW6wVY= +github.com/hashicorp/terraform-plugin-testing v1.5.1 h1:T4aQh9JAhmWo4+t1A7x+rnxAJHCDIYW9kXyo4sVO92c= +github.com/hashicorp/terraform-plugin-testing v1.5.1/go.mod h1:dg8clO6K59rZ8w9EshBmDp1CxTIPu3yA4iaDpX1h5u0= +github.com/hashicorp/terraform-registry-address v0.2.1 h1:QuTf6oJ1+WSflJw6WYOHhLgwUiQ0FrROpHPYFtwTYWM= +github.com/hashicorp/terraform-registry-address v0.2.1/go.mod h1:BSE9fIFzp0qWsJUUyGquo4ldV9k2n+psif6NYkBRS3Y= +github.com/hashicorp/terraform-svchost v0.1.1 h1:EZZimZ1GxdqFRinZ1tpJwVxxt49xc/S52uzrw4x0jKQ= +github.com/hashicorp/terraform-svchost v0.1.1/go.mod h1:mNsjQfZyf/Jhz35v6/0LWcv26+X7JPS+buii2c9/ctc= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod 
h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= +github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= +github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod 
h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= +github.com/skeema/knownhosts v1.1.0 h1:Wvr9V0MxhjRbl3f9nMnKnFfiWTJmtECJ9Njkea3ysW0= +github.com/stackitcloud/stackit-sdk-go/core v0.1.0 h1:rvYOBuUUcje+jbfkCVOb417T3E5d5HMnJ5lKA4Y3fP0= +github.com/stackitcloud/stackit-sdk-go/core v0.1.0/go.mod h1:oajmlnz8XdRji0VnQhqoa3tcta8HIBuYB0uDzHDvMes= +github.com/stackitcloud/stackit-sdk-go/services/argus v0.1.0 h1:kjAYf7WnjhQ2JlYiUBH34n51KBej8a7rVbKF7+4UpNQ= +github.com/stackitcloud/stackit-sdk-go/services/argus v0.1.0/go.mod h1:9Y9MzEOos9xFFCs6YdYWb0k8Ij4JrJYfYGro9b1pVUY= +github.com/stackitcloud/stackit-sdk-go/services/dns v0.1.0 h1:PEfzbpktaK7Fj8O8zxLM3bpXwazmGhG59D15Vi8z/sE= +github.com/stackitcloud/stackit-sdk-go/services/dns v0.1.0/go.mod h1:eD6fRbn/pS9sdRYeq3EBKvFj2qlDu25Ir/doWfPaBcI= +github.com/stackitcloud/stackit-sdk-go/services/logme v0.1.0 
h1:3PQQniOQIxGQjYNpkukjivvcvO0PjIaPGh7JHwC6888= +github.com/stackitcloud/stackit-sdk-go/services/logme v0.1.0/go.mod h1:KkWI0vWfBHRepIrH0VT2e1xwf1WaaNb/zPfSTRXAXjs= +github.com/stackitcloud/stackit-sdk-go/services/mariadb v0.1.0 h1:puJISDlTEKO9ROZLzzjfijUEe0sODK2WK4TUiHUmbJI= +github.com/stackitcloud/stackit-sdk-go/services/mariadb v0.1.0/go.mod h1:uTotoums0lfsmlTQVP5bYz4wWmfWlLuFx9i1ZzAyAkA= +github.com/stackitcloud/stackit-sdk-go/services/opensearch v0.1.0 h1:t/uLNsqkpzARY9kfGR8pahfwScDdJZ+bRbZCGZ9o1lk= +github.com/stackitcloud/stackit-sdk-go/services/opensearch v0.1.0/go.mod h1:pTTPSxx/BpcAm0ttcH+g4AiovC+oT7lXqca2C1XOxzY= +github.com/stackitcloud/stackit-sdk-go/services/postgresflex v0.1.0 h1:vm3AgvTA6TaHd0WpmzKT6HY6fSLOJUNPslqeLByO5P4= +github.com/stackitcloud/stackit-sdk-go/services/postgresflex v0.1.0/go.mod h1:sPEBUNRxaEsyhVOQKZrUdebexVX/z1RbvUFXxXOrICo= +github.com/stackitcloud/stackit-sdk-go/services/postgresql v0.1.0 h1:/P9LOqYbbMso7FP2wfqqsUKbBrWVlb73vflgCA8N/rI= +github.com/stackitcloud/stackit-sdk-go/services/postgresql v0.1.0/go.mod h1:9tfn47ltmTxuuchpn0Ui2n1KHMk4L/6T3K04NysSkTs= +github.com/stackitcloud/stackit-sdk-go/services/rabbitmq v0.1.0 h1:PSNXnc5QUOvYjIL4yYk3vLd0Ri9Lzu06MTfYEKOx1ys= +github.com/stackitcloud/stackit-sdk-go/services/rabbitmq v0.1.0/go.mod h1:TKYrlRjk4WJROlC23Wo6TuXntgYu8afkJfYtDd2JdiQ= +github.com/stackitcloud/stackit-sdk-go/services/redis v0.1.0 h1:sXbTo85QAcZ1hdZWKf3ZM+sdgBca1yYrrTiwS0ny4tY= +github.com/stackitcloud/stackit-sdk-go/services/redis v0.1.0/go.mod h1:7n6KxrNErscuXKGvXK93qEqd9jg9X9IT4tSGR2TsSg8= +github.com/stackitcloud/stackit-sdk-go/services/resourcemanager v0.1.0 h1:3kvXPm9EeJQA3xe3nV4GQu+dPLoAeErza2g0xjWPMJo= +github.com/stackitcloud/stackit-sdk-go/services/resourcemanager v0.1.0/go.mod h1:VhGU0mhxY7FQj02evme9Xxtyo/H5iOlrU38i3wY7krw= +github.com/stackitcloud/stackit-sdk-go/services/ske v0.1.0 h1:nifb5LIei6d/8B7wKzqyxZ/u8lT6Dd9bFSpyOxX0kU4= +github.com/stackitcloud/stackit-sdk-go/services/ske v0.1.0/go.mod 
h1:Q+jqrxRJvoLNihVP5Bc7nqJ/l3TFyIFFavEiwRZuuMk= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= +github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= +github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= +github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= +github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= +github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= +github.com/zclconf/go-cty v1.13.3 h1:m+b9q3YDbg6Bec5rr+KGy1MzEVzY/jC2X+YX4yqKtHI= +github.com/zclconf/go-cty v1.13.3/go.mod h1:YKQzy/7pZ7iq2jNFzy5go57xdxdWoLLpaEp4u238AE0= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/exp v0.0.0-20230809150735-7b3493d9a819 h1:EDuYyU/MkFXllv9QF9819VlI9a4tzGuCbhG0ExK9o1U= +golang.org/x/exp v0.0.0-20230809150735-7b3493d9a819/go.mod 
h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= +golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= 
+golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/grpc v1.56.1 h1:z0dNfjIl0VpaZ9iSVjA6daGatAYwPGstTjt5vkRMFkQ= +google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/apimachinery v0.28.1 h1:EJD40og3GizBSV3mkIoXQBsws32okPOy+MkRyzh6nPY= +k8s.io/apimachinery v0.28.1/go.mod h1:X0xh/chESs2hP9koe+SdIAcXWcQ+RM5hy0ZynB+yEvw= +k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= +k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= diff --git a/golang-ci.yaml b/golang-ci.yaml new file mode 100644 index 00000000..7eceed18 --- /dev/null +++ b/golang-ci.yaml @@ -0,0 +1,100 @@ +# This file contains all available configuration options +# with their default values. + +# options for analysis running +run: + # default concurrency is a available CPU number + concurrency: 4 + + # timeout for analysis, e.g. 
30s, 5m, default is 1m + timeout: 5m +linters-settings: + goimports: + # put imports beginning with prefix after 3rd-party packages; + # it's a comma-separated list of prefixes + local-prefixes: github.com/freiheit-com/nmww + depguard: + list-type: blacklist + include-go-root: false + packages: + - github.com/stretchr/testify + packages-with-error-message: + # specify an error message to output when a blacklisted package is used + - github.com/stretchr/testify: "do not use a testing framework" + misspell: + # Correct spellings using locale preferences for US or UK. + # Default is to use a neutral variety of English. + # Setting locale to US will correct the British spelling of 'colour' to 'color'. + locale: US + golint: + min-confidence: 0.8 + gosec: + excludes: + # Suppressions: (see https://github.com/securego/gosec#available-rules for details) + - G104 # "Audit errors not checked" -> which we don't need and is a badly implemented version of errcheck + - G102 # "Bind to all interfaces" -> since this is normal in k8s + - G304 # "File path provided as taint input" -> too many false positives + - G307 # "Deferring unsafe method "Close" on type "io.ReadCloser" -> false positive when calling defer resp.Body.Close() + nakedret: + max-func-lines: 0 + revive: + ignore-generated-header: true + severity: error + # https://github.com/mgechev/revive + rules: + - name: errorf + - name: context-as-argument + - name: error-return + - name: increment-decrement + - name: indent-error-flow + - name: superfluous-else + - name: unused-parameter + - name: unreachable-code + - name: atomic + - name: empty-lines + - name: early-return + gocritic: + enabled-tags: + - performance + - style + - experimental + disabled-checks: + - wrapperFunc + - typeDefFirst + - ifElseChain + - dupImport # https://github.com/go-critic/go-critic/issues/845 +linters: + enable: + # https://golangci-lint.run/usage/linters/ + # default linters + - gosimple + - govet + - ineffassign + - staticcheck + - 
typecheck + - unused + # additional linters + - errorlint + - exportloopref + - gochecknoinits + - gocritic + - gofmt + - goimports + - gosec + - misspell + - nakedret + - revive + - depguard + - bodyclose + - sqlclosecheck + - wastedassign + - forcetypeassert + - errcheck + disable: + - structcheck # deprecated + - deadcode # deprecated + - varcheck # deprecated + - noctx # false positive: finds errors with http.NewRequest that dont make sense + - unparam # false positives +issues: + exclude-use-default: false diff --git a/main.go b/main.go new file mode 100644 index 00000000..4d38f3ad --- /dev/null +++ b/main.go @@ -0,0 +1,23 @@ +package main + +import ( + "context" + "log" + + "github.com/hashicorp/terraform-plugin-framework/providerserver" + "github.com/stackitcloud/terraform-provider-stackit/stackit" +) + +var ( + // goreleaser configuration will override this value + version string = "dev" +) + +func main() { + err := providerserver.Serve(context.Background(), stackit.New(version), providerserver.ServeOpts{ + Address: "registry.terraform.io/providers/stackitcloud/stackit", + }) + if err != nil { + log.Fatal(err.Error()) + } +} diff --git a/scripts/lint-golangci-lint.sh b/scripts/lint-golangci-lint.sh new file mode 100755 index 00000000..614748fd --- /dev/null +++ b/scripts/lint-golangci-lint.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# This script lints the SDK modules and the internal examples +# Pre-requisites: golangci-lint +set -eo pipefail + +ROOT_DIR=$(git rev-parse --show-toplevel) +GOLANG_CI_YAML_PATH="${ROOT_DIR}/golang-ci.yaml" +GOLANG_CI_ARGS="--allow-parallel-runners --timeout=5m --config=${GOLANG_CI_YAML_PATH}" + +if type -p golangci-lint >/dev/null; then + : +else + echo "golangci-lint not installed, unable to proceed." 
+ exit 1 +fi + +cd ${ROOT_DIR} +golangci-lint run ${GOLANG_CI_ARGS} diff --git a/scripts/project.sh b/scripts/project.sh new file mode 100755 index 00000000..c5da2929 --- /dev/null +++ b/scripts/project.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +# This script is used to manage the project, only used for installing the required tools for now +# Usage: ./project.sh [action] +# * tools: Install required tools to run the project +set -eo pipefail + +ROOT_DIR=$(git rev-parse --show-toplevel) + +action=$1 + +if [ "$action" = "help" ]; then + [ -f "$0".man ] && man "$0".man || echo "No help, please read the script in ${script}, we will add help later" +elif [ "$action" = "tools" ]; then + cd ${ROOT_DIR} + + go mod download + + go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.52.2 + go install github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs@v0.16.0 +else + echo "Invalid action: '$action', please use $0 help for help" +fi diff --git a/scripts/tfplugindocs.sh b/scripts/tfplugindocs.sh new file mode 100755 index 00000000..6fd37694 --- /dev/null +++ b/scripts/tfplugindocs.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Pre-requisites: tfplugindocs +set -eo pipefail + +ROOT_DIR=$(git rev-parse --show-toplevel) +EXAMPLES_DIR="${ROOT_DIR}/examples" +PROVIDER_NAME="stackit" + +# Create a new empty directory for the docs +if [ -d ${ROOT_DIR}/docs ]; then + rm -rf ${ROOT_DIR}/docs +fi +mkdir -p ${ROOT_DIR}/docs + +echo ">> Generating documentation" +tfplugindocs generate \ + --provider-name "stackit" diff --git a/stackit/conversion/conversion.go b/stackit/conversion/conversion.go new file mode 100644 index 00000000..9bc03f58 --- /dev/null +++ b/stackit/conversion/conversion.go @@ -0,0 +1,75 @@ +package conversion + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" +) + +func ToPtrInt32(source types.Int64) 
*int32 { + if source.IsNull() || source.IsUnknown() { + return nil + } + ttlInt64 := source.ValueInt64() + ttlInt32 := int32(ttlInt64) + return &ttlInt32 +} + +func ToTypeInt64(i *int32) types.Int64 { + if i == nil { + return types.Int64PointerValue(nil) + } + return types.Int64Value(int64(*i)) +} + +func ToString(ctx context.Context, v attr.Value) (string, error) { + if t := v.Type(ctx); t != types.StringType { + return "", fmt.Errorf("type mismatch. expected 'types.StringType' but got '%s'", t.String()) + } + if v.IsNull() || v.IsUnknown() { + return "", fmt.Errorf("value is unknown or null") + } + tv, err := v.ToTerraformValue(ctx) + if err != nil { + return "", err + } + var s string + if err := tv.Copy().As(&s); err != nil { + return "", err + } + return s, nil +} + +func ToOptStringMap(tfMap map[string]attr.Value) (*map[string]string, error) { //nolint: gocritic //pointer needed to map optional fields + labels := make(map[string]string, len(tfMap)) + for l, v := range tfMap { + valueString, ok := v.(types.String) + if !ok { + return nil, fmt.Errorf("error converting map value: expected to string, got %v", v) + } + labels[l] = valueString.ValueString() + } + + labelsPointer := &labels + if len(labels) == 0 { + labelsPointer = nil + } + return labelsPointer, nil +} + +func ToTerraformStringMap(ctx context.Context, m map[string]string) (basetypes.MapValue, error) { + labels := make(map[string]attr.Value, len(m)) + for l, v := range m { + stringValue := types.StringValue(v) + labels[l] = stringValue + } + res, diags := types.MapValueFrom(ctx, types.StringType, m) + if diags.HasError() { + return types.MapNull(types.StringType), fmt.Errorf("converting to MapValue: %v", diags.Errors()) + } + + return res, nil +} diff --git a/stackit/core/core.go b/stackit/core/core.go new file mode 100644 index 00000000..1c1dfcb3 --- /dev/null +++ b/stackit/core/core.go @@ -0,0 +1,56 @@ +package core + +import ( + "context" + "fmt" + "net/http" + "strings" + + 
"github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-log/tflog" +) + +// Separator used for concatenation of TF-internal resource ID +const Separator = "," + +type ProviderData struct { + RoundTripper http.RoundTripper + ServiceAccountEmail string + Region string + DnsCustomEndpoint string + PostgreSQLCustomEndpoint string + PostgresFlexCustomEndpoint string + LogMeCustomEndpoint string + RabbitMQCustomEndpoint string + MariaDBCustomEndpoint string + OpenSearchCustomEndpoint string + RedisCustomEndpoint string + ArgusCustomEndpoint string + SKECustomEndpoint string + ResourceManagerCustomEndpoint string +} + +// DiagsToError Converts TF diagnostics' errors into an error with a human-readable description. +// If there are no errors, the output is nil +func DiagsToError(diags diag.Diagnostics) error { + if !diags.HasError() { + return nil + } + + diagsError := diags.Errors() + diagsStrings := make([]string, 0) + for _, diagnostic := range diagsError { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "(%s) %s", + diagnostic.Summary(), + diagnostic.Detail(), + )) + } + return fmt.Errorf("%s", strings.Join(diagsStrings, ";")) +} + +// LogAndAddError Logs the error and adds it to the diags +func LogAndAddError(ctx context.Context, diags *diag.Diagnostics, summary, detail string) { + tflog.Error(ctx, summary) + (*diags).AddError(summary, detail) +} diff --git a/stackit/provider.go b/stackit/provider.go new file mode 100644 index 00000000..78c4fc8e --- /dev/null +++ b/stackit/provider.go @@ -0,0 +1,295 @@ +package stackit + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-framework/provider/schema" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/types" + argusCredential 
"github.com/stackitcloud/terraform-provider-stackit/stackit/services/argus/credential" + argusInstance "github.com/stackitcloud/terraform-provider-stackit/stackit/services/argus/instance" + argusScrapeConfig "github.com/stackitcloud/terraform-provider-stackit/stackit/services/argus/scrapeconfig" + dnsRecordSet "github.com/stackitcloud/terraform-provider-stackit/stackit/services/dns/recordset" + dnsZone "github.com/stackitcloud/terraform-provider-stackit/stackit/services/dns/zone" + logMeCredentials "github.com/stackitcloud/terraform-provider-stackit/stackit/services/logme/credentials" + logMeInstance "github.com/stackitcloud/terraform-provider-stackit/stackit/services/logme/instance" + mariaDBCredentials "github.com/stackitcloud/terraform-provider-stackit/stackit/services/mariadb/credentials" + mariaDBInstance "github.com/stackitcloud/terraform-provider-stackit/stackit/services/mariadb/instance" + openSearchCredentials "github.com/stackitcloud/terraform-provider-stackit/stackit/services/opensearch/credentials" + openSearchInstance "github.com/stackitcloud/terraform-provider-stackit/stackit/services/opensearch/instance" + postgresFlexInstance "github.com/stackitcloud/terraform-provider-stackit/stackit/services/postgresflex/instance" + postgresFlexUser "github.com/stackitcloud/terraform-provider-stackit/stackit/services/postgresflex/user" + postgresCredentials "github.com/stackitcloud/terraform-provider-stackit/stackit/services/postgresql/credentials" + postgresInstance "github.com/stackitcloud/terraform-provider-stackit/stackit/services/postgresql/instance" + rabbitMQCredentials "github.com/stackitcloud/terraform-provider-stackit/stackit/services/rabbitmq/credentials" + rabbitMQInstance "github.com/stackitcloud/terraform-provider-stackit/stackit/services/rabbitmq/instance" + redisCredentials "github.com/stackitcloud/terraform-provider-stackit/stackit/services/redis/credentials" + redisInstance 
"github.com/stackitcloud/terraform-provider-stackit/stackit/services/redis/instance" + resourceManagerProject "github.com/stackitcloud/terraform-provider-stackit/stackit/services/resourcemanager/project" + skeCluster "github.com/stackitcloud/terraform-provider-stackit/stackit/services/ske/cluster" + skeProject "github.com/stackitcloud/terraform-provider-stackit/stackit/services/ske/project" + + sdkauth "github.com/stackitcloud/stackit-sdk-go/core/auth" + "github.com/stackitcloud/stackit-sdk-go/core/config" + + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" +) + +// Ensure the implementation satisfies the expected interfaces +var ( + _ provider.Provider = &Provider{} +) + +// Provider is the provider implementation. +type Provider struct { + version string +} + +// New is a helper function to simplify provider server and testing implementation. +func New(version string) func() provider.Provider { + return func() provider.Provider { + return &Provider{ + version: version, + } + } +} + +func (p *Provider) Metadata(_ context.Context, _ provider.MetadataRequest, resp *provider.MetadataResponse) { + resp.TypeName = "stackit" + resp.Version = p.version +} + +type providerModel struct { + CredentialsFilePath types.String `tfsdk:"credentials_path"` + ServiceAccountEmail types.String `tfsdk:"service_account_email"` + Token types.String `tfsdk:"service_account_token"` + Region types.String `tfsdk:"region"` + DNSCustomEndpoint types.String `tfsdk:"dns_custom_endpoint"` + PostgreSQLCustomEndpoint types.String `tfsdk:"postgresql_custom_endpoint"` + PostgresFlexCustomEndpoint types.String `tfsdk:"postgresflex_custom_endpoint"` + LogMeCustomEndpoint types.String `tfsdk:"logme_custom_endpoint"` + RabbitMQCustomEndpoint types.String `tfsdk:"rabbitmq_custom_endpoint"` + MariaDBCustomEndpoint types.String `tfsdk:"mariadb_custom_endpoint"` + OpenSearchCustomEndpoint types.String `tfsdk:"opensearch_custom_endpoint"` + RedisCustomEndpoint types.String 
`tfsdk:"redis_custom_endpoint"` + ArgusCustomEndpoint types.String `tfsdk:"argus_custom_endpoint"` + SKECustomEndpoint types.String `tfsdk:"ske_custom_endpoint"` + ResourceManagerCustomEndpoint types.String `tfsdk:"resourcemanager_custom_endpoint"` +} + +// Schema defines the provider-level schema for configuration data. +func (p *Provider) Schema(_ context.Context, _ provider.SchemaRequest, resp *provider.SchemaResponse) { + descriptions := map[string]string{ + "credentials_path": "Path of JSON from where the credentials are read. Takes precedence over the env var `STACKIT_CREDENTIALS_PATH`. Default value is `~/.stackit/credentials.json`.", + "service_account_token": "Token used for authentication. If set, the token flow will be used to authenticate all operations.", + "service_account_email": "Service account email. It can also be set using the environment variable STACKIT_SERVICE_ACCOUNT_EMAIL", + "region": "Region will be used as the default location for regional services. Not all services require a region, some are global", + "dns_custom_endpoint": "Custom endpoint for the DNS service", + "postgresql_custom_endpoint": "Custom endpoint for the PostgreSQL service", + "postgresflex_custom_endpoint": "Custom endpoint for the PostgresFlex service", + "logme_custom_endpoint": "Custom endpoint for the LogMe service", + "rabbitmq_custom_endpoint": "Custom endpoint for the RabbitMQ service", + "mariadb_custom_endpoint": "Custom endpoint for the MariaDB service", + "opensearch_custom_endpoint": "Custom endpoint for the OpenSearch service", + "argus_custom_endpoint": "Custom endpoint for the Argus service", + "ske_custom_endpoint": "Custom endpoint for the Kubernetes Engine (SKE) service", + "resourcemanager_custom_endpoint": "Custom endpoint for the Resource Manager service", + } + + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "credentials_path": schema.StringAttribute{ + Optional: true, + Description: descriptions["credentials_path"], + }, 
+			"service_account_email": schema.StringAttribute{
+				Optional:    true,
+				Description: descriptions["service_account_email"],
+			},
+			"service_account_token": schema.StringAttribute{
+				Optional:    true,
+				Description: descriptions["service_account_token"],
+			},
+			"region": schema.StringAttribute{
+				Optional:    true,
+				Description: descriptions["region"],
+			},
+			"dns_custom_endpoint": schema.StringAttribute{
+				Optional:    true,
+				Description: descriptions["dns_custom_endpoint"],
+			},
+			"postgresql_custom_endpoint": schema.StringAttribute{
+				Optional:    true,
+				Description: descriptions["postgresql_custom_endpoint"],
+			},
+			"postgresflex_custom_endpoint": schema.StringAttribute{
+				Optional:    true,
+				Description: descriptions["postgresflex_custom_endpoint"],
+			},
+			"logme_custom_endpoint": schema.StringAttribute{
+				Optional:    true,
+				Description: descriptions["logme_custom_endpoint"],
+			},
+			"rabbitmq_custom_endpoint": schema.StringAttribute{
+				Optional:    true,
+				Description: descriptions["rabbitmq_custom_endpoint"],
+			},
+			"mariadb_custom_endpoint": schema.StringAttribute{
+				Optional:    true,
+				Description: descriptions["mariadb_custom_endpoint"],
+			},
+			"opensearch_custom_endpoint": schema.StringAttribute{
+				Optional:    true,
+				Description: descriptions["opensearch_custom_endpoint"],
+			},
+			"redis_custom_endpoint": schema.StringAttribute{
+				Optional:    true,
+				Description: "Custom endpoint for the Redis service",
+			},
+			"argus_custom_endpoint": schema.StringAttribute{
+				Optional:    true,
+				Description: descriptions["argus_custom_endpoint"],
+			},
+			"ske_custom_endpoint": schema.StringAttribute{
+				Optional:    true,
+				Description: descriptions["ske_custom_endpoint"],
+			},
+			"resourcemanager_custom_endpoint": schema.StringAttribute{
+				Optional:    true,
+				Description: descriptions["resourcemanager_custom_endpoint"],
+			},
+		},
+	}
+}
+
+// Configure prepares a stackit API client for data sources and resources.
+func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest, resp *provider.ConfigureResponse) { + // Retrieve provider data and configuration + var providerConfig providerModel + diags := req.Config.Get(ctx, &providerConfig) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Configure SDK client + sdkConfig := &config.Configuration{} + var providerData core.ProviderData + if !(providerConfig.CredentialsFilePath.IsUnknown() || providerConfig.CredentialsFilePath.IsNull()) { + sdkConfig.CredentialsFilePath = providerConfig.CredentialsFilePath.ValueString() + } + if !(providerConfig.ServiceAccountEmail.IsUnknown() || providerConfig.ServiceAccountEmail.IsNull()) { + providerData.ServiceAccountEmail = providerConfig.ServiceAccountEmail.ValueString() + sdkConfig.ServiceAccountEmail = providerConfig.ServiceAccountEmail.ValueString() + } + if !(providerConfig.Token.IsUnknown() || providerConfig.Token.IsNull()) { + sdkConfig.Token = providerConfig.Token.ValueString() + } + if !(providerConfig.Region.IsUnknown() || providerConfig.Region.IsNull()) { + providerData.Region = providerConfig.Region.ValueString() + } + if !(providerConfig.DNSCustomEndpoint.IsUnknown() || providerConfig.DNSCustomEndpoint.IsNull()) { + providerData.DnsCustomEndpoint = providerConfig.DNSCustomEndpoint.ValueString() + } + if !(providerConfig.PostgreSQLCustomEndpoint.IsUnknown() || providerConfig.PostgreSQLCustomEndpoint.IsNull()) { + providerData.PostgreSQLCustomEndpoint = providerConfig.PostgreSQLCustomEndpoint.ValueString() + } + if !(providerConfig.PostgresFlexCustomEndpoint.IsUnknown() || providerConfig.PostgresFlexCustomEndpoint.IsNull()) { + providerData.PostgresFlexCustomEndpoint = providerConfig.PostgresFlexCustomEndpoint.ValueString() + } + if !(providerConfig.LogMeCustomEndpoint.IsUnknown() || providerConfig.LogMeCustomEndpoint.IsNull()) { + providerData.LogMeCustomEndpoint = providerConfig.LogMeCustomEndpoint.ValueString() + } + 
if !(providerConfig.RabbitMQCustomEndpoint.IsUnknown() || providerConfig.RabbitMQCustomEndpoint.IsNull()) { + providerData.RabbitMQCustomEndpoint = providerConfig.RabbitMQCustomEndpoint.ValueString() + } + if !(providerConfig.MariaDBCustomEndpoint.IsUnknown() || providerConfig.MariaDBCustomEndpoint.IsNull()) { + providerData.MariaDBCustomEndpoint = providerConfig.MariaDBCustomEndpoint.ValueString() + } + if !(providerConfig.OpenSearchCustomEndpoint.IsUnknown() || providerConfig.OpenSearchCustomEndpoint.IsNull()) { + providerData.OpenSearchCustomEndpoint = providerConfig.OpenSearchCustomEndpoint.ValueString() + } + if !(providerConfig.RedisCustomEndpoint.IsUnknown() || providerConfig.RedisCustomEndpoint.IsNull()) { + providerData.RedisCustomEndpoint = providerConfig.RedisCustomEndpoint.ValueString() + } + if !(providerConfig.ArgusCustomEndpoint.IsUnknown() || providerConfig.ArgusCustomEndpoint.IsNull()) { + providerData.ArgusCustomEndpoint = providerConfig.ArgusCustomEndpoint.ValueString() + } + if !(providerConfig.SKECustomEndpoint.IsUnknown() || providerConfig.SKECustomEndpoint.IsNull()) { + providerData.SKECustomEndpoint = providerConfig.SKECustomEndpoint.ValueString() + } + if !(providerConfig.ResourceManagerCustomEndpoint.IsUnknown() || providerConfig.ResourceManagerCustomEndpoint.IsNull()) { + providerData.ResourceManagerCustomEndpoint = providerConfig.ResourceManagerCustomEndpoint.ValueString() + } + roundTripper, err := sdkauth.SetupAuth(sdkConfig) + if err != nil { + resp.Diagnostics.AddError( + "Unable to Setup SDK", + err.Error(), + ) + return + } + + // Make round tripper and custom endpoints available during DataSource and Resource + // type Configure methods. + providerData.RoundTripper = roundTripper + resp.DataSourceData = providerData + resp.ResourceData = providerData +} + +// DataSources defines the data sources implemented in the provider. 
+func (p *Provider) DataSources(_ context.Context) []func() datasource.DataSource { + return []func() datasource.DataSource{ + dnsZone.NewZoneDataSource, + dnsRecordSet.NewRecordSetDataSource, + postgresInstance.NewInstanceDataSource, + postgresCredentials.NewCredentialsDataSource, + logMeInstance.NewInstanceDataSource, + logMeCredentials.NewCredentialsDataSource, + mariaDBInstance.NewInstanceDataSource, + mariaDBCredentials.NewCredentialsDataSource, + openSearchInstance.NewInstanceDataSource, + openSearchCredentials.NewCredentialsDataSource, + rabbitMQInstance.NewInstanceDataSource, + rabbitMQCredentials.NewCredentialsDataSource, + redisInstance.NewInstanceDataSource, + redisCredentials.NewCredentialsDataSource, + argusInstance.NewInstanceDataSource, + argusScrapeConfig.NewScrapeConfigDataSource, + resourceManagerProject.NewProjectDataSource, + skeProject.NewProjectDataSource, + skeCluster.NewClusterDataSource, + postgresFlexInstance.NewInstanceDataSource, + postgresFlexUser.NewUserDataSource, + } +} + +// Resources defines the resources implemented in the provider. 
+func (p *Provider) Resources(_ context.Context) []func() resource.Resource { + return []func() resource.Resource{ + dnsZone.NewZoneResource, + dnsRecordSet.NewRecordSetResource, + postgresInstance.NewInstanceResource, + postgresCredentials.NewCredentialsResource, + logMeInstance.NewInstanceResource, + logMeCredentials.NewlogmeCredentialsResource, + mariaDBInstance.NewInstanceResource, + mariaDBCredentials.NewCredentialsResource, + openSearchInstance.NewInstanceResource, + openSearchCredentials.NewCredentialsResource, + rabbitMQInstance.NewInstanceResource, + rabbitMQCredentials.NewCredentialsResource, + redisInstance.NewInstanceResource, + redisCredentials.NewCredentialsResource, + argusInstance.NewInstanceResource, + argusScrapeConfig.NewScrapeConfigResource, + resourceManagerProject.NewProjectResource, + argusCredential.NewCredentialResource, + skeProject.NewProjectResource, + skeCluster.NewClusterResource, + postgresFlexInstance.NewInstanceResource, + postgresFlexUser.NewUserResource, + } +} diff --git a/stackit/services/argus/argus_acc_test.go b/stackit/services/argus/argus_acc_test.go new file mode 100644 index 00000000..4510cc89 --- /dev/null +++ b/stackit/services/argus/argus_acc_test.go @@ -0,0 +1,353 @@ +package argus_test + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/argus" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/testutil" +) + +var instanceResource = map[string]string{ + "project_id": testutil.ProjectId, + "name": testutil.ResourceNameWithDateTime("argus"), + "plan_name": "Monitoring-Medium-EU01", +} + +var 
scrapeConfigResource = map[string]string{ + "project_id": testutil.ProjectId, + "name": fmt.Sprintf("scrapeconfig-%s", acctest.RandStringFromCharSet(7, acctest.CharSetAlphaNum)), + "urls": fmt.Sprintf(`{urls = ["www.%s.de","%s.de"]}`, acctest.RandStringFromCharSet(15, acctest.CharSetAlphaNum), acctest.RandStringFromCharSet(15, acctest.CharSetAlphaNum)), + "metrics_path": "/metrics", + "scheme": "https", + "scrape_interval": "4m", // non-default + "saml2_enable_url_parameters": "false", +} + +var credentialsResource = map[string]string{ + "project_id": testutil.ProjectId, +} + +func resourceConfig(instanceName, target, saml2EnableUrlParameters string) string { + return fmt.Sprintf(` + %s + + resource "stackit_argus_instance" "instance" { + project_id = "%s" + name = "%s" + plan_name = "%s" + } + + resource "stackit_argus_scrapeconfig" "scrapeconfig" { + project_id = stackit_argus_instance.instance.project_id + instance_id = stackit_argus_instance.instance.instance_id + name = "%s" + metrics_path = "%s" + targets = [%s] + scrape_interval = "%s" + saml2 = { + enable_url_parameters = %s + } + } + + resource "stackit_argus_credential" "credential" { + project_id = stackit_argus_instance.instance.project_id + instance_id = stackit_argus_instance.instance.instance_id + } + + `, + testutil.ArgusProviderConfig(), + instanceResource["project_id"], + instanceName, + instanceResource["plan_name"], + scrapeConfigResource["name"], + scrapeConfigResource["metrics_path"], + target, + scrapeConfigResource["scrape_interval"], + saml2EnableUrlParameters, + ) +} + +func TestAccResource(t *testing.T) { + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: testutil.TestAccProtoV6ProviderFactories, + CheckDestroy: testAccCheckArgusDestroy, + Steps: []resource.TestStep{ + // Creation + { + Config: resourceConfig(instanceResource["name"], scrapeConfigResource["urls"], scrapeConfigResource["saml2_enable_url_parameters"]), + Check: resource.ComposeAggregateTestCheckFunc( + // 
Instance data + resource.TestCheckResourceAttr("stackit_argus_instance.instance", "project_id", instanceResource["project_id"]), + resource.TestCheckResourceAttrSet("stackit_argus_instance.instance", "instance_id"), + resource.TestCheckResourceAttr("stackit_argus_instance.instance", "name", instanceResource["name"]), + resource.TestCheckResourceAttr("stackit_argus_instance.instance", "plan_name", instanceResource["plan_name"]), + resource.TestCheckResourceAttrSet("stackit_argus_instance.instance", "dashboard_url"), + resource.TestCheckResourceAttrSet("stackit_argus_instance.instance", "is_updatable"), + resource.TestCheckResourceAttrSet("stackit_argus_instance.instance", "grafana_public_read_access"), + resource.TestCheckResourceAttrSet("stackit_argus_instance.instance", "grafana_url"), + resource.TestCheckResourceAttrSet("stackit_argus_instance.instance", "grafana_initial_admin_user"), + resource.TestCheckResourceAttrSet("stackit_argus_instance.instance", "grafana_initial_admin_password"), + resource.TestCheckResourceAttrSet("stackit_argus_instance.instance", "metrics_retention_days"), + resource.TestCheckResourceAttrSet("stackit_argus_instance.instance", "metrics_retention_days_5m_downsampling"), + resource.TestCheckResourceAttrSet("stackit_argus_instance.instance", "metrics_retention_days_1h_downsampling"), + resource.TestCheckResourceAttrSet("stackit_argus_instance.instance", "metrics_url"), + resource.TestCheckResourceAttrSet("stackit_argus_instance.instance", "metrics_push_url"), + resource.TestCheckResourceAttrSet("stackit_argus_instance.instance", "targets_url"), + resource.TestCheckResourceAttrSet("stackit_argus_instance.instance", "alerting_url"), + resource.TestCheckResourceAttrSet("stackit_argus_instance.instance", "logs_url"), + resource.TestCheckResourceAttrSet("stackit_argus_instance.instance", "logs_push_url"), + resource.TestCheckResourceAttrSet("stackit_argus_instance.instance", "jaeger_traces_url"), + 
resource.TestCheckResourceAttrSet("stackit_argus_instance.instance", "jaeger_ui_url"),
+					resource.TestCheckResourceAttrSet("stackit_argus_instance.instance", "otlp_traces_url"),
+					resource.TestCheckResourceAttrSet("stackit_argus_instance.instance", "zipkin_spans_url"),
+
+					// scrape config data
+					resource.TestCheckResourceAttrPair(
+						"stackit_argus_instance.instance", "project_id",
+						"stackit_argus_scrapeconfig.scrapeconfig", "project_id",
+					),
+					resource.TestCheckResourceAttrPair(
+						"stackit_argus_instance.instance", "instance_id",
+						"stackit_argus_scrapeconfig.scrapeconfig", "instance_id",
+					),
+					resource.TestCheckResourceAttr("stackit_argus_scrapeconfig.scrapeconfig", "name", scrapeConfigResource["name"]),
+					resource.TestCheckResourceAttr("stackit_argus_scrapeconfig.scrapeconfig", "targets.0.urls.#", "2"),
+					resource.TestCheckResourceAttr("stackit_argus_scrapeconfig.scrapeconfig", "metrics_path", scrapeConfigResource["metrics_path"]),
+					resource.TestCheckResourceAttr("stackit_argus_scrapeconfig.scrapeconfig", "scheme", scrapeConfigResource["scheme"]),
+					resource.TestCheckResourceAttr("stackit_argus_scrapeconfig.scrapeconfig", "scrape_interval", scrapeConfigResource["scrape_interval"]),
+					resource.TestCheckResourceAttr("stackit_argus_scrapeconfig.scrapeconfig", "saml2.%", "1"),
+					resource.TestCheckResourceAttr("stackit_argus_scrapeconfig.scrapeconfig", "saml2.enable_url_parameters", scrapeConfigResource["saml2_enable_url_parameters"]),
+
+					// credentials
+					resource.TestCheckResourceAttr("stackit_argus_credential.credential", "project_id", credentialsResource["project_id"]),
+					resource.TestCheckResourceAttrPair(
+						"stackit_argus_instance.instance", "instance_id",
+						"stackit_argus_credential.credential", "instance_id",
+					),
+					resource.TestCheckResourceAttrSet("stackit_argus_credential.credential", "username"),
+					resource.TestCheckResourceAttrSet("stackit_argus_credential.credential", "password"),
+				),
+			}, {
+				// Data 
source + Config: fmt.Sprintf(` + %s + + data "stackit_argus_instance" "instance" { + project_id = stackit_argus_instance.instance.project_id + instance_id = stackit_argus_instance.instance.instance_id + } + + data "stackit_argus_scrapeconfig" "scrapeconfig" { + project_id = stackit_argus_scrapeconfig.scrapeconfig.project_id + instance_id = stackit_argus_scrapeconfig.scrapeconfig.instance_id + name = stackit_argus_scrapeconfig.scrapeconfig.name + } + `, + resourceConfig(instanceResource["name"], scrapeConfigResource["urls"], scrapeConfigResource["saml2_enable_url_parameters"]), + ), + Check: resource.ComposeAggregateTestCheckFunc( + // Instance data + resource.TestCheckResourceAttr("data.stackit_argus_instance.instance", "project_id", instanceResource["project_id"]), + resource.TestCheckResourceAttrSet("data.stackit_argus_instance.instance", "instance_id"), + resource.TestCheckResourceAttr("data.stackit_argus_instance.instance", "name", instanceResource["name"]), + resource.TestCheckResourceAttr("data.stackit_argus_instance.instance", "plan_name", instanceResource["plan_name"]), + resource.TestCheckResourceAttrPair( + "stackit_argus_instance.instance", "project_id", + "data.stackit_argus_instance.instance", "project_id", + ), + resource.TestCheckResourceAttrPair( + "stackit_argus_instance.instance", "instance_id", + "data.stackit_argus_instance.instance", "instance_id", + ), + // scrape config data + resource.TestCheckResourceAttrPair( + "stackit_argus_scrapeconfig.scrapeconfig", "project_id", + "data.stackit_argus_scrapeconfig.scrapeconfig", "project_id", + ), + resource.TestCheckResourceAttrPair( + "stackit_argus_scrapeconfig.scrapeconfig", "instance_id", + "data.stackit_argus_scrapeconfig.scrapeconfig", "instance_id", + ), + resource.TestCheckResourceAttrPair( + "stackit_argus_scrapeconfig.scrapeconfig", "name", + "data.stackit_argus_scrapeconfig.scrapeconfig", "name", + ), + resource.TestCheckResourceAttr("data.stackit_argus_scrapeconfig.scrapeconfig", "name", 
scrapeConfigResource["name"]), + resource.TestCheckResourceAttr("data.stackit_argus_scrapeconfig.scrapeconfig", "targets.0.urls.#", "2"), + resource.TestCheckResourceAttr("data.stackit_argus_scrapeconfig.scrapeconfig", "metrics_path", scrapeConfigResource["metrics_path"]), + resource.TestCheckResourceAttr("data.stackit_argus_scrapeconfig.scrapeconfig", "scheme", scrapeConfigResource["scheme"]), + resource.TestCheckResourceAttr("data.stackit_argus_scrapeconfig.scrapeconfig", "scrape_interval", scrapeConfigResource["scrape_interval"]), + resource.TestCheckResourceAttr("data.stackit_argus_scrapeconfig.scrapeconfig", "saml2.enable_url_parameters", scrapeConfigResource["saml2_enable_url_parameters"]), + ), + }, + + // Import + { + ResourceName: "stackit_argus_instance.instance", + ImportStateIdFunc: func(s *terraform.State) (string, error) { + r, ok := s.RootModule().Resources["stackit_argus_instance.instance"] + if !ok { + return "", fmt.Errorf("couldn't find resource stackit_argus_instance.instance") + } + instanceId, ok := r.Primary.Attributes["instance_id"] + if !ok { + return "", fmt.Errorf("couldn't find attribute instance_id") + } + + return fmt.Sprintf("%s,%s", testutil.ProjectId, instanceId), nil + }, + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "stackit_argus_scrapeconfig.scrapeconfig", + ImportStateIdFunc: func(s *terraform.State) (string, error) { + r, ok := s.RootModule().Resources["stackit_argus_scrapeconfig.scrapeconfig"] + if !ok { + return "", fmt.Errorf("couldn't find resource stackit_argus_scrapeconfig.scrapeconfig") + } + instanceId, ok := r.Primary.Attributes["instance_id"] + if !ok { + return "", fmt.Errorf("couldn't find attribute instance_id") + } + name, ok := r.Primary.Attributes["name"] + if !ok { + return "", fmt.Errorf("couldn't find attribute name") + } + return fmt.Sprintf("%s,%s,%s", testutil.ProjectId, instanceId, name), nil + }, + ImportState: true, + ImportStateVerify: true, + }, + // Update + { + Config: 
resourceConfig(fmt.Sprintf("%s-new", instanceResource["name"]), "", "true"), + Check: resource.ComposeAggregateTestCheckFunc( + // Instance + resource.TestCheckResourceAttr("stackit_argus_instance.instance", "project_id", instanceResource["project_id"]), + resource.TestCheckResourceAttrSet("stackit_argus_instance.instance", "instance_id"), + resource.TestCheckResourceAttr("stackit_argus_instance.instance", "name", instanceResource["name"]+"-new"), + resource.TestCheckResourceAttr("stackit_argus_instance.instance", "plan_name", instanceResource["plan_name"]), + + // Scrape Config + resource.TestCheckResourceAttr("stackit_argus_scrapeconfig.scrapeconfig", "name", scrapeConfigResource["name"]), + resource.TestCheckResourceAttr("stackit_argus_scrapeconfig.scrapeconfig", "targets.#", "0"), + resource.TestCheckResourceAttr("stackit_argus_scrapeconfig.scrapeconfig", "metrics_path", scrapeConfigResource["metrics_path"]), + resource.TestCheckResourceAttr("stackit_argus_scrapeconfig.scrapeconfig", "scheme", scrapeConfigResource["scheme"]), + resource.TestCheckResourceAttr("stackit_argus_scrapeconfig.scrapeconfig", "scrape_interval", scrapeConfigResource["scrape_interval"]), + resource.TestCheckResourceAttr("stackit_argus_scrapeconfig.scrapeconfig", "saml2.%", "1"), + resource.TestCheckResourceAttr("stackit_argus_scrapeconfig.scrapeconfig", "saml2.enable_url_parameters", "true"), + + // Credentials + resource.TestCheckResourceAttrSet("stackit_argus_credential.credential", "username"), + resource.TestCheckResourceAttrSet("stackit_argus_credential.credential", "password"), + ), + }, + // Update and remove saml2 attribute + { + Config: fmt.Sprintf(` + %s + + resource "stackit_argus_instance" "instance" { + project_id = "%s" + name = "%s" + plan_name = "%s" + } + + resource "stackit_argus_scrapeconfig" "scrapeconfig" { + project_id = stackit_argus_instance.instance.project_id + instance_id = stackit_argus_instance.instance.instance_id + name = "%s" + targets = [%s] + 
scrape_interval = "%s" + metrics_path = "%s" + } + `, + testutil.ArgusProviderConfig(), + instanceResource["project_id"], + instanceResource["name"], + instanceResource["plan_name"], + scrapeConfigResource["name"], + scrapeConfigResource["urls"], + scrapeConfigResource["scrape_interval"], + scrapeConfigResource["metrics_path"], + ), + Check: resource.ComposeAggregateTestCheckFunc( + // Instance + resource.TestCheckResourceAttr("stackit_argus_instance.instance", "project_id", instanceResource["project_id"]), + resource.TestCheckResourceAttrSet("stackit_argus_instance.instance", "instance_id"), + resource.TestCheckResourceAttr("stackit_argus_instance.instance", "name", instanceResource["name"]), + resource.TestCheckResourceAttr("stackit_argus_instance.instance", "plan_name", instanceResource["plan_name"]), + + // Scrape Config + resource.TestCheckResourceAttr("stackit_argus_scrapeconfig.scrapeconfig", "name", scrapeConfigResource["name"]), + resource.TestCheckResourceAttr("stackit_argus_scrapeconfig.scrapeconfig", "targets.#", "1"), + resource.TestCheckResourceAttr("stackit_argus_scrapeconfig.scrapeconfig", "metrics_path", scrapeConfigResource["metrics_path"]), + resource.TestCheckResourceAttr("stackit_argus_scrapeconfig.scrapeconfig", "scheme", scrapeConfigResource["scheme"]), + resource.TestCheckResourceAttr("stackit_argus_scrapeconfig.scrapeconfig", "scrape_interval", scrapeConfigResource["scrape_interval"]), + resource.TestCheckResourceAttr("stackit_argus_scrapeconfig.scrapeconfig", "saml2.%", "0"), + resource.TestCheckNoResourceAttr("stackit_argus_scrapeconfig.scrapeconfig", "saml2.enable_url_parameters"), + ), + }, + + // Deletion is done by the framework implicitly + }, + }) +} + +func testAccCheckArgusDestroy(s *terraform.State) error { + ctx := context.Background() + var client *argus.APIClient + var err error + if testutil.ArgusCustomEndpoint == "" { + client, err = argus.NewAPIClient() + } else { + client, err = argus.NewAPIClient( + 
config.WithEndpoint(testutil.ArgusCustomEndpoint),
+		)
+	}
+	if err != nil {
+		return fmt.Errorf("creating client: %w", err)
+	}
+
+	instancesToDestroy := []string{}
+	for _, rs := range s.RootModule().Resources {
+		if rs.Type != "stackit_argus_instance" {
+			continue
+		}
+		// instance terraform ID: "[project_id],[instance_id]"
+		instanceId := strings.Split(rs.Primary.ID, core.Separator)[1]
+		instancesToDestroy = append(instancesToDestroy, instanceId)
+	}
+
+	instancesResp, err := client.GetInstances(ctx, testutil.ProjectId).Execute()
+	if err != nil {
+		return fmt.Errorf("getting instancesResp: %w", err)
+	}
+
+	instances := *instancesResp.Instances
+	for i := range instances {
+		if utils.Contains(instancesToDestroy, *instances[i].Id) {
+			if *instances[i].Status != argus.DeleteSuccess {
+				_, err := client.DeleteInstanceExecute(ctx, testutil.ProjectId, *instances[i].Id)
+				if err != nil {
+					return fmt.Errorf("destroying instance %s during CheckDestroy: %w", *instances[i].Id, err)
+				}
+				_, err = argus.DeleteInstanceWaitHandler(ctx, client, testutil.ProjectId, *instances[i].Id).WaitWithContext(ctx)
+				if err != nil {
+					return fmt.Errorf("destroying instance %s during CheckDestroy: waiting for deletion %w", *instances[i].Id, err)
+				}
+			}
+		}
+	}
+	return nil
+}
diff --git a/stackit/services/argus/credential/resource.go b/stackit/services/argus/credential/resource.go
new file mode 100644
index 00000000..38abb8ff
--- /dev/null
+++ b/stackit/services/argus/credential/resource.go
@@ -0,0 +1,236 @@
+package argus
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
+	"github.com/hashicorp/terraform-plugin-framework/resource"
+	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
+	"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
+	"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
+	
"github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/argus" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ resource.Resource = &credentialResource{} + _ resource.ResourceWithConfigure = &credentialResource{} +) + +type Model struct { + Id types.String `tfsdk:"id"` + ProjectId types.String `tfsdk:"project_id"` + InstanceId types.String `tfsdk:"instance_id"` + Username types.String `tfsdk:"username"` + Password types.String `tfsdk:"password"` +} + +// NewCredentialResource is a helper function to simplify the provider implementation. +func NewCredentialResource() resource.Resource { + return &credentialResource{} +} + +// credentialResource is the resource implementation. +type credentialResource struct { + client *argus.APIClient +} + +// Metadata returns the resource type name. +func (r *credentialResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_argus_credential" +} + +// Configure adds the provider configured client to the resource. +func (r *credentialResource) Configure(_ context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. 
+ if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Data Source Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T", req.ProviderData)) + return + } + + var apiClient *argus.APIClient + var err error + if providerData.ArgusCustomEndpoint != "" { + apiClient, err = argus.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.ArgusCustomEndpoint), + ) + } else { + apiClient, err = argus.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + r.client = apiClient +} + +func (r *credentialResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: "Terraform's internal resource ID.", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "project_id": schema.StringAttribute{ + Description: "STACKIT project ID to which the credential is associated.", + Required: true, + Validators: []validator.String{ + validate.UUID(), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "instance_id": schema.StringAttribute{ + Description: "The Argus Instance ID the credential belongs to.", + Required: true, + Validators: []validator.String{ + validate.UUID(), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "username": schema.StringAttribute{ + Description: "Credential username", + Computed: true, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), + }, + }, + "password": schema.StringAttribute{ + Description: "Credential password", + 
Computed: true, + Sensitive: true, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + }, + } +} + +// Create creates the resource and sets the initial Terraform state. +func (r *credentialResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + + got, err := r.client.CreateCredential(ctx, instanceId, projectId).Execute() + if err != nil { + resp.Diagnostics.AddError("Error creating credential", fmt.Sprintf("Calling API: %v", err)) + return + } + err = mapFields(got.Credentials, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + diags = resp.State.Set(ctx, &model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "ARGUS credential created") +} + +func mapFields(r *argus.Credential, model *Model) error { + if r == nil { + return fmt.Errorf("response input is nil") + } + if model == nil { + return fmt.Errorf("model input is nil") + } + var userName string + if model.Username.ValueString() != "" { + userName = model.Username.ValueString() + } else if r.Username != nil { + userName = *r.Username + } else { + return fmt.Errorf("username id not present") + } + idParts := []string{ + model.ProjectId.ValueString(), + model.InstanceId.ValueString(), + userName, + } + model.Id = types.StringValue( + strings.Join(idParts, core.Separator), + ) + model.Username = types.StringPointerValue(r.Username) + model.Password = types.StringPointerValue(r.Password) + return nil +} + +// Read refreshes the Terraform state with the latest data. 
+func (r *credentialResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + userName := model.Username.ValueString() + _, err := r.client.GetCredential(ctx, instanceId, projectId, userName).Execute() + if err != nil { + resp.Diagnostics.AddError("Error reading credential", fmt.Sprintf("Project id = %s, instance id = %s, username = %s: %v", projectId, instanceId, userName, err)) + return + } + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "ARGUS credential read") +} + +func (r *credentialResource) Update(_ context.Context, _ resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform + // Update shouldn't be called + resp.Diagnostics.AddError("Error updating credentials", "credentials can't be updated") +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *credentialResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform + // Retrieve values from state + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + userName := model.Username.ValueString() + _, err := r.client.DeleteCredential(ctx, instanceId, projectId, userName).Execute() + if err != nil { + resp.Diagnostics.AddError("Error deleting credential", "project id = "+projectId+", instance id = "+instanceId+", username = "+userName+", "+err.Error()) + return + } + tflog.Info(ctx, "ARGUS credential deleted") +} diff --git a/stackit/services/argus/credential/resource_test.go b/stackit/services/argus/credential/resource_test.go new file mode 100644 index 00000000..e1b5a1a2 --- /dev/null +++ b/stackit/services/argus/credential/resource_test.go @@ -0,0 +1,77 @@ +package argus + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/argus" +) + +func TestMapFields(t *testing.T) { + tests := []struct { + description string + input *argus.Credential + expected Model + isValid bool + }{ + { + "ok", + &argus.Credential{ + Username: utils.Ptr("username"), + Password: utils.Ptr("password"), + }, + Model{ + Id: types.StringValue("pid,iid,username"), + ProjectId: types.StringValue("pid"), + InstanceId: types.StringValue("iid"), + Username: types.StringValue("username"), + Password: types.StringValue("password"), + }, + true, + }, + { + "response_nil_fail", + nil, + Model{}, + false, + }, + { + "response_fields_nil_fail", + &argus.Credential{ + Password: nil, + Username: nil, + }, + Model{}, + false, + }, + { + "no_resource_id", + &argus.Credential{}, + Model{}, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + state := &Model{ + ProjectId: tt.expected.ProjectId, + InstanceId: tt.expected.InstanceId, + } + err := mapFields(tt.input, state) + if !tt.isValid && err == nil { + 
t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(state, &tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} diff --git a/stackit/services/argus/instance/datasource.go b/stackit/services/argus/instance/datasource.go new file mode 100644 index 00000000..435ff3b1 --- /dev/null +++ b/stackit/services/argus/instance/datasource.go @@ -0,0 +1,229 @@ +package argus + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/argus" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &instanceDataSource{} +) + +// NewInstanceDataSource is a helper function to simplify the provider implementation. +func NewInstanceDataSource() datasource.DataSource { + return &instanceDataSource{} +} + +// instanceDataSource is the data source implementation. +type instanceDataSource struct { + client *argus.APIClient +} + +// Metadata returns the data source type name. +func (d *instanceDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_argus_instance" +} + +func (d *instanceDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. 
+ if req.ProviderData == nil { + return + } + + var apiClient *argus.APIClient + var err error + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Data Source Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. Please report this issue to the provider developers.", req.ProviderData)) + return + } + + if providerData.ArgusCustomEndpoint != "" { + apiClient, err = argus.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.ArgusCustomEndpoint), + ) + } else { + apiClient, err = argus.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + if err != nil { + resp.Diagnostics.AddError( + "Could not Configure API Client", + err.Error(), + ) + return + } + d.client = apiClient +} + +// Schema defines the schema for the data source. +func (d *instanceDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: "Terraform's internal resource ID.", + Computed: true, + }, + "project_id": schema.StringAttribute{ + Description: "STACKIT project ID to which the instance is associated.", + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "instance_id": schema.StringAttribute{ + Description: "The Argus instance ID.", + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "name": schema.StringAttribute{ + Description: "The name of the Argus instance.", + Computed: true, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), + stringvalidator.LengthAtMost(300), + }, + }, + "plan_name": schema.StringAttribute{ + Description: "Specifies the Argus plan. E.g. 
`Monitoring-Medium-EU01`.", + Computed: true, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), + stringvalidator.LengthAtMost(200), + }, + }, + "plan_id": schema.StringAttribute{ + Description: "The Argus plan ID.", + Computed: true, + Validators: []validator.String{ + validate.UUID(), + }, + }, + "parameters": schema.MapAttribute{ + Description: "Additional parameters.", + Computed: true, + ElementType: types.StringType, + }, + "dashboard_url": schema.StringAttribute{ + Description: "Specifies Argus instance dashboard URL.", + Computed: true, + }, + "is_updatable": schema.BoolAttribute{ + Description: "Specifies if the instance can be updated.", + Computed: true, + }, + "grafana_public_read_access": schema.BoolAttribute{ + Description: "If true, anyone can access Grafana dashboards without logging in.", + Computed: true, + }, + "grafana_url": schema.StringAttribute{ + Description: "Specifies Grafana URL.", + Computed: true, + }, + "grafana_initial_admin_user": schema.StringAttribute{ + Description: "Specifies an initial Grafana admin username.", + Computed: true, + }, + "grafana_initial_admin_password": schema.StringAttribute{ + Description: "Specifies an initial Grafana admin password.", + Computed: true, + Sensitive: true, + }, + "metrics_retention_days": schema.Int64Attribute{ + Description: "Specifies for how many days the raw metrics are kept.", + Computed: true, + }, + "metrics_retention_days_5m_downsampling": schema.Int64Attribute{ + Description: "Specifies for how many days the 5m downsampled metrics are kept. must be less than the value of the general retention. Default is set to `0` (disabled).", + Computed: true, + }, + "metrics_retention_days_1h_downsampling": schema.Int64Attribute{ + Description: "Specifies for how many days the 1h downsampled metrics are kept. must be less than the value of the 5m downsampling retention. 
Default is set to `0` (disabled).", + Computed: true, + }, + "metrics_url": schema.StringAttribute{ + Description: "Specifies metrics URL.", + Computed: true, + }, + "metrics_push_url": schema.StringAttribute{ + Description: "Specifies URL for pushing metrics.", + Computed: true, + }, + "targets_url": schema.StringAttribute{ + Description: "Specifies Targets URL.", + Computed: true, + }, + "alerting_url": schema.StringAttribute{ + Description: "Specifies Alerting URL.", + Computed: true, + }, + "logs_url": schema.StringAttribute{ + Description: "Specifies Logs URL.", + Computed: true, + }, + "logs_push_url": schema.StringAttribute{ + Description: "Specifies URL for pushing logs.", + Computed: true, + }, + "jaeger_traces_url": schema.StringAttribute{ + Computed: true, + }, + "jaeger_ui_url": schema.StringAttribute{ + Computed: true, + }, + "otlp_traces_url": schema.StringAttribute{ + Computed: true, + }, + "zipkin_spans_url": schema.StringAttribute{ + Computed: true, + }, + }, + } +} + +// Read refreshes the Terraform state with the latest data. +func (d *instanceDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var state Model + diags := req.Config.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := state.ProjectId.ValueString() + instanceId := state.InstanceId.ValueString() + instanceResponse, err := d.client.GetInstance(ctx, instanceId, projectId).Execute() + if err != nil { + core.LogAndAddError(ctx, &diags, "Unable to read instance", err.Error()) + return + } + + err = mapFields(ctx, instanceResponse, &state) + if err != nil { + core.LogAndAddError(ctx, &diags, "Mapping fields", err.Error()) + return + } + diags = resp.State.Set(ctx, state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } +} diff --git a/stackit/services/argus/instance/resource.go b/stackit/services/argus/instance/resource.go new file mode 100644 index 00000000..cec3eda4 --- /dev/null +++ b/stackit/services/argus/instance/resource.go @@ -0,0 +1,558 @@ +package argus + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/mapplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/argus" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" +) + +// Ensure the implementation satisfies the expected interfaces. 
+var ( + _ resource.Resource = &instanceResource{} + _ resource.ResourceWithConfigure = &instanceResource{} + _ resource.ResourceWithImportState = &instanceResource{} +) + +type Model struct { + Id types.String `tfsdk:"id"` // needed by TF + ProjectId types.String `tfsdk:"project_id"` + InstanceId types.String `tfsdk:"instance_id"` + Name types.String `tfsdk:"name"` + PlanName types.String `tfsdk:"plan_name"` + PlanId types.String `tfsdk:"plan_id"` + Parameters types.Map `tfsdk:"parameters"` + DashboardURL types.String `tfsdk:"dashboard_url"` + IsUpdatable types.Bool `tfsdk:"is_updatable"` + GrafanaURL types.String `tfsdk:"grafana_url"` + GrafanaPublicReadAccess types.Bool `tfsdk:"grafana_public_read_access"` + GrafanaInitialAdminPassword types.String `tfsdk:"grafana_initial_admin_password"` + GrafanaInitialAdminUser types.String `tfsdk:"grafana_initial_admin_user"` + MetricsRetentionDays types.Int64 `tfsdk:"metrics_retention_days"` + MetricsRetentionDays5mDownsampling types.Int64 `tfsdk:"metrics_retention_days_5m_downsampling"` + MetricsRetentionDays1hDownsampling types.Int64 `tfsdk:"metrics_retention_days_1h_downsampling"` + MetricsURL types.String `tfsdk:"metrics_url"` + MetricsPushURL types.String `tfsdk:"metrics_push_url"` + TargetsURL types.String `tfsdk:"targets_url"` + AlertingURL types.String `tfsdk:"alerting_url"` + LogsURL types.String `tfsdk:"logs_url"` + LogsPushURL types.String `tfsdk:"logs_push_url"` + JaegerTracesURL types.String `tfsdk:"jaeger_traces_url"` + JaegerUIURL types.String `tfsdk:"jaeger_ui_url"` + OtlpTracesURL types.String `tfsdk:"otlp_traces_url"` + ZipkinSpansURL types.String `tfsdk:"zipkin_spans_url"` +} + +// NewInstanceResource is a helper function to simplify the provider implementation. +func NewInstanceResource() resource.Resource { + return &instanceResource{} +} + +// instanceResource is the resource implementation. +type instanceResource struct { + client *argus.APIClient +} + +// Metadata returns the resource type name. 
+func (r *instanceResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_argus_instance" +} + +// Configure adds the provider configured client to the resource. +func (r *instanceResource) Configure(_ context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Resource Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. Please report this issue to the provider developers.", req.ProviderData)) + return + } + + var apiClient *argus.APIClient + var err error + if providerData.ArgusCustomEndpoint != "" { + apiClient, err = argus.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.ArgusCustomEndpoint), + ) + } else { + apiClient, err = argus.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + r.client = apiClient +} + +// Schema defines the schema for the resource. 
+func (r *instanceResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: "Terraform's internal resource ID.", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "project_id": schema.StringAttribute{ + Description: "STACKIT project ID to which the instance is associated.", + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "instance_id": schema.StringAttribute{ + Description: "The Argus instance ID.", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "name": schema.StringAttribute{ + Description: "The name of the Argus instance.", + Required: true, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), + stringvalidator.LengthAtMost(200), + }, + }, + "plan_name": schema.StringAttribute{ + Description: "Specifies the Argus plan. E.g. 
`Monitoring-Medium-EU01`.", + Required: true, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), + stringvalidator.LengthAtMost(200), + }, + }, + "plan_id": schema.StringAttribute{ + Description: "The Argus plan ID.", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + validate.UUID(), + }, + }, + "parameters": schema.MapAttribute{ + Description: "Additional parameters.", + Optional: true, + Computed: true, + ElementType: types.StringType, + PlanModifiers: []planmodifier.Map{ + mapplanmodifier.UseStateForUnknown(), + }, + }, + "dashboard_url": schema.StringAttribute{ + Description: "Specifies Argus instance dashboard URL.", + Computed: true, + }, + "is_updatable": schema.BoolAttribute{ + Description: "Specifies if the instance can be updated.", + Computed: true, + }, + "grafana_public_read_access": schema.BoolAttribute{ + Description: "If true, anyone can access Grafana dashboards without logging in.", + Computed: true, + }, + "grafana_url": schema.StringAttribute{ + Description: "Specifies Grafana URL.", + Computed: true, + }, + "grafana_initial_admin_user": schema.StringAttribute{ + Description: "Specifies an initial Grafana admin username.", + Computed: true, + }, + "grafana_initial_admin_password": schema.StringAttribute{ + Description: "Specifies an initial Grafana admin password.", + Computed: true, + Sensitive: true, + }, + "metrics_retention_days": schema.Int64Attribute{ + Description: "Specifies for how many days the raw metrics are kept.", + Computed: true, + }, + "metrics_retention_days_5m_downsampling": schema.Int64Attribute{ + Description: "Specifies for how many days the 5m downsampled metrics are kept. must be less than the value of the general retention. 
Default is set to `0` (disabled).", + Computed: true, + }, + "metrics_retention_days_1h_downsampling": schema.Int64Attribute{ + Description: "Specifies for how many days the 1h downsampled metrics are kept. must be less than the value of the 5m downsampling retention. Default is set to `0` (disabled).", + Computed: true, + }, + "metrics_url": schema.StringAttribute{ + Description: "Specifies metrics URL.", + Computed: true, + }, + "metrics_push_url": schema.StringAttribute{ + Description: "Specifies URL for pushing metrics.", + Computed: true, + }, + "targets_url": schema.StringAttribute{ + Description: "Specifies Targets URL.", + Computed: true, + }, + "alerting_url": schema.StringAttribute{ + Description: "Specifies Alerting URL.", + Computed: true, + }, + "logs_url": schema.StringAttribute{ + Description: "Specifies Logs URL.", + Computed: true, + }, + "logs_push_url": schema.StringAttribute{ + Description: "Specifies URL for pushing logs.", + Computed: true, + }, + "jaeger_traces_url": schema.StringAttribute{ + Computed: true, + }, + "jaeger_ui_url": schema.StringAttribute{ + Computed: true, + }, + "otlp_traces_url": schema.StringAttribute{ + Computed: true, + }, + "zipkin_spans_url": schema.StringAttribute{ + Computed: true, + }, + }, + } +} + +// Create creates the resource and sets the initial Terraform state. +func (r *instanceResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform + // Retrieve values from plan + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + projectId := model.ProjectId.ValueString() + + r.loadPlanId(ctx, &resp.Diagnostics, &model) + if diags.HasError() { + core.LogAndAddError(ctx, &diags, "Failed to load argus service plan", "plan "+model.PlanName.ValueString()) + return + } + // Generate API request body from model + payload, err := toCreatePayload(&model) + if err != nil { + resp.Diagnostics.AddError("Error creating instance", fmt.Sprintf("Creating API payload: %v", err)) + return + } + createResp, err := r.client.CreateInstance(ctx, projectId).CreateInstancePayload(*payload).Execute() + if err != nil { + resp.Diagnostics.AddError("Error creating instance", fmt.Sprintf("Calling API: %v", err)) + return + } + instanceId := createResp.InstanceId + if instanceId == nil || *instanceId == "" { + resp.Diagnostics.AddError("Error creating instance", "API didn't return an instance id") + return + } + wr, err := argus.CreateInstanceWaitHandler(ctx, r.client, *instanceId, projectId).SetTimeout(20 * time.Minute).WaitWithContext(ctx) + if err != nil { + resp.Diagnostics.AddError("Error creating instance", fmt.Sprintf("Instance creation waiting: %v", err)) + return + } + got, ok := wr.(*argus.InstanceResponse) + if !ok { + resp.Diagnostics.AddError("Error creating instance", fmt.Sprintf("Wait result conversion, got %+v", got)) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(ctx, got, &model) + if err != nil { + resp.Diagnostics.AddError("Error mapping fields", fmt.Sprintf("Project id %s, instance id %s: %v", projectId, *instanceId, err)) + return + } + // Set state to fully populated data + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) +} + +// Read refreshes the Terraform state with the latest data. 
+func (r *instanceResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + + instanceResp, err := r.client.GetInstance(ctx, instanceId, projectId).Execute() + if err != nil { + resp.Diagnostics.AddError("Error reading instance", fmt.Sprintf("Project id = %s, instance id = %s: %v", projectId, instanceId, err)) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(ctx, instanceResp, &model) + if err != nil { + resp.Diagnostics.AddError("Error mapping fields", fmt.Sprintf("Project id %s, instance id %s: %v", projectId, instanceId, err)) + return + } + // Set refreshed model + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *instanceResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform + // Retrieve values from plan + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + + r.loadPlanId(ctx, &resp.Diagnostics, &model) + if diags.HasError() { + core.LogAndAddError(ctx, &diags, "Failed to load argus service plan", "plan "+model.PlanName.ValueString()) + return + } + + // Generate API request body from model + payload, err := toUpdatePayload(&model) + if err != nil { + resp.Diagnostics.AddError("Error updating instance", fmt.Sprintf("Could not create API payload: %v", err)) + return + } + // Update existing instance + _, err = r.client.UpdateInstance(ctx, instanceId, projectId).UpdateInstancePayload(*payload).Execute() + if err != nil { + resp.Diagnostics.AddError("Error updating instance", "project id = "+projectId+", instance Id = "+instanceId+", "+err.Error()) + return + } + wr, err := argus.UpdateInstanceWaitHandler(ctx, r.client, instanceId, projectId).SetTimeout(20 * time.Minute).WaitWithContext(ctx) + if err != nil { + resp.Diagnostics.AddError("Error updating instance", fmt.Sprintf("Instance update waiting: %v", err)) + return + } + got, ok := wr.(*argus.InstanceResponse) + if !ok { + resp.Diagnostics.AddError("Error updating instance", fmt.Sprintf("Wait result conversion, got %+v", got)) + return + } + + err = mapFields(ctx, got, &model) + if err != nil { + resp.Diagnostics.AddError("Error mapping fields in update", "project id = "+projectId+", instance Id = "+instanceId+", "+err.Error()) + return + } + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *instanceResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform + // Retrieve values from state + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + + // Delete existing instance + _, err := r.client.DeleteInstance(ctx, instanceId, projectId).Execute() + if err != nil { + resp.Diagnostics.AddError("Error deleting instance", "project id = "+projectId+", instance Id = "+instanceId+", "+err.Error()) + return + } + _, err = argus.DeleteInstanceWaitHandler(ctx, r.client, instanceId, projectId).SetTimeout(10 * time.Minute).WaitWithContext(ctx) + if err != nil { + resp.Diagnostics.AddError("Error deleting instance", fmt.Sprintf("Instance deletion waiting: %v", err)) + return + } +} + +// ImportState imports a resource into the Terraform state on success. +// The expected format of the resource import identifier is: project_id,instance_id +func (r *instanceResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + idParts := strings.Split(req.ID, core.Separator) + + if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { + resp.Diagnostics.AddError( + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format: [project_id],[instance_id] Got: %q", req.ID), + ) + return + } + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), idParts[1])...) 
+} + +func mapFields(ctx context.Context, r *argus.InstanceResponse, model *Model) error { + if r == nil { + return fmt.Errorf("response input is nil") + } + if model == nil { + return fmt.Errorf("model input is nil") + } + var instanceId string + if model.InstanceId.ValueString() != "" { + instanceId = model.InstanceId.ValueString() + } else if r.Id != nil { + instanceId = *r.Id + } else { + return fmt.Errorf("instance id not present") + } + + idParts := []string{ + model.ProjectId.ValueString(), + instanceId, + } + model.Id = types.StringValue( + strings.Join(idParts, core.Separator), + ) + model.InstanceId = types.StringValue(instanceId) + model.PlanName = types.StringPointerValue(r.PlanName) + model.PlanId = types.StringPointerValue(r.PlanId) + model.Name = types.StringPointerValue(r.Name) + + ps := r.Parameters + if ps == nil { + model.Parameters = types.MapNull(types.StringType) + } else { + params := make(map[string]attr.Value, len(*ps)) + for k, v := range *ps { + params[k] = types.StringValue(v) + } + res, diags := types.MapValueFrom(ctx, types.StringType, params) + if diags.HasError() { + return fmt.Errorf("parameter mapping %s", diags.Errors()) + } + model.Parameters = res + } + + model.IsUpdatable = types.BoolPointerValue(r.IsUpdatable) + model.DashboardURL = types.StringPointerValue(r.DashboardUrl) + if r.Instance != nil { + i := *r.Instance + model.GrafanaURL = types.StringPointerValue(i.GrafanaUrl) + model.GrafanaPublicReadAccess = types.BoolPointerValue(i.GrafanaPublicReadAccess) + model.GrafanaInitialAdminPassword = types.StringPointerValue(i.GrafanaAdminPassword) + model.GrafanaInitialAdminUser = types.StringPointerValue(i.GrafanaAdminUser) + model.MetricsRetentionDays = types.Int64Value(int64(*i.MetricsRetentionTimeRaw)) + model.MetricsRetentionDays5mDownsampling = types.Int64Value(int64(*i.MetricsRetentionTime5m)) + model.MetricsRetentionDays1hDownsampling = types.Int64Value(int64(*i.MetricsRetentionTime1h)) + model.MetricsURL = 
types.StringPointerValue(i.MetricsUrl)
		model.MetricsPushURL = types.StringPointerValue(i.PushMetricsUrl)
		model.TargetsURL = types.StringPointerValue(i.TargetsUrl)
		model.AlertingURL = types.StringPointerValue(i.AlertingUrl)
		model.LogsURL = types.StringPointerValue(i.LogsUrl)
		model.LogsPushURL = types.StringPointerValue(i.LogsPushUrl)
		model.JaegerTracesURL = types.StringPointerValue(i.JaegerTracesUrl)
		model.JaegerUIURL = types.StringPointerValue(i.JaegerUiUrl)
		model.OtlpTracesURL = types.StringPointerValue(i.OtlpTracesUrl)
		model.ZipkinSpansURL = types.StringPointerValue(i.ZipkinSpansUrl)
	}
	return nil
}

// toCreatePayload builds the API payload to create an Argus instance from the
// Terraform plan. Returns an error when the model is nil.
func toCreatePayload(model *Model) (*argus.CreateInstancePayload, error) {
	if model == nil {
		return nil, fmt.Errorf("nil model")
	}
	elements := model.Parameters.Elements()
	pa := make(map[string]interface{}, len(elements))
	// Kept consistent with toUpdatePayload (identical behavior, same loop shape).
	for k, v := range elements {
		// NOTE(review): v.String() renders the Terraform value representation, so
		// string values keep their surrounding quotes — confirm the API expects this.
		pa[k] = v.String()
	}
	return &argus.CreateInstancePayload{
		Name:      model.Name.ValueStringPointer(),
		PlanId:    model.PlanId.ValueStringPointer(),
		Parameter: &pa,
	}, nil
}

// toUpdatePayload builds the API payload to update an Argus instance from the
// Terraform plan. Returns an error when the model is nil.
func toUpdatePayload(model *Model) (*argus.UpdateInstancePayload, error) {
	if model == nil {
		return nil, fmt.Errorf("nil model")
	}
	elements := model.Parameters.Elements()
	pa := make(map[string]interface{}, len(elements))
	for k, v := range elements {
		// NOTE(review): see toCreatePayload — values keep their Terraform quoting.
		pa[k] = v.String()
	}
	return &argus.UpdateInstancePayload{
		Name:      model.Name.ValueStringPointer(),
		PlanId:    model.PlanId.ValueStringPointer(),
		Parameter: &pa,
	}, nil
}

// loadPlanId resolves the user-supplied plan_name to a plan id by listing the
// available Argus plans for the project, and stores it in model.PlanId.
// Failures are reported through diags.
func (r *instanceResource) loadPlanId(ctx context.Context, diags *diag.Diagnostics, model *Model) {
	projectId := model.ProjectId.ValueString()
	res, err := r.client.GetPlans(ctx, projectId).Execute()
	if err != nil {
		diags.AddError("Failed to list argus plans", err.Error())
		return
	}
	// BUG FIX: guard against a nil response/plans list before dereferencing,
	// which would otherwise panic.
	if res == nil || res.Plans == nil {
		diags.AddError("Failed to list argus plans", "API returned an empty plans response")
		return
	}

	planName := model.PlanName.ValueString()
	avl := ""
	plans := *res.Plans
	for i := range plans {
		p := plans[i]
		if p.Name == nil {
			continue
		}
		if
strings.EqualFold(*p.Name, planName) && p.PlanId != nil { + model.PlanId = types.StringPointerValue(p.PlanId) + break + } + avl = fmt.Sprintf("%s\n- %s", avl, *p.Name) + } + if model.PlanId.ValueString() == "" { + diags.AddError("Invalid plan_name", fmt.Sprintf("Couldn't find plan_name '%s', available names are:%s", planName, avl)) + return + } +} diff --git a/stackit/services/argus/instance/resource_test.go b/stackit/services/argus/instance/resource_test.go new file mode 100644 index 00000000..ec9dcc87 --- /dev/null +++ b/stackit/services/argus/instance/resource_test.go @@ -0,0 +1,250 @@ +package argus + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/argus" +) + +func TestMapFields(t *testing.T) { + tests := []struct { + description string + input *argus.InstanceResponse + expected Model + isValid bool + }{ + { + "default_ok", + &argus.InstanceResponse{ + Id: utils.Ptr("iid"), + }, + Model{ + Id: types.StringValue("pid,iid"), + ProjectId: types.StringValue("pid"), + InstanceId: types.StringValue("iid"), + PlanId: types.StringNull(), + PlanName: types.StringNull(), + Name: types.StringNull(), + Parameters: types.MapNull(types.StringType), + }, + true, + }, + { + "values_ok", + &argus.InstanceResponse{ + Id: utils.Ptr("iid"), + Name: utils.Ptr("name"), + PlanName: utils.Ptr("plan1"), + PlanId: utils.Ptr("planId"), + Parameters: &map[string]string{"key": "value"}, + }, + Model{ + Id: types.StringValue("pid,iid"), + ProjectId: types.StringValue("pid"), + Name: types.StringValue("name"), + InstanceId: types.StringValue("iid"), + PlanId: types.StringValue("planId"), + PlanName: types.StringValue("plan1"), + Parameters: toTerraformStringMapMust(context.Background(), 
map[string]string{"key": "value"}), + }, + true, + }, + { + "nullable_fields_ok", + &argus.InstanceResponse{ + Id: utils.Ptr("iid"), + Name: nil, + }, + Model{ + Id: types.StringValue("pid,iid"), + ProjectId: types.StringValue("pid"), + InstanceId: types.StringValue("iid"), + PlanId: types.StringNull(), + PlanName: types.StringNull(), + Name: types.StringNull(), + Parameters: types.MapNull(types.StringType), + }, + true, + }, + { + "response_nil_fail", + nil, + Model{}, + false, + }, + { + "no_resource_id", + &argus.InstanceResponse{}, + Model{}, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + state := &Model{ + ProjectId: tt.expected.ProjectId, + } + err := mapFields(context.Background(), tt.input, state) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(state, &tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func TestToCreatePayload(t *testing.T) { + tests := []struct { + description string + input *Model + expected *argus.CreateInstancePayload + isValid bool + }{ + { + "basic_ok", + &Model{ + PlanId: types.StringValue("planId"), + }, + &argus.CreateInstancePayload{ + Name: nil, + PlanId: utils.Ptr("planId"), + Parameter: &map[string]interface{}{}, + }, + true, + }, + { + "ok", + &Model{ + Name: types.StringValue("Name"), + PlanId: types.StringValue("planId"), + Parameters: makeTestMap(t), + }, + &argus.CreateInstancePayload{ + Name: utils.Ptr("Name"), + PlanId: utils.Ptr("planId"), + Parameter: &map[string]interface{}{"key": `"value"`}, + }, + true, + }, + { + "nil_model", + nil, + nil, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + output, err := toCreatePayload(tt.input) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + 
t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(output, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func TestToPayloadUpdate(t *testing.T) { + tests := []struct { + description string + input *Model + expected *argus.UpdateInstancePayload + isValid bool + }{ + { + "basic_ok", + &Model{ + PlanId: types.StringValue("planId"), + }, + &argus.UpdateInstancePayload{ + Name: nil, + PlanId: utils.Ptr("planId"), + Parameter: &map[string]any{}, + }, + true, + }, + { + "ok", + &Model{ + Name: types.StringValue("Name"), + PlanId: types.StringValue("planId"), + Parameters: makeTestMap(t), + }, + &argus.UpdateInstancePayload{ + Name: utils.Ptr("Name"), + PlanId: utils.Ptr("planId"), + Parameter: &map[string]any{"key": `"value"`}, + }, + true, + }, + { + "nil_model", + nil, + nil, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + output, err := toUpdatePayload(tt.input) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(output, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func makeTestMap(t *testing.T) basetypes.MapValue { + p := make(map[string]attr.Value, 1) + p["key"] = types.StringValue("value") + params, diag := types.MapValueFrom(context.Background(), types.StringType, p) + if diag.HasError() { + t.Fail() + } + return params +} + +// ToTerraformStringMapMust Silently ignores the error +func toTerraformStringMapMust(ctx context.Context, m map[string]string) basetypes.MapValue { + labels := make(map[string]attr.Value, len(m)) + for l, v := range m { + stringValue := types.StringValue(v) + labels[l] = stringValue + } + res, diags := types.MapValueFrom(ctx, types.StringType, m) + if diags.HasError() { + return types.MapNull(types.StringType) + } + return res 
+} diff --git a/stackit/services/argus/scrapeconfig/datasource.go b/stackit/services/argus/scrapeconfig/datasource.go new file mode 100644 index 00000000..5d181025 --- /dev/null +++ b/stackit/services/argus/scrapeconfig/datasource.go @@ -0,0 +1,221 @@ +package argus + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/mapvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/argus" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &scrapeConfigDataSource{} +) + +// NewScrapeConfigDataSource is a helper function to simplify the provider implementation. +func NewScrapeConfigDataSource() datasource.DataSource { + return &scrapeConfigDataSource{} +} + +// scrapeConfigDataSource is the data source implementation. +type scrapeConfigDataSource struct { + client *argus.APIClient +} + +// Metadata returns the data source type name. +func (d *scrapeConfigDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_argus_scrapeconfig" +} + +func (d *scrapeConfigDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. 
+ if req.ProviderData == nil { + return + } + + var apiClient *argus.APIClient + var err error + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Data Source Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. Please report this issue to the provider developers.", req.ProviderData)) + return + } + + if providerData.ArgusCustomEndpoint != "" { + apiClient, err = argus.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.ArgusCustomEndpoint), + ) + } else { + apiClient, err = argus.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + if err != nil { + resp.Diagnostics.AddError( + "Could not Configure API Client", + err.Error(), + ) + return + } + d.client = apiClient +} + +// Schema defines the schema for the data source. +func (d *scrapeConfigDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: "Terraform's internal resource ID.", + Computed: true, + }, + "project_id": schema.StringAttribute{ + Description: "STACKIT project ID to which the scraping job is associated.", + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "instance_id": schema.StringAttribute{ + Description: "Argus instance ID to which the scraping job is associated.", + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "name": schema.StringAttribute{ + Description: "Specifies the name of the scraping job", + Required: true, + Validators: []validator.String{ + validate.NoSeparator(), + stringvalidator.LengthBetween(1, 200), + }, + }, + "metrics_path": schema.StringAttribute{ + Description: "Specifies the job scraping url path.", + 
Computed: true, + }, + + "scheme": schema.StringAttribute{ + Description: "Specifies the http scheme.", + Computed: true, + }, + + "scrape_interval": schema.StringAttribute{ + Description: "Specifies the scrape interval as duration string.", + Validators: []validator.String{ + stringvalidator.LengthBetween(2, 8), + }, + Computed: true, + }, + + "scrape_timeout": schema.StringAttribute{ + Description: "Specifies the scrape timeout as duration string.", + Computed: true, + }, + "saml2": schema.SingleNestedAttribute{ + Description: "A SAML2 configuration block", + Computed: true, + Attributes: map[string]schema.Attribute{ + "enable_url_parameters": schema.BoolAttribute{ + Description: "Are URL parameters be enabled?", + Computed: true, + }, + }, + }, + "basic_auth": schema.SingleNestedAttribute{ + Description: "A basic authentication block.", + Computed: true, + Attributes: map[string]schema.Attribute{ + "username": schema.StringAttribute{ + Description: "Specifies basic auth username.", + Computed: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 200), + }, + }, + "password": schema.StringAttribute{ + Description: "Specifies basic auth password.", + Computed: true, + Sensitive: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 200), + }, + }, + }, + }, + "targets": schema.ListNestedAttribute{ + Description: "The targets list (specified by the static config).", + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "urls": schema.ListAttribute{ + Description: "Specifies target URLs.", + Computed: true, + ElementType: types.StringType, + Validators: []validator.List{ + listvalidator.ValueStringsAre( + stringvalidator.LengthBetween(1, 500), + ), + }, + }, + "labels": schema.MapAttribute{ + Description: "Specifies labels.", + Computed: true, + ElementType: types.StringType, + Validators: []validator.Map{ + mapvalidator.SizeAtMost(10), + 
mapvalidator.ValueStringsAre(stringvalidator.LengthBetween(0, 200)), + mapvalidator.KeysAre(stringvalidator.LengthBetween(0, 200)), + }, + }, + }, + }, + }, + }, + } +} + +// Read refreshes the Terraform state with the latest data. +func (d *scrapeConfigDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.Config.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + scName := model.Name.ValueString() + + scResp, err := d.client.GetScrapeConfig(ctx, instanceId, scName, projectId).Execute() + if err != nil { + core.LogAndAddError(ctx, &diags, "Unable to read scrape config", err.Error()) + return + } + + err = mapFields(scResp.Data, &model) + if err != nil { + core.LogAndAddError(ctx, &diags, "Mapping fields", err.Error()) + return + } + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } +} diff --git a/stackit/services/argus/scrapeconfig/resource.go b/stackit/services/argus/scrapeconfig/resource.go new file mode 100644 index 00000000..4846a96f --- /dev/null +++ b/stackit/services/argus/scrapeconfig/resource.go @@ -0,0 +1,676 @@ +package argus + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/mapvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/argus" + "github.com/stackitcloud/terraform-provider-stackit/stackit/conversion" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" +) + +const ( + DefaultScheme = "https" // API default is "http" + DefaultScrapeInterval = "5m" + DefaultScrapeTimeout = "2m" + DefaultSAML2EnableURLParameters = true +) + +// Ensure the implementation satisfies the expected interfaces. 
+var ( + _ resource.Resource = &scrapeConfigResource{} + _ resource.ResourceWithConfigure = &scrapeConfigResource{} + _ resource.ResourceWithImportState = &scrapeConfigResource{} +) + +type Model struct { + Id types.String `tfsdk:"id"` // needed by TF + ProjectId types.String `tfsdk:"project_id"` + InstanceId types.String `tfsdk:"instance_id"` + Name types.String `tfsdk:"name"` + MetricsPath types.String `tfsdk:"metrics_path"` + Scheme types.String `tfsdk:"scheme"` + ScrapeInterval types.String `tfsdk:"scrape_interval"` + ScrapeTimeout types.String `tfsdk:"scrape_timeout"` + SAML2 *SAML2 `tfsdk:"saml2"` + BasicAuth *BasicAuth `tfsdk:"basic_auth"` + Targets []Target `tfsdk:"targets"` +} + +type SAML2 struct { + EnableURLParameters types.Bool `tfsdk:"enable_url_parameters"` +} + +type Target struct { + URLs []types.String `tfsdk:"urls"` + Labels types.Map `tfsdk:"labels"` +} + +type BasicAuth struct { + Username types.String `tfsdk:"username"` + Password types.String `tfsdk:"password"` +} + +// NewScrapeConfigResource is a helper function to simplify the provider implementation. +func NewScrapeConfigResource() resource.Resource { + return &scrapeConfigResource{} +} + +// scrapeConfigResource is the resource implementation. +type scrapeConfigResource struct { + client *argus.APIClient +} + +// Metadata returns the resource type name. +func (r *scrapeConfigResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_argus_scrapeconfig" +} + +// Configure adds the provider configured client to the resource. +func (r *scrapeConfigResource) Configure(_ context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. 
+ if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Resource Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. Please report this issue to the provider developers.", req.ProviderData)) + return + } + + var apiClient *argus.APIClient + var err error + if providerData.ArgusCustomEndpoint != "" { + apiClient, err = argus.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.ArgusCustomEndpoint), + ) + } else { + apiClient, err = argus.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + r.client = apiClient +} + +// Schema defines the schema for the resource. +func (r *scrapeConfigResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: "Terraform's internal resource ID.", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "project_id": schema.StringAttribute{ + Description: "STACKIT project ID to which the scraping job is associated.", + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "instance_id": schema.StringAttribute{ + Description: "Argus instance ID to which the scraping job is associated.", + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "name": schema.StringAttribute{ + Description: "Specifies the name of the scraping 
job.", + Required: true, + Validators: []validator.String{ + validate.NoSeparator(), + stringvalidator.LengthBetween(1, 200), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "metrics_path": schema.StringAttribute{ + Description: "Specifies the job scraping url path. E.g. `/metrics`.", + Required: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 200), + }, + }, + + "scheme": schema.StringAttribute{ + Description: "Specifies the http scheme. E.g. `https`.", + Optional: true, + Computed: true, + Default: stringdefault.StaticString(DefaultScheme), + }, + "scrape_interval": schema.StringAttribute{ + Description: "Specifies the scrape interval as duration string. E.g. `5m`.", + Optional: true, + Computed: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(2, 8), + }, + Default: stringdefault.StaticString(DefaultScrapeInterval), + }, + "scrape_timeout": schema.StringAttribute{ + Description: "Specifies the scrape timeout as duration string. 
E.g.`2m`.", + Optional: true, + Computed: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(2, 8), + }, + Default: stringdefault.StaticString(DefaultScrapeTimeout), + }, + "saml2": schema.SingleNestedAttribute{ + Description: "A SAML2 configuration block.", + Optional: true, + Attributes: map[string]schema.Attribute{ + "enable_url_parameters": schema.BoolAttribute{ + Description: "Are URL parameters be enabled?", + Optional: true, + Computed: true, + Default: booldefault.StaticBool(DefaultSAML2EnableURLParameters), + }, + }, + }, + "basic_auth": schema.SingleNestedAttribute{ + Description: "A basic authentication block.", + Optional: true, + Attributes: map[string]schema.Attribute{ + "username": schema.StringAttribute{ + Description: "Specifies basic auth username.", + Required: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 200), + }, + }, + "password": schema.StringAttribute{ + Description: "Specifies basic auth password.", + Required: true, + Sensitive: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 200), + }, + }, + }, + }, + "targets": schema.ListNestedAttribute{ + Description: "The targets list (specified by the static config).", + Required: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "urls": schema.ListAttribute{ + Description: "Specifies target URLs.", + Required: true, + ElementType: types.StringType, + Validators: []validator.List{ + listvalidator.ValueStringsAre( + stringvalidator.LengthBetween(1, 500), + ), + }, + }, + "labels": schema.MapAttribute{ + Description: "Specifies labels.", + Optional: true, + ElementType: types.StringType, + Validators: []validator.Map{ + mapvalidator.SizeAtMost(10), + mapvalidator.ValueStringsAre(stringvalidator.LengthBetween(0, 200)), + mapvalidator.KeysAre(stringvalidator.LengthBetween(0, 200)), + }, + }, + }, + }, + }, + }, + } +} + +// Create creates the resource and sets the initial 
Terraform state. +func (r *scrapeConfigResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform + // Retrieve values from plan + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + scName := model.Name.ValueString() + + // Generate API request body from model + payload, err := toCreatePayload(ctx, &model) + if err != nil { + resp.Diagnostics.AddError("Error creating scrape config", fmt.Sprintf("Creating API payload: %v", err)) + return + } + _, err = r.client.CreateScrapeConfig(ctx, instanceId, projectId).CreateScrapeConfigPayload(*payload).Execute() + if err != nil { + resp.Diagnostics.AddError("Error creating scrape config", fmt.Sprintf("Calling API: %v", err)) + return + } + _, err = argus.CreateScrapeConfigWaitHandler(ctx, r.client, instanceId, scName, projectId).SetTimeout(3 * time.Minute).WaitWithContext(ctx) + if err != nil { + resp.Diagnostics.AddError("Error creating scrape config", fmt.Sprintf("ScrapeConfig creation waiting: %v", err)) + return + } + got, err := r.client.GetScrapeConfig(ctx, instanceId, scName, projectId).Execute() + if err != nil { + resp.Diagnostics.AddError("Error creating scrape config", fmt.Sprintf("ScrapeConfig creation waiting: %v", err)) + return + } + err = mapFields(got.Data, &model) + if err != nil { + resp.Diagnostics.AddError("Error mapping fields", fmt.Sprintf("Project id %s, ScrapeConfig id %s: %v", projectId, scName, err)) + return + } + // Set state to fully populated data + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "ARGUS scrape config created") +} + +// Read refreshes the Terraform state with the latest data. 
+func (r *scrapeConfigResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + scName := model.Name.ValueString() + + scResp, err := r.client.GetScrapeConfig(ctx, instanceId, scName, projectId).Execute() + if err != nil { + resp.Diagnostics.AddError("Error reading scrape config", fmt.Sprintf("Project id = %s, instance id = %s, scrape config name = %s: %v", projectId, instanceId, scName, err)) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(scResp.Data, &model) + if err != nil { + resp.Diagnostics.AddError("Error mapping fields", fmt.Sprintf("Project id = %s, instance id = %s, sc name = %s: %v", projectId, instanceId, scName, err)) + return + } + // Set refreshed model + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "ARGUS scrape config read") +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *scrapeConfigResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform + // Retrieve values from plan + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + scName := model.Name.ValueString() + + // Generate API request body from model + payload, err := toUpdatePayload(ctx, &model) + if err != nil { + resp.Diagnostics.AddError("Error updating scrape config", fmt.Sprintf("Could not create API payload: %v", err)) + return + } + _, err = r.client.UpdateScrapeConfig(ctx, instanceId, scName, projectId).UpdateScrapeConfigPayload(*payload).Execute() + if err != nil { + resp.Diagnostics.AddError("Error updating scrape config", fmt.Sprintf("Project id = %s, instance id = %s, scrape config name = %s: %v", projectId, instanceId, scName, err)) + return + } + // We do not have an update status provided by the argus scrape config api, so we cannot use a waiter here, hence a simple sleep is used. + time.Sleep(15 * time.Second) + + // Fetch updated ScrapeConfig + scResp, err := r.client.GetScrapeConfig(ctx, instanceId, scName, projectId).Execute() + if err != nil { + resp.Diagnostics.AddError("Error reading updated data", fmt.Sprintf("Project id %s, instance id %s, jo name %s: %v", projectId, instanceId, scName, err)) + return + } + err = mapFields(scResp.Data, &model) + if err != nil { + resp.Diagnostics.AddError("Error mapping fields in update", "project id = "+projectId+", instance id = "+instanceId+", scrape config name = "+scName+", "+err.Error()) + return + } + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "ARGUS scrape config updated") +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *scrapeConfigResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform + // Retrieve values from state + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + scName := model.Name.ValueString() + + // Delete existing ScrapeConfig + _, err := r.client.DeleteScrapeConfig(ctx, instanceId, scName, projectId).Execute() + if err != nil { + resp.Diagnostics.AddError("Error deleting scrape config", "project id = "+projectId+", instance id = "+instanceId+", scrape config name = "+scName+", "+err.Error()) + return + } + _, err = argus.DeleteScrapeConfigWaitHandler(ctx, r.client, instanceId, scName, projectId).SetTimeout(1 * time.Minute).WaitWithContext(ctx) + if err != nil { + resp.Diagnostics.AddError("Error deleting scrape config", fmt.Sprintf("ScrapeConfig deletion waiting: %v", err)) + return + } + tflog.Info(ctx, "ARGUS scrape config deleted") +} + +// ImportState imports a resource into the Terraform state on success. +// The expected format of the resource import identifier is: project_id,instance_id,name +func (r *scrapeConfigResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + idParts := strings.Split(req.ID, core.Separator) + + if len(idParts) != 3 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" { + resp.Diagnostics.AddError( + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format: [project_id],[instance_id],[name] Got: %q", req.ID), + ) + return + } + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), idParts[1])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("name"), idParts[2])...) 
+ tflog.Info(ctx, "ARGUS scrape config state imported") +} + +func mapFields(sc *argus.Job, model *Model) error { + if sc == nil { + return fmt.Errorf("response input is nil") + } + if model == nil { + return fmt.Errorf("model input is nil") + } + + var scName string + if model.Name.ValueString() != "" { + scName = model.Name.ValueString() + } else if sc.JobName != nil { + scName = *sc.JobName + } else { + return fmt.Errorf("scrape config name not present") + } + + idParts := []string{ + model.ProjectId.ValueString(), + model.InstanceId.ValueString(), + scName, + } + model.Id = types.StringValue( + strings.Join(idParts, core.Separator), + ) + model.Name = types.StringValue(scName) + + model.MetricsPath = types.StringPointerValue(sc.MetricsPath) + model.Scheme = types.StringPointerValue(sc.Scheme) + model.ScrapeInterval = types.StringPointerValue(sc.ScrapeInterval) + model.ScrapeTimeout = types.StringPointerValue(sc.ScrapeTimeout) + handleSAML2(sc, model) + handleBasicAuth(sc, model) + handleTargets(sc, model) + return nil +} + +func handleBasicAuth(sc *argus.Job, model *Model) { + if sc.BasicAuth == nil { + model.BasicAuth = nil + return + } + model.BasicAuth = &BasicAuth{ + Username: types.StringPointerValue(sc.BasicAuth.Username), + Password: types.StringPointerValue(sc.BasicAuth.Password), + } +} + +func handleSAML2(sc *argus.Job, model *Model) { + if (sc.Params == nil || *sc.Params == nil) && model.SAML2 == nil { + return + } + + if model.SAML2 == nil { + model.SAML2 = &SAML2{} + } + + flag := true + if sc.Params == nil || *sc.Params == nil { + return + } + p := *sc.Params + if v, ok := p["saml2"]; ok { + if len(v) == 1 && v[0] == "disabled" { + flag = false + } + } + + model.SAML2 = &SAML2{ + EnableURLParameters: types.BoolValue(flag), + } +} + +func handleTargets(sc *argus.Job, model *Model) { + if sc == nil || sc.StaticConfigs == nil { + model.Targets = []Target{} + return + } + newTargets := []Target{} + for i, sc := range *sc.StaticConfigs { + nt := 
Target{ + URLs: []types.String{}, + } + if sc.Targets != nil { + for _, v := range *sc.Targets { + nt.URLs = append(nt.URLs, types.StringValue(v)) + } + } + + if len(model.Targets) > i && model.Targets[i].Labels.IsNull() || sc.Labels == nil { + nt.Labels = types.MapNull(types.StringType) + } else { + newl := map[string]attr.Value{} + for k, v := range *sc.Labels { + newl[k] = types.StringValue(v) + } + nt.Labels = types.MapValueMust(types.StringType, newl) + } + newTargets = append(newTargets, nt) + } + model.Targets = newTargets +} + +func toCreatePayload(ctx context.Context, model *Model) (*argus.CreateScrapeConfigPayload, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + + sc := argus.CreateScrapeConfigPayload{ + JobName: model.Name.ValueStringPointer(), + MetricsPath: model.MetricsPath.ValueStringPointer(), + ScrapeInterval: model.ScrapeInterval.ValueStringPointer(), + ScrapeTimeout: model.ScrapeTimeout.ValueStringPointer(), + Scheme: model.Scheme.ValueStringPointer(), + } + setDefaultsCreateScrapeConfig(&sc, model) + + if model.SAML2 != nil && !model.SAML2.EnableURLParameters.ValueBool() { + m := make(map[string]interface{}) + if sc.Params != nil { + m = *sc.Params + } + m["saml2"] = []string{"disabled"} + sc.Params = &m + } + + if model.BasicAuth != nil { + if sc.BasicAuth == nil { + sc.BasicAuth = &argus.UpdateScrapeConfigPayloadBasicAuth{ + Username: model.BasicAuth.Username.ValueStringPointer(), + Password: model.BasicAuth.Password.ValueStringPointer(), + } + } + } + + t := make([]argus.CreateScrapeConfigPayloadStaticConfigsInner, len(model.Targets)) + for i, target := range model.Targets { + ti := argus.CreateScrapeConfigPayloadStaticConfigsInner{} + tgts := []string{} + for _, v := range target.URLs { + tgts = append(tgts, v.ValueString()) + } + ti.Targets = &tgts + + ls := map[string]interface{}{} + for k, v := range target.Labels.Elements() { + ls[k], _ = conversion.ToString(ctx, v) + } + ti.Labels = &ls + t[i] = ti + } + 
sc.StaticConfigs = &t + return &sc, nil +} + +func setDefaultsCreateScrapeConfig(sc *argus.CreateScrapeConfigPayload, model *Model) { + if sc == nil { + return + } + if model.Scheme.IsNull() || model.Scheme.IsUnknown() { + sc.Scheme = utils.Ptr(DefaultScheme) + } + if model.ScrapeInterval.IsNull() || model.ScrapeInterval.IsUnknown() { + sc.ScrapeInterval = utils.Ptr(DefaultScrapeInterval) + } + if model.ScrapeTimeout.IsNull() || model.ScrapeTimeout.IsUnknown() { + sc.ScrapeTimeout = utils.Ptr(DefaultScrapeTimeout) + } + // Make the API default more explicit by setting the field. + if model.SAML2 == nil || model.SAML2.EnableURLParameters.IsNull() || model.SAML2.EnableURLParameters.IsUnknown() { + m := map[string]interface{}{} + if sc.Params != nil { + m = *sc.Params + } + if DefaultSAML2EnableURLParameters { + m["saml2"] = []string{"enabled"} + } else { + m["saml2"] = []string{"disabled"} + } + sc.Params = &m + } +} + +func toUpdatePayload(ctx context.Context, model *Model) (*argus.UpdateScrapeConfigPayload, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + + sc := argus.UpdateScrapeConfigPayload{ + MetricsPath: model.MetricsPath.ValueStringPointer(), + ScrapeInterval: model.ScrapeInterval.ValueStringPointer(), + ScrapeTimeout: model.ScrapeTimeout.ValueStringPointer(), + Scheme: model.Scheme.ValueStringPointer(), + } + setDefaultsUpdateScrapeConfig(&sc, model) + + if model.SAML2 != nil && !model.SAML2.EnableURLParameters.ValueBool() { + m := make(map[string]interface{}) + if sc.Params != nil { + m = *sc.Params + } + m["saml2"] = []string{"disabled"} + sc.Params = &m + } + + if model.BasicAuth != nil { + if sc.BasicAuth == nil { + sc.BasicAuth = &argus.UpdateScrapeConfigPayloadBasicAuth{ + Username: model.BasicAuth.Username.ValueStringPointer(), + Password: model.BasicAuth.Password.ValueStringPointer(), + } + } + } + + t := make([]argus.UpdateScrapeConfigPayloadStaticConfigsInner, len(model.Targets)) + for i, target := range model.Targets { + 
ti := argus.UpdateScrapeConfigPayloadStaticConfigsInner{} + tgts := []string{} + for _, v := range target.URLs { + tgts = append(tgts, v.ValueString()) + } + ti.Targets = &tgts + + ls := map[string]interface{}{} + for k, v := range target.Labels.Elements() { + ls[k], _ = conversion.ToString(ctx, v) + } + ti.Labels = &ls + t[i] = ti + } + sc.StaticConfigs = &t + return &sc, nil +} + +func setDefaultsUpdateScrapeConfig(sc *argus.UpdateScrapeConfigPayload, model *Model) { + if sc == nil { + return + } + if model.Scheme.IsNull() || model.Scheme.IsUnknown() { + sc.Scheme = utils.Ptr(DefaultScheme) + } + if model.ScrapeInterval.IsNull() || model.ScrapeInterval.IsUnknown() { + sc.ScrapeInterval = utils.Ptr(DefaultScrapeInterval) + } + if model.ScrapeTimeout.IsNull() || model.ScrapeTimeout.IsUnknown() { + sc.ScrapeTimeout = utils.Ptr(DefaultScrapeTimeout) + } +} diff --git a/stackit/services/argus/scrapeconfig/resource_test.go b/stackit/services/argus/scrapeconfig/resource_test.go new file mode 100644 index 00000000..ae3e6886 --- /dev/null +++ b/stackit/services/argus/scrapeconfig/resource_test.go @@ -0,0 +1,272 @@ +package argus + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/argus" +) + +func TestMapFields(t *testing.T) { + tests := []struct { + description string + input *argus.Job + expected Model + isValid bool + }{ + { + "default_ok", + &argus.Job{ + JobName: utils.Ptr("name"), + }, + Model{ + Id: types.StringValue("pid,iid,name"), + ProjectId: types.StringValue("pid"), + InstanceId: types.StringValue("iid"), + Name: types.StringValue("name"), + MetricsPath: types.StringNull(), + Scheme: types.StringNull(), + ScrapeInterval: types.StringNull(), + ScrapeTimeout: types.StringNull(), + SAML2: nil, + BasicAuth: nil, + Targets: 
[]Target{}, + }, + true, + }, + { + description: "values_ok", + input: &argus.Job{ + JobName: utils.Ptr("name"), + MetricsPath: utils.Ptr("/m"), + BasicAuth: &argus.BasicAuth{ + Password: utils.Ptr("p"), + Username: utils.Ptr("u"), + }, + Params: &map[string][]string{"saml2": {"disabled"}, "x": {"y", "z"}}, + Scheme: utils.Ptr("scheme"), + ScrapeInterval: utils.Ptr("1"), + ScrapeTimeout: utils.Ptr("2"), + StaticConfigs: &[]argus.StaticConfigs{ + { + Labels: &map[string]string{"k1": "v1"}, + Targets: &[]string{"url1"}, + }, + { + Labels: &map[string]string{"k2": "v2", "k3": "v3"}, + Targets: &[]string{"url1", "url3"}, + }, + { + Labels: nil, + Targets: &[]string{}, + }, + }, + }, + expected: Model{ + Id: types.StringValue("pid,iid,name"), + ProjectId: types.StringValue("pid"), + InstanceId: types.StringValue("iid"), + Name: types.StringValue("name"), + MetricsPath: types.StringValue("/m"), + Scheme: types.StringValue("scheme"), + ScrapeInterval: types.StringValue("1"), + ScrapeTimeout: types.StringValue("2"), + SAML2: &SAML2{ + EnableURLParameters: types.BoolValue(false), + }, + BasicAuth: &BasicAuth{ + Username: types.StringValue("u"), + Password: types.StringValue("p"), + }, + Targets: []Target{ + { + URLs: []types.String{types.StringValue("url1")}, + Labels: types.MapValueMust(types.StringType, map[string]attr.Value{ + "k1": types.StringValue("v1"), + }), + }, + { + URLs: []types.String{types.StringValue("url1"), types.StringValue("url3")}, + Labels: types.MapValueMust(types.StringType, map[string]attr.Value{ + "k2": types.StringValue("v2"), + "k3": types.StringValue("v3"), + }), + }, + { + URLs: []types.String{}, + Labels: types.MapNull(types.StringType), + }, + }, + }, + isValid: true, + }, + { + "response_nil_fail", + nil, + Model{}, + false, + }, + { + "no_resource_id", + &argus.Job{}, + Model{}, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + state := &Model{ + ProjectId: tt.expected.ProjectId, + InstanceId: 
tt.expected.InstanceId, + } + err := mapFields(tt.input, state) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(state, &tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func TestToCreatePayload(t *testing.T) { + tests := []struct { + description string + input *Model + expected *argus.CreateScrapeConfigPayload + isValid bool + }{ + { + "basic_ok", + &Model{ + MetricsPath: types.StringValue("/metrics"), + }, + &argus.CreateScrapeConfigPayload{ + MetricsPath: utils.Ptr("/metrics"), + // Defaults + Scheme: utils.Ptr("https"), + ScrapeInterval: utils.Ptr("5m"), + ScrapeTimeout: utils.Ptr("2m"), + StaticConfigs: &[]argus.CreateScrapeConfigPayloadStaticConfigsInner{}, + Params: &map[string]any{"saml2": []string{"enabled"}}, + }, + true, + }, + { + "ok", + &Model{ + MetricsPath: types.StringValue("/metrics"), + Name: types.StringValue("Name"), + }, + &argus.CreateScrapeConfigPayload{ + MetricsPath: utils.Ptr("/metrics"), + JobName: utils.Ptr("Name"), + // Defaults + Scheme: utils.Ptr("https"), + ScrapeInterval: utils.Ptr("5m"), + ScrapeTimeout: utils.Ptr("2m"), + StaticConfigs: &[]argus.CreateScrapeConfigPayloadStaticConfigsInner{}, + Params: &map[string]any{"saml2": []string{"enabled"}}, + }, + true, + }, + { + "nil_model", + nil, + nil, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + output, err := toCreatePayload(context.Background(), tt.input) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(output, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func TestToUpdatePayload(t *testing.T) { + tests := []struct { + description string + input *Model + 
expected *argus.UpdateScrapeConfigPayload + isValid bool + }{ + { + "basic_ok", + &Model{ + MetricsPath: types.StringValue("/metrics"), + }, + &argus.UpdateScrapeConfigPayload{ + MetricsPath: utils.Ptr("/metrics"), + // Defaults + Scheme: utils.Ptr("https"), + ScrapeInterval: utils.Ptr("5m"), + ScrapeTimeout: utils.Ptr("2m"), + StaticConfigs: &[]argus.UpdateScrapeConfigPayloadStaticConfigsInner{}, + }, + true, + }, + { + "ok", + &Model{ + MetricsPath: types.StringValue("/metrics"), + Scheme: types.StringValue("http"), + }, + &argus.UpdateScrapeConfigPayload{ + MetricsPath: utils.Ptr("/metrics"), + // Defaults + Scheme: utils.Ptr("http"), + ScrapeInterval: utils.Ptr("5m"), + ScrapeTimeout: utils.Ptr("2m"), + StaticConfigs: &[]argus.UpdateScrapeConfigPayloadStaticConfigsInner{}, + }, + true, + }, + { + "nil_model", + nil, + nil, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + output, err := toUpdatePayload(context.Background(), tt.input) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(output, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} diff --git a/stackit/services/dns/dns_acc_test.go b/stackit/services/dns/dns_acc_test.go new file mode 100644 index 00000000..960d3e36 --- /dev/null +++ b/stackit/services/dns/dns_acc_test.go @@ -0,0 +1,557 @@ +package dns_test + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/dns" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + 
"github.com/stackitcloud/terraform-provider-stackit/stackit/testutil" +) + +// Zone resource data +var zoneResource = map[string]string{ + "project_id": testutil.ProjectId, + "name": testutil.ResourceNameWithDateTime("zone"), + "dns_name": fmt.Sprintf("www.%s.com", acctest.RandStringFromCharSet(20, acctest.CharSetAlpha)), + "dns_name_min": fmt.Sprintf("www.%s.com", acctest.RandStringFromCharSet(20, acctest.CharSetAlpha)), + "description": "my description", + "acl": "192.168.0.0/24", + "active": "true", + "contact_email": "aa@bb.cc", + "ttl": "12", + "ttl_updated": "4440", + "expire_time": "123456", + "is_reverse_zone": "false", + "negative_cache": "60", + "primaries": "1.2.3.4", + "refresh_time": "500", + "retry_time": "700", + "type": "primary", +} + +// Record set resource data +var recordSetResource = map[string]string{ + "name": fmt.Sprintf("tf-acc-%s.%s.", acctest.RandStringFromCharSet(5, acctest.CharSetAlpha), zoneResource["dns_name"]), + "name_min": fmt.Sprintf("tf-acc-%s.%s.", acctest.RandStringFromCharSet(5, acctest.CharSetAlpha), zoneResource["dns_name_min"]), + "records": `"1.2.3.4"`, + "records_updated": `"5.6.7.8", "9.10.11.12"`, + "ttl": "3700", + "type": "A", + "active": "true", + "comment": "a comment", +} + +func inputConfig(zoneName, ttl, records string) string { + return fmt.Sprintf(` + %s + + resource "stackit_dns_zone" "zone" { + project_id = "%s" + name = "%s" + dns_name = "%s" + description = "%s" + acl = "%s" + active = %s + contact_email = "%s" + default_ttl = %s + expire_time = %s + is_reverse_zone = %s + negative_cache = %s + primaries = ["%s"] + refresh_time = %s + retry_time = %s + type = "%s" + } + + resource "stackit_dns_record_set" "record_set" { + project_id = stackit_dns_zone.zone.project_id + zone_id = stackit_dns_zone.zone.zone_id + name = "%s" + records = [%s] + type = "%s" + ttl = %s + comment = "%s" + active = %s + + } + `, + testutil.DnsProviderConfig(), + zoneResource["project_id"], + zoneName, + zoneResource["dns_name"], + 
zoneResource["description"], + zoneResource["acl"], + zoneResource["active"], + zoneResource["contact_email"], + ttl, + zoneResource["expire_time"], + zoneResource["is_reverse_zone"], + zoneResource["negative_cache"], + zoneResource["primaries"], + zoneResource["refresh_time"], + zoneResource["retry_time"], + zoneResource["type"], + recordSetResource["name"], + records, + recordSetResource["type"], + recordSetResource["ttl"], + recordSetResource["comment"], + recordSetResource["active"], + ) +} + +func TestAccDnsResource(t *testing.T) { + resource.ParallelTest(t, resource.TestCase{ + ProtoV6ProviderFactories: testutil.TestAccProtoV6ProviderFactories, + CheckDestroy: testAccCheckDnsDestroy, + Steps: []resource.TestStep{ + // Creation + { + Config: inputConfig(zoneResource["name"], zoneResource["ttl"], recordSetResource["records"]), + Check: resource.ComposeAggregateTestCheckFunc( + // Zone data + resource.TestCheckResourceAttr("stackit_dns_zone.zone", "project_id", zoneResource["project_id"]), + resource.TestCheckResourceAttrSet("stackit_dns_zone.zone", "zone_id"), + resource.TestCheckResourceAttr("stackit_dns_zone.zone", "name", zoneResource["name"]), + resource.TestCheckResourceAttr("stackit_dns_zone.zone", "dns_name", zoneResource["dns_name"]), + resource.TestCheckResourceAttr("stackit_dns_zone.zone", "description", zoneResource["description"]), + resource.TestCheckResourceAttr("stackit_dns_zone.zone", "acl", zoneResource["acl"]), + resource.TestCheckResourceAttr("stackit_dns_zone.zone", "active", zoneResource["active"]), + resource.TestCheckResourceAttr("stackit_dns_zone.zone", "contact_email", zoneResource["contact_email"]), + resource.TestCheckResourceAttr("stackit_dns_zone.zone", "default_ttl", zoneResource["ttl"]), + resource.TestCheckResourceAttr("stackit_dns_zone.zone", "expire_time", zoneResource["expire_time"]), + resource.TestCheckResourceAttr("stackit_dns_zone.zone", "is_reverse_zone", zoneResource["is_reverse_zone"]), + 
resource.TestCheckResourceAttr("stackit_dns_zone.zone", "negative_cache", zoneResource["negative_cache"]), + resource.TestCheckResourceAttr("stackit_dns_zone.zone", "primaries.#", "1"), + resource.TestCheckResourceAttr("stackit_dns_zone.zone", "primaries.0", zoneResource["primaries"]), + resource.TestCheckResourceAttr("stackit_dns_zone.zone", "refresh_time", zoneResource["refresh_time"]), + resource.TestCheckResourceAttr("stackit_dns_zone.zone", "retry_time", zoneResource["retry_time"]), + resource.TestCheckResourceAttr("stackit_dns_zone.zone", "type", zoneResource["type"]), + resource.TestCheckResourceAttrSet("stackit_dns_zone.zone", "primary_name_server"), + resource.TestCheckResourceAttrSet("stackit_dns_zone.zone", "serial_number"), + resource.TestCheckResourceAttrSet("stackit_dns_zone.zone", "visibility"), + resource.TestCheckResourceAttrSet("stackit_dns_zone.zone", "state"), + + // Record set data + resource.TestCheckResourceAttrPair( + "stackit_dns_record_set.record_set", "project_id", + "stackit_dns_zone.zone", "project_id", + ), + resource.TestCheckResourceAttrPair( + "stackit_dns_record_set.record_set", "zone_id", + "stackit_dns_zone.zone", "zone_id", + ), + resource.TestCheckResourceAttrSet("stackit_dns_record_set.record_set", "record_set_id"), + resource.TestCheckResourceAttr("stackit_dns_record_set.record_set", "name", recordSetResource["name"]), + resource.TestCheckResourceAttr("stackit_dns_record_set.record_set", "records.#", "1"), + resource.TestCheckResourceAttr("stackit_dns_record_set.record_set", "records.0", strings.ReplaceAll(recordSetResource["records"], "\"", "")), + resource.TestCheckResourceAttr("stackit_dns_record_set.record_set", "type", recordSetResource["type"]), + resource.TestCheckResourceAttr("stackit_dns_record_set.record_set", "ttl", recordSetResource["ttl"]), + resource.TestCheckResourceAttr("stackit_dns_record_set.record_set", "comment", recordSetResource["comment"]), + 
resource.TestCheckResourceAttr("stackit_dns_record_set.record_set", "active", recordSetResource["active"]), + ), + }, + // Data sources + { + Config: fmt.Sprintf(` + %s + + data "stackit_dns_zone" "zone" { + project_id = stackit_dns_zone.zone.project_id + zone_id = stackit_dns_zone.zone.zone_id + } + + data "stackit_dns_record_set" "record_set" { + project_id = stackit_dns_zone.zone.project_id + zone_id = stackit_dns_zone.zone.zone_id + record_set_id = stackit_dns_record_set.record_set.record_set_id + }`, + inputConfig(zoneResource["name"], zoneResource["ttl"], recordSetResource["records"]), + ), + Check: resource.ComposeAggregateTestCheckFunc( + // Zone data + resource.TestCheckResourceAttr("data.stackit_dns_zone.zone", "project_id", zoneResource["project_id"]), + resource.TestCheckResourceAttrPair( + "stackit_dns_zone.zone", "zone_id", + "data.stackit_dns_zone.zone", "zone_id", + ), + resource.TestCheckResourceAttrPair( + "data.stackit_dns_record_set.record_set", "zone_id", + "data.stackit_dns_zone.zone", "zone_id", + ), + resource.TestCheckResourceAttrPair( + "data.stackit_dns_record_set.record_set", "project_id", + "data.stackit_dns_zone.zone", "project_id", + ), + resource.TestCheckResourceAttrPair( + "data.stackit_dns_record_set.record_set", "project_id", + "stackit_dns_record_set.record_set", "project_id", + ), + resource.TestCheckResourceAttr("data.stackit_dns_zone.zone", "name", zoneResource["name"]), + resource.TestCheckResourceAttr("data.stackit_dns_zone.zone", "default_ttl", zoneResource["ttl"]), + resource.TestCheckResourceAttr("data.stackit_dns_zone.zone", "dns_name", zoneResource["dns_name"]), + resource.TestCheckResourceAttr("data.stackit_dns_zone.zone", "description", zoneResource["description"]), + resource.TestCheckResourceAttr("data.stackit_dns_zone.zone", "acl", zoneResource["acl"]), + resource.TestCheckResourceAttr("data.stackit_dns_zone.zone", "active", zoneResource["active"]), + resource.TestCheckResourceAttr("data.stackit_dns_zone.zone", 
"contact_email", zoneResource["contact_email"]), + resource.TestCheckResourceAttr("data.stackit_dns_zone.zone", "default_ttl", zoneResource["ttl"]), + resource.TestCheckResourceAttr("data.stackit_dns_zone.zone", "expire_time", zoneResource["expire_time"]), + resource.TestCheckResourceAttr("data.stackit_dns_zone.zone", "is_reverse_zone", zoneResource["is_reverse_zone"]), + resource.TestCheckResourceAttr("data.stackit_dns_zone.zone", "negative_cache", zoneResource["negative_cache"]), + resource.TestCheckResourceAttr("data.stackit_dns_zone.zone", "primaries.#", "1"), + resource.TestCheckResourceAttr("data.stackit_dns_zone.zone", "primaries.0", zoneResource["primaries"]), + resource.TestCheckResourceAttr("data.stackit_dns_zone.zone", "refresh_time", zoneResource["refresh_time"]), + resource.TestCheckResourceAttr("data.stackit_dns_zone.zone", "retry_time", zoneResource["retry_time"]), + resource.TestCheckResourceAttr("data.stackit_dns_zone.zone", "type", zoneResource["type"]), + resource.TestCheckResourceAttrSet("data.stackit_dns_zone.zone", "primary_name_server"), + resource.TestCheckResourceAttrSet("data.stackit_dns_zone.zone", "serial_number"), + resource.TestCheckResourceAttrSet("data.stackit_dns_zone.zone", "visibility"), + resource.TestCheckResourceAttrSet("data.stackit_dns_zone.zone", "state"), + resource.TestCheckResourceAttr("data.stackit_dns_zone.zone", "record_count", "4"), + + // Record set data + resource.TestCheckResourceAttrSet("data.stackit_dns_record_set.record_set", "record_set_id"), + resource.TestCheckResourceAttr("data.stackit_dns_record_set.record_set", "name", recordSetResource["name"]), + resource.TestCheckResourceAttr("data.stackit_dns_record_set.record_set", "records.#", "1"), + resource.TestCheckResourceAttr("data.stackit_dns_record_set.record_set", "type", recordSetResource["type"]), + resource.TestCheckResourceAttr("data.stackit_dns_record_set.record_set", "ttl", recordSetResource["ttl"]), + 
resource.TestCheckResourceAttr("data.stackit_dns_record_set.record_set", "comment", recordSetResource["comment"]),
+					resource.TestCheckResourceAttr("data.stackit_dns_record_set.record_set", "active", recordSetResource["active"]),
+				),
+			},
+			// Import
+			{
+				ResourceName: "stackit_dns_zone.zone",
+				ImportStateIdFunc: func(s *terraform.State) (string, error) {
+					r, ok := s.RootModule().Resources["stackit_dns_zone.zone"]
+					if !ok {
+						// Fixed garbled resource name in the error message
+						// (was "stackit_dns_zone.recozonerd_set").
+						return "", fmt.Errorf("couldn't find resource stackit_dns_zone.zone")
+					}
+					zoneId, ok := r.Primary.Attributes["zone_id"]
+					if !ok {
+						return "", fmt.Errorf("couldn't find attribute zone_id")
+					}
+
+					return fmt.Sprintf("%s,%s", testutil.ProjectId, zoneId), nil
+				},
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+			{
+				ResourceName: "stackit_dns_record_set.record_set",
+				ImportStateIdFunc: func(s *terraform.State) (string, error) {
+					r, ok := s.RootModule().Resources["stackit_dns_record_set.record_set"]
+					if !ok {
+						return "", fmt.Errorf("couldn't find resource stackit_dns_record_set.record_set")
+					}
+					zoneId, ok := r.Primary.Attributes["zone_id"]
+					if !ok {
+						return "", fmt.Errorf("couldn't find attribute zone_id")
+					}
+					recordSetId, ok := r.Primary.Attributes["record_set_id"]
+					if !ok {
+						return "", fmt.Errorf("couldn't find attribute record_set_id")
+					}
+
+					return fmt.Sprintf("%s,%s,%s", testutil.ProjectId, zoneId, recordSetId), nil
+				},
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+			// Update. The zone ttl should not be updated according to the DNS API.
+ { + Config: inputConfig(zoneResource["name"], zoneResource["ttl"], recordSetResource["records_updated"]), + Check: resource.ComposeAggregateTestCheckFunc( + // Zone data + resource.TestCheckResourceAttr("stackit_dns_zone.zone", "project_id", zoneResource["project_id"]), + resource.TestCheckResourceAttrSet("stackit_dns_zone.zone", "zone_id"), + resource.TestCheckResourceAttr("stackit_dns_zone.zone", "name", zoneResource["name"]), + resource.TestCheckResourceAttr("stackit_dns_zone.zone", "dns_name", zoneResource["dns_name"]), + resource.TestCheckResourceAttr("stackit_dns_zone.zone", "description", zoneResource["description"]), + resource.TestCheckResourceAttr("stackit_dns_zone.zone", "acl", zoneResource["acl"]), + resource.TestCheckResourceAttr("stackit_dns_zone.zone", "active", zoneResource["active"]), + resource.TestCheckResourceAttr("stackit_dns_zone.zone", "contact_email", zoneResource["contact_email"]), + resource.TestCheckResourceAttr("stackit_dns_zone.zone", "default_ttl", zoneResource["ttl"]), + resource.TestCheckResourceAttr("stackit_dns_zone.zone", "expire_time", zoneResource["expire_time"]), + resource.TestCheckResourceAttr("stackit_dns_zone.zone", "is_reverse_zone", zoneResource["is_reverse_zone"]), + resource.TestCheckResourceAttr("stackit_dns_zone.zone", "negative_cache", zoneResource["negative_cache"]), + resource.TestCheckResourceAttr("stackit_dns_zone.zone", "primaries.#", "1"), + resource.TestCheckResourceAttr("stackit_dns_zone.zone", "primaries.0", zoneResource["primaries"]), + resource.TestCheckResourceAttr("stackit_dns_zone.zone", "refresh_time", zoneResource["refresh_time"]), + resource.TestCheckResourceAttr("stackit_dns_zone.zone", "retry_time", zoneResource["retry_time"]), + resource.TestCheckResourceAttr("stackit_dns_zone.zone", "type", zoneResource["type"]), + resource.TestCheckResourceAttrSet("stackit_dns_zone.zone", "primary_name_server"), + resource.TestCheckResourceAttrSet("stackit_dns_zone.zone", "serial_number"), + 
resource.TestCheckResourceAttrSet("stackit_dns_zone.zone", "visibility"), + resource.TestCheckResourceAttrSet("stackit_dns_zone.zone", "state"), + + // Record set data + resource.TestCheckResourceAttrPair( + "stackit_dns_record_set.record_set", "project_id", + "stackit_dns_zone.zone", "project_id", + ), + resource.TestCheckResourceAttrPair( + "stackit_dns_record_set.record_set", "zone_id", + "stackit_dns_zone.zone", "zone_id", + ), + resource.TestCheckResourceAttrSet("stackit_dns_record_set.record_set", "record_set_id"), + resource.TestCheckResourceAttr("stackit_dns_record_set.record_set", "name", recordSetResource["name"]), + resource.TestCheckResourceAttr("stackit_dns_record_set.record_set", "records.#", "2"), + resource.TestCheckResourceAttr("stackit_dns_record_set.record_set", "type", recordSetResource["type"]), + resource.TestCheckResourceAttr("stackit_dns_record_set.record_set", "ttl", recordSetResource["ttl"]), + resource.TestCheckResourceAttr("stackit_dns_record_set.record_set", "comment", recordSetResource["comment"]), + resource.TestCheckResourceAttr("stackit_dns_record_set.record_set", "active", recordSetResource["active"]), + ), + }, + // Deletion is done by the framework implicitly + }, + }) +} + +func inputConfigMinimal() string { + return fmt.Sprintf(` + %s + + resource "stackit_dns_zone" "zone_min" { + project_id = "%s" + name = "%s" + dns_name = "%s" + contact_email = "%s" + type = "%s" + acl = "%s" + } + + resource "stackit_dns_record_set" "record_set_min" { + project_id = stackit_dns_zone.zone_min.project_id + zone_id = stackit_dns_zone.zone_min.zone_id + name = "%s" + records = [%s] + type = "%s" + } + `, + testutil.DnsProviderConfig(), + zoneResource["project_id"], + zoneResource["name"], + zoneResource["dns_name_min"], + zoneResource["contact_email"], + zoneResource["type"], + zoneResource["acl"], + recordSetResource["name_min"], + recordSetResource["records"], + recordSetResource["type"], + ) +} + +func TestAccDnsMinimalResource(t *testing.T) 
{ + resource.ParallelTest(t, resource.TestCase{ + ProtoV6ProviderFactories: testutil.TestAccProtoV6ProviderFactories, + CheckDestroy: testAccCheckDnsDestroy, + Steps: []resource.TestStep{ + // Creation + { + Config: inputConfigMinimal(), + Check: resource.ComposeAggregateTestCheckFunc( + // Zone + resource.TestCheckResourceAttr("stackit_dns_zone.zone_min", "project_id", zoneResource["project_id"]), + resource.TestCheckResourceAttrSet("stackit_dns_zone.zone_min", "zone_id"), + resource.TestCheckResourceAttr("stackit_dns_zone.zone_min", "name", zoneResource["name"]), + resource.TestCheckResourceAttr("stackit_dns_zone.zone_min", "dns_name", zoneResource["dns_name_min"]), + resource.TestCheckResourceAttr("stackit_dns_zone.zone_min", "contact_email", zoneResource["contact_email"]), + resource.TestCheckResourceAttr("stackit_dns_zone.zone_min", "type", zoneResource["type"]), + resource.TestCheckResourceAttrSet("stackit_dns_zone.zone_min", "acl"), + resource.TestCheckResourceAttr("stackit_dns_zone.zone_min", "active", "true"), + resource.TestCheckResourceAttrSet("stackit_dns_zone.zone_min", "default_ttl"), + resource.TestCheckResourceAttrSet("stackit_dns_zone.zone_min", "expire_time"), + resource.TestCheckResourceAttr("stackit_dns_zone.zone_min", "is_reverse_zone", "false"), + resource.TestCheckResourceAttrSet("stackit_dns_zone.zone_min", "negative_cache"), + resource.TestCheckResourceAttr("stackit_dns_zone.zone_min", "primaries.#", "1"), + resource.TestCheckResourceAttrSet("stackit_dns_zone.zone_min", "refresh_time"), + resource.TestCheckResourceAttrSet("stackit_dns_zone.zone_min", "retry_time"), + resource.TestCheckResourceAttrSet("stackit_dns_zone.zone_min", "primary_name_server"), + resource.TestCheckResourceAttrSet("stackit_dns_zone.zone_min", "serial_number"), + resource.TestCheckResourceAttrSet("stackit_dns_zone.zone_min", "visibility"), + resource.TestCheckResourceAttrSet("stackit_dns_zone.zone_min", "state"), + + // Record set + resource.TestCheckResourceAttrPair( 
+ "stackit_dns_record_set.record_set_min", "project_id", + "stackit_dns_zone.zone_min", "project_id", + ), + resource.TestCheckResourceAttrPair( + "stackit_dns_record_set.record_set_min", "zone_id", + "stackit_dns_zone.zone_min", "zone_id", + ), + resource.TestCheckResourceAttrSet("stackit_dns_record_set.record_set_min", "record_set_id"), + resource.TestCheckResourceAttr("stackit_dns_record_set.record_set_min", "name", recordSetResource["name_min"]), + resource.TestCheckResourceAttr("stackit_dns_record_set.record_set_min", "records.#", "1"), + resource.TestCheckResourceAttr("stackit_dns_record_set.record_set_min", "records.0", strings.ReplaceAll(recordSetResource["records"], "\"", "")), + resource.TestCheckResourceAttr("stackit_dns_record_set.record_set_min", "type", recordSetResource["type"]), + resource.TestCheckResourceAttrSet("stackit_dns_record_set.record_set_min", "ttl"), + resource.TestCheckNoResourceAttr("stackit_dns_record_set.record_set_min", "comment"), + resource.TestCheckResourceAttr("stackit_dns_record_set.record_set_min", "active", "true"), + ), + }, + // Data sources + { + Config: fmt.Sprintf(` + %s + + data "stackit_dns_zone" "zone_min" { + project_id = stackit_dns_zone.zone_min.project_id + zone_id = stackit_dns_zone.zone_min.zone_id + } + + data "stackit_dns_record_set" "record_set_min" { + project_id = stackit_dns_zone.zone_min.project_id + zone_id = stackit_dns_zone.zone_min.zone_id + record_set_id = stackit_dns_record_set.record_set_min.record_set_id + }`, + inputConfigMinimal(), + ), + Check: resource.ComposeAggregateTestCheckFunc( + // Zone data + resource.TestCheckResourceAttr("data.stackit_dns_zone.zone_min", "project_id", zoneResource["project_id"]), + resource.TestCheckResourceAttrPair( + "stackit_dns_zone.zone_min", "zone_id", + "data.stackit_dns_zone.zone_min", "zone_id", + ), + resource.TestCheckResourceAttrPair( + "data.stackit_dns_record_set.record_set_min", "zone_id", + "data.stackit_dns_zone.zone_min", "zone_id", + ), + 
resource.TestCheckResourceAttrPair( + "data.stackit_dns_record_set.record_set_min", "project_id", + "data.stackit_dns_zone.zone_min", "project_id", + ), + resource.TestCheckResourceAttrPair( + "data.stackit_dns_record_set.record_set_min", "project_id", + "stackit_dns_record_set.record_set_min", "project_id", + ), + + resource.TestCheckResourceAttr("data.stackit_dns_zone.zone_min", "project_id", zoneResource["project_id"]), + resource.TestCheckResourceAttrSet("data.stackit_dns_zone.zone_min", "zone_id"), + resource.TestCheckResourceAttr("data.stackit_dns_zone.zone_min", "name", zoneResource["name"]), + resource.TestCheckResourceAttr("data.stackit_dns_zone.zone_min", "dns_name", zoneResource["dns_name_min"]), + resource.TestCheckResourceAttr("data.stackit_dns_zone.zone_min", "contact_email", zoneResource["contact_email"]), + resource.TestCheckResourceAttr("data.stackit_dns_zone.zone_min", "type", zoneResource["type"]), + resource.TestCheckResourceAttrSet("data.stackit_dns_zone.zone_min", "acl"), + resource.TestCheckResourceAttr("data.stackit_dns_zone.zone_min", "active", "true"), + resource.TestCheckResourceAttrSet("data.stackit_dns_zone.zone_min", "default_ttl"), + resource.TestCheckResourceAttrSet("data.stackit_dns_zone.zone_min", "expire_time"), + resource.TestCheckResourceAttr("data.stackit_dns_zone.zone_min", "is_reverse_zone", "false"), + resource.TestCheckResourceAttrSet("data.stackit_dns_zone.zone_min", "negative_cache"), + resource.TestCheckResourceAttrSet("data.stackit_dns_zone.zone_min", "primary_name_server"), + resource.TestCheckResourceAttr("data.stackit_dns_zone.zone_min", "primaries.#", "1"), + resource.TestCheckResourceAttrSet("data.stackit_dns_zone.zone_min", "refresh_time"), + resource.TestCheckResourceAttrSet("data.stackit_dns_zone.zone_min", "retry_time"), + resource.TestCheckResourceAttr("data.stackit_dns_zone.zone_min", "record_count", "4"), + + // Record set data + resource.TestCheckResourceAttrSet("data.stackit_dns_record_set.record_set_min", 
"record_set_id"), + resource.TestCheckResourceAttr("data.stackit_dns_record_set.record_set_min", "name", recordSetResource["name_min"]), + resource.TestCheckResourceAttr("data.stackit_dns_record_set.record_set_min", "records.#", "1"), + resource.TestCheckResourceAttr("data.stackit_dns_record_set.record_set_min", "records.0", strings.ReplaceAll(recordSetResource["records"], "\"", "")), + resource.TestCheckResourceAttr("data.stackit_dns_record_set.record_set_min", "type", recordSetResource["type"]), + resource.TestCheckResourceAttrSet("data.stackit_dns_record_set.record_set_min", "ttl"), + resource.TestCheckNoResourceAttr("data.stackit_dns_record_set.record_set_min", "comment"), + resource.TestCheckResourceAttr("data.stackit_dns_record_set.record_set_min", "active", "true"), + ), + }, + // Import + { + ResourceName: "stackit_dns_zone.zone_min", + ImportStateIdFunc: func(s *terraform.State) (string, error) { + r, ok := s.RootModule().Resources["stackit_dns_zone.zone_min"] + if !ok { + return "", fmt.Errorf("couldn't find resource stackit_dns_zone.zone_min") + } + zoneId, ok := r.Primary.Attributes["zone_id"] + if !ok { + return "", fmt.Errorf("couldn't find attribute zone_id") + } + + return fmt.Sprintf("%s,%s", testutil.ProjectId, zoneId), nil + }, + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "stackit_dns_record_set.record_set_min", + ImportStateIdFunc: func(s *terraform.State) (string, error) { + r, ok := s.RootModule().Resources["stackit_dns_record_set.record_set_min"] + if !ok { + return "", fmt.Errorf("couldn't find resource stackit_dns_record_set.record_set_min") + } + zoneId, ok := r.Primary.Attributes["zone_id"] + if !ok { + return "", fmt.Errorf("couldn't find attribute zone_id") + } + recordSetId, ok := r.Primary.Attributes["record_set_id"] + if !ok { + return "", fmt.Errorf("couldn't find attribute record_set_id") + } + + return fmt.Sprintf("%s,%s,%s", testutil.ProjectId, zoneId, recordSetId), nil + }, + ImportState: true, + 
ImportStateVerify: true, + }, + // Deletion is done by the framework implicitly + }, + }) +} + +func testAccCheckDnsDestroy(s *terraform.State) error { + ctx := context.Background() + var client *dns.APIClient + var err error + if testutil.DnsCustomEndpoint == "" { + client, err = dns.NewAPIClient() + } else { + client, err = dns.NewAPIClient( + config.WithEndpoint(testutil.DnsCustomEndpoint), + ) + } + if err != nil { + return fmt.Errorf("creating client: %w", err) + } + + zonesToDestroy := []string{} + for _, rs := range s.RootModule().Resources { + if rs.Type != "stackit_dns_zone" { + continue + } + // zone terraform ID: "[projectId],[zoneId]" + zoneId := strings.Split(rs.Primary.ID, core.Separator)[1] + zonesToDestroy = append(zonesToDestroy, zoneId) + } + + zonesResp, err := client.GetZones(ctx, testutil.ProjectId).ActiveEq(true).Execute() + if err != nil { + return fmt.Errorf("getting zonesResp: %w", err) + } + + zones := *zonesResp.Zones + for i := range zones { + id := *zones[i].Id + if utils.Contains(zonesToDestroy, id) { + _, err := client.DeleteZoneExecute(ctx, testutil.ProjectId, id) + if err != nil { + return fmt.Errorf("destroying zone %s during CheckDestroy: %w", *zones[i].Id, err) + } + _, err = dns.DeleteZoneWaitHandler(ctx, client, testutil.ProjectId, id).WaitWithContext(ctx) + if err != nil { + return fmt.Errorf("destroying zone %s during CheckDestroy: waiting for deletion %w", *zones[i].Id, err) + } + } + } + return nil +} diff --git a/stackit/services/dns/recordset/datasource.go b/stackit/services/dns/recordset/datasource.go new file mode 100644 index 00000000..32c38da1 --- /dev/null +++ b/stackit/services/dns/recordset/datasource.go @@ -0,0 +1,174 @@ +package dns + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + 
"github.com/hashicorp/terraform-plugin-framework/types"
+	"github.com/hashicorp/terraform-plugin-log/tflog"
+	"github.com/stackitcloud/stackit-sdk-go/core/config"
+	"github.com/stackitcloud/stackit-sdk-go/services/dns"
+	"github.com/stackitcloud/terraform-provider-stackit/stackit/core"
+	"github.com/stackitcloud/terraform-provider-stackit/stackit/validate"
+)
+
+// Ensure the implementation satisfies the expected interfaces.
+var (
+	_ datasource.DataSource = &recordSetDataSource{}
+)
+
+// NewRecordSetDataSource is a helper function to simplify the provider implementation.
+func NewRecordSetDataSource() datasource.DataSource {
+	return &recordSetDataSource{}
+}
+
+// recordSetDataSource is the data source implementation.
+type recordSetDataSource struct {
+	// client is set by Configure from the provider-wide configuration.
+	client *dns.APIClient
+}
+
+// Metadata returns the data source type name.
+func (d *recordSetDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
+	resp.TypeName = req.ProviderTypeName + "_dns_record_set"
+}
+
+// Configure adds the provider configured client to the resource.
+func (d *recordSetDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
+	// Prevent panic if the provider has not been configured.
+	if req.ProviderData == nil {
+		return
+	}
+
+	providerData, ok := req.ProviderData.(core.ProviderData)
+	if !ok {
+		resp.Diagnostics.AddError("Unexpected Data Source Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T.
Please report this issue to the provider developers.", req.ProviderData))
+		return
+	}
+
+	// Use the custom DNS endpoint when one is configured, otherwise the SDK default.
+	var apiClient *dns.APIClient
+	var err error
+	if providerData.DnsCustomEndpoint != "" {
+		apiClient, err = dns.NewAPIClient(
+			config.WithCustomAuth(providerData.RoundTripper),
+			config.WithEndpoint(providerData.DnsCustomEndpoint),
+		)
+	} else {
+		apiClient, err = dns.NewAPIClient(
+			config.WithCustomAuth(providerData.RoundTripper),
+		)
+	}
+
+	if err != nil {
+		resp.Diagnostics.AddError("Could not Configure API Client", err.Error())
+		return
+	}
+
+	tflog.Info(ctx, "DNS record set client configured")
+	d.client = apiClient
+}
+
+// Schema defines the schema for the data source.
+// All attributes except the three identifying IDs are Computed: the data
+// source only reads an existing record set.
+func (d *recordSetDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
+	resp.Schema = schema.Schema{
+		Description: "DNS Record Set Resource schema.",
+		Attributes: map[string]schema.Attribute{
+			"id": schema.StringAttribute{
+				Description: "Terraform's internal resource ID.",
+				Computed:    true,
+			},
+			"project_id": schema.StringAttribute{
+				Description: "STACKIT project ID to which the dns record set is associated.",
+				Required:    true,
+				Validators: []validator.String{
+					validate.UUID(),
+					validate.NoSeparator(),
+				},
+			},
+			"zone_id": schema.StringAttribute{
+				Description: "The zone ID to which is dns record set is associated.",
+				Required:    true,
+				Validators: []validator.String{
+					validate.UUID(),
+					validate.NoSeparator(),
+				},
+			},
+			"record_set_id": schema.StringAttribute{
+				Description: "The rr set id.",
+				Required:    true,
+				Validators: []validator.String{
+					validate.UUID(),
+					validate.NoSeparator(),
+				},
+			},
+			"name": schema.StringAttribute{
+				Description: "Name of the record which should be a valid domain according to rfc1035 Section 2.3.4. E.g.
`example.com`",
+				Computed: true,
+			},
+			"records": schema.ListAttribute{
+				Description: "Records.",
+				Computed:    true,
+				ElementType: types.StringType,
+			},
+			"ttl": schema.Int64Attribute{
+				Description: "Time to live. E.g. 3600",
+				Computed:    true,
+			},
+			"type": schema.StringAttribute{
+				Description: "The record set type. E.g. `A` or `CNAME`",
+				Computed:    true,
+			},
+			"active": schema.BoolAttribute{
+				Description: "Specifies if the record set is active or not.",
+				Computed:    true,
+			},
+			"comment": schema.StringAttribute{
+				Description: "Comment.",
+				Computed:    true,
+			},
+			"error": schema.StringAttribute{
+				Description: "Error shows error in case create/update/delete failed.",
+				Computed:    true,
+			},
+			"state": schema.StringAttribute{
+				Description: "Record set state.",
+				Computed:    true,
+			},
+		},
+	}
+}
+
+// Read refreshes the Terraform state with the latest data.
+// It fetches the record set identified by (project_id, zone_id, record_set_id)
+// from the config and maps the API response into state.
+func (d *recordSetDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform
+	var state Model
+	diags := req.Config.Get(ctx, &state)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+	projectId := state.ProjectId.ValueString()
+	zoneId := state.ZoneId.ValueString()
+	recordSetId := state.RecordSetId.ValueString()
+	ctx = tflog.SetField(ctx, "project_id", projectId)
+	ctx = tflog.SetField(ctx, "zone_id", zoneId)
+	ctx = tflog.SetField(ctx, "record_set_id", recordSetId)
+	// Renamed from zoneResp: this response carries a record set, not a zone.
+	recordSetResp, err := d.client.GetRecordSet(ctx, projectId, zoneId, recordSetId).Execute()
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Unable to Read record set", err.Error())
+		return
+	}
+
+	err = mapFields(recordSetResp, &state)
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Mapping fields", err.Error())
+		return
+	}
+	diags = resp.State.Set(ctx, state)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+	// Fixed log message: this is the data source Read, nothing is created here.
+	tflog.Info(ctx, "DNS record set read")
+}
diff --git a/stackit/services/dns/recordset/resource.go b/stackit/services/dns/recordset/resource.go
new file mode 100644
index 00000000..8a252da7
--- /dev/null
+++ b/stackit/services/dns/recordset/resource.go
@@ -0,0 +1,497 @@
+package dns
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/terraform-plugin-framework-validators/int64validator"
+	"github.com/hashicorp/terraform-plugin-framework-validators/listvalidator"
+	"github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
+	"github.com/hashicorp/terraform-plugin-framework/attr"
+	"github.com/hashicorp/terraform-plugin-framework/path"
+	"github.com/hashicorp/terraform-plugin-framework/resource"
+	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
+	"github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault"
+	"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
+	"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
+	"github.com/hashicorp/terraform-plugin-framework/schema/validator"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+	"github.com/hashicorp/terraform-plugin-log/tflog"
+	"github.com/stackitcloud/stackit-sdk-go/core/config"
+	"github.com/stackitcloud/stackit-sdk-go/services/dns"
+	"github.com/stackitcloud/terraform-provider-stackit/stackit/conversion"
+	"github.com/stackitcloud/terraform-provider-stackit/stackit/core"
+	"github.com/stackitcloud/terraform-provider-stackit/stackit/validate"
+)
+
+// Ensure the implementation satisfies the expected interfaces.
+var (
+	_ resource.Resource                = &recordSetResource{}
+	_ resource.ResourceWithConfigure   = &recordSetResource{}
+	_ resource.ResourceWithImportState = &recordSetResource{}
+)
+
+// Model is the Terraform schema model shared by the record set resource and
+// data source. Field tags must match the attribute names in both schemas.
+type Model struct {
+	Id          types.String `tfsdk:"id"` // needed by TF
+	RecordSetId types.String `tfsdk:"record_set_id"`
+	ZoneId      types.String `tfsdk:"zone_id"`
+	ProjectId   types.String `tfsdk:"project_id"`
+	Active      types.Bool   `tfsdk:"active"`
+	Comment     types.String `tfsdk:"comment"`
+	Name        types.String `tfsdk:"name"`
+	Records     types.List   `tfsdk:"records"`
+	TTL         types.Int64  `tfsdk:"ttl"`
+	Type        types.String `tfsdk:"type"`
+	Error       types.String `tfsdk:"error"`
+	State       types.String `tfsdk:"state"`
+}
+
+// NewRecordSetResource is a helper function to simplify the provider implementation.
+func NewRecordSetResource() resource.Resource {
+	return &recordSetResource{}
+}
+
+// recordSetResource is the resource implementation.
+type recordSetResource struct {
+	// client is set by Configure from the provider-wide configuration.
+	client *dns.APIClient
+}
+
+// Metadata returns the resource type name.
+func (r *recordSetResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
+	resp.TypeName = req.ProviderTypeName + "_dns_record_set"
+}
+
+// Configure adds the provider configured client to the resource.
+func (r *recordSetResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
+	// Prevent panic if the provider has not been configured.
+	if req.ProviderData == nil {
+		return
+	}
+
+	providerData, ok := req.ProviderData.(core.ProviderData)
+	if !ok {
+		resp.Diagnostics.AddError("Unexpected Resource Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T.
Please report this issue to the provider developers.", req.ProviderData))
+		return
+	}
+
+	// Use the custom DNS endpoint when one is configured, otherwise the SDK default.
+	var apiClient *dns.APIClient
+	var err error
+	if providerData.DnsCustomEndpoint != "" {
+		apiClient, err = dns.NewAPIClient(
+			config.WithCustomAuth(providerData.RoundTripper),
+			config.WithEndpoint(providerData.DnsCustomEndpoint),
+		)
+	} else {
+		apiClient, err = dns.NewAPIClient(
+			config.WithCustomAuth(providerData.RoundTripper),
+		)
+	}
+
+	if err != nil {
+		resp.Diagnostics.AddError("Could not Configure API Client", err.Error())
+		return
+	}
+
+	tflog.Debug(ctx, "DNS record set client configured")
+	r.client = apiClient
+}
+
+// Schema defines the schema for the resource.
+func (r *recordSetResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
+	resp.Schema = schema.Schema{
+		Description: "DNS Record Set Resource schema.",
+		Attributes: map[string]schema.Attribute{
+			"id": schema.StringAttribute{
+				Description: "Terraform's internal resource ID.",
+				Computed:    true,
+				PlanModifiers: []planmodifier.String{
+					stringplanmodifier.UseStateForUnknown(),
+				},
+			},
+			"project_id": schema.StringAttribute{
+				Description: "STACKIT project ID to which the dns record set is associated.",
+				Required:    true,
+				PlanModifiers: []planmodifier.String{
+					stringplanmodifier.RequiresReplace(),
+				},
+				Validators: []validator.String{
+					validate.UUID(),
+					validate.NoSeparator(),
+				},
+			},
+			"zone_id": schema.StringAttribute{
+				Description: "The zone ID to which is dns record set is associated.",
+				Required:    true,
+				PlanModifiers: []planmodifier.String{
+					stringplanmodifier.RequiresReplace(),
+				},
+				Validators: []validator.String{
+					validate.UUID(),
+					validate.NoSeparator(),
+				},
+			},
+			"record_set_id": schema.StringAttribute{
+				Description: "The rr set id.",
+				Computed:    true,
+				PlanModifiers: []planmodifier.String{
+					stringplanmodifier.UseStateForUnknown(),
+				},
+				Validators: []validator.String{
+					validate.UUID(),
+					validate.NoSeparator(),
+				},
+			},
+			"name": schema.StringAttribute{
+				Description: "Name of the record which should be a valid domain according to rfc1035 Section 2.3.4. E.g. `example.com`",
+				Required:    true,
+				Validators: []validator.String{
+					stringvalidator.LengthAtLeast(1),
+					stringvalidator.LengthAtMost(63),
+				},
+			},
+			"records": schema.ListAttribute{
+				Description: "Records.",
+				ElementType: types.StringType,
+				Required:    true,
+				Validators: []validator.List{
+					listvalidator.SizeAtLeast(1),
+					listvalidator.UniqueValues(),
+					// NOTE(review): validate.IP() restricts records to IP addresses,
+					// which fits A/AAAA but seems to reject CNAME/TXT/MX content even
+					// though `type` documents those — confirm intended scope.
+					listvalidator.ValueStringsAre(validate.IP()),
+				},
+			},
+			"ttl": schema.Int64Attribute{
+				Description: "Time to live. E.g. 3600",
+				Optional:    true,
+				Computed:    true,
+				Validators: []validator.Int64{
+					int64validator.AtLeast(30),
+					int64validator.AtMost(99999999),
+				},
+			},
+			"type": schema.StringAttribute{
+				Description: "The record set type. E.g. `A` or `CNAME`",
+				Optional:    true,
+				Computed:    true,
+				// NOTE(review): `type` is not sent in the update payload (see
+				// toUpdatePayload), so a config change here may never reach the API;
+				// consider RequiresReplace — confirm against API semantics.
+				PlanModifiers: []planmodifier.String{
+					stringplanmodifier.UseStateForUnknown(),
+				},
+			},
+			"active": schema.BoolAttribute{
+				Description: "Specifies if the record set is active or not.",
+				Optional:    true,
+				Computed:    true,
+				Default:     booldefault.StaticBool(true),
+			},
+			"comment": schema.StringAttribute{
+				Description: "Comment.",
+				Optional:    true,
+				Computed:    true,
+				Validators: []validator.String{
+					stringvalidator.LengthAtMost(255),
+				},
+			},
+			"error": schema.StringAttribute{
+				Description: "Error shows error in case create/update/delete failed.",
+				Computed:    true,
+				Validators: []validator.String{
+					stringvalidator.LengthAtMost(2000),
+				},
+			},
+			"state": schema.StringAttribute{
+				Description: "Record set state.",
+				Computed:    true,
+			},
+		},
+	}
+}
+
+// Create creates the resource and sets the initial Terraform state.
+func (r *recordSetResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform + // Retrieve values from plan + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + projectId := model.ProjectId.ValueString() + zoneId := model.ZoneId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "zone_id", zoneId) + + // Generate API request body from model + payload, err := toCreatePayload(&model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating recordset", fmt.Sprintf("Creating API payload: %v", err)) + return + } + // Create new recordset + recordSetResp, err := r.client.CreateRecordSet(ctx, projectId, zoneId).CreateRecordSetPayload(*payload).Execute() + if err != nil || recordSetResp.Rrset == nil || recordSetResp.Rrset.Id == nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating recordset", fmt.Sprintf("Calling API: %v", err)) + return + } + ctx = tflog.SetField(ctx, "record_set_id", *recordSetResp.Rrset.Id) + + wr, err := dns.CreateRecordSetWaitHandler(ctx, r.client, projectId, zoneId, *recordSetResp.Rrset.Id).SetTimeout(1 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating recordset", fmt.Sprintf("Instance creation waiting: %v", err)) + return + } + got, ok := wr.(*dns.RecordSetResponse) + if !ok { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating recordset", fmt.Sprintf("Wait result conversion, got %+v", got)) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(got, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + // Set state to fully populated data + diags = resp.State.Set(ctx, model) + 
resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "DNS record set created") +} + +// Read refreshes the Terraform state with the latest data. +func (r *recordSetResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + zoneId := model.ZoneId.ValueString() + recordSetId := model.RecordSetId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "zone_id", zoneId) + ctx = tflog.SetField(ctx, "record_set_id", recordSetId) + + recordSetResp, err := r.client.GetRecordSet(ctx, projectId, zoneId, recordSetId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading zones", err.Error()) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(recordSetResp, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + + // Set refreshed state + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "DNS record set read") +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *recordSetResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform + // Retrieve values from plan + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + projectId := model.ProjectId.ValueString() + zoneId := model.ZoneId.ValueString() + recordSetId := model.RecordSetId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "zone_id", zoneId) + ctx = tflog.SetField(ctx, "record_set_id", recordSetId) + + // Generate API request body from model + payload, err := toUpdatePayload(&model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating recordset", fmt.Sprintf("Could not create API payload: %v", err)) + return + } + // Update recordset + _, err = r.client.UpdateRecordSet(ctx, projectId, zoneId, recordSetId).UpdateRecordSetPayload(*payload).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating recordset", err.Error()) + return + } + wr, err := dns.UpdateRecordSetWaitHandler(ctx, r.client, projectId, zoneId, recordSetId).SetTimeout(1 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating recordset", fmt.Sprintf("Instance update waiting: %v", err)) + return + } + got, ok := wr.(*dns.RecordSetResponse) + if !ok { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating recordset", fmt.Sprintf("Wait result conversion, got %+v", got)) + return + } + + // Fetch updated record set + recordSetResp, err := r.client.GetRecordSet(ctx, projectId, zoneId, recordSetId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading updated data", err.Error()) + return + } + err = mapFields(recordSetResp, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields in update", err.Error()) + return + } + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "DNS record set updated") +} + +// Delete deletes the resource and removes the Terraform state on success. 
+func (r *recordSetResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform + // Retrieve values from plan + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + projectId := model.ProjectId.ValueString() + zoneId := model.ZoneId.ValueString() + recordSetId := model.RecordSetId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "zone_id", zoneId) + ctx = tflog.SetField(ctx, "record_set_id", recordSetId) + + // Delete existing record set + _, err := r.client.DeleteRecordSet(ctx, projectId, zoneId, recordSetId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting recordset", err.Error()) + } + _, err = dns.DeleteRecordSetWaitHandler(ctx, r.client, projectId, zoneId, recordSetId).SetTimeout(1 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting record set", fmt.Sprintf("Instance deletion waiting: %v", err)) + return + } + tflog.Info(ctx, "DNS record set deleted") +} + +// ImportState imports a resource into the Terraform state on success. +// The expected format of the resource import identifier is: project_id,zone_id,record_set_id +func (r *recordSetResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + idParts := strings.Split(req.ID, core.Separator) + if len(idParts) != 3 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" { + resp.Diagnostics.AddError( + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format [project_id],[zone_id],[record_set_id], got %q", req.ID), + ) + return + } + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...) 
+ resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("zone_id"), idParts[1])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("record_set_id"), idParts[2])...) + tflog.Info(ctx, "DNS record set state imported") +} + +func mapFields(recordSetResp *dns.RecordSetResponse, model *Model) error { + if recordSetResp == nil || recordSetResp.Rrset == nil { + return fmt.Errorf("response input is nil") + } + if model == nil { + return fmt.Errorf("model input is nil") + } + recordSet := recordSetResp.Rrset + + var recordSetId string + if model.RecordSetId.ValueString() != "" { + recordSetId = model.RecordSetId.ValueString() + } else if recordSet.Id != nil { + recordSetId = *recordSet.Id + } else { + return fmt.Errorf("record set id not present") + } + + if recordSet.Records == nil { + model.Records = types.ListNull(types.StringType) + } else { + records := []attr.Value{} + for _, record := range *recordSet.Records { + records = append(records, types.StringPointerValue(record.Content)) + } + recordsList, diags := types.ListValue(types.StringType, records) + if diags.HasError() { + return fmt.Errorf("failed to map records: %w", core.DiagsToError(diags)) + } + model.Records = recordsList + } + idParts := []string{ + model.ProjectId.ValueString(), + model.ZoneId.ValueString(), + recordSetId, + } + model.Id = types.StringValue( + strings.Join(idParts, core.Separator), + ) + model.RecordSetId = types.StringPointerValue(recordSet.Id) + model.Active = types.BoolPointerValue(recordSet.Active) + model.Comment = types.StringPointerValue(recordSet.Comment) + model.Error = types.StringPointerValue(recordSet.Error) + model.Name = types.StringPointerValue(recordSet.Name) + model.State = types.StringPointerValue(recordSet.State) + model.TTL = conversion.ToTypeInt64(recordSet.Ttl) + model.Type = types.StringPointerValue(recordSet.Type) + return nil +} + +func toCreatePayload(model *Model) (*dns.CreateRecordSetPayload, error) { + if model == nil { + return nil, 
fmt.Errorf("nil model") + } + + records := []dns.RecordPayload{} + for i, record := range model.Records.Elements() { + recordString, ok := record.(types.String) + if !ok { + return nil, fmt.Errorf("expected record at index %d to be of type %T, got %T", i, types.String{}, record) + } + records = append(records, dns.RecordPayload{ + Content: recordString.ValueStringPointer(), + }) + } + + return &dns.CreateRecordSetPayload{ + Comment: model.Comment.ValueStringPointer(), + Name: model.Name.ValueStringPointer(), + Records: &records, + Ttl: conversion.ToPtrInt32(model.TTL), + Type: model.Type.ValueStringPointer(), + }, nil +} + +func toUpdatePayload(model *Model) (*dns.UpdateRecordSetPayload, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + + records := []dns.RecordPayload{} + for i, record := range model.Records.Elements() { + recordString, ok := record.(types.String) + if !ok { + return nil, fmt.Errorf("expected record at index %d to be of type %T, got %T", i, types.String{}, record) + } + records = append(records, dns.RecordPayload{ + Content: recordString.ValueStringPointer(), + }) + } + + return &dns.UpdateRecordSetPayload{ + Comment: model.Comment.ValueStringPointer(), + Name: model.Name.ValueStringPointer(), + Records: &records, + Ttl: conversion.ToPtrInt32(model.TTL), + }, nil +} diff --git a/stackit/services/dns/recordset/resource_test.go b/stackit/services/dns/recordset/resource_test.go new file mode 100644 index 00000000..ae17e5ce --- /dev/null +++ b/stackit/services/dns/recordset/resource_test.go @@ -0,0 +1,307 @@ +package dns + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/dns" +) + +func TestMapFields(t *testing.T) { + tests := []struct { + description string + input *dns.RecordSetResponse + expected Model 
+ isValid bool + }{ + { + "default_values", + &dns.RecordSetResponse{ + Rrset: &dns.RecordSet{ + Id: utils.Ptr("rid"), + }, + }, + Model{ + Id: types.StringValue("pid,zid,rid"), + RecordSetId: types.StringValue("rid"), + ZoneId: types.StringValue("zid"), + ProjectId: types.StringValue("pid"), + Active: types.BoolNull(), + Comment: types.StringNull(), + Error: types.StringNull(), + Name: types.StringNull(), + Records: types.ListNull(types.StringType), + State: types.StringNull(), + TTL: types.Int64Null(), + Type: types.StringNull(), + }, + true, + }, + { + "simple_values", + &dns.RecordSetResponse{ + Rrset: &dns.RecordSet{ + Id: utils.Ptr("rid"), + Active: utils.Ptr(true), + Comment: utils.Ptr("comment"), + Error: utils.Ptr("error"), + Name: utils.Ptr("name"), + Records: &[]dns.Record{ + {Content: utils.Ptr("record_1")}, + {Content: utils.Ptr("record_2")}, + }, + State: utils.Ptr("state"), + Ttl: utils.Ptr(int32(1)), + Type: utils.Ptr("type"), + }, + }, + Model{ + Id: types.StringValue("pid,zid,rid"), + RecordSetId: types.StringValue("rid"), + ZoneId: types.StringValue("zid"), + ProjectId: types.StringValue("pid"), + Active: types.BoolValue(true), + Comment: types.StringValue("comment"), + Error: types.StringValue("error"), + Name: types.StringValue("name"), + Records: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("record_1"), + types.StringValue("record_2"), + }), + State: types.StringValue("state"), + TTL: types.Int64Value(1), + Type: types.StringValue("type"), + }, + true, + }, + { + "null_fields_and_int_conversions", + &dns.RecordSetResponse{ + Rrset: &dns.RecordSet{ + Id: utils.Ptr("rid"), + Active: nil, + Comment: nil, + Error: nil, + Name: utils.Ptr("name"), + Records: nil, + State: utils.Ptr("state"), + Ttl: utils.Ptr(int32(2123456789)), + Type: utils.Ptr("type"), + }, + }, + Model{ + Id: types.StringValue("pid,zid,rid"), + RecordSetId: types.StringValue("rid"), + ZoneId: types.StringValue("zid"), + ProjectId: 
types.StringValue("pid"), + Active: types.BoolNull(), + Comment: types.StringNull(), + Error: types.StringNull(), + Name: types.StringValue("name"), + Records: types.ListNull(types.StringType), + State: types.StringValue("state"), + TTL: types.Int64Value(2123456789), + Type: types.StringValue("type"), + }, + true, + }, + { + "nil_response", + nil, + Model{}, + false, + }, + { + "no_resource_id", + &dns.RecordSetResponse{}, + Model{}, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + state := &Model{ + ProjectId: tt.expected.ProjectId, + ZoneId: tt.expected.ZoneId, + } + err := mapFields(tt.input, state) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(state, &tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func TestToCreatePayload(t *testing.T) { + tests := []struct { + description string + input *Model + expected *dns.CreateRecordSetPayload + isValid bool + }{ + { + "default values", + &Model{}, + &dns.CreateRecordSetPayload{ + Records: &[]dns.RecordPayload{}, + }, + true, + }, + { + "simple_values", + &Model{ + Comment: types.StringValue("comment"), + Name: types.StringValue("name"), + Records: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("record_1"), + types.StringValue("record_2"), + }), + TTL: types.Int64Value(1), + Type: types.StringValue("type"), + }, + &dns.CreateRecordSetPayload{ + Comment: utils.Ptr("comment"), + Name: utils.Ptr("name"), + Records: &[]dns.RecordPayload{ + {Content: utils.Ptr("record_1")}, + {Content: utils.Ptr("record_2")}, + }, + Ttl: utils.Ptr(int32(1)), + Type: utils.Ptr("type"), + }, + true, + }, + { + "null_fields_and_int_conversions", + &Model{ + Comment: types.StringNull(), + Name: types.StringValue(""), + Records: types.ListValueMust(types.StringType, nil), + TTL: 
types.Int64Value(2123456789), + Type: types.StringValue(""), + }, + &dns.CreateRecordSetPayload{ + Comment: nil, + Name: utils.Ptr(""), + Records: &[]dns.RecordPayload{}, + Ttl: utils.Ptr(int32(2123456789)), + Type: utils.Ptr(""), + }, + true, + }, + { + "nil_model", + nil, + nil, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + output, err := toCreatePayload(tt.input) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(output, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func TestToUpdatePayload(t *testing.T) { + tests := []struct { + description string + input *Model + expected *dns.UpdateRecordSetPayload + isValid bool + }{ + { + "default_values", + &Model{}, + &dns.UpdateRecordSetPayload{ + Records: &[]dns.RecordPayload{}, + }, + true, + }, + { + "simple_values", + &Model{ + Comment: types.StringValue("comment"), + Name: types.StringValue("name"), + Records: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("record_1"), + types.StringValue("record_2"), + }), + TTL: types.Int64Value(1), + }, + &dns.UpdateRecordSetPayload{ + Comment: utils.Ptr("comment"), + Name: utils.Ptr("name"), + Records: &[]dns.RecordPayload{ + {Content: utils.Ptr("record_1")}, + {Content: utils.Ptr("record_2")}, + }, + Ttl: utils.Ptr(int32(1)), + }, + true, + }, + { + "null_fields_and_int_conversions", + &Model{ + Comment: types.StringNull(), + Name: types.StringValue(""), + Records: types.ListValueMust(types.StringType, nil), + TTL: types.Int64Value(2123456789), + }, + &dns.UpdateRecordSetPayload{ + Comment: nil, + Name: utils.Ptr(""), + Records: &[]dns.RecordPayload{}, + Ttl: utils.Ptr(int32(2123456789)), + }, + true, + }, + { + "nil_model", + nil, + nil, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t 
*testing.T) { + output, err := toUpdatePayload(tt.input) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(output, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} diff --git a/stackit/services/dns/zone/datasource.go b/stackit/services/dns/zone/datasource.go new file mode 100644 index 00000000..374d2a83 --- /dev/null +++ b/stackit/services/dns/zone/datasource.go @@ -0,0 +1,211 @@ +package dns + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/dns" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &zoneDataSource{} +) + +// NewZoneDataSource is a helper function to simplify the provider implementation. +func NewZoneDataSource() datasource.DataSource { + return &zoneDataSource{} +} + +// zoneDataSource is the data source implementation. +type zoneDataSource struct { + client *dns.APIClient +} + +// Metadata returns the data source type name. 
+func (d *zoneDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_dns_zone" +} + +func (d *zoneDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + var apiClient *dns.APIClient + var err error + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Data Source Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. Please report this issue to the provider developers.", req.ProviderData)) + return + } + + if providerData.DnsCustomEndpoint != "" { + apiClient, err = dns.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.DnsCustomEndpoint), + ) + } else { + apiClient, err = dns.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + ) + } + if err != nil { + resp.Diagnostics.AddError( + "Could not Configure API Client", + err.Error(), + ) + return + } + + tflog.Info(ctx, "DNS zone client configured") + d.client = apiClient +} + +// Schema defines the schema for the data source. 
// Schema defines the schema for the data source. All zone attributes except
// the two lookup keys (project_id, zone_id) are computed, since a data source
// only reads existing zones.
func (d *zoneDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Description: "DNS Zone resource schema.",
		Attributes: map[string]schema.Attribute{
			"id": schema.StringAttribute{
				Description: "Terraform's internal resource ID.",
				Computed:    true,
			},
			"project_id": schema.StringAttribute{
				Description: "STACKIT project ID to which the dns zone is associated.",
				Required:    true,
				Validators: []validator.String{
					validate.UUID(),
					validate.NoSeparator(),
				},
			},
			"zone_id": schema.StringAttribute{
				Description: "The zone ID.",
				Required:    true,
				Validators: []validator.String{
					validate.UUID(),
					validate.NoSeparator(),
				},
			},
			"name": schema.StringAttribute{
				Description: "The user given name of the zone.",
				Computed:    true,
			},
			"dns_name": schema.StringAttribute{
				Description: "The zone name. E.g. `example.com`",
				Computed:    true,
			},
			"description": schema.StringAttribute{
				Description: "Description of the zone.",
				Computed:    true,
			},
			"acl": schema.StringAttribute{
				Description: "The access control list.",
				Computed:    true,
			},
			// NOTE(review): description is empty — consider documenting
			// what "active" means for a zone.
			"active": schema.BoolAttribute{
				Description: "",
				Computed:    true,
			},
			"contact_email": schema.StringAttribute{
				Description: "A contact e-mail for the zone.",
				Computed:    true,
			},
			"default_ttl": schema.Int64Attribute{
				Description: "Default time to live.",
				Computed:    true,
			},
			"expire_time": schema.Int64Attribute{
				Description: "Expire time.",
				Computed:    true,
			},
			"is_reverse_zone": schema.BoolAttribute{
				Description: "Specifies, if the zone is a reverse zone or not.",
				Computed:    true,
			},
			"negative_cache": schema.Int64Attribute{
				Description: "Negative caching.",
				Computed:    true,
			},
			"primary_name_server": schema.StringAttribute{
				Description: "Primary name server. FQDN.",
				Computed:    true,
			},
			"primaries": schema.ListAttribute{
				Description: `Primary name server for secondary zone.`,
				Computed:    true,
				ElementType: types.StringType,
			},
			"record_count": schema.Int64Attribute{
				Description: "Record count how many records are in the zone.",
				Computed:    true,
			},
			"refresh_time": schema.Int64Attribute{
				Description: "Refresh time.",
				Computed:    true,
			},
			"retry_time": schema.Int64Attribute{
				Description: "Retry time.",
				Computed:    true,
			},
			"serial_number": schema.Int64Attribute{
				Description: "Serial number.",
				Computed:    true,
			},
			"type": schema.StringAttribute{
				Description: "Zone type.",
				Computed:    true,
			},
			"visibility": schema.StringAttribute{
				Description: "Visibility of the zone.",
				Computed:    true,
			},
			"state": schema.StringAttribute{
				Description: "Zone state.",
				Computed:    true,
			},
		},
	}
}

// Read refreshes the Terraform state with the latest data: it fetches the
// zone identified by (project_id, zone_id) from the API and maps the
// response into state.
func (d *zoneDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform
	var state Model
	diags := req.Config.Get(ctx, &state)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
	projectId := state.ProjectId.ValueString()
	zoneId := state.ZoneId.ValueString()
	// Attach the identifiers to the context so they appear in all log lines.
	ctx = tflog.SetField(ctx, "project_id", projectId)
	ctx = tflog.SetField(ctx, "zone_id", zoneId)

	zoneResp, err := d.client.GetZone(ctx, projectId, zoneId).Execute()
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Unable to Read Zone", err.Error())
		return
	}

	err = mapFields(zoneResp, &state)
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Mapping fields", err.Error())
		return
	}
	diags = resp.State.Set(ctx, state)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
	tflog.Info(ctx, "DNS zone read")
}
+var ( + _ resource.Resource = &zoneResource{} + _ resource.ResourceWithConfigure = &zoneResource{} + _ resource.ResourceWithImportState = &zoneResource{} +) + +type Model struct { + Id types.String `tfsdk:"id"` // needed by TF + ZoneId types.String `tfsdk:"zone_id"` + ProjectId types.String `tfsdk:"project_id"` + Name types.String `tfsdk:"name"` + DnsName types.String `tfsdk:"dns_name"` + Description types.String `tfsdk:"description"` + Acl types.String `tfsdk:"acl"` + Active types.Bool `tfsdk:"active"` + ContactEmail types.String `tfsdk:"contact_email"` + DefaultTTL types.Int64 `tfsdk:"default_ttl"` + ExpireTime types.Int64 `tfsdk:"expire_time"` + IsReverseZone types.Bool `tfsdk:"is_reverse_zone"` + NegativeCache types.Int64 `tfsdk:"negative_cache"` + PrimaryNameServer types.String `tfsdk:"primary_name_server"` + Primaries types.List `tfsdk:"primaries"` + RecordCount types.Int64 `tfsdk:"record_count"` + RefreshTime types.Int64 `tfsdk:"refresh_time"` + RetryTime types.Int64 `tfsdk:"retry_time"` + SerialNumber types.Int64 `tfsdk:"serial_number"` + Type types.String `tfsdk:"type"` + Visibility types.String `tfsdk:"visibility"` + State types.String `tfsdk:"state"` +} + +// NewZoneResource is a helper function to simplify the provider implementation. +func NewZoneResource() resource.Resource { + return &zoneResource{} +} + +// zoneResource is the resource implementation. +type zoneResource struct { + client *dns.APIClient +} + +// Metadata returns the resource type name. +func (r *zoneResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_dns_zone" +} + +// Configure adds the provider configured client to the resource. +func (r *zoneResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. 
	// ProviderData is nil until the provider has been configured; skip
	// silently on that first pass to avoid a panic.
	if req.ProviderData == nil {
		return
	}

	providerData, ok := req.ProviderData.(core.ProviderData)
	if !ok {
		resp.Diagnostics.AddError("Unexpected Resource Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. Please report this issue to the provider developers.", req.ProviderData))
		return
	}

	var apiClient *dns.APIClient
	var err error
	if providerData.DnsCustomEndpoint != "" {
		// A custom endpoint overrides the SDK's default DNS endpoint.
		ctx = tflog.SetField(ctx, "dns_custom_endpoint", providerData.DnsCustomEndpoint)
		apiClient, err = dns.NewAPIClient(
			config.WithCustomAuth(providerData.RoundTripper),
			config.WithEndpoint(providerData.DnsCustomEndpoint),
		)
	} else {
		apiClient, err = dns.NewAPIClient(
			config.WithCustomAuth(providerData.RoundTripper),
		)
	}

	if err != nil {
		resp.Diagnostics.AddError("Could not Configure API Client", err.Error())
		return
	}

	tflog.Info(ctx, "DNS zone client configured")
	r.client = apiClient
}

// Schema defines the schema for the resource. Required attributes identify
// the zone; optional+computed attributes can be set by the user or filled in
// from API defaults; computed-only attributes are server-managed.
func (r *zoneResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Description: "DNS Zone resource schema.",
		Attributes: map[string]schema.Attribute{
			"id": schema.StringAttribute{
				Description: "Terraform's internal resource ID.",
				Computed:    true,
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.UseStateForUnknown(),
				},
			},
			"project_id": schema.StringAttribute{
				Description: "STACKIT project ID to which the dns zone is associated.",
				Required:    true,
				// Moving a zone between projects is not supported: force
				// resource replacement instead.
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.RequiresReplace(),
				},
				Validators: []validator.String{
					validate.UUID(),
					validate.NoSeparator(),
				},
			},
			"zone_id": schema.StringAttribute{
				Description: "The zone ID.",
				Computed:    true,
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.UseStateForUnknown(),
				},
				Validators: []validator.String{
					validate.UUID(),
					validate.NoSeparator(),
				},
			},
			"name": schema.StringAttribute{
				Description: "The user given name of the zone.",
				Required:    true,
				Validators: []validator.String{
					stringvalidator.LengthAtLeast(1),
					stringvalidator.LengthAtMost(63),
				},
			},
			"dns_name": schema.StringAttribute{
				Description: "The zone name. E.g. `example.com`",
				Required:    true,
				// The DNS name cannot be changed in place.
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.RequiresReplace(),
				},
				Validators: []validator.String{
					stringvalidator.LengthAtLeast(1),
					stringvalidator.LengthAtMost(253),
				},
			},
			"description": schema.StringAttribute{
				Description: "Description of the zone.",
				Optional:    true,
				Computed:    true,
				Validators: []validator.String{
					stringvalidator.LengthAtMost(1024),
				},
			},
			"acl": schema.StringAttribute{
				Description: "The access control list. E.g. `0.0.0.0/0,::/0`",
				Optional:    true,
				Computed:    true,
				Validators: []validator.String{
					stringvalidator.LengthAtMost(2000),
				},
			},
			// NOTE(review): description is empty — consider documenting
			// what "active" means for a zone.
			"active": schema.BoolAttribute{
				Description: "",
				Optional:    true,
				Computed:    true,
			},
			"contact_email": schema.StringAttribute{
				Description: "A contact e-mail for the zone.",
				Optional:    true,
				Computed:    true,
				Validators: []validator.String{
					stringvalidator.LengthAtMost(255),
				},
			},
			"default_ttl": schema.Int64Attribute{
				Description: "Default time to live. E.g. 3600.",
				Optional:    true,
				Computed:    true,
				Validators: []validator.Int64{
					int64validator.Between(60, 99999999),
				},
			},
			"expire_time": schema.Int64Attribute{
				Description: "Expire time. E.g. 1209600.",
				Optional:    true,
				Computed:    true,
				Validators: []validator.Int64{
					int64validator.Between(60, 99999999),
				},
			},
			"is_reverse_zone": schema.BoolAttribute{
				Description: "Specifies, if the zone is a reverse zone or not.",
				Optional:    true,
				Computed:    true,
				Default:     booldefault.StaticBool(false),
			},
			"negative_cache": schema.Int64Attribute{
				Description: "Negative caching. E.g. 60",
				Optional:    true,
				Computed:    true,
				Validators: []validator.Int64{
					int64validator.Between(60, 99999999),
				},
			},
			"primaries": schema.ListAttribute{
				Description: `Primary name server for secondary zone. E.g. ["1.2.3.4"]`,
				Optional:    true,
				Computed:    true,
				ElementType: types.StringType,
				Validators: []validator.List{
					listvalidator.SizeAtMost(10),
				},
			},
			"refresh_time": schema.Int64Attribute{
				Description: "Refresh time. E.g. 3600",
				Optional:    true,
				Computed:    true,
				Validators: []validator.Int64{
					int64validator.Between(60, 99999999),
				},
			},
			"retry_time": schema.Int64Attribute{
				Description: "Retry time. E.g. 600",
				Optional:    true,
				Computed:    true,
				Validators: []validator.Int64{
					int64validator.Between(60, 99999999),
				},
			},
			"type": schema.StringAttribute{
				Description: "Zone type. E.g. `primary`",
				Optional:    true,
				Computed:    true,
				Default:     stringdefault.StaticString("primary"),
				Validators: []validator.String{
					stringvalidator.OneOf("primary", "secondary"),
				},
			},
			"primary_name_server": schema.StringAttribute{
				Description: "Primary name server. FQDN.",
				Computed:    true,
				Validators: []validator.String{
					stringvalidator.LengthAtLeast(1),
					stringvalidator.LengthAtMost(253),
				},
			},
			"serial_number": schema.Int64Attribute{
				Description: "Serial number. E.g. `2022111400`.",
				Computed:    true,
				// Serial numbers are int32 on the API side, hence the
				// MaxInt32 bound.
				Validators: []validator.Int64{
					int64validator.AtLeast(0),
					int64validator.AtMost(math.MaxInt32 - 1),
				},
			},
			"visibility": schema.StringAttribute{
				Description: "Visibility of the zone. E.g. `public`.",
				Computed:    true,
			},
			"record_count": schema.Int64Attribute{
				Description: "Record count how many records are in the zone.",
				Computed:    true,
			},
			"state": schema.StringAttribute{
				Description: "Zone state. E.g. `CREATE_SUCCEEDED`.",
				Computed:    true,
			},
		},
	}
}
// Create creates the zone via the API, waits for the asynchronous creation
// to finish, and stores the fully populated result in state.
func (r *zoneResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform
	// Retrieve values from plan
	var model Model
	diags := req.Plan.Get(ctx, &model)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}

	projectId := model.ProjectId.ValueString()
	ctx = tflog.SetField(ctx, "project_id", projectId)

	// Generate API request body from model
	payload, err := toCreatePayload(&model)
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating zone", fmt.Sprintf("Creating API payload: %v", err))
		return
	}
	// Create new zone
	createResp, err := r.client.CreateZone(ctx, projectId).CreateZonePayload(*payload).Execute()
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating zone", fmt.Sprintf("Calling API: %v", err))
		return
	}
	// NOTE(review): createResp.Zone is dereferenced without a nil check —
	// confirm the API guarantees a zone object on a successful create.
	if createResp.Zone.Id == nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating zone", "API didn't return zone id")
		return
	}
	zoneId := *createResp.Zone.Id

	ctx = tflog.SetField(ctx, "zone_id", zoneId)
	// Zone creation is asynchronous: block (up to 10 minutes) until it
	// reaches a terminal state.
	wr, err := dns.CreateZoneWaitHandler(ctx, r.client, projectId, zoneId).SetTimeout(10 * time.Minute).WaitWithContext(ctx)
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating zone", fmt.Sprintf("Instance creation waiting: %v", err))
		return
	}
	got, ok := wr.(*dns.ZoneResponse)
	if !ok {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating zone", fmt.Sprintf("Wait result conversion, got %+v", got))
		return
	}

	// Map response body to schema and populate Computed attribute values
	err = mapFields(got, &model)
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error())
		return
	}
	// Set state to fully populated data
	diags = resp.State.Set(ctx, model)
	resp.Diagnostics.Append(diags...)
	tflog.Info(ctx, "DNS zone created")
}

// Read refreshes the Terraform state with the latest data from the API.
func (r *zoneResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform
	var state Model
	diags := req.State.Get(ctx, &state)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
	projectId := state.ProjectId.ValueString()
	zoneId := state.ZoneId.ValueString()
	ctx = tflog.SetField(ctx, "project_id", projectId)
	ctx = tflog.SetField(ctx, "zone_id", zoneId)

	zoneResp, err := r.client.GetZone(ctx, projectId, zoneId).Execute()
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading zones", err.Error())
		return
	}

	// Map response body to schema and populate Computed attribute values
	err = mapFields(zoneResp, &state)
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error())
		return
	}
	// Set refreshed state
	diags = resp.State.Set(ctx, state)
	resp.Diagnostics.Append(diags...)
	tflog.Info(ctx, "DNS zone read")
}

// Update applies the planned changes via the API, waits for the asynchronous
// update to finish, then re-reads the zone to populate computed values.
func (r *zoneResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform
	// Retrieve values from plan
	var model Model
	diags := req.Plan.Get(ctx, &model)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
	projectId := model.ProjectId.ValueString()
	zoneId := model.ZoneId.ValueString()
	ctx = tflog.SetField(ctx, "project_id", projectId)
	ctx = tflog.SetField(ctx, "zone_id", zoneId)

	// Generate API request body from model
	payload, err := toUpdatePayload(&model)
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating zone", fmt.Sprintf("Could not create API payload: %v", err))
		return
	}
	// Update existing zone
	_, err = r.client.UpdateZone(ctx, projectId, zoneId).UpdateZonePayload(*payload).Execute()
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating zone", err.Error())
		return
	}
	wr, err := dns.UpdateZoneWaitHandler(ctx, r.client, projectId, zoneId).SetTimeout(10 * time.Minute).WaitWithContext(ctx)
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating zone", fmt.Sprintf("Instance update waiting: %v", err))
		return
	}
	got, ok := wr.(*dns.ZoneResponse)
	if !ok {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating zone", fmt.Sprintf("Wait result conversion, got %+v", got))
		return
	}

	// Fetch updated zone
	// NOTE(review): the wait handler already returned a *dns.ZoneResponse
	// (`got`) — confirm whether this extra GetZone round-trip is needed.
	zoneResp, err := r.client.GetZone(ctx, projectId, zoneId).Execute()
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading updated data", err.Error())
		return
	}
	err = mapFields(zoneResp, &model)
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields in update", err.Error())
		return
	}
	diags = resp.State.Set(ctx, model)
	resp.Diagnostics.Append(diags...)
	tflog.Info(ctx, "DNS zone updated")
}
+func (r *zoneResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform + // Retrieve values from state + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + projectId := model.ProjectId.ValueString() + zoneId := model.ZoneId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "zone_id", zoneId) + + // Delete existing zone + _, err := r.client.DeleteZone(ctx, projectId, zoneId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting zone", err.Error()) + return + } + _, err = dns.DeleteZoneWaitHandler(ctx, r.client, projectId, zoneId).SetTimeout(10 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting zone", fmt.Sprintf("Instance deletion waiting: %v", err)) + return + } + + tflog.Info(ctx, "DNS zone deleted") +} + +// ImportState imports a resource into the Terraform state on success. +// The expected format of the resource import identifier is: project_id,zone_id +func (r *zoneResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + idParts := strings.Split(req.ID, core.Separator) + + if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { + resp.Diagnostics.AddError( + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format: [project_id],[zone_id] Got: %q", req.ID), + ) + return + } + + projectId := idParts[0] + zoneId := idParts[1] + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "zone_id", zoneId) + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), projectId)...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("zone_id"), zoneId)...) 
+ tflog.Info(ctx, "DNS zone state imported") +} + +func mapFields(zoneResp *dns.ZoneResponse, model *Model) error { + if zoneResp == nil || zoneResp.Zone == nil { + return fmt.Errorf("response input is nil") + } + if model == nil { + return fmt.Errorf("model input is nil") + } + z := zoneResp.Zone + + var rc *int64 + if z.RecordCount != nil { + recordCount64 := int64(*z.RecordCount) + rc = &recordCount64 + } else { + rc = nil + } + + var zoneId string + if model.ZoneId.ValueString() != "" { + zoneId = model.ZoneId.ValueString() + } else if z.Id != nil { + zoneId = *z.Id + } else { + return fmt.Errorf("zone id not present") + } + + idParts := []string{ + model.ProjectId.ValueString(), + zoneId, + } + model.Id = types.StringValue( + strings.Join(idParts, core.Separator), + ) + + if z.Primaries == nil { + model.Primaries = types.ListNull(types.StringType) + } else { + respZonePrimaries := []attr.Value{} + for _, primary := range *z.Primaries { + respZonePrimaries = append(respZonePrimaries, types.StringValue(primary)) + respZonePrimariesList, diags := types.ListValue(types.StringType, respZonePrimaries) + if diags.HasError() { + return fmt.Errorf("creating primaries list: %w", core.DiagsToError(diags)) + } + model.Primaries = respZonePrimariesList + } + } + model.ZoneId = types.StringValue(zoneId) + model.Description = types.StringPointerValue(z.Description) + model.Acl = types.StringPointerValue(z.Acl) + model.Active = types.BoolPointerValue(z.Active) + model.ContactEmail = types.StringPointerValue(z.ContactEmail) + model.DefaultTTL = conversion.ToTypeInt64(z.DefaultTTL) + model.DnsName = types.StringPointerValue(z.DnsName) + model.ExpireTime = conversion.ToTypeInt64(z.ExpireTime) + model.IsReverseZone = types.BoolPointerValue(z.IsReverseZone) + model.Name = types.StringPointerValue(z.Name) + model.NegativeCache = conversion.ToTypeInt64(z.NegativeCache) + model.PrimaryNameServer = types.StringPointerValue(z.PrimaryNameServer) + model.RecordCount = 
types.Int64PointerValue(rc) + model.RefreshTime = conversion.ToTypeInt64(z.RefreshTime) + model.RetryTime = conversion.ToTypeInt64(z.RetryTime) + model.SerialNumber = conversion.ToTypeInt64(z.SerialNumber) + model.State = types.StringPointerValue(z.State) + model.Type = types.StringPointerValue(z.Type) + model.Visibility = types.StringPointerValue(z.Visibility) + return nil +} + +func toCreatePayload(model *Model) (*dns.CreateZonePayload, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + + modelPrimaries := []string{} + for _, primary := range model.Primaries.Elements() { + primaryString, ok := primary.(types.String) + if !ok { + return nil, fmt.Errorf("type assertion failed") + } + modelPrimaries = append(modelPrimaries, primaryString.ValueString()) + } + return &dns.CreateZonePayload{ + Name: model.Name.ValueStringPointer(), + DnsName: model.DnsName.ValueStringPointer(), + ContactEmail: model.ContactEmail.ValueStringPointer(), + Description: model.Description.ValueStringPointer(), + Acl: model.Acl.ValueStringPointer(), + Type: model.Type.ValueStringPointer(), + DefaultTTL: conversion.ToPtrInt32(model.DefaultTTL), + ExpireTime: conversion.ToPtrInt32(model.ExpireTime), + RefreshTime: conversion.ToPtrInt32(model.RefreshTime), + RetryTime: conversion.ToPtrInt32(model.RetryTime), + NegativeCache: conversion.ToPtrInt32(model.NegativeCache), + IsReverseZone: model.IsReverseZone.ValueBoolPointer(), + Primaries: &modelPrimaries, + }, nil +} + +func toUpdatePayload(model *Model) (*dns.UpdateZonePayload, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + + modelPrimaries := []string{} + for _, primary := range model.Primaries.Elements() { + primaryString, ok := primary.(types.String) + if !ok { + return nil, fmt.Errorf("type assertion failed") + } + modelPrimaries = append(modelPrimaries, primaryString.ValueString()) + } + return &dns.UpdateZonePayload{ + Name: model.Name.ValueStringPointer(), + ContactEmail: 
// TestMapFields verifies the dns.ZoneResponse -> Model mapping: minimal
// responses, fully populated responses, nil/empty optional fields with
// int32 -> int64 conversions, and the error cases (nil response, missing
// zone id).
func TestMapFields(t *testing.T) {
	tests := []struct {
		description string
		input       *dns.ZoneResponse
		expected    Model
		isValid     bool
	}{
		{
			// Only the zone id is set; all optional fields map to null.
			"default_ok",
			&dns.ZoneResponse{
				Zone: &dns.Zone{
					Id: utils.Ptr("zid"),
				},
			},
			Model{
				Id:                types.StringValue("pid,zid"),
				ProjectId:         types.StringValue("pid"),
				ZoneId:            types.StringValue("zid"),
				Name:              types.StringNull(),
				DnsName:           types.StringNull(),
				Acl:               types.StringNull(),
				DefaultTTL:        types.Int64Null(),
				ExpireTime:        types.Int64Null(),
				RefreshTime:       types.Int64Null(),
				RetryTime:         types.Int64Null(),
				SerialNumber:      types.Int64Null(),
				NegativeCache:     types.Int64Null(),
				Type:              types.StringNull(),
				State:             types.StringNull(),
				PrimaryNameServer: types.StringNull(),
				Primaries:         types.ListNull(types.StringType),
				Visibility:        types.StringNull(),
			},
			true,
		},
		{
			"values_ok",
			&dns.ZoneResponse{
				Zone: &dns.Zone{
					Id:                utils.Ptr("zid"),
					Name:              utils.Ptr("name"),
					DnsName:           utils.Ptr("dnsname"),
					Acl:               utils.Ptr("acl"),
					Active:            utils.Ptr(false),
					CreationStarted:   utils.Ptr("bar"),
					CreationFinished:  utils.Ptr("foo"),
					DefaultTTL:        utils.Ptr(int32(1)),
					ExpireTime:        utils.Ptr(int32(2)),
					RefreshTime:       utils.Ptr(int32(3)),
					RetryTime:         utils.Ptr(int32(4)),
					SerialNumber:      utils.Ptr(int32(5)),
					NegativeCache:     utils.Ptr(int32(6)),
					State:             utils.Ptr("state"),
					Type:              utils.Ptr("type"),
					Primaries:         &[]string{"primary"},
					PrimaryNameServer: utils.Ptr("pns"),
					UpdateStarted:     utils.Ptr("ufoo"),
					UpdateFinished:    utils.Ptr("ubar"),
					Visibility:        utils.Ptr("visibility"),
					Error:             utils.Ptr("error"),
					ContactEmail:      utils.Ptr("a@b.cd"),
					Description:       utils.Ptr("description"),
					IsReverseZone:     utils.Ptr(false),
					RecordCount:       utils.Ptr(int32(3)),
				},
			},
			// Note: timestamps and Error have no model counterpart and are
			// dropped by the mapping.
			Model{
				Id:                types.StringValue("pid,zid"),
				ProjectId:         types.StringValue("pid"),
				ZoneId:            types.StringValue("zid"),
				Name:              types.StringValue("name"),
				DnsName:           types.StringValue("dnsname"),
				Acl:               types.StringValue("acl"),
				Active:            types.BoolValue(false),
				DefaultTTL:        types.Int64Value(1),
				ExpireTime:        types.Int64Value(2),
				RefreshTime:       types.Int64Value(3),
				RetryTime:         types.Int64Value(4),
				SerialNumber:      types.Int64Value(5),
				NegativeCache:     types.Int64Value(6),
				Type:              types.StringValue("type"),
				State:             types.StringValue("state"),
				PrimaryNameServer: types.StringValue("pns"),
				Primaries: types.ListValueMust(types.StringType, []attr.Value{
					types.StringValue("primary"),
				}),
				Visibility:    types.StringValue("visibility"),
				ContactEmail:  types.StringValue("a@b.cd"),
				Description:   types.StringValue("description"),
				IsReverseZone: types.BoolValue(false),
				RecordCount:   types.Int64Value(3),
			},
			true,
		},
		{
			// Nil pointers map to null values; extreme int32 values must
			// survive the widening to int64.
			"nullable_fields_and_int_conversions_ok",
			&dns.ZoneResponse{
				Zone: &dns.Zone{
					Id:                utils.Ptr("zid"),
					Name:              utils.Ptr("name"),
					DnsName:           utils.Ptr("dnsname"),
					Acl:               utils.Ptr("acl"),
					Active:            nil,
					CreationStarted:   utils.Ptr("bar"),
					CreationFinished:  utils.Ptr("foo"),
					DefaultTTL:        utils.Ptr(int32(2123456789)),
					ExpireTime:        utils.Ptr(int32(-2)),
					RefreshTime:       utils.Ptr(int32(3)),
					RetryTime:         utils.Ptr(int32(4)),
					SerialNumber:      utils.Ptr(int32(5)),
					NegativeCache:     utils.Ptr(int32(0)),
					State:             utils.Ptr("state"),
					Type:              utils.Ptr("type"),
					Primaries:         nil,
					PrimaryNameServer: utils.Ptr("pns"),
					UpdateStarted:     utils.Ptr("ufoo"),
					UpdateFinished:    utils.Ptr("ubar"),
					Visibility:        utils.Ptr("visibility"),
					ContactEmail:      nil,
					Description:       nil,
					IsReverseZone:     nil,
					RecordCount:       utils.Ptr(int32(-2123456789)),
				},
			},
			Model{
				Id:                types.StringValue("pid,zid"),
				ProjectId:         types.StringValue("pid"),
				ZoneId:            types.StringValue("zid"),
				Name:              types.StringValue("name"),
				DnsName:           types.StringValue("dnsname"),
				Acl:               types.StringValue("acl"),
				Active:            types.BoolNull(),
				DefaultTTL:        types.Int64Value(2123456789),
				ExpireTime:        types.Int64Value(-2),
				RefreshTime:       types.Int64Value(3),
				RetryTime:         types.Int64Value(4),
				SerialNumber:      types.Int64Value(5),
				NegativeCache:     types.Int64Value(0),
				Type:              types.StringValue("type"),
				Primaries:         types.ListNull(types.StringType),
				State:             types.StringValue("state"),
				PrimaryNameServer: types.StringValue("pns"),
				Visibility:        types.StringValue("visibility"),
				ContactEmail:      types.StringNull(),
				Description:       types.StringNull(),
				IsReverseZone:     types.BoolNull(),
				RecordCount:       types.Int64Value(-2123456789),
			},
			true,
		},
		{
			"response_nil_fail",
			nil,
			Model{},
			false,
		},
		{
			// A response without a zone (and hence without an id) must fail.
			"no_resource_id",
			&dns.ZoneResponse{},
			Model{},
			false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.description, func(t *testing.T) {
			// Seed the state with the project id, as the provider does
			// before calling mapFields.
			state := &Model{
				ProjectId: tt.expected.ProjectId,
			}
			err := mapFields(tt.input, state)
			if !tt.isValid && err == nil {
				t.Fatalf("Should have failed")
			}
			if tt.isValid && err != nil {
				t.Fatalf("Should not have failed: %v", err)
			}
			if tt.isValid {
				diff := cmp.Diff(state, &tt.expected)
				if diff != "" {
					t.Fatalf("Data does not match: %s", diff)
				}
			}
		})
	}
}
+} + +func TestToPayloadUpdate(t *testing.T) { + tests := []struct { + description string + input *Model + expected *dns.UpdateZonePayload + isValid bool + }{ + { + "single_field_change_ok", + &Model{ + Name: types.StringValue("Name"), + }, + &dns.UpdateZonePayload{ + Name: utils.Ptr("Name"), + Primaries: &[]string{}, + }, + true, + }, + { + "mapping_with_conversions_ok", + &Model{ + Name: types.StringValue("Name"), + DnsName: types.StringValue("DnsName"), + Acl: types.StringValue("Acl"), + Active: types.BoolValue(true), + Description: types.StringValue("Description"), + Type: types.StringValue("Type"), + ContactEmail: types.StringValue("ContactEmail"), + PrimaryNameServer: types.StringValue("PrimaryNameServer"), + Primaries: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("Primary"), + }), + RetryTime: types.Int64Value(3), + RefreshTime: types.Int64Value(4), + ExpireTime: types.Int64Value(5), + DefaultTTL: types.Int64Value(4534534), + NegativeCache: types.Int64Value(-4534534), + IsReverseZone: types.BoolValue(true), + }, + &dns.UpdateZonePayload{ + Name: utils.Ptr("Name"), + Acl: utils.Ptr("Acl"), + Description: utils.Ptr("Description"), + ContactEmail: utils.Ptr("ContactEmail"), + Primaries: &[]string{"Primary"}, + RetryTime: utils.Ptr(int32(3)), + RefreshTime: utils.Ptr(int32(4)), + ExpireTime: utils.Ptr(int32(5)), + DefaultTTL: utils.Ptr(int32(4534534)), + NegativeCache: utils.Ptr(int32(-4534534)), + }, + true, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + output, err := toUpdatePayload(tt.input) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(output, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} diff --git a/stackit/services/logme/credentials/datasource.go b/stackit/services/logme/credentials/datasource.go new 
file mode 100644 index 00000000..7a3ac40b --- /dev/null +++ b/stackit/services/logme/credentials/datasource.go @@ -0,0 +1,178 @@ +package logme + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" + + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/logme" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &credentialsDataSource{} +) + +// NewCredentialsDataSource is a helper function to simplify the provider implementation. +func NewCredentialsDataSource() datasource.DataSource { + return &credentialsDataSource{} +} + +// credentialsDataSource is the data source implementation. +type credentialsDataSource struct { + client *logme.APIClient +} + +// Metadata returns the resource type name. +func (r *credentialsDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_logme_credentials" +} + +// Configure adds the provider configured client to the resource. +func (r *credentialsDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Data Source Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. 
Please report this issue to the provider developers.", req.ProviderData)) + return + } + + var apiClient *logme.APIClient + var err error + if providerData.LogMeCustomEndpoint != "" { + apiClient, err = logme.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.LogMeCustomEndpoint), + ) + } else { + apiClient, err = logme.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + + tflog.Info(ctx, "LogMe zone client configured") + r.client = apiClient +} + +// Schema defines the schema for the resource. +func (r *credentialsDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + descriptions := map[string]string{ + "main": "LogMe credentials data source schema.", + "id": "Terraform's internal resource identifier.", + "credentials_id": "The credentials ID.", + "instance_id": "ID of the LogMe instance.", + "project_id": "STACKIT project ID to which the instance is associated.", + } + + resp.Schema = schema.Schema{ + Description: descriptions["main"], + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: descriptions["id"], + Computed: true, + }, + "credentials_id": schema.StringAttribute{ + Description: descriptions["credentials_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "instance_id": schema.StringAttribute{ + Description: descriptions["instance_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "project_id": schema.StringAttribute{ + Description: descriptions["project_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "host": schema.StringAttribute{ + Computed: true, + }, + 
"hosts": schema.ListAttribute{ + ElementType: types.StringType, + Computed: true, + }, + "http_api_uri": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "password": schema.StringAttribute{ + Computed: true, + Sensitive: true, + }, + "port": schema.Int64Attribute{ + Computed: true, + }, + "uri": schema.StringAttribute{ + Computed: true, + }, + "username": schema.StringAttribute{ + Computed: true, + }, + }, + } +} + +// Read refreshes the Terraform state with the latest data. +func (r *credentialsDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.Config.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + credentialsId := model.CredentialsId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + ctx = tflog.SetField(ctx, "credentials_id", credentialsId) + + recordSetResp, err := r.client.GetCredentials(ctx, projectId, instanceId, credentialsId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading credentials", err.Error()) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(recordSetResp, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + + // Set refreshed state + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) 
+ tflog.Info(ctx, "LogMe credentials read") +} diff --git a/stackit/services/logme/credentials/resource.go b/stackit/services/logme/credentials/resource.go new file mode 100644 index 00000000..decc38a3 --- /dev/null +++ b/stackit/services/logme/credentials/resource.go @@ -0,0 +1,371 @@ +package logme + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/terraform-provider-stackit/stackit/conversion" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/logme" +) + +// Ensure the implementation satisfies the expected interfaces. 
+var ( + _ resource.Resource = &logmeCredentialsResource{} + _ resource.ResourceWithConfigure = &logmeCredentialsResource{} + _ resource.ResourceWithImportState = &logmeCredentialsResource{} +) + +type Model struct { + Id types.String `tfsdk:"id"` // needed by TF + CredentialsId types.String `tfsdk:"credentials_id"` + InstanceId types.String `tfsdk:"instance_id"` + ProjectId types.String `tfsdk:"project_id"` + Host types.String `tfsdk:"host"` + Hosts types.List `tfsdk:"hosts"` + HttpAPIURI types.String `tfsdk:"http_api_uri"` + Name types.String `tfsdk:"name"` + Password types.String `tfsdk:"password"` + Port types.Int64 `tfsdk:"port"` + Uri types.String `tfsdk:"uri"` + Username types.String `tfsdk:"username"` +} + +// NewlogmeCredentialsResource is a helper function to simplify the provider implementation. +func NewlogmeCredentialsResource() resource.Resource { + return &logmeCredentialsResource{} +} + +// credentialsResource is the resource implementation. +type logmeCredentialsResource struct { + client *logme.APIClient +} + +// Metadata returns the resource type name. +func (r *logmeCredentialsResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_logme_credentials" +} + +// Configure adds the provider configured client to the resource. +func (r *logmeCredentialsResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Resource Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. 
Please report this issue to the provider developers.", req.ProviderData)) + return + } + + var apiClient *logme.APIClient + var err error + if providerData.LogMeCustomEndpoint != "" { + apiClient, err = logme.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.LogMeCustomEndpoint), + ) + } else { + apiClient, err = logme.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + + tflog.Info(ctx, "logme zone client configured") + r.client = apiClient +} + +// Schema defines the schema for the resource. +func (r *logmeCredentialsResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + descriptions := map[string]string{ + "main": "LogMe credentials resource schema.", + "id": "Terraform's internal resource identifier.", + "credentials_id": "The credentials ID.", + "instance_id": "ID of the LogMe instance.", + "project_id": "STACKIT Project ID to which the instance is associated.", + } + + resp.Schema = schema.Schema{ + Description: descriptions["main"], + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: descriptions["id"], + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "credentials_id": schema.StringAttribute{ + Description: descriptions["credentials_id"], + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "instance_id": schema.StringAttribute{ + Description: descriptions["instance_id"], + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + 
validate.UUID(), + validate.NoSeparator(), + }, + }, + "project_id": schema.StringAttribute{ + Description: descriptions["project_id"], + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "host": schema.StringAttribute{ + Computed: true, + }, + "hosts": schema.ListAttribute{ + ElementType: types.StringType, + Computed: true, + }, + "http_api_uri": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "password": schema.StringAttribute{ + Computed: true, + Sensitive: true, + }, + "port": schema.Int64Attribute{ + Computed: true, + }, + "uri": schema.StringAttribute{ + Computed: true, + }, + "username": schema.StringAttribute{ + Computed: true, + }, + }, + } +} + +// Create creates the resource and sets the initial Terraform state. +func (r *logmeCredentialsResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + // Create new credentials + credentialsResp, err := r.client.CreateCredentials(ctx, projectId, instanceId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating credentials", fmt.Sprintf("Calling API: %v", err)) + return + } + if credentialsResp.Id == nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating credentials", "Got empty credentials id") + return + } + credentialsId := *credentialsResp.Id + ctx = tflog.SetField(ctx, "credentials_id", credentialsId) + + wr, err := logme.CreateCredentialsWaitHandler(ctx, r.client, projectId, instanceId, credentialsId).SetTimeout(1 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating credentials", fmt.Sprintf("Instance creation waiting: %v", err)) + return + } + got, ok := wr.(*logme.CredentialsResponse) + if !ok { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating credentials", fmt.Sprintf("Wait result conversion, got %+v", got)) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(got, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + credentialsId := model.CredentialsId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + ctx = tflog.SetField(ctx, "credentials_id", credentialsId) + + recordSetResp, err := r.client.GetCredentials(ctx, projectId, instanceId, credentialsId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading credentials", err.Error()) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(recordSetResp, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + + // Set refreshed state + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "LogMe credentials read") +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *logmeCredentialsResource) Update(_ context.Context, _ resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform + // Update shouldn't be called + resp.Diagnostics.AddError("Error updating credentials", "credentials can't be updated") +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *logmeCredentialsResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + credentialsId := model.CredentialsId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + ctx = tflog.SetField(ctx, "credentials_id", credentialsId) + + // Delete existing credentials + err := r.client.DeleteCredentials(ctx, projectId, instanceId, credentialsId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting credentials", err.Error()) + } + _, err = logme.DeleteCredentialsWaitHandler(ctx, r.client, projectId, instanceId, credentialsId).SetTimeout(1 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting credentials", fmt.Sprintf("Instance deletion waiting: %v", err)) + return + } + tflog.Info(ctx, "LogMe credentials deleted") +} + +// ImportState imports a resource into the Terraform state on success. +// The expected format of the resource import identifier is: project_id,instance_id,credentials_id +func (r *logmeCredentialsResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + idParts := strings.Split(req.ID, core.Separator) + if len(idParts) != 3 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" { + core.LogAndAddError(ctx, &resp.Diagnostics, + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format [project_id],[instance_id],[credentials_id], got %q", req.ID), + ) + return + } + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), idParts[1])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("credentials_id"), idParts[2])...) 
+ tflog.Info(ctx, "LogMe credentials state imported") +} + +func mapFields(credentialsResp *logme.CredentialsResponse, model *Model) error { + if credentialsResp == nil { + return fmt.Errorf("response input is nil") + } + if credentialsResp.Raw == nil { + return fmt.Errorf("response credentials raw is nil") + } + if model == nil { + return fmt.Errorf("model input is nil") + } + credentials := credentialsResp.Raw.Credentials + + var credentialsId string + if model.CredentialsId.ValueString() != "" { + credentialsId = model.CredentialsId.ValueString() + } else if credentialsResp.Id != nil { + credentialsId = *credentialsResp.Id + } else { + return fmt.Errorf("credentials id not present") + } + + idParts := []string{ + model.ProjectId.ValueString(), + model.InstanceId.ValueString(), + credentialsId, + } + model.Id = types.StringValue( + strings.Join(idParts, core.Separator), + ) + model.CredentialsId = types.StringValue(credentialsId) + model.Hosts = types.ListNull(types.StringType) + if credentials != nil { + if credentials.Hosts != nil { + var hosts []attr.Value + for _, host := range *credentials.Hosts { + hosts = append(hosts, types.StringValue(host)) + } + hostsList, diags := types.ListValue(types.StringType, hosts) + if diags.HasError() { + return fmt.Errorf("failed to map hosts: %w", core.DiagsToError(diags)) + } + model.Hosts = hostsList + } + model.Host = types.StringPointerValue(credentials.Host) + model.HttpAPIURI = types.StringPointerValue(credentials.HttpApiUri) + model.Name = types.StringPointerValue(credentials.Name) + model.Password = types.StringPointerValue(credentials.Password) + model.Port = conversion.ToTypeInt64(credentials.Port) + model.Uri = types.StringPointerValue(credentials.Uri) + model.Username = types.StringPointerValue(credentials.Username) + } + return nil +} diff --git a/stackit/services/logme/credentials/resource_test.go b/stackit/services/logme/credentials/resource_test.go new file mode 100644 index 00000000..fd53c26c --- /dev/null 
+++ b/stackit/services/logme/credentials/resource_test.go @@ -0,0 +1,156 @@ +package logme + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/logme" +) + +func TestMapFields(t *testing.T) { + tests := []struct { + description string + input *logme.CredentialsResponse + expected Model + isValid bool + }{ + { + "default_values", + &logme.CredentialsResponse{ + Id: utils.Ptr("cid"), + Raw: &logme.RawCredentials{}, + }, + Model{ + Id: types.StringValue("pid,iid,cid"), + CredentialsId: types.StringValue("cid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Host: types.StringNull(), + Hosts: types.ListNull(types.StringType), + HttpAPIURI: types.StringNull(), + Name: types.StringNull(), + Password: types.StringNull(), + Port: types.Int64Null(), + Uri: types.StringNull(), + Username: types.StringNull(), + }, + true, + }, + { + "simple_values", + &logme.CredentialsResponse{ + Id: utils.Ptr("cid"), + Raw: &logme.RawCredentials{ + Credentials: &logme.Credentials{ + Host: utils.Ptr("host"), + Hosts: &[]string{ + "host_1", + "", + }, + HttpApiUri: utils.Ptr("http"), + Name: utils.Ptr("name"), + Password: utils.Ptr("password"), + Port: utils.Ptr(int32(1234)), + Uri: utils.Ptr("uri"), + Username: utils.Ptr("username"), + }, + }, + }, + Model{ + Id: types.StringValue("pid,iid,cid"), + CredentialsId: types.StringValue("cid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Host: types.StringValue("host"), + Hosts: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("host_1"), + types.StringValue(""), + }), + HttpAPIURI: types.StringValue("http"), + Name: types.StringValue("name"), + Password: types.StringValue("password"), + Port: types.Int64Value(1234), + Uri: 
types.StringValue("uri"), + Username: types.StringValue("username"), + }, + true, + }, + { + "null_fields_and_int_conversions", + &logme.CredentialsResponse{ + Id: utils.Ptr("cid"), + Raw: &logme.RawCredentials{ + Credentials: &logme.Credentials{ + Host: utils.Ptr(""), + Hosts: &[]string{}, + HttpApiUri: nil, + Name: nil, + Password: utils.Ptr(""), + Port: utils.Ptr(int32(2123456789)), + Uri: nil, + Username: utils.Ptr(""), + }, + }, + }, + Model{ + Id: types.StringValue("pid,iid,cid"), + CredentialsId: types.StringValue("cid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Host: types.StringValue(""), + Hosts: types.ListValueMust(types.StringType, []attr.Value{}), + HttpAPIURI: types.StringNull(), + Name: types.StringNull(), + Password: types.StringValue(""), + Port: types.Int64Value(2123456789), + Uri: types.StringNull(), + Username: types.StringValue(""), + }, + true, + }, + { + "nil_response", + nil, + Model{}, + false, + }, + { + "no_resource_id", + &logme.CredentialsResponse{}, + Model{}, + false, + }, + { + "nil_raw_credentials", + &logme.CredentialsResponse{ + Id: utils.Ptr("cid"), + }, + Model{}, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + model := &Model{ + ProjectId: tt.expected.ProjectId, + InstanceId: tt.expected.InstanceId, + } + err := mapFields(tt.input, model) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(model, &tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} diff --git a/stackit/services/logme/instance/datasource.go b/stackit/services/logme/instance/datasource.go new file mode 100644 index 00000000..c3ab8c68 --- /dev/null +++ b/stackit/services/logme/instance/datasource.go @@ -0,0 +1,181 @@ +package logme + +import ( + "context" + "fmt" + + 
"github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" + + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/logme" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &instanceDataSource{} +) + +// NewInstanceDataSource is a helper function to simplify the provider implementation. +func NewInstanceDataSource() datasource.DataSource { + return &instanceDataSource{} +} + +// instanceDataSource is the data source implementation. +type instanceDataSource struct { + client *logme.APIClient +} + +// Metadata returns the resource type name. +func (r *instanceDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_logme_instance" +} + +// Configure adds the provider configured client to the resource. +func (r *instanceDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Data Source Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. 
Please report this issue to the provider developers.", req.ProviderData)) + return + } + + var apiClient *logme.APIClient + var err error + if providerData.LogMeCustomEndpoint != "" { + apiClient, err = logme.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.LogMeCustomEndpoint), + ) + } else { + apiClient, err = logme.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + + tflog.Info(ctx, "LogMe zone client configured") + r.client = apiClient +} + +// Schema defines the schema for the resource. +func (r *instanceDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + descriptions := map[string]string{ + "main": "LogMe instance data source schema.", + "id": "Terraform's internal resource identifier.", + "instance_id": "ID of the LogMe instance.", + "project_id": "STACKIT Project ID to which the instance is associated.", + "name": "Instance name.", + "version": "The service version.", + "plan_name": "The selected plan name.", + "plan_id": "The selected plan ID.", + } + + resp.Schema = schema.Schema{ + Description: descriptions["main"], + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: descriptions["id"], + Computed: true, + }, + "instance_id": schema.StringAttribute{ + Description: descriptions["instance_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "project_id": schema.StringAttribute{ + Description: descriptions["project_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "name": schema.StringAttribute{ + Description: descriptions["name"], + Computed: true, + }, + "version": schema.StringAttribute{ + Description: 
descriptions["version"], + Computed: true, + }, + "plan_name": schema.StringAttribute{ + Description: descriptions["plan_name"], + Computed: true, + }, + "plan_id": schema.StringAttribute{ + Description: descriptions["plan_id"], + Computed: true, + }, + "parameters": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "sgw_acl": schema.StringAttribute{ + Computed: true, + }, + }, + Computed: true, + }, + "cf_guid": schema.StringAttribute{ + Computed: true, + }, + "cf_space_guid": schema.StringAttribute{ + Computed: true, + }, + "dashboard_url": schema.StringAttribute{ + Computed: true, + }, + "image_url": schema.StringAttribute{ + Computed: true, + }, + "organization_guid": schema.StringAttribute{ + Computed: true, + }, + }, + } +} + +// Read refreshes the Terraform state with the latest data. +func (r *instanceDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var state Model + diags := req.Config.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + projectId := state.ProjectId.ValueString() + instanceId := state.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + instanceResp, err := r.client.GetInstance(ctx, projectId, instanceId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Unable to read instance", err.Error()) + return + } + + err = mapFields(instanceResp, &state) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Mapping fields", err.Error()) + return + } + // Set refreshed state + diags = resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ tflog.Info(ctx, "LogMe instance read") +} diff --git a/stackit/services/logme/instance/resource.go b/stackit/services/logme/instance/resource.go new file mode 100644 index 00000000..2d10ec08 --- /dev/null +++ b/stackit/services/logme/instance/resource.go @@ -0,0 +1,642 @@ +package logme + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" + + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/logme" +) + +// Ensure the implementation satisfies the expected interfaces. 
+var ( + _ resource.Resource = &instanceResource{} + _ resource.ResourceWithConfigure = &instanceResource{} + _ resource.ResourceWithImportState = &instanceResource{} +) + +type Model struct { + Id types.String `tfsdk:"id"` // needed by TF + InstanceId types.String `tfsdk:"instance_id"` + ProjectId types.String `tfsdk:"project_id"` + CfGuid types.String `tfsdk:"cf_guid"` + CfSpaceGuid types.String `tfsdk:"cf_space_guid"` + DashboardUrl types.String `tfsdk:"dashboard_url"` + ImageUrl types.String `tfsdk:"image_url"` + Name types.String `tfsdk:"name"` + OrganizationGuid types.String `tfsdk:"organization_guid"` + Parameters types.Object `tfsdk:"parameters"` + Version types.String `tfsdk:"version"` + PlanName types.String `tfsdk:"plan_name"` + PlanId types.String `tfsdk:"plan_id"` +} + +// Struct corresponding to DataSourceModel.Parameters +type parametersModel struct { + SgwAcl types.String `tfsdk:"sgw_acl"` +} + +// Types corresponding to parametersModel +var parametersTypes = map[string]attr.Type{ + "sgw_acl": basetypes.StringType{}, +} + +// NewInstanceResource is a helper function to simplify the provider implementation. +func NewInstanceResource() resource.Resource { + return &instanceResource{} +} + +// instanceResource is the resource implementation. +type instanceResource struct { + client *logme.APIClient +} + +// Metadata returns the resource type name. +func (r *instanceResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_logme_instance" +} + +// Configure adds the provider configured client to the resource. +func (r *instanceResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. 
+ if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Resource Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. Please report this issue to the provider developers.", req.ProviderData)) + return + } + + var apiClient *logme.APIClient + var err error + if providerData.LogMeCustomEndpoint != "" { + apiClient, err = logme.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.LogMeCustomEndpoint), + ) + } else { + apiClient, err = logme.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + + tflog.Info(ctx, "logme zone client configured") + r.client = apiClient +} + +// Schema defines the schema for the resource. +func (r *instanceResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + descriptions := map[string]string{ + "main": "LogMe instance resource schema.", + "id": "Terraform's internal resource ID.", + "instance_id": "ID of the LogMe instance.", + "project_id": "STACKIT project ID to which the instance is associated.", + "name": "Instance name.", + "version": "The service version.", + "plan_name": "The selected plan name.", + "plan_id": "The selected plan ID.", + } + + resp.Schema = schema.Schema{ + Description: descriptions["main"], + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: descriptions["id"], + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "instance_id": schema.StringAttribute{ + Description: descriptions["instance_id"], + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + 
validate.UUID(), + validate.NoSeparator(), + }, + }, + "project_id": schema.StringAttribute{ + Description: descriptions["project_id"], + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "name": schema.StringAttribute{ + Description: descriptions["name"], + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), + }, + }, + "version": schema.StringAttribute{ + Description: descriptions["version"], + Required: true, + }, + "plan_name": schema.StringAttribute{ + Description: descriptions["plan_name"], + Required: true, + }, + "plan_id": schema.StringAttribute{ + Description: descriptions["plan_id"], + Computed: true, + }, + "parameters": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "sgw_acl": schema.StringAttribute{ + Optional: true, + Computed: true, + }, + }, + Optional: true, + }, + "cf_guid": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "cf_space_guid": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "dashboard_url": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "image_url": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "organization_guid": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + }, + } +} + +// Create creates the resource and sets the initial 
Terraform state. +func (r *instanceResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + + r.loadPlanId(ctx, &resp.Diagnostics, &model) + if diags.HasError() { + core.LogAndAddError(ctx, &diags, "Failed to load LogMe service plan", "plan "+model.PlanName.ValueString()) + return + } + + var parameters = ¶metersModel{} + if !(model.Parameters.IsNull() || model.Parameters.IsUnknown()) { + diags = model.Parameters.As(ctx, parameters, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + + // Generate API request body from model + payload, err := toCreatePayload(&model, parameters) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Creating API payload: %v", err)) + return + } + // Create new instance + createResp, err := r.client.CreateInstance(ctx, projectId).CreateInstancePayload(*payload).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Calling API: %v", err)) + return + } + instanceId := *createResp.InstanceId + ctx = tflog.SetField(ctx, "instance_id", instanceId) + wr, err := logme.CreateInstanceWaitHandler(ctx, r.client, projectId, instanceId).SetTimeout(15 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Instance creation waiting: %v", err)) + return + } + got, ok := wr.(*logme.Instance) + if !ok { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Wait result conversion, got %+v", got)) + return + } + + // Map 
response body to schema and populate Computed attribute values + err = mapFields(got, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + // Set state to fully populated data + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "logme instance created") +} + +// Read refreshes the Terraform state with the latest data. +func (r *instanceResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var state Model + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := state.ProjectId.ValueString() + instanceId := state.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + instanceResp, err := r.client.GetInstance(ctx, projectId, instanceId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instances", err.Error()) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(instanceResp, &state) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + // Set refreshed state + diags = resp.State.Set(ctx, state) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "logme instance read") +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *instanceResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+	if resp.Diagnostics.HasError() {
+		return
+	}
+	projectId := model.ProjectId.ValueString()
+	instanceId := model.InstanceId.ValueString()
+	ctx = tflog.SetField(ctx, "project_id", projectId)
+	ctx = tflog.SetField(ctx, "instance_id", instanceId)
+
+	r.loadPlanId(ctx, &resp.Diagnostics, &model)
+	// BUG FIX: same defect as in Create — loadPlanId writes its errors to
+	// resp.Diagnostics, so that is what must be checked; the original checked
+	// the stale local `diags` and logged into it, dropping the failure.
+	if resp.Diagnostics.HasError() {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Failed to load LogMe service plan", "plan "+model.PlanName.ValueString())
+		return
+	}
+
+	// Unpack the optional "parameters" nested object, if set in the plan.
+	var parameters = &parametersModel{}
+	if !(model.Parameters.IsNull() || model.Parameters.IsUnknown()) {
+		diags = model.Parameters.As(ctx, parameters, basetypes.ObjectAsOptions{})
+		resp.Diagnostics.Append(diags...)
+		if resp.Diagnostics.HasError() {
+			return
+		}
+	}
+
+	// Generate API request body from model
+	payload, err := toUpdatePayload(&model, parameters)
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Could not create API payload: %v", err))
+		return
+	}
+	// Update existing instance
+	err = r.client.UpdateInstance(ctx, projectId, instanceId).UpdateInstancePayload(*payload).Execute()
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", err.Error())
+		return
+	}
+	// Block until the update reaches a terminal state (or the timeout fires).
+	wr, err := logme.UpdateInstanceWaitHandler(ctx, r.client, projectId, instanceId).SetTimeout(15 * time.Minute).WaitWithContext(ctx)
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Instance update waiting: %v", err))
+		return
+	}
+	got, ok := wr.(*logme.Instance)
+	if !ok {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Wait result conversion, got %+v", got))
+		return
+	}
+
+	// Map response body to schema and populate Computed attribute values
+	err = mapFields(got, &model)
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields in update", err.Error())
+		return
+	}
+	diags = resp.State.Set(ctx, model)
+	resp.Diagnostics.Append(diags...)
+ tflog.Info(ctx, "logme instance updated") +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *instanceResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform + // Retrieve values from state + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + // Delete existing instance + err := r.client.DeleteInstance(ctx, projectId, instanceId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting instance", err.Error()) + return + } + _, err = logme.DeleteInstanceWaitHandler(ctx, r.client, projectId, instanceId).SetTimeout(15 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting instance", fmt.Sprintf("Instance deletion waiting: %v", err)) + return + } + tflog.Info(ctx, "logme instance deleted") +} + +// ImportState imports a resource into the Terraform state on success. +// The expected format of the resource import identifier is: project_id,instance_id +func (r *instanceResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + idParts := strings.Split(req.ID, core.Separator) + + if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { + resp.Diagnostics.AddError( + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format: [project_id],[instance_id] Got: %q", req.ID), + ) + return + } + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...) 
+ resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), idParts[1])...) + tflog.Info(ctx, "LogMe instance state imported") +} + +func mapFields(instance *logme.Instance, model *Model) error { + if instance == nil { + return fmt.Errorf("response input is nil") + } + if model == nil { + return fmt.Errorf("model input is nil") + } + + var instanceId string + if model.InstanceId.ValueString() != "" { + instanceId = model.InstanceId.ValueString() + } else if instance.InstanceId != nil { + instanceId = *instance.InstanceId + } else { + return fmt.Errorf("instance id not present") + } + + idParts := []string{ + model.ProjectId.ValueString(), + instanceId, + } + model.Id = types.StringValue( + strings.Join(idParts, core.Separator), + ) + model.InstanceId = types.StringValue(instanceId) + model.PlanId = types.StringPointerValue(instance.PlanId) + model.CfGuid = types.StringPointerValue(instance.CfGuid) + model.CfSpaceGuid = types.StringPointerValue(instance.CfSpaceGuid) + model.DashboardUrl = types.StringPointerValue(instance.DashboardUrl) + model.ImageUrl = types.StringPointerValue(instance.ImageUrl) + model.Name = types.StringPointerValue(instance.Name) + model.OrganizationGuid = types.StringPointerValue(instance.OrganizationGuid) + + if instance.Parameters == nil { + model.Parameters = types.ObjectNull(parametersTypes) + } else { + parameters, err := mapParameters(*instance.Parameters) + if err != nil { + return fmt.Errorf("mapping parameters: %w", err) + } + model.Parameters = parameters + } + return nil +} + +func mapParameters(params map[string]interface{}) (types.Object, error) { + attributes := map[string]attr.Value{} + for attribute := range parametersTypes { + valueInterface, ok := params[attribute] + if !ok { + // All fields are optional, so this is ok + // Set the value as nil, will be handled accordingly + valueInterface = nil + } + + var value attr.Value + switch parametersTypes[attribute].(type) { + default: + return 
types.ObjectNull(parametersTypes), fmt.Errorf("found unexpected attribute type '%T'", parametersTypes[attribute]) + case basetypes.StringType: + if valueInterface == nil { + value = types.StringNull() + } else { + valueString, ok := valueInterface.(string) + if !ok { + return types.ObjectNull(parametersTypes), fmt.Errorf("found attribute '%s' of type %T, failed to assert as string", attribute, valueInterface) + } + value = types.StringValue(valueString) + } + case basetypes.BoolType: + if valueInterface == nil { + value = types.BoolNull() + } else { + valueBool, ok := valueInterface.(bool) + if !ok { + return types.ObjectNull(parametersTypes), fmt.Errorf("found attribute '%s' of type %T, failed to assert as bool", attribute, valueInterface) + } + value = types.BoolValue(valueBool) + } + case basetypes.Int64Type: + if valueInterface == nil { + value = types.Int64Null() + } else { + // This may be int64, int32, int or float64 + // We try to assert all 4 + var valueInt64 int64 + switch temp := valueInterface.(type) { + default: + return types.ObjectNull(parametersTypes), fmt.Errorf("found attribute '%s' of type %T, failed to assert as int", attribute, valueInterface) + case int64: + valueInt64 = temp + case int32: + valueInt64 = int64(temp) + case int: + valueInt64 = int64(temp) + case float64: + valueInt64 = int64(temp) + } + value = types.Int64Value(valueInt64) + } + case basetypes.ListType: // Assumed to be a list of strings + if valueInterface == nil { + value = types.ListNull(types.StringType) + } else { + // This may be []string{} or []interface{} + // We try to assert all 2 + var valueList []attr.Value + switch temp := valueInterface.(type) { + default: + return types.ObjectNull(parametersTypes), fmt.Errorf("found attribute '%s' of type %T, failed to assert as array of interface", attribute, valueInterface) + case []string: + for _, x := range temp { + valueList = append(valueList, types.StringValue(x)) + } + case []interface{}: + for _, x := range temp { + 
xString, ok := x.(string) + if !ok { + return types.ObjectNull(parametersTypes), fmt.Errorf("found attribute '%s' with element '%s' of type %T, failed to assert as string", attribute, x, x) + } + valueList = append(valueList, types.StringValue(xString)) + } + } + temp2, diags := types.ListValue(types.StringType, valueList) + if diags.HasError() { + return types.ObjectNull(parametersTypes), fmt.Errorf("failed to map %s: %w", attribute, core.DiagsToError(diags)) + } + value = temp2 + } + } + attributes[attribute] = value + } + + output, diags := types.ObjectValue(parametersTypes, attributes) + if diags.HasError() { + return types.ObjectNull(parametersTypes), fmt.Errorf("failed to create object: %w", core.DiagsToError(diags)) + } + return output, nil +} + +func toCreatePayload(model *Model, parameters *parametersModel) (*logme.CreateInstancePayload, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + if parameters == nil { + return &logme.CreateInstancePayload{ + InstanceName: model.Name.ValueStringPointer(), + PlanId: model.PlanId.ValueStringPointer(), + }, nil + } + payloadParams := &logme.InstanceParameters{} + if parameters.SgwAcl.ValueString() != "" { + payloadParams.SgwAcl = parameters.SgwAcl.ValueStringPointer() + } + return &logme.CreateInstancePayload{ + InstanceName: model.Name.ValueStringPointer(), + Parameters: payloadParams, + PlanId: model.PlanId.ValueStringPointer(), + }, nil +} + +func toUpdatePayload(model *Model, parameters *parametersModel) (*logme.UpdateInstancePayload, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + + if parameters == nil { + return &logme.UpdateInstancePayload{ + PlanId: model.PlanId.ValueStringPointer(), + }, nil + } + return &logme.UpdateInstancePayload{ + Parameters: &logme.InstanceParameters{ + SgwAcl: parameters.SgwAcl.ValueStringPointer(), + }, + PlanId: model.PlanId.ValueStringPointer(), + }, nil +} + +func (r *instanceResource) loadPlanId(ctx context.Context, diags 
*diag.Diagnostics, model *Model) { + projectId := model.ProjectId.ValueString() + res, err := r.client.GetOfferings(ctx, projectId).Execute() + if err != nil { + diags.AddError("Failed to list LogMe offerings", err.Error()) + return + } + + version := model.Version.ValueString() + planName := model.PlanName.ValueString() + availableVersions := "" + availablePlanNames := "" + isValidVersion := false + for _, offer := range *res.Offerings { + if !strings.EqualFold(*offer.Version, version) { + availableVersions = fmt.Sprintf("%s\n- %s", availableVersions, *offer.Version) + continue + } + isValidVersion = true + + for _, plan := range *offer.Plans { + if plan.Name == nil { + continue + } + if strings.EqualFold(*plan.Name, planName) && plan.Id != nil { + model.PlanId = types.StringPointerValue(plan.Id) + return + } + availablePlanNames = fmt.Sprintf("%s\n- %s", availablePlanNames, *plan.Name) + } + } + + if !isValidVersion { + diags.AddError("Invalid version", fmt.Sprintf("Couldn't find version '%s', available versions are:%s", version, availableVersions)) + return + } + diags.AddError("Invalid plan_name", fmt.Sprintf("Couldn't find plan_name '%s' for version %s, available names are:%s", planName, version, availablePlanNames)) +} diff --git a/stackit/services/logme/instance/resource_test.go b/stackit/services/logme/instance/resource_test.go new file mode 100644 index 00000000..c8f57f4e --- /dev/null +++ b/stackit/services/logme/instance/resource_test.go @@ -0,0 +1,304 @@ +package logme + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/logme" +) + +func TestMapFields(t *testing.T) { + tests := []struct { + description string + input *logme.Instance + expected Model + isValid bool + }{ + { + "default_values", + &logme.Instance{}, + Model{ + Id: 
types.StringValue("pid,iid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + PlanId: types.StringNull(), + Name: types.StringNull(), + CfGuid: types.StringNull(), + CfSpaceGuid: types.StringNull(), + DashboardUrl: types.StringNull(), + ImageUrl: types.StringNull(), + OrganizationGuid: types.StringNull(), + Parameters: types.ObjectNull(parametersTypes), + }, + true, + }, + { + "simple_values", + &logme.Instance{ + PlanId: utils.Ptr("plan"), + CfGuid: utils.Ptr("cf"), + CfSpaceGuid: utils.Ptr("space"), + DashboardUrl: utils.Ptr("dashboard"), + ImageUrl: utils.Ptr("image"), + InstanceId: utils.Ptr("iid"), + Name: utils.Ptr("name"), + OrganizationGuid: utils.Ptr("org"), + Parameters: &map[string]interface{}{ + "sgw_acl": "acl", + }, + }, + Model{ + Id: types.StringValue("pid,iid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + PlanId: types.StringValue("plan"), + Name: types.StringValue("name"), + CfGuid: types.StringValue("cf"), + CfSpaceGuid: types.StringValue("space"), + DashboardUrl: types.StringValue("dashboard"), + ImageUrl: types.StringValue("image"), + OrganizationGuid: types.StringValue("org"), + Parameters: types.ObjectValueMust(parametersTypes, map[string]attr.Value{ + "sgw_acl": types.StringValue("acl"), + }), + }, + true, + }, + { + "nil_response", + nil, + Model{}, + false, + }, + { + "no_resource_id", + &logme.Instance{}, + Model{}, + false, + }, + { + "wrong_param_types_1", + &logme.Instance{ + Parameters: &map[string]interface{}{ + "sgw_acl": true, + }, + }, + Model{}, + false, + }, + { + "wrong_param_types_2", + &logme.Instance{ + Parameters: &map[string]interface{}{ + "sgw_acl": 1, + }, + }, + Model{}, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + state := &Model{ + ProjectId: tt.expected.ProjectId, + InstanceId: tt.expected.InstanceId, + } + err := mapFields(tt.input, state) + if !tt.isValid && err == nil { + t.Fatalf("Should have 
failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(state, &tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func TestToCreatePayload(t *testing.T) { + tests := []struct { + description string + input *Model + inputParameters *parametersModel + expected *logme.CreateInstancePayload + isValid bool + }{ + { + "default_values", + &Model{}, + ¶metersModel{}, + &logme.CreateInstancePayload{ + Parameters: &logme.InstanceParameters{}, + }, + true, + }, + { + "simple_values", + &Model{ + Name: types.StringValue("name"), + PlanId: types.StringValue("plan"), + }, + ¶metersModel{ + SgwAcl: types.StringValue("sgw"), + }, + &logme.CreateInstancePayload{ + InstanceName: utils.Ptr("name"), + Parameters: &logme.InstanceParameters{ + SgwAcl: utils.Ptr("sgw"), + }, + PlanId: utils.Ptr("plan"), + }, + true, + }, + { + "null_fields_and_int_conversions", + &Model{ + Name: types.StringValue(""), + PlanId: types.StringValue(""), + }, + ¶metersModel{ + SgwAcl: types.StringNull(), + }, + &logme.CreateInstancePayload{ + InstanceName: utils.Ptr(""), + Parameters: &logme.InstanceParameters{ + SgwAcl: nil, + }, + PlanId: utils.Ptr(""), + }, + true, + }, + { + "nil_model", + nil, + ¶metersModel{}, + nil, + false, + }, + { + "nil_parameters", + &Model{ + Name: types.StringValue("name"), + PlanId: types.StringValue("plan"), + }, + nil, + &logme.CreateInstancePayload{ + InstanceName: utils.Ptr("name"), + PlanId: utils.Ptr("plan"), + }, + true, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + output, err := toCreatePayload(tt.input, tt.inputParameters) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(output, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } 
+} + +func TestToUpdatePayload(t *testing.T) { + tests := []struct { + description string + input *Model + inputParameters *parametersModel + expected *logme.UpdateInstancePayload + isValid bool + }{ + { + "default_values", + &Model{}, + ¶metersModel{}, + &logme.UpdateInstancePayload{ + Parameters: &logme.InstanceParameters{}, + }, + true, + }, + { + "simple_values", + &Model{ + PlanId: types.StringValue("plan"), + }, + ¶metersModel{ + SgwAcl: types.StringValue("sgw"), + }, + &logme.UpdateInstancePayload{ + Parameters: &logme.InstanceParameters{ + SgwAcl: utils.Ptr("sgw"), + }, + PlanId: utils.Ptr("plan"), + }, + true, + }, + { + "null_fields_and_int_conversions", + &Model{ + PlanId: types.StringValue(""), + }, + ¶metersModel{ + SgwAcl: types.StringNull(), + }, + &logme.UpdateInstancePayload{ + Parameters: &logme.InstanceParameters{ + SgwAcl: nil, + }, + PlanId: utils.Ptr(""), + }, + true, + }, + { + "nil_model", + nil, + ¶metersModel{}, + nil, + false, + }, + { + "nil_parameters", + &Model{ + PlanId: types.StringValue("plan"), + }, + nil, + &logme.UpdateInstancePayload{ + PlanId: utils.Ptr("plan"), + }, + true, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + output, err := toUpdatePayload(tt.input, tt.inputParameters) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(output, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} diff --git a/stackit/services/logme/logme_acc_test.go b/stackit/services/logme/logme_acc_test.go new file mode 100644 index 00000000..e2035360 --- /dev/null +++ b/stackit/services/logme/logme_acc_test.go @@ -0,0 +1,241 @@ +package logme_test + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + 
"github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/logme" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/testutil" +) + +// Instance resource data +var instanceResource = map[string]string{ + "project_id": testutil.ProjectId, + "name": testutil.ResourceNameWithDateTime("logme"), + "plan_id": "7a54492c-8a2e-4d3c-b6c2-a4f20cb65912", + "sgw_acl-1": "192.168.0.0/16", + "sgw_acl-2": "192.168.0.0/24", +} + +func resourceConfig(acls string) string { + return fmt.Sprintf(` + %s + + resource "stackit_logme_instance" "instance" { + project_id = "%s" + name = "%s" + plan_id = "%s" + parameters = { + sgw_acl = "%s" + } + } + + resource "stackit_logme_credentials" "credentials" { + project_id = stackit_logme_instance.instance.project_id + instance_id = stackit_logme_instance.instance.instance_id + } + `, + testutil.LogMeProviderConfig(), + instanceResource["project_id"], + instanceResource["name"], + instanceResource["plan_id"], + acls, + ) +} +func TestAccLogMeResource(t *testing.T) { + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: testutil.TestAccProtoV6ProviderFactories, + CheckDestroy: testAccCheckLogMeDestroy, + Steps: []resource.TestStep{ + + // Creation + { + Config: resourceConfig(instanceResource["sgw_acl-1"]), + Check: resource.ComposeAggregateTestCheckFunc( + // Instance data + resource.TestCheckResourceAttr("stackit_logme_instance.instance", "project_id", instanceResource["project_id"]), + resource.TestCheckResourceAttrSet("stackit_logme_instance.instance", "instance_id"), + resource.TestCheckResourceAttr("stackit_logme_instance.instance", "plan_id", instanceResource["plan_id"]), + resource.TestCheckResourceAttr("stackit_logme_instance.instance", "name", instanceResource["name"]), + resource.TestCheckResourceAttr("stackit_logme_instance.instance", 
"parameters.sgw_acl", instanceResource["sgw_acl-1"]), + + // Credentials data + resource.TestCheckResourceAttrPair( + "stackit_logme_credentials.credentials", "project_id", + "stackit_logme_instance.instance", "project_id", + ), + resource.TestCheckResourceAttrPair( + "stackit_logme_credentials.credentials", "instance_id", + "stackit_logme_instance.instance", "instance_id", + ), + resource.TestCheckResourceAttrSet("stackit_logme_credentials.credentials", "credentials_id"), + resource.TestCheckResourceAttrSet("stackit_logme_credentials.credentials", "host"), + ), + }, + // Data source + { + Config: fmt.Sprintf(` + %s + + data "stackit_logme_instance" "instance" { + project_id = stackit_logme_instance.instance.project_id + instance_id = stackit_logme_instance.instance.instance_id + } + + data "stackit_logme_credentials" "credentials" { + project_id = stackit_logme_credentials.credentials.project_id + instance_id = stackit_logme_credentials.credentials.instance_id + credentials_id = stackit_logme_credentials.credentials.credentials_id + }`, + resourceConfig(instanceResource["sgw_acl-1"]), + ), + Check: resource.ComposeAggregateTestCheckFunc( + // Instance data + resource.TestCheckResourceAttr("data.stackit_logme_instance.instance", "project_id", instanceResource["project_id"]), + + resource.TestCheckResourceAttrPair("stackit_logme_instance.instance", "instance_id", + "data.stackit_logme_instance.instance", "instance_id"), + + resource.TestCheckResourceAttrPair("stackit_logme_credentials.credentials", "credentials_id", + "data.stackit_logme_credentials.credentials", "credentials_id"), + + resource.TestCheckResourceAttr("data.stackit_logme_instance.instance", "plan_id", instanceResource["plan_id"]), + + resource.TestCheckResourceAttr("data.stackit_logme_instance.instance", "name", instanceResource["name"]), + resource.TestCheckResourceAttr("data.stackit_logme_instance.instance", "parameters.sgw_acl", instanceResource["sgw_acl-1"]), + + // Credentials data + 
resource.TestCheckResourceAttr("data.stackit_logme_credentials.credentials", "project_id", instanceResource["project_id"]), + resource.TestCheckResourceAttrSet("data.stackit_logme_credentials.credentials", "credentials_id"), + resource.TestCheckResourceAttrSet("data.stackit_logme_credentials.credentials", "host"), + resource.TestCheckResourceAttrSet("data.stackit_logme_credentials.credentials", "port"), + resource.TestCheckResourceAttrSet("data.stackit_logme_credentials.credentials", "uri"), + ), + }, + // Import + { + ResourceName: "stackit_logme_instance.instance", + ImportStateIdFunc: func(s *terraform.State) (string, error) { + r, ok := s.RootModule().Resources["stackit_logme_instance.instance"] + if !ok { + return "", fmt.Errorf("couldn't find resource stackit_logme_instance.instance") + } + instanceId, ok := r.Primary.Attributes["instance_id"] + if !ok { + return "", fmt.Errorf("couldn't find attribute instance_id") + } + + return fmt.Sprintf("%s,%s", testutil.ProjectId, instanceId), nil + }, + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "stackit_logme_credentials.credentials", + ImportStateIdFunc: func(s *terraform.State) (string, error) { + r, ok := s.RootModule().Resources["stackit_logme_credentials.credentials"] + if !ok { + return "", fmt.Errorf("couldn't find resource stackit_logme_credentials.credentials") + } + instanceId, ok := r.Primary.Attributes["instance_id"] + if !ok { + return "", fmt.Errorf("couldn't find attribute instance_id") + } + credentialsId, ok := r.Primary.Attributes["credentials_id"] + if !ok { + return "", fmt.Errorf("couldn't find attribute credentials_id") + } + return fmt.Sprintf("%s,%s,%s", testutil.ProjectId, instanceId, credentialsId), nil + }, + ImportState: true, + ImportStateVerify: true, + }, + // Update + { + Config: resourceConfig(instanceResource["sgw_acl-2"]), + Check: resource.ComposeAggregateTestCheckFunc( + // Instance data + 
resource.TestCheckResourceAttr("stackit_logme_instance.instance", "project_id", instanceResource["project_id"]), + resource.TestCheckResourceAttrSet("stackit_logme_instance.instance", "instance_id"), + resource.TestCheckResourceAttr("stackit_logme_instance.instance", "plan_id", instanceResource["plan_id"]), + resource.TestCheckResourceAttr("stackit_logme_instance.instance", "name", instanceResource["name"]), + resource.TestCheckResourceAttr("stackit_logme_instance.instance", "parameters.sgw_acl", instanceResource["sgw_acl-2"]), + ), + }, + // Deletion is done by the framework implicitly + }, + }) +} + +func testAccCheckLogMeDestroy(s *terraform.State) error { + ctx := context.Background() + var client *logme.APIClient + var err error + if testutil.LogMeCustomEndpoint == "" { + client, err = logme.NewAPIClient() + } else { + client, err = logme.NewAPIClient( + config.WithEndpoint(testutil.LogMeCustomEndpoint), + ) + } + if err != nil { + return fmt.Errorf("creating client: %w", err) + } + + instancesToDestroy := []string{} + for _, rs := range s.RootModule().Resources { + if rs.Type != "stackit_logme_instance" { + continue + } + // instance terraform ID: "[project_id],[instance_id]" + instanceId := strings.Split(rs.Primary.ID, core.Separator)[1] + instancesToDestroy = append(instancesToDestroy, instanceId) + } + + instancesResp, err := client.GetInstances(ctx, testutil.ProjectId).Execute() + if err != nil { + return fmt.Errorf("getting instancesResp: %w", err) + } + + instances := *instancesResp.Instances + for i := range instances { + if instances[i].InstanceId == nil { + continue + } + if utils.Contains(instancesToDestroy, *instances[i].InstanceId) { + if !checkInstanceDeleteSuccess(&instances[i]) { + err := client.DeleteInstanceExecute(ctx, testutil.ProjectId, *instances[i].InstanceId) + if err != nil { + return fmt.Errorf("destroying instance %s during CheckDestroy: %w", *instances[i].InstanceId, err) + } + _, err = logme.DeleteInstanceWaitHandler(ctx, client, 
testutil.ProjectId, *instances[i].InstanceId).WaitWithContext(ctx) + if err != nil { + return fmt.Errorf("destroying instance %s during CheckDestroy: waiting for deletion %w", *instances[i].InstanceId, err) + } + } + } + } + return nil +} + +func checkInstanceDeleteSuccess(i *logme.Instance) bool { + if *i.LastOperation.Type != logme.InstanceTypeDelete { + return false + } + + if *i.LastOperation.Type == logme.InstanceTypeDelete { + if *i.LastOperation.State != logme.InstanceStateSuccess { + return false + } else if strings.Contains(*i.LastOperation.Description, "DeleteFailed") || strings.Contains(*i.LastOperation.Description, "failed") { + return false + } + } + return true +} diff --git a/stackit/services/mariadb/credentials/datasource.go b/stackit/services/mariadb/credentials/datasource.go new file mode 100644 index 00000000..09f22b14 --- /dev/null +++ b/stackit/services/mariadb/credentials/datasource.go @@ -0,0 +1,178 @@ +package mariadb + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" + + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/mariadb" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &credentialsDataSource{} +) + +// NewCredentialsDataSource is a helper function to simplify the provider implementation. +func NewCredentialsDataSource() datasource.DataSource { + return &credentialsDataSource{} +} + +// credentialsDataSource is the data source implementation. 
+type credentialsDataSource struct { + client *mariadb.APIClient +} + +// Metadata returns the resource type name. +func (r *credentialsDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_mariadb_credentials" +} + +// Configure adds the provider configured client to the resource. +func (r *credentialsDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Data Source Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. Please report this issue to the provider developers.", req.ProviderData)) + return + } + + var apiClient *mariadb.APIClient + var err error + if providerData.MariaDBCustomEndpoint != "" { + apiClient, err = mariadb.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.MariaDBCustomEndpoint), + ) + } else { + apiClient, err = mariadb.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + + tflog.Info(ctx, "Postgresql zone client configured") + r.client = apiClient +} + +// Schema defines the schema for the resource. 
+func (r *credentialsDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + descriptions := map[string]string{ + "main": "MariaDB credentials data source schema.", + "id": "Terraform's internal resource identifier.", + "credentials_id": "The credentials ID.", + "instance_id": "ID of the MariaDB instance.", + "project_id": "STACKIT project ID to which the instance is associated.", + } + + resp.Schema = schema.Schema{ + Description: descriptions["main"], + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: descriptions["id"], + Computed: true, + }, + "credentials_id": schema.StringAttribute{ + Description: descriptions["credentials_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "instance_id": schema.StringAttribute{ + Description: descriptions["instance_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "project_id": schema.StringAttribute{ + Description: descriptions["project_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "host": schema.StringAttribute{ + Computed: true, + }, + "hosts": schema.ListAttribute{ + ElementType: types.StringType, + Computed: true, + }, + "http_api_uri": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "password": schema.StringAttribute{ + Computed: true, + Sensitive: true, + }, + "port": schema.Int64Attribute{ + Computed: true, + }, + "uri": schema.StringAttribute{ + Computed: true, + }, + "username": schema.StringAttribute{ + Computed: true, + }, + }, + } +} + +// Read refreshes the Terraform state with the latest data. 
+func (r *credentialsDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.Config.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + credentialsId := model.CredentialsId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + ctx = tflog.SetField(ctx, "credentials_id", credentialsId) + + recordSetResp, err := r.client.GetCredentials(ctx, projectId, instanceId, credentialsId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading credentials", err.Error()) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(recordSetResp, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + + // Set refreshed state + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) 
+ tflog.Info(ctx, "MariaDB credentials read") +} diff --git a/stackit/services/mariadb/credentials/resource.go b/stackit/services/mariadb/credentials/resource.go new file mode 100644 index 00000000..7beb30e2 --- /dev/null +++ b/stackit/services/mariadb/credentials/resource.go @@ -0,0 +1,371 @@ +package mariadb + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/terraform-provider-stackit/stackit/conversion" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/mariadb" +) + +// Ensure the implementation satisfies the expected interfaces. 
+var ( + _ resource.Resource = &mariaDBCredentialsResource{} + _ resource.ResourceWithConfigure = &mariaDBCredentialsResource{} + _ resource.ResourceWithImportState = &mariaDBCredentialsResource{} +) + +type Model struct { + Id types.String `tfsdk:"id"` // needed by TF + CredentialsId types.String `tfsdk:"credentials_id"` + InstanceId types.String `tfsdk:"instance_id"` + ProjectId types.String `tfsdk:"project_id"` + Host types.String `tfsdk:"host"` + Hosts types.List `tfsdk:"hosts"` + HttpAPIURI types.String `tfsdk:"http_api_uri"` + Name types.String `tfsdk:"name"` + Password types.String `tfsdk:"password"` + Port types.Int64 `tfsdk:"port"` + Uri types.String `tfsdk:"uri"` + Username types.String `tfsdk:"username"` +} + +// NewPostgreSQLCredentialsResource is a helper function to simplify the provider implementation. +func NewCredentialsResource() resource.Resource { + return &mariaDBCredentialsResource{} +} + +// credentialsResource is the resource implementation. +type mariaDBCredentialsResource struct { + client *mariadb.APIClient +} + +// Metadata returns the resource type name. +func (r *mariaDBCredentialsResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_mariadb_credentials" +} + +// Configure adds the provider configured client to the resource. +func (r *mariaDBCredentialsResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Resource Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. 
Please report this issue to the provider developers.", req.ProviderData)) + return + } + + var apiClient *mariadb.APIClient + var err error + if providerData.MariaDBCustomEndpoint != "" { + apiClient, err = mariadb.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.MariaDBCustomEndpoint), + ) + } else { + apiClient, err = mariadb.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + + tflog.Info(ctx, "MariaDB client configured") + r.client = apiClient +} + +// Schema defines the schema for the resource. +func (r *mariaDBCredentialsResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + descriptions := map[string]string{ + "main": "MariaDB credentials resource schema.", + "id": "Terraform's internal resource identifier.", + "credentials_id": "The credentials ID.", + "instance_id": "ID of the MariaDB instance.", + "project_id": "STACKIT Project ID to which the instance is associated.", + } + + resp.Schema = schema.Schema{ + Description: descriptions["main"], + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: descriptions["id"], + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "credentials_id": schema.StringAttribute{ + Description: descriptions["credentials_id"], + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "instance_id": schema.StringAttribute{ + Description: descriptions["instance_id"], + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Validators: 
[]validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "project_id": schema.StringAttribute{ + Description: descriptions["project_id"], + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "host": schema.StringAttribute{ + Computed: true, + }, + "hosts": schema.ListAttribute{ + ElementType: types.StringType, + Computed: true, + }, + "http_api_uri": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "password": schema.StringAttribute{ + Computed: true, + Sensitive: true, + }, + "port": schema.Int64Attribute{ + Computed: true, + }, + "uri": schema.StringAttribute{ + Computed: true, + }, + "username": schema.StringAttribute{ + Computed: true, + }, + }, + } +} + +// Create creates the resource and sets the initial Terraform state. +func (r *mariaDBCredentialsResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + // Create new recordset + credentialsResp, err := r.client.CreateCredentials(ctx, projectId, instanceId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating credentials", fmt.Sprintf("Calling API: %v", err)) + return + } + if credentialsResp.Id == nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating credentials", "Got empty credentials id") + return + } + credentialsId := *credentialsResp.Id + ctx = tflog.SetField(ctx, "credentials_id", credentialsId) + + wr, err := mariadb.CreateCredentialsWaitHandler(ctx, r.client, projectId, instanceId, credentialsId).SetTimeout(1 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating credentials", fmt.Sprintf("Instance creation waiting: %v", err)) + return + } + got, ok := wr.(*mariadb.CredentialsResponse) + if !ok { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating credentials", fmt.Sprintf("Wait result conversion, got %+v", got)) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(got, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "MariaDB credentials created") +} + +// Read refreshes the Terraform state with the latest data. +func (r *mariaDBCredentialsResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + credentialsId := model.CredentialsId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + ctx = tflog.SetField(ctx, "credentials_id", credentialsId) + + recordSetResp, err := r.client.GetCredentials(ctx, projectId, instanceId, credentialsId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading credentials", err.Error()) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(recordSetResp, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + + // Set refreshed state + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "MariaDB credentials read") +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *mariaDBCredentialsResource) Update(_ context.Context, _ resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform + // Update shouldn't be called + resp.Diagnostics.AddError("Error updating credentials", "credentials can't be updated") +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *mariaDBCredentialsResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	projectId := model.ProjectId.ValueString()
+	instanceId := model.InstanceId.ValueString()
+	credentialsId := model.CredentialsId.ValueString()
+	ctx = tflog.SetField(ctx, "project_id", projectId)
+	ctx = tflog.SetField(ctx, "instance_id", instanceId)
+	ctx = tflog.SetField(ctx, "credentials_id", credentialsId)
+
+	// Delete existing credentials
+	err := r.client.DeleteCredentials(ctx, projectId, instanceId, credentialsId).Execute()
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting credentials", err.Error())
+		// Deletion failed; do not wait for a deletion that was never started.
+		return
+	}
+	_, err = mariadb.DeleteCredentialsWaitHandler(ctx, r.client, projectId, instanceId, credentialsId).SetTimeout(1 * time.Minute).WaitWithContext(ctx)
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting credentials", fmt.Sprintf("Credentials deletion waiting: %v", err))
+		return
+	}
+	tflog.Info(ctx, "MariaDB credentials deleted")
+}
+
+// ImportState imports a resource into the Terraform state on success.
+// The expected format of the resource import identifier is: project_id,instance_id,credentials_id
+func (r *mariaDBCredentialsResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
+	idParts := strings.Split(req.ID, core.Separator)
+	if len(idParts) != 3 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" {
+		core.LogAndAddError(ctx, &resp.Diagnostics,
+			"Unexpected Import Identifier",
+			fmt.Sprintf("Expected import identifier with format [project_id],[instance_id],[credentials_id], got %q", req.ID),
+		)
+		return
+	}
+
+	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...)
+	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), idParts[1])...)
+	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("credentials_id"), idParts[2])...)
+ tflog.Info(ctx, "Postgresql credentials state imported") +} + +func mapFields(credentialsResp *mariadb.CredentialsResponse, model *Model) error { + if credentialsResp == nil { + return fmt.Errorf("response input is nil") + } + if credentialsResp.Raw == nil { + return fmt.Errorf("response credentials raw is nil") + } + if model == nil { + return fmt.Errorf("model input is nil") + } + credentials := credentialsResp.Raw.Credentials + + var credentialsId string + if model.CredentialsId.ValueString() != "" { + credentialsId = model.CredentialsId.ValueString() + } else if credentialsResp.Id != nil { + credentialsId = *credentialsResp.Id + } else { + return fmt.Errorf("credentials id not present") + } + + idParts := []string{ + model.ProjectId.ValueString(), + model.InstanceId.ValueString(), + credentialsId, + } + model.Id = types.StringValue( + strings.Join(idParts, core.Separator), + ) + model.CredentialsId = types.StringValue(credentialsId) + model.Hosts = types.ListNull(types.StringType) + if credentials != nil { + if credentials.Hosts != nil { + var hosts []attr.Value + for _, host := range *credentials.Hosts { + hosts = append(hosts, types.StringValue(host)) + } + hostsList, diags := types.ListValue(types.StringType, hosts) + if diags.HasError() { + return fmt.Errorf("failed to map hosts: %w", core.DiagsToError(diags)) + } + model.Hosts = hostsList + } + model.Host = types.StringPointerValue(credentials.Host) + model.HttpAPIURI = types.StringPointerValue(credentials.HttpApiUri) + model.Name = types.StringPointerValue(credentials.Name) + model.Password = types.StringPointerValue(credentials.Password) + model.Port = conversion.ToTypeInt64(credentials.Port) + model.Uri = types.StringPointerValue(credentials.Uri) + model.Username = types.StringPointerValue(credentials.Username) + } + return nil +} diff --git a/stackit/services/mariadb/credentials/resource_test.go b/stackit/services/mariadb/credentials/resource_test.go new file mode 100644 index 00000000..01a050fd --- 
/dev/null +++ b/stackit/services/mariadb/credentials/resource_test.go @@ -0,0 +1,156 @@ +package mariadb + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/mariadb" +) + +func TestMapFields(t *testing.T) { + tests := []struct { + description string + input *mariadb.CredentialsResponse + expected Model + isValid bool + }{ + { + "default_values", + &mariadb.CredentialsResponse{ + Id: utils.Ptr("cid"), + Raw: &mariadb.RawCredentials{}, + }, + Model{ + Id: types.StringValue("pid,iid,cid"), + CredentialsId: types.StringValue("cid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Host: types.StringNull(), + Hosts: types.ListNull(types.StringType), + HttpAPIURI: types.StringNull(), + Name: types.StringNull(), + Password: types.StringNull(), + Port: types.Int64Null(), + Uri: types.StringNull(), + Username: types.StringNull(), + }, + true, + }, + { + "simple_values", + &mariadb.CredentialsResponse{ + Id: utils.Ptr("cid"), + Raw: &mariadb.RawCredentials{ + Credentials: &mariadb.Credentials{ + Host: utils.Ptr("host"), + Hosts: &[]string{ + "host_1", + "", + }, + HttpApiUri: utils.Ptr("http"), + Name: utils.Ptr("name"), + Password: utils.Ptr("password"), + Port: utils.Ptr(int32(1234)), + Uri: utils.Ptr("uri"), + Username: utils.Ptr("username"), + }, + }, + }, + Model{ + Id: types.StringValue("pid,iid,cid"), + CredentialsId: types.StringValue("cid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Host: types.StringValue("host"), + Hosts: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("host_1"), + types.StringValue(""), + }), + HttpAPIURI: types.StringValue("http"), + Name: types.StringValue("name"), + Password: types.StringValue("password"), + Port: 
types.Int64Value(1234), + Uri: types.StringValue("uri"), + Username: types.StringValue("username"), + }, + true, + }, + { + "null_fields_and_int_conversions", + &mariadb.CredentialsResponse{ + Id: utils.Ptr("cid"), + Raw: &mariadb.RawCredentials{ + Credentials: &mariadb.Credentials{ + Host: utils.Ptr(""), + Hosts: &[]string{}, + HttpApiUri: nil, + Name: nil, + Password: utils.Ptr(""), + Port: utils.Ptr(int32(2123456789)), + Uri: nil, + Username: utils.Ptr(""), + }, + }, + }, + Model{ + Id: types.StringValue("pid,iid,cid"), + CredentialsId: types.StringValue("cid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Host: types.StringValue(""), + Hosts: types.ListValueMust(types.StringType, []attr.Value{}), + HttpAPIURI: types.StringNull(), + Name: types.StringNull(), + Password: types.StringValue(""), + Port: types.Int64Value(2123456789), + Uri: types.StringNull(), + Username: types.StringValue(""), + }, + true, + }, + { + "nil_response", + nil, + Model{}, + false, + }, + { + "no_resource_id", + &mariadb.CredentialsResponse{}, + Model{}, + false, + }, + { + "nil_raw_credentials", + &mariadb.CredentialsResponse{ + Id: utils.Ptr("cid"), + }, + Model{}, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + model := &Model{ + ProjectId: tt.expected.ProjectId, + InstanceId: tt.expected.InstanceId, + } + err := mapFields(tt.input, model) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(model, &tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} diff --git a/stackit/services/mariadb/instance/datasource.go b/stackit/services/mariadb/instance/datasource.go new file mode 100644 index 00000000..82baa9c1 --- /dev/null +++ b/stackit/services/mariadb/instance/datasource.go @@ -0,0 +1,181 @@ +package mariadb + +import ( + "context" 
+ "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" + + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/mariadb" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &instanceDataSource{} +) + +// NewInstanceDataSource is a helper function to simplify the provider implementation. +func NewInstanceDataSource() datasource.DataSource { + return &instanceDataSource{} +} + +// instanceDataSource is the data source implementation. +type instanceDataSource struct { + client *mariadb.APIClient +} + +// Metadata returns the resource type name. +func (r *instanceDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_mariadb_instance" +} + +// Configure adds the provider configured client to the resource. +func (r *instanceDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Data Source Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. 
Please report this issue to the provider developers.", req.ProviderData)) + return + } + + var apiClient *mariadb.APIClient + var err error + if providerData.MariaDBCustomEndpoint != "" { + apiClient, err = mariadb.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.MariaDBCustomEndpoint), + ) + } else { + apiClient, err = mariadb.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + + tflog.Info(ctx, "MariaDB zone client configured") + r.client = apiClient +} + +// Schema defines the schema for the resource. +func (r *instanceDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + descriptions := map[string]string{ + "main": "MariaDB instance data source schema.", + "id": "Terraform's internal resource identifier.", + "instance_id": "ID of the MariaDB instance.", + "project_id": "STACKIT Project ID to which the instance is associated.", + "name": "Instance name.", + "version": "The service version.", + "plan_name": "The selected plan name.", + "plan_id": "The selected plan ID.", + } + + resp.Schema = schema.Schema{ + Description: descriptions["main"], + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: descriptions["id"], + Computed: true, + }, + "instance_id": schema.StringAttribute{ + Description: descriptions["instance_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "project_id": schema.StringAttribute{ + Description: descriptions["project_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "name": schema.StringAttribute{ + Description: descriptions["name"], + Computed: true, + }, + "version": schema.StringAttribute{ + Description: 
descriptions["version"], + Computed: true, + }, + "plan_name": schema.StringAttribute{ + Description: descriptions["plan_name"], + Computed: true, + }, + "plan_id": schema.StringAttribute{ + Description: descriptions["plan_id"], + Computed: true, + }, + "parameters": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "sgw_acl": schema.StringAttribute{ + Computed: true, + }, + }, + Computed: true, + }, + "cf_guid": schema.StringAttribute{ + Computed: true, + }, + "cf_space_guid": schema.StringAttribute{ + Computed: true, + }, + "dashboard_url": schema.StringAttribute{ + Computed: true, + }, + "image_url": schema.StringAttribute{ + Computed: true, + }, + "organization_guid": schema.StringAttribute{ + Computed: true, + }, + }, + } +} + +// Read refreshes the Terraform state with the latest data. +func (r *instanceDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var state Model + diags := req.Config.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + projectId := state.ProjectId.ValueString() + instanceId := state.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + instanceResp, err := r.client.GetInstance(ctx, projectId, instanceId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Unable to read instance", err.Error()) + return + } + + err = mapFields(instanceResp, &state) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Mapping fields", err.Error()) + return + } + // Set refreshed state + diags = resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ tflog.Info(ctx, "MariaDB instance read") +} diff --git a/stackit/services/mariadb/instance/resource.go b/stackit/services/mariadb/instance/resource.go new file mode 100644 index 00000000..6629edc3 --- /dev/null +++ b/stackit/services/mariadb/instance/resource.go @@ -0,0 +1,624 @@ +package mariadb + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" + + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/mariadb" +) + +// Ensure the implementation satisfies the expected interfaces. 
+var ( + _ resource.Resource = &instanceResource{} + _ resource.ResourceWithConfigure = &instanceResource{} + _ resource.ResourceWithImportState = &instanceResource{} +) + +type Model struct { + Id types.String `tfsdk:"id"` // needed by TF + InstanceId types.String `tfsdk:"instance_id"` + ProjectId types.String `tfsdk:"project_id"` + CfGuid types.String `tfsdk:"cf_guid"` + CfSpaceGuid types.String `tfsdk:"cf_space_guid"` + DashboardUrl types.String `tfsdk:"dashboard_url"` + ImageUrl types.String `tfsdk:"image_url"` + Name types.String `tfsdk:"name"` + OrganizationGuid types.String `tfsdk:"organization_guid"` + Parameters types.Object `tfsdk:"parameters"` + Version types.String `tfsdk:"version"` + PlanName types.String `tfsdk:"plan_name"` + PlanId types.String `tfsdk:"plan_id"` +} + +// Struct corresponding to DataSourceModel.Parameters +type parametersModel struct { + SgwAcl types.String `tfsdk:"sgw_acl"` +} + +// Types corresponding to parametersModel +var parametersTypes = map[string]attr.Type{ + "sgw_acl": basetypes.StringType{}, +} + +// NewInstanceResource is a helper function to simplify the provider implementation. +func NewInstanceResource() resource.Resource { + return &instanceResource{} +} + +// instanceResource is the resource implementation. +type instanceResource struct { + client *mariadb.APIClient +} + +// Metadata returns the resource type name. +func (r *instanceResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_mariadb_instance" +} + +// Configure adds the provider configured client to the resource. +func (r *instanceResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. 
+ if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Resource Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. Please report this issue to the provider developers.", req.ProviderData)) + return + } + + var apiClient *mariadb.APIClient + var err error + if providerData.MariaDBCustomEndpoint != "" { + apiClient, err = mariadb.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.MariaDBCustomEndpoint), + ) + } else { + apiClient, err = mariadb.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + + tflog.Info(ctx, "mariadb zone client configured") + r.client = apiClient +} + +// Schema defines the schema for the resource. +func (r *instanceResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + descriptions := map[string]string{ + "main": "MariaDB instance resource schema.", + "id": "Terraform's internal resource ID.", + "instance_id": "ID of the MariaDB instance.", + "project_id": "STACKIT project ID to which the instance is associated.", + "name": "Instance name.", + "version": "The service version.", + "plan_name": "The selected plan name.", + "plan_id": "The selected plan ID.", + } + + resp.Schema = schema.Schema{ + Description: descriptions["main"], + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: descriptions["id"], + Computed: true, + }, + "instance_id": schema.StringAttribute{ + Description: descriptions["instance_id"], + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "project_id": 
schema.StringAttribute{ + Description: descriptions["project_id"], + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "name": schema.StringAttribute{ + Description: descriptions["name"], + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), + }, + }, + "version": schema.StringAttribute{ + Description: descriptions["version"], + Required: true, + }, + "plan_name": schema.StringAttribute{ + Description: descriptions["plan_name"], + Required: true, + }, + "plan_id": schema.StringAttribute{ + Description: descriptions["plan_id"], + Computed: true, + }, + "parameters": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "sgw_acl": schema.StringAttribute{ + Optional: true, + Computed: true, + }, + }, + Optional: true, + Computed: true, + }, + "cf_guid": schema.StringAttribute{ + Computed: true, + }, + "cf_space_guid": schema.StringAttribute{ + Computed: true, + }, + "dashboard_url": schema.StringAttribute{ + Computed: true, + }, + "image_url": schema.StringAttribute{ + Computed: true, + }, + "organization_guid": schema.StringAttribute{ + Computed: true, + }, + }, + } +} + +// Create creates the resource and sets the initial Terraform state. +func (r *instanceResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + + r.loadPlanId(ctx, &resp.Diagnostics, &model) + if diags.HasError() { + core.LogAndAddError(ctx, &diags, "Failed to load MariaDB service plan", "plan "+model.PlanName.ValueString()) + return + } + + var parameters = ¶metersModel{} + if !(model.Parameters.IsNull() || model.Parameters.IsUnknown()) { + diags = model.Parameters.As(ctx, parameters, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + + // Generate API request body from model + payload, err := toCreatePayload(&model, parameters) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Creating API payload: %v", err)) + return + } + // Create new instance + createResp, err := r.client.CreateInstance(ctx, projectId).CreateInstancePayload(*payload).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Calling API: %v", err)) + return + } + instanceId := *createResp.InstanceId + ctx = tflog.SetField(ctx, "instance_id", instanceId) + wr, err := mariadb.CreateInstanceWaitHandler(ctx, r.client, projectId, instanceId).SetTimeout(15 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Instance creation waiting: %v", err)) + return + } + got, ok := wr.(*mariadb.Instance) + if !ok { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Wait result conversion, got %+v", got)) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(got, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + // Set state to fully populated data + diags = resp.State.Set(ctx, model) + 
resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "mariadb instance created") +} + +// Read refreshes the Terraform state with the latest data. +func (r *instanceResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var state Model + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := state.ProjectId.ValueString() + instanceId := state.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + instanceResp, err := r.client.GetInstance(ctx, projectId, instanceId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instances", err.Error()) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(instanceResp, &state) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + // Set refreshed state + diags = resp.State.Set(ctx, state) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "mariadb instance read") +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *instanceResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + r.loadPlanId(ctx, &resp.Diagnostics, &model) + if diags.HasError() { + core.LogAndAddError(ctx, &diags, "Failed to load MariaDB service plan", "plan "+model.PlanName.ValueString()) + return + } + + var parameters = ¶metersModel{} + if !(model.Parameters.IsNull() || model.Parameters.IsUnknown()) { + diags = model.Parameters.As(ctx, parameters, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + + // Generate API request body from model + payload, err := toUpdatePayload(&model, parameters) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Could not create API payload: %v", err)) + return + } + // Update existing instance + err = r.client.UpdateInstance(ctx, projectId, instanceId).UpdateInstancePayload(*payload).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", err.Error()) + return + } + wr, err := mariadb.UpdateInstanceWaitHandler(ctx, r.client, projectId, instanceId).SetTimeout(15 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Instance update waiting: %v", err)) + return + } + got, ok := wr.(*mariadb.Instance) + if !ok { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Wait result conversion, got %+v", got)) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(got, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields in update", err.Error()) + return + } + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) 
+ tflog.Info(ctx, "mariadb instance updated") +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *instanceResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + // Delete existing instance + err := r.client.DeleteInstance(ctx, projectId, instanceId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting instance", err.Error()) + return + } + _, err = mariadb.DeleteInstanceWaitHandler(ctx, r.client, projectId, instanceId).SetTimeout(15 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting instance", fmt.Sprintf("Instance deletion waiting: %v", err)) + return + } + tflog.Info(ctx, "mariadb instance deleted") +} + +// ImportState imports a resource into the Terraform state on success. +// The expected format of the resource import identifier is: project_id,instance_id +func (r *instanceResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + idParts := strings.Split(req.ID, core.Separator) + + if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { + resp.Diagnostics.AddError( + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format: [project_id],[instance_id] Got: %q", req.ID), + ) + return + } + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), idParts[1])...) 
+ tflog.Info(ctx, "MariaDB instance state imported") +} + +func mapFields(instance *mariadb.Instance, model *Model) error { + if instance == nil { + return fmt.Errorf("response input is nil") + } + if model == nil { + return fmt.Errorf("model input is nil") + } + + var instanceId string + if model.InstanceId.ValueString() != "" { + instanceId = model.InstanceId.ValueString() + } else if instance.InstanceId != nil { + instanceId = *instance.InstanceId + } else { + return fmt.Errorf("instance id not present") + } + + idParts := []string{ + model.ProjectId.ValueString(), + instanceId, + } + model.Id = types.StringValue( + strings.Join(idParts, core.Separator), + ) + model.InstanceId = types.StringValue(instanceId) + model.PlanId = types.StringPointerValue(instance.PlanId) + model.CfGuid = types.StringPointerValue(instance.CfGuid) + model.CfSpaceGuid = types.StringPointerValue(instance.CfSpaceGuid) + model.DashboardUrl = types.StringPointerValue(instance.DashboardUrl) + model.ImageUrl = types.StringPointerValue(instance.ImageUrl) + model.Name = types.StringPointerValue(instance.Name) + model.OrganizationGuid = types.StringPointerValue(instance.OrganizationGuid) + + if instance.Parameters == nil { + model.Parameters = types.ObjectNull(parametersTypes) + } else { + parameters, err := mapParameters(*instance.Parameters) + if err != nil { + return fmt.Errorf("mapping parameters: %w", err) + } + model.Parameters = parameters + } + return nil +} + +func mapParameters(params map[string]interface{}) (types.Object, error) { + attributes := map[string]attr.Value{} + for attribute := range parametersTypes { + valueInterface, ok := params[attribute] + if !ok { + // All fields are optional, so this is ok + // Set the value as nil, will be handled accordingly + valueInterface = nil + } + + var value attr.Value + switch parametersTypes[attribute].(type) { + default: + return types.ObjectNull(parametersTypes), fmt.Errorf("found unexpected attribute type '%T'", 
parametersTypes[attribute]) + case basetypes.StringType: + if valueInterface == nil { + value = types.StringNull() + } else { + valueString, ok := valueInterface.(string) + if !ok { + return types.ObjectNull(parametersTypes), fmt.Errorf("found attribute '%s' of type %T, failed to assert as string", attribute, valueInterface) + } + value = types.StringValue(valueString) + } + case basetypes.BoolType: + if valueInterface == nil { + value = types.BoolNull() + } else { + valueBool, ok := valueInterface.(bool) + if !ok { + return types.ObjectNull(parametersTypes), fmt.Errorf("found attribute '%s' of type %T, failed to assert as bool", attribute, valueInterface) + } + value = types.BoolValue(valueBool) + } + case basetypes.Int64Type: + if valueInterface == nil { + value = types.Int64Null() + } else { + // This may be int64, int32, int or float64 + // We try to assert all 4 + var valueInt64 int64 + switch temp := valueInterface.(type) { + default: + return types.ObjectNull(parametersTypes), fmt.Errorf("found attribute '%s' of type %T, failed to assert as int", attribute, valueInterface) + case int64: + valueInt64 = temp + case int32: + valueInt64 = int64(temp) + case int: + valueInt64 = int64(temp) + case float64: + valueInt64 = int64(temp) + } + value = types.Int64Value(valueInt64) + } + case basetypes.ListType: // Assumed to be a list of strings + if valueInterface == nil { + value = types.ListNull(types.StringType) + } else { + // This may be []string{} or []interface{} + // We try to assert all 2 + var valueList []attr.Value + switch temp := valueInterface.(type) { + default: + return types.ObjectNull(parametersTypes), fmt.Errorf("found attribute '%s' of type %T, failed to assert as array of interface", attribute, valueInterface) + case []string: + for _, x := range temp { + valueList = append(valueList, types.StringValue(x)) + } + case []interface{}: + for _, x := range temp { + xString, ok := x.(string) + if !ok { + return types.ObjectNull(parametersTypes), 
fmt.Errorf("found attribute '%s' with element '%s' of type %T, failed to assert as string", attribute, x, x) + } + valueList = append(valueList, types.StringValue(xString)) + } + } + temp2, diags := types.ListValue(types.StringType, valueList) + if diags.HasError() { + return types.ObjectNull(parametersTypes), fmt.Errorf("failed to map %s: %w", attribute, core.DiagsToError(diags)) + } + value = temp2 + } + } + attributes[attribute] = value + } + + output, diags := types.ObjectValue(parametersTypes, attributes) + if diags.HasError() { + return types.ObjectNull(parametersTypes), fmt.Errorf("failed to create object: %w", core.DiagsToError(diags)) + } + return output, nil +} + +func toCreatePayload(model *Model, parameters *parametersModel) (*mariadb.CreateInstancePayload, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + if parameters == nil { + return &mariadb.CreateInstancePayload{ + InstanceName: model.Name.ValueStringPointer(), + PlanId: model.PlanId.ValueStringPointer(), + }, nil + } + payloadParams := &mariadb.InstanceParameters{} + if parameters.SgwAcl.ValueString() != "" { + payloadParams.SgwAcl = parameters.SgwAcl.ValueStringPointer() + } + return &mariadb.CreateInstancePayload{ + InstanceName: model.Name.ValueStringPointer(), + Parameters: payloadParams, + PlanId: model.PlanId.ValueStringPointer(), + }, nil +} + +func toUpdatePayload(model *Model, parameters *parametersModel) (*mariadb.UpdateInstancePayload, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + + if parameters == nil { + return &mariadb.UpdateInstancePayload{ + PlanId: model.PlanId.ValueStringPointer(), + }, nil + } + return &mariadb.UpdateInstancePayload{ + Parameters: &mariadb.InstanceParameters{ + SgwAcl: parameters.SgwAcl.ValueStringPointer(), + }, + PlanId: model.PlanId.ValueStringPointer(), + }, nil +} + +func (r *instanceResource) loadPlanId(ctx context.Context, diags *diag.Diagnostics, model *Model) { + projectId := 
model.ProjectId.ValueString() + res, err := r.client.GetOfferings(ctx, projectId).Execute() + if err != nil { + diags.AddError("Failed to list MariaDB offerings", err.Error()) + return + } + + version := model.Version.ValueString() + planName := model.PlanName.ValueString() + availableVersions := "" + availablePlanNames := "" + isValidVersion := false + for _, offer := range *res.Offerings { + if !strings.EqualFold(*offer.Version, version) { + availableVersions = fmt.Sprintf("%s\n- %s", availableVersions, *offer.Version) + continue + } + isValidVersion = true + + for _, plan := range *offer.Plans { + if plan.Name == nil { + continue + } + if strings.EqualFold(*plan.Name, planName) && plan.Id != nil { + model.PlanId = types.StringPointerValue(plan.Id) + return + } + availablePlanNames = fmt.Sprintf("%s\n- %s", availablePlanNames, *plan.Name) + } + } + + if !isValidVersion { + diags.AddError("Invalid version", fmt.Sprintf("Couldn't find version '%s', available versions are:%s", version, availableVersions)) + return + } + diags.AddError("Invalid plan_name", fmt.Sprintf("Couldn't find plan_name '%s' for version %s, available names are:%s", planName, version, availablePlanNames)) +} diff --git a/stackit/services/mariadb/instance/resource_test.go b/stackit/services/mariadb/instance/resource_test.go new file mode 100644 index 00000000..e564ce53 --- /dev/null +++ b/stackit/services/mariadb/instance/resource_test.go @@ -0,0 +1,304 @@ +package mariadb + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/mariadb" +) + +func TestMapFields(t *testing.T) { + tests := []struct { + description string + input *mariadb.Instance + expected Model + isValid bool + }{ + { + "default_values", + &mariadb.Instance{}, + Model{ + Id: types.StringValue("pid,iid"), + 
InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + PlanId: types.StringNull(), + Name: types.StringNull(), + CfGuid: types.StringNull(), + CfSpaceGuid: types.StringNull(), + DashboardUrl: types.StringNull(), + ImageUrl: types.StringNull(), + OrganizationGuid: types.StringNull(), + Parameters: types.ObjectNull(parametersTypes), + }, + true, + }, + { + "simple_values", + &mariadb.Instance{ + PlanId: utils.Ptr("plan"), + CfGuid: utils.Ptr("cf"), + CfSpaceGuid: utils.Ptr("space"), + DashboardUrl: utils.Ptr("dashboard"), + ImageUrl: utils.Ptr("image"), + InstanceId: utils.Ptr("iid"), + Name: utils.Ptr("name"), + OrganizationGuid: utils.Ptr("org"), + Parameters: &map[string]interface{}{ + "sgw_acl": "acl", + }, + }, + Model{ + Id: types.StringValue("pid,iid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + PlanId: types.StringValue("plan"), + Name: types.StringValue("name"), + CfGuid: types.StringValue("cf"), + CfSpaceGuid: types.StringValue("space"), + DashboardUrl: types.StringValue("dashboard"), + ImageUrl: types.StringValue("image"), + OrganizationGuid: types.StringValue("org"), + Parameters: types.ObjectValueMust(parametersTypes, map[string]attr.Value{ + "sgw_acl": types.StringValue("acl"), + }), + }, + true, + }, + { + "nil_response", + nil, + Model{}, + false, + }, + { + "no_resource_id", + &mariadb.Instance{}, + Model{}, + false, + }, + { + "wrong_param_types_1", + &mariadb.Instance{ + Parameters: &map[string]interface{}{ + "sgw_acl": true, + }, + }, + Model{}, + false, + }, + { + "wrong_param_types_2", + &mariadb.Instance{ + Parameters: &map[string]interface{}{ + "sgw_acl": 1, + }, + }, + Model{}, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + state := &Model{ + ProjectId: tt.expected.ProjectId, + InstanceId: tt.expected.InstanceId, + } + err := mapFields(tt.input, state) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if 
tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(state, &tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func TestToCreatePayload(t *testing.T) { + tests := []struct { + description string + input *Model + inputParameters *parametersModel + expected *mariadb.CreateInstancePayload + isValid bool + }{ + { + "default_values", + &Model{}, + ¶metersModel{}, + &mariadb.CreateInstancePayload{ + Parameters: &mariadb.InstanceParameters{}, + }, + true, + }, + { + "simple_values", + &Model{ + Name: types.StringValue("name"), + PlanId: types.StringValue("plan"), + }, + ¶metersModel{ + SgwAcl: types.StringValue("sgw"), + }, + &mariadb.CreateInstancePayload{ + InstanceName: utils.Ptr("name"), + Parameters: &mariadb.InstanceParameters{ + SgwAcl: utils.Ptr("sgw"), + }, + PlanId: utils.Ptr("plan"), + }, + true, + }, + { + "null_fields_and_int_conversions", + &Model{ + Name: types.StringValue(""), + PlanId: types.StringValue(""), + }, + ¶metersModel{ + SgwAcl: types.StringNull(), + }, + &mariadb.CreateInstancePayload{ + InstanceName: utils.Ptr(""), + Parameters: &mariadb.InstanceParameters{ + SgwAcl: nil, + }, + PlanId: utils.Ptr(""), + }, + true, + }, + { + "nil_model", + nil, + ¶metersModel{}, + nil, + false, + }, + { + "nil_parameters", + &Model{ + Name: types.StringValue("name"), + PlanId: types.StringValue("plan"), + }, + nil, + &mariadb.CreateInstancePayload{ + InstanceName: utils.Ptr("name"), + PlanId: utils.Ptr("plan"), + }, + true, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + output, err := toCreatePayload(tt.input, tt.inputParameters) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(output, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} 
+ +func TestToUpdatePayload(t *testing.T) { + tests := []struct { + description string + input *Model + inputParameters *parametersModel + expected *mariadb.UpdateInstancePayload + isValid bool + }{ + { + "default_values", + &Model{}, + ¶metersModel{}, + &mariadb.UpdateInstancePayload{ + Parameters: &mariadb.InstanceParameters{}, + }, + true, + }, + { + "simple_values", + &Model{ + PlanId: types.StringValue("plan"), + }, + ¶metersModel{ + SgwAcl: types.StringValue("sgw"), + }, + &mariadb.UpdateInstancePayload{ + Parameters: &mariadb.InstanceParameters{ + SgwAcl: utils.Ptr("sgw"), + }, + PlanId: utils.Ptr("plan"), + }, + true, + }, + { + "null_fields_and_int_conversions", + &Model{ + PlanId: types.StringValue(""), + }, + ¶metersModel{ + SgwAcl: types.StringNull(), + }, + &mariadb.UpdateInstancePayload{ + Parameters: &mariadb.InstanceParameters{ + SgwAcl: nil, + }, + PlanId: utils.Ptr(""), + }, + true, + }, + { + "nil_model", + nil, + ¶metersModel{}, + nil, + false, + }, + { + "nil_parameters", + &Model{ + PlanId: types.StringValue("plan"), + }, + nil, + &mariadb.UpdateInstancePayload{ + PlanId: utils.Ptr("plan"), + }, + true, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + output, err := toUpdatePayload(tt.input, tt.inputParameters) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(output, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} diff --git a/stackit/services/mariadb/mariadb_acc_test.go b/stackit/services/mariadb/mariadb_acc_test.go new file mode 100644 index 00000000..0c22185f --- /dev/null +++ b/stackit/services/mariadb/mariadb_acc_test.go @@ -0,0 +1,241 @@ +package mariadb_test + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + 
"github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/mariadb" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/testutil" +) + +// Instance resource data +var instanceResource = map[string]string{ + "project_id": testutil.ProjectId, + "name": testutil.ResourceNameWithDateTime("mariadb"), + "plan_id": "683be856-3587-42de-b1b5-a792ff854f52", + "sgw_acl-1": "192.168.0.0/16", + "sgw_acl-2": "192.168.0.0/24", +} + +func resourceConfig(acls string) string { + return fmt.Sprintf(` + %s + + resource "stackit_mariadb_instance" "instance" { + project_id = "%s" + name = "%s" + plan_id = "%s" + parameters = { + sgw_acl = "%s" + } + } + + resource "stackit_mariadb_credentials" "credentials" { + project_id = stackit_mariadb_instance.instance.project_id + instance_id = stackit_mariadb_instance.instance.instance_id + } + `, + testutil.MariaDBProviderConfig(), + instanceResource["project_id"], + instanceResource["name"], + instanceResource["plan_id"], + acls, + ) +} +func TestAccMariaDBResource(t *testing.T) { + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: testutil.TestAccProtoV6ProviderFactories, + CheckDestroy: testAccCheckMariaDBDestroy, + Steps: []resource.TestStep{ + + // Creation + { + Config: resourceConfig(instanceResource["sgw_acl-1"]), + Check: resource.ComposeAggregateTestCheckFunc( + // Instance data + resource.TestCheckResourceAttr("stackit_mariadb_instance.instance", "project_id", instanceResource["project_id"]), + resource.TestCheckResourceAttrSet("stackit_mariadb_instance.instance", "instance_id"), + resource.TestCheckResourceAttr("stackit_mariadb_instance.instance", "plan_id", instanceResource["plan_id"]), + resource.TestCheckResourceAttr("stackit_mariadb_instance.instance", "name", 
instanceResource["name"]), + resource.TestCheckResourceAttr("stackit_mariadb_instance.instance", "parameters.sgw_acl", instanceResource["sgw_acl-1"]), + + // Credentials data + resource.TestCheckResourceAttrPair( + "stackit_mariadb_credentials.credentials", "project_id", + "stackit_mariadb_instance.instance", "project_id", + ), + resource.TestCheckResourceAttrPair( + "stackit_mariadb_credentials.credentials", "instance_id", + "stackit_mariadb_instance.instance", "instance_id", + ), + resource.TestCheckResourceAttrSet("stackit_mariadb_credentials.credentials", "credentials_id"), + resource.TestCheckResourceAttrSet("stackit_mariadb_credentials.credentials", "host"), + ), + }, + // Data source + { + Config: fmt.Sprintf(` + %s + + data "stackit_mariadb_instance" "instance" { + project_id = stackit_mariadb_instance.instance.project_id + instance_id = stackit_mariadb_instance.instance.instance_id + } + + data "stackit_mariadb_credentials" "credentials" { + project_id = stackit_mariadb_credentials.credentials.project_id + instance_id = stackit_mariadb_credentials.credentials.instance_id + credentials_id = stackit_mariadb_credentials.credentials.credentials_id + }`, + resourceConfig(instanceResource["sgw_acl-1"]), + ), + Check: resource.ComposeAggregateTestCheckFunc( + // Instance data + resource.TestCheckResourceAttr("data.stackit_mariadb_instance.instance", "project_id", instanceResource["project_id"]), + + resource.TestCheckResourceAttrPair("stackit_mariadb_instance.instance", "instance_id", + "data.stackit_mariadb_instance.instance", "instance_id"), + + resource.TestCheckResourceAttrPair("stackit_mariadb_credentials.credentials", "credentials_id", + "data.stackit_mariadb_credentials.credentials", "credentials_id"), + + resource.TestCheckResourceAttr("data.stackit_mariadb_instance.instance", "plan_id", instanceResource["plan_id"]), + + resource.TestCheckResourceAttr("data.stackit_mariadb_instance.instance", "name", instanceResource["name"]), + 
resource.TestCheckResourceAttr("data.stackit_mariadb_instance.instance", "parameters.sgw_acl", instanceResource["sgw_acl-1"]), + + // Credentials data + resource.TestCheckResourceAttr("data.stackit_mariadb_credentials.credentials", "project_id", instanceResource["project_id"]), + resource.TestCheckResourceAttrSet("data.stackit_mariadb_credentials.credentials", "credentials_id"), + resource.TestCheckResourceAttrSet("data.stackit_mariadb_credentials.credentials", "host"), + resource.TestCheckResourceAttrSet("data.stackit_mariadb_credentials.credentials", "port"), + resource.TestCheckResourceAttrSet("data.stackit_mariadb_credentials.credentials", "uri"), + ), + }, + // Import + { + ResourceName: "stackit_mariadb_instance.instance", + ImportStateIdFunc: func(s *terraform.State) (string, error) { + r, ok := s.RootModule().Resources["stackit_mariadb_instance.instance"] + if !ok { + return "", fmt.Errorf("couldn't find resource stackit_mariadb_instance.instance") + } + instanceId, ok := r.Primary.Attributes["instance_id"] + if !ok { + return "", fmt.Errorf("couldn't find attribute instance_id") + } + + return fmt.Sprintf("%s,%s", testutil.ProjectId, instanceId), nil + }, + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "stackit_mariadb_credentials.credentials", + ImportStateIdFunc: func(s *terraform.State) (string, error) { + r, ok := s.RootModule().Resources["stackit_mariadb_credentials.credentials"] + if !ok { + return "", fmt.Errorf("couldn't find resource stackit_mariadb_credentials.credentials") + } + instanceId, ok := r.Primary.Attributes["instance_id"] + if !ok { + return "", fmt.Errorf("couldn't find attribute instance_id") + } + credentialsId, ok := r.Primary.Attributes["credentials_id"] + if !ok { + return "", fmt.Errorf("couldn't find attribute credentials_id") + } + return fmt.Sprintf("%s,%s,%s", testutil.ProjectId, instanceId, credentialsId), nil + }, + ImportState: true, + ImportStateVerify: true, + }, + // Update + { + Config: 
resourceConfig(instanceResource["sgw_acl-2"]), + Check: resource.ComposeAggregateTestCheckFunc( + // Instance data + resource.TestCheckResourceAttr("stackit_mariadb_instance.instance", "project_id", instanceResource["project_id"]), + resource.TestCheckResourceAttrSet("stackit_mariadb_instance.instance", "instance_id"), + resource.TestCheckResourceAttr("stackit_mariadb_instance.instance", "plan_id", instanceResource["plan_id"]), + resource.TestCheckResourceAttr("stackit_mariadb_instance.instance", "name", instanceResource["name"]), + resource.TestCheckResourceAttr("stackit_mariadb_instance.instance", "parameters.sgw_acl", instanceResource["sgw_acl-2"]), + ), + }, + // Deletion is done by the framework implicitly + }, + }) +} + +func testAccCheckMariaDBDestroy(s *terraform.State) error { + ctx := context.Background() + var client *mariadb.APIClient + var err error + if testutil.MariaDBCustomEndpoint == "" { + client, err = mariadb.NewAPIClient() + } else { + client, err = mariadb.NewAPIClient( + config.WithEndpoint(testutil.MariaDBCustomEndpoint), + ) + } + if err != nil { + return fmt.Errorf("creating client: %w", err) + } + + instancesToDestroy := []string{} + for _, rs := range s.RootModule().Resources { + if rs.Type != "stackit_mariadb_instance" { + continue + } + // instance terraform ID: "[project_id],[instance_id]" + instanceId := strings.Split(rs.Primary.ID, core.Separator)[1] + instancesToDestroy = append(instancesToDestroy, instanceId) + } + + instancesResp, err := client.GetInstances(ctx, testutil.ProjectId).Execute() + if err != nil { + return fmt.Errorf("getting instancesResp: %w", err) + } + + instances := *instancesResp.Instances + for i := range instances { + if instances[i].InstanceId == nil { + continue + } + if utils.Contains(instancesToDestroy, *instances[i].InstanceId) { + if !checkInstanceDeleteSuccess(&instances[i]) { + err := client.DeleteInstanceExecute(ctx, testutil.ProjectId, *instances[i].InstanceId) + if err != nil { + return 
fmt.Errorf("destroying instance %s during CheckDestroy: %w", *instances[i].InstanceId, err) + } + _, err = mariadb.DeleteInstanceWaitHandler(ctx, client, testutil.ProjectId, *instances[i].InstanceId).WaitWithContext(ctx) + if err != nil { + return fmt.Errorf("destroying instance %s during CheckDestroy: waiting for deletion %w", *instances[i].InstanceId, err) + } + } + } + } + return nil +} + +func checkInstanceDeleteSuccess(i *mariadb.Instance) bool { + if *i.LastOperation.Type != mariadb.InstanceTypeDelete { + return false + } + + if *i.LastOperation.Type == mariadb.InstanceTypeDelete { + if *i.LastOperation.State != mariadb.InstanceStateSuccess { + return false + } else if strings.Contains(*i.LastOperation.Description, "DeleteFailed") || strings.Contains(*i.LastOperation.Description, "failed") { + return false + } + } + return true +} diff --git a/stackit/services/opensearch/credentials/datasource.go b/stackit/services/opensearch/credentials/datasource.go new file mode 100644 index 00000000..dd507f82 --- /dev/null +++ b/stackit/services/opensearch/credentials/datasource.go @@ -0,0 +1,178 @@ +package opensearch + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" + + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/opensearch" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &credentialsDataSource{} +) + +// NewCredentialsDataSource is a helper function to simplify the provider implementation. 
+func NewCredentialsDataSource() datasource.DataSource { + return &credentialsDataSource{} +} + +// credentialsDataSource is the data source implementation. +type credentialsDataSource struct { + client *opensearch.APIClient +} + +// Metadata returns the resource type name. +func (r *credentialsDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_opensearch_credentials" +} + +// Configure adds the provider configured client to the resource. +func (r *credentialsDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Data Source Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. Please report this issue to the provider developers.", req.ProviderData)) + return + } + + var apiClient *opensearch.APIClient + var err error + if providerData.OpenSearchCustomEndpoint != "" { + apiClient, err = opensearch.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.OpenSearchCustomEndpoint), + ) + } else { + apiClient, err = opensearch.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + + tflog.Info(ctx, "Postgresql zone client configured") + r.client = apiClient +} + +// Schema defines the schema for the resource. 
func (r *credentialsDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	// Human-readable descriptions for the documented attributes; connection
	// attributes below (host, port, uri, ...) currently have no description.
	descriptions := map[string]string{
		"main":           "OpenSearch credentials data source schema.",
		"id":             "Terraform's internal resource identifier.",
		"credentials_id": "The credentials ID.",
		"instance_id":    "ID of the OpenSearch instance.",
		"project_id":     "STACKIT project ID to which the instance is associated.",
	}

	resp.Schema = schema.Schema{
		Description: descriptions["main"],
		Attributes: map[string]schema.Attribute{
			"id": schema.StringAttribute{
				Description: descriptions["id"],
				Computed:    true,
			},
			// The three identifiers must be supplied by the user and are
			// validated to be UUIDs without the internal ID separator.
			"credentials_id": schema.StringAttribute{
				Description: descriptions["credentials_id"],
				Required:    true,
				Validators: []validator.String{
					validate.UUID(),
					validate.NoSeparator(),
				},
			},
			"instance_id": schema.StringAttribute{
				Description: descriptions["instance_id"],
				Required:    true,
				Validators: []validator.String{
					validate.UUID(),
					validate.NoSeparator(),
				},
			},
			"project_id": schema.StringAttribute{
				Description: descriptions["project_id"],
				Required:    true,
				Validators: []validator.String{
					validate.UUID(),
					validate.NoSeparator(),
				},
			},
			// Connection details returned by the API; all computed (read-only).
			"host": schema.StringAttribute{
				Computed: true,
			},
			"hosts": schema.ListAttribute{
				ElementType: types.StringType,
				Computed:    true,
			},
			"http_api_uri": schema.StringAttribute{
				Computed: true,
			},
			"name": schema.StringAttribute{
				Computed: true,
			},
			// Marked sensitive so the password is redacted in CLI output.
			"password": schema.StringAttribute{
				Computed:  true,
				Sensitive: true,
			},
			"port": schema.Int64Attribute{
				Computed: true,
			},
			"uri": schema.StringAttribute{
				Computed: true,
			},
			"username": schema.StringAttribute{
				Computed: true,
			},
		},
	}
}

// Read refreshes the Terraform state with the latest data.
+func (r *credentialsDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.Config.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + credentialsId := model.CredentialsId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + ctx = tflog.SetField(ctx, "credentials_id", credentialsId) + + recordSetResp, err := r.client.GetCredentials(ctx, projectId, instanceId, credentialsId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading credentials", err.Error()) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(recordSetResp, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + + // Set refreshed state + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) 
+ tflog.Info(ctx, "Postgresql credentials read") +} diff --git a/stackit/services/opensearch/credentials/resource.go b/stackit/services/opensearch/credentials/resource.go new file mode 100644 index 00000000..aeb4b0cd --- /dev/null +++ b/stackit/services/opensearch/credentials/resource.go @@ -0,0 +1,371 @@ +package opensearch + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/terraform-provider-stackit/stackit/conversion" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/opensearch" +) + +// Ensure the implementation satisfies the expected interfaces. 
+var ( + _ resource.Resource = &openSearchCredentialsResource{} + _ resource.ResourceWithConfigure = &openSearchCredentialsResource{} + _ resource.ResourceWithImportState = &openSearchCredentialsResource{} +) + +type Model struct { + Id types.String `tfsdk:"id"` // needed by TF + CredentialsId types.String `tfsdk:"credentials_id"` + InstanceId types.String `tfsdk:"instance_id"` + ProjectId types.String `tfsdk:"project_id"` + Host types.String `tfsdk:"host"` + Hosts types.List `tfsdk:"hosts"` + HttpAPIURI types.String `tfsdk:"http_api_uri"` + Name types.String `tfsdk:"name"` + Password types.String `tfsdk:"password"` + Port types.Int64 `tfsdk:"port"` + Uri types.String `tfsdk:"uri"` + Username types.String `tfsdk:"username"` +} + +// NewCredentialsResource is a helper function to simplify the provider implementation. +func NewCredentialsResource() resource.Resource { + return &openSearchCredentialsResource{} +} + +// credentialsResource is the resource implementation. +type openSearchCredentialsResource struct { + client *opensearch.APIClient +} + +// Metadata returns the resource type name. +func (r *openSearchCredentialsResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_opensearch_credentials" +} + +// Configure adds the provider configured client to the resource. +func (r *openSearchCredentialsResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Resource Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. 
Please report this issue to the provider developers.", req.ProviderData)) + return + } + + var apiClient *opensearch.APIClient + var err error + if providerData.OpenSearchCustomEndpoint != "" { + apiClient, err = opensearch.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.OpenSearchCustomEndpoint), + ) + } else { + apiClient, err = opensearch.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + + tflog.Info(ctx, "OpenSearch zone client configured") + r.client = apiClient +} + +// Schema defines the schema for the resource. +func (r *openSearchCredentialsResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + descriptions := map[string]string{ + "main": "OpenSearch credentials resource schema.", + "id": "Terraform's internal resource identifier.", + "credentials_id": "The credentials ID.", + "instance_id": "ID of the OpenSearch instance.", + "project_id": "STACKIT Project ID to which the instance is associated.", + } + + resp.Schema = schema.Schema{ + Description: descriptions["main"], + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: descriptions["id"], + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "credentials_id": schema.StringAttribute{ + Description: descriptions["credentials_id"], + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "instance_id": schema.StringAttribute{ + Description: descriptions["instance_id"], + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), 
+ }, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "project_id": schema.StringAttribute{ + Description: descriptions["project_id"], + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "host": schema.StringAttribute{ + Computed: true, + }, + "hosts": schema.ListAttribute{ + ElementType: types.StringType, + Computed: true, + }, + "http_api_uri": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "password": schema.StringAttribute{ + Computed: true, + Sensitive: true, + }, + "port": schema.Int64Attribute{ + Computed: true, + }, + "uri": schema.StringAttribute{ + Computed: true, + }, + "username": schema.StringAttribute{ + Computed: true, + }, + }, + } +} + +// Create creates the resource and sets the initial Terraform state. +func (r *openSearchCredentialsResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + // Create new recordset + credentialsResp, err := r.client.CreateCredentials(ctx, projectId, instanceId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating credentials", fmt.Sprintf("Calling API: %v", err)) + return + } + if credentialsResp.Id == nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating credentials", "Got empty credentials id") + return + } + credentialsId := *credentialsResp.Id + ctx = tflog.SetField(ctx, "credentials_id", credentialsId) + + wr, err := opensearch.CreateCredentialsWaitHandler(ctx, r.client, projectId, instanceId, credentialsId).SetTimeout(1 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating credentials", fmt.Sprintf("Instance creation waiting: %v", err)) + return + } + got, ok := wr.(*opensearch.CredentialsResponse) + if !ok { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating credentials", fmt.Sprintf("Wait result conversion, got %+v", got)) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(got, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "OpenSearch credentials created") +} + +// Read refreshes the Terraform state with the latest data. +func (r *openSearchCredentialsResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + credentialsId := model.CredentialsId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + ctx = tflog.SetField(ctx, "credentials_id", credentialsId) + + recordSetResp, err := r.client.GetCredentials(ctx, projectId, instanceId, credentialsId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading credentials", err.Error()) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(recordSetResp, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + + // Set refreshed state + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "OpenSearch credentials read") +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *openSearchCredentialsResource) Update(_ context.Context, _ resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform + // Update shouldn't be called + resp.Diagnostics.AddError("Error updating credentials", "credentials can't be updated") +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *openSearchCredentialsResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + credentialsId := model.CredentialsId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + ctx = tflog.SetField(ctx, "credentials_id", credentialsId) + + // Delete existing record set + err := r.client.DeleteCredentials(ctx, projectId, instanceId, credentialsId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting credentials", err.Error()) + } + _, err = opensearch.DeleteCredentialsWaitHandler(ctx, r.client, projectId, instanceId, credentialsId).SetTimeout(1 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting credentials", fmt.Sprintf("Instance deletion waiting: %v", err)) + return + } + tflog.Info(ctx, "OpenSearch credentials deleted") +} + +// ImportState imports a resource into the Terraform state on success. +// The expected format of the resource import identifier is: project_id,instance_id,credentials_id +func (r *openSearchCredentialsResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + idParts := strings.Split(req.ID, core.Separator) + if len(idParts) != 3 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" { + core.LogAndAddError(ctx, &resp.Diagnostics, + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format [project_id],[instance_id],[credentials_id], got %q", req.ID), + ) + return + } + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), idParts[1])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("credentials_id"), idParts[2])...) 
	tflog.Info(ctx, "OpenSearch credentials state imported")
}

// mapFields copies the API credentials response into the Terraform model,
// populating the composite resource ID and all computed connection attributes.
// Returns an error if the response, its raw credentials, or the model is nil,
// or if no credentials ID can be determined.
func mapFields(credentialsResp *opensearch.CredentialsResponse, model *Model) error {
	if credentialsResp == nil {
		return fmt.Errorf("response input is nil")
	}
	if credentialsResp.Raw == nil {
		return fmt.Errorf("response credentials raw is nil")
	}
	if model == nil {
		return fmt.Errorf("model input is nil")
	}
	credentials := credentialsResp.Raw.Credentials

	// Prefer the ID already present in state (e.g. on Read); fall back to the
	// ID returned by the API (e.g. on Create).
	var credentialsId string
	if model.CredentialsId.ValueString() != "" {
		credentialsId = model.CredentialsId.ValueString()
	} else if credentialsResp.Id != nil {
		credentialsId = *credentialsResp.Id
	} else {
		return fmt.Errorf("credentials id not present")
	}

	// Terraform's internal ID is "[project_id],[instance_id],[credentials_id]".
	idParts := []string{
		model.ProjectId.ValueString(),
		model.InstanceId.ValueString(),
		credentialsId,
	}
	model.Id = types.StringValue(
		strings.Join(idParts, core.Separator),
	)
	model.CredentialsId = types.StringValue(credentialsId)
	// Default to a null list; overwritten below when the API returns hosts.
	model.Hosts = types.ListNull(types.StringType)
	if credentials != nil {
		if credentials.Hosts != nil {
			var hosts []attr.Value
			for _, host := range *credentials.Hosts {
				hosts = append(hosts, types.StringValue(host))
			}
			hostsList, diags := types.ListValue(types.StringType, hosts)
			if diags.HasError() {
				return fmt.Errorf("failed to map hosts: %w", core.DiagsToError(diags))
			}
			model.Hosts = hostsList
		}
		// Pointer-valued API fields map to null when absent.
		model.Host = types.StringPointerValue(credentials.Host)
		model.HttpAPIURI = types.StringPointerValue(credentials.HttpApiUri)
		model.Name = types.StringPointerValue(credentials.Name)
		model.Password = types.StringPointerValue(credentials.Password)
		model.Port = conversion.ToTypeInt64(credentials.Port)
		model.Uri = types.StringPointerValue(credentials.Uri)
		model.Username = types.StringPointerValue(credentials.Username)
	}
	return nil
}
diff --git a/stackit/services/opensearch/credentials/resource_test.go b/stackit/services/opensearch/credentials/resource_test.go
new file mode 100644
index
00000000..bf890038 --- /dev/null +++ b/stackit/services/opensearch/credentials/resource_test.go @@ -0,0 +1,156 @@ +package opensearch + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/opensearch" +) + +func TestMapFields(t *testing.T) { + tests := []struct { + description string + input *opensearch.CredentialsResponse + expected Model + isValid bool + }{ + { + "default_values", + &opensearch.CredentialsResponse{ + Id: utils.Ptr("cid"), + Raw: &opensearch.RawCredentials{}, + }, + Model{ + Id: types.StringValue("pid,iid,cid"), + CredentialsId: types.StringValue("cid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Host: types.StringNull(), + Hosts: types.ListNull(types.StringType), + HttpAPIURI: types.StringNull(), + Name: types.StringNull(), + Password: types.StringNull(), + Port: types.Int64Null(), + Uri: types.StringNull(), + Username: types.StringNull(), + }, + true, + }, + { + "simple_values", + &opensearch.CredentialsResponse{ + Id: utils.Ptr("cid"), + Raw: &opensearch.RawCredentials{ + Credentials: &opensearch.Credentials{ + Host: utils.Ptr("host"), + Hosts: &[]string{ + "host_1", + "", + }, + HttpApiUri: utils.Ptr("http"), + Name: utils.Ptr("name"), + Password: utils.Ptr("password"), + Port: utils.Ptr(int32(1234)), + Uri: utils.Ptr("uri"), + Username: utils.Ptr("username"), + }, + }, + }, + Model{ + Id: types.StringValue("pid,iid,cid"), + CredentialsId: types.StringValue("cid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Host: types.StringValue("host"), + Hosts: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("host_1"), + types.StringValue(""), + }), + HttpAPIURI: types.StringValue("http"), + Name: types.StringValue("name"), + Password: 
types.StringValue("password"), + Port: types.Int64Value(1234), + Uri: types.StringValue("uri"), + Username: types.StringValue("username"), + }, + true, + }, + { + "null_fields_and_int_conversions", + &opensearch.CredentialsResponse{ + Id: utils.Ptr("cid"), + Raw: &opensearch.RawCredentials{ + Credentials: &opensearch.Credentials{ + Host: utils.Ptr(""), + Hosts: &[]string{}, + HttpApiUri: nil, + Name: nil, + Password: utils.Ptr(""), + Port: utils.Ptr(int32(2123456789)), + Uri: nil, + Username: utils.Ptr(""), + }, + }, + }, + Model{ + Id: types.StringValue("pid,iid,cid"), + CredentialsId: types.StringValue("cid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Host: types.StringValue(""), + Hosts: types.ListValueMust(types.StringType, []attr.Value{}), + HttpAPIURI: types.StringNull(), + Name: types.StringNull(), + Password: types.StringValue(""), + Port: types.Int64Value(2123456789), + Uri: types.StringNull(), + Username: types.StringValue(""), + }, + true, + }, + { + "nil_response", + nil, + Model{}, + false, + }, + { + "no_resource_id", + &opensearch.CredentialsResponse{}, + Model{}, + false, + }, + { + "nil_raw_credentials", + &opensearch.CredentialsResponse{ + Id: utils.Ptr("cid"), + }, + Model{}, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + model := &Model{ + ProjectId: tt.expected.ProjectId, + InstanceId: tt.expected.InstanceId, + } + err := mapFields(tt.input, model) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(model, &tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} diff --git a/stackit/services/opensearch/instance/datasource.go b/stackit/services/opensearch/instance/datasource.go new file mode 100644 index 00000000..c9169346 --- /dev/null +++ 
b/stackit/services/opensearch/instance/datasource.go @@ -0,0 +1,181 @@ +package opensearch + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" + + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/opensearch" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &instanceDataSource{} +) + +// NewInstanceDataSource is a helper function to simplify the provider implementation. +func NewInstanceDataSource() datasource.DataSource { + return &instanceDataSource{} +} + +// instanceDataSource is the data source implementation. +type instanceDataSource struct { + client *opensearch.APIClient +} + +// Metadata returns the resource type name. +func (r *instanceDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_opensearch_instance" +} + +// Configure adds the provider configured client to the resource. +func (r *instanceDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Data Source Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. 
Please report this issue to the provider developers.", req.ProviderData)) + return + } + + var apiClient *opensearch.APIClient + var err error + if providerData.OpenSearchCustomEndpoint != "" { + apiClient, err = opensearch.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.OpenSearchCustomEndpoint), + ) + } else { + apiClient, err = opensearch.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + + tflog.Info(ctx, "OpenSearch zone client configured") + r.client = apiClient +} + +// Schema defines the schema for the resource. +func (r *instanceDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + descriptions := map[string]string{ + "main": "OpenSearch instance data source schema.", + "id": "Terraform's internal resource identifier.", + "instance_id": "ID of the OpenSearch instance.", + "project_id": "STACKIT Project ID to which the instance is associated.", + "name": "Instance name.", + "version": "The service version.", + "plan_name": "The selected plan name.", + "plan_id": "The selected plan ID.", + } + + resp.Schema = schema.Schema{ + Description: descriptions["main"], + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: descriptions["id"], + Computed: true, + }, + "instance_id": schema.StringAttribute{ + Description: descriptions["instance_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "project_id": schema.StringAttribute{ + Description: descriptions["project_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "name": schema.StringAttribute{ + Description: descriptions["name"], + Computed: true, + }, + "version": 
schema.StringAttribute{ + Description: descriptions["version"], + Computed: true, + }, + "plan_name": schema.StringAttribute{ + Description: descriptions["plan_name"], + Computed: true, + }, + "plan_id": schema.StringAttribute{ + Description: descriptions["plan_id"], + Computed: true, + }, + "parameters": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "sgw_acl": schema.StringAttribute{ + Computed: true, + }, + }, + Computed: true, + }, + "cf_guid": schema.StringAttribute{ + Computed: true, + }, + "cf_space_guid": schema.StringAttribute{ + Computed: true, + }, + "dashboard_url": schema.StringAttribute{ + Computed: true, + }, + "image_url": schema.StringAttribute{ + Computed: true, + }, + "organization_guid": schema.StringAttribute{ + Computed: true, + }, + }, + } +} + +// Read refreshes the Terraform state with the latest data. +func (r *instanceDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var state Model + diags := req.Config.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + projectId := state.ProjectId.ValueString() + instanceId := state.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + instanceResp, err := r.client.GetInstance(ctx, projectId, instanceId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Unable to read instance", err.Error()) + return + } + + err = mapFields(instanceResp, &state) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Mapping fields", err.Error()) + return + } + // Set refreshed state + diags = resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ tflog.Info(ctx, "OpenSearch instance read") +} diff --git a/stackit/services/opensearch/instance/resource.go b/stackit/services/opensearch/instance/resource.go new file mode 100644 index 00000000..4cec5b3d --- /dev/null +++ b/stackit/services/opensearch/instance/resource.go @@ -0,0 +1,623 @@ +package opensearch + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" + + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/opensearch" +) + +// Ensure the implementation satisfies the expected interfaces. 
+var ( + _ resource.Resource = &instanceResource{} + _ resource.ResourceWithConfigure = &instanceResource{} + _ resource.ResourceWithImportState = &instanceResource{} +) + +type Model struct { + Id types.String `tfsdk:"id"` // needed by TF + InstanceId types.String `tfsdk:"instance_id"` + ProjectId types.String `tfsdk:"project_id"` + CfGuid types.String `tfsdk:"cf_guid"` + CfSpaceGuid types.String `tfsdk:"cf_space_guid"` + DashboardUrl types.String `tfsdk:"dashboard_url"` + ImageUrl types.String `tfsdk:"image_url"` + Name types.String `tfsdk:"name"` + OrganizationGuid types.String `tfsdk:"organization_guid"` + Parameters types.Object `tfsdk:"parameters"` + Version types.String `tfsdk:"version"` + PlanName types.String `tfsdk:"plan_name"` + PlanId types.String `tfsdk:"plan_id"` +} + +// Struct corresponding to DataSourceModel.Parameters +type parametersModel struct { + SgwAcl types.String `tfsdk:"sgw_acl"` +} + +// Types corresponding to parametersModel +var parametersTypes = map[string]attr.Type{ + "sgw_acl": basetypes.StringType{}, +} + +// NewInstanceResource is a helper function to simplify the provider implementation. +func NewInstanceResource() resource.Resource { + return &instanceResource{} +} + +// instanceResource is the resource implementation. +type instanceResource struct { + client *opensearch.APIClient +} + +// Metadata returns the resource type name. +func (r *instanceResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_opensearch_instance" +} + +// Configure adds the provider configured client to the resource. +func (r *instanceResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. 
+ if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Resource Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. Please report this issue to the provider developers.", req.ProviderData)) + return + } + + var apiClient *opensearch.APIClient + var err error + if providerData.OpenSearchCustomEndpoint != "" { + apiClient, err = opensearch.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.OpenSearchCustomEndpoint), + ) + } else { + apiClient, err = opensearch.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + + tflog.Info(ctx, "opensearch zone client configured") + r.client = apiClient +} + +// Schema defines the schema for the resource. +func (r *instanceResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + descriptions := map[string]string{ + "main": "OpenSearch instance resource schema.", + "id": "Terraform's internal resource ID.", + "instance_id": "ID of the OpenSearch instance.", + "project_id": "STACKIT project ID to which the instance is associated.", + "name": "Instance name.", + "version": "The service version.", + "plan_name": "The selected plan name.", + "plan_id": "The selected plan ID.", + } + + resp.Schema = schema.Schema{ + Description: descriptions["main"], + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: descriptions["id"], + Computed: true, + }, + "instance_id": schema.StringAttribute{ + Description: descriptions["instance_id"], + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + 
"project_id": schema.StringAttribute{ + Description: descriptions["project_id"], + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "name": schema.StringAttribute{ + Description: descriptions["name"], + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), + }, + }, + "version": schema.StringAttribute{ + Description: descriptions["version"], + Required: true, + }, + "plan_name": schema.StringAttribute{ + Description: descriptions["plan_name"], + Required: true, + }, + "plan_id": schema.StringAttribute{ + Description: descriptions["plan_id"], + Computed: true, + }, + "parameters": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "sgw_acl": schema.StringAttribute{ + Optional: true, + Computed: true, + }, + }, + Optional: true, + Computed: true, + }, + "cf_guid": schema.StringAttribute{ + Computed: true, + }, + "cf_space_guid": schema.StringAttribute{ + Computed: true, + }, + "dashboard_url": schema.StringAttribute{ + Computed: true, + }, + "image_url": schema.StringAttribute{ + Computed: true, + }, + "organization_guid": schema.StringAttribute{ + Computed: true, + }, + }, + } +} + +// Create creates the resource and sets the initial Terraform state. +func (r *instanceResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + + r.loadPlanId(ctx, &resp.Diagnostics, &model) + if diags.HasError() { + core.LogAndAddError(ctx, &diags, "Failed to load OpenSearch service plan", "plan "+model.PlanName.ValueString()) + return + } + + var parameters = ¶metersModel{} + if !(model.Parameters.IsNull() || model.Parameters.IsUnknown()) { + diags = model.Parameters.As(ctx, parameters, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + + // Generate API request body from model + payload, err := toCreatePayload(&model, parameters) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Creating API payload: %v", err)) + return + } + // Create new instance + createResp, err := r.client.CreateInstance(ctx, projectId).CreateInstancePayload(*payload).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Calling API: %v", err)) + return + } + instanceId := *createResp.InstanceId + ctx = tflog.SetField(ctx, "instance_id", instanceId) + wr, err := opensearch.CreateInstanceWaitHandler(ctx, r.client, projectId, instanceId).SetTimeout(15 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Instance creation waiting: %v", err)) + return + } + got, ok := wr.(*opensearch.Instance) + if !ok { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Wait result conversion, got %+v", got)) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(got, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + // Set state to fully populated data + diags = resp.State.Set(ctx, 
model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "opensearch instance created") +} + +// Read refreshes the Terraform state with the latest data. +func (r *instanceResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var state Model + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := state.ProjectId.ValueString() + instanceId := state.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + instanceResp, err := r.client.GetInstance(ctx, projectId, instanceId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instances", err.Error()) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(instanceResp, &state) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + // Set refreshed state + diags = resp.State.Set(ctx, state) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "opensearch instance read") +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *instanceResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + r.loadPlanId(ctx, &resp.Diagnostics, &model) + if diags.HasError() { + core.LogAndAddError(ctx, &diags, "Failed to load OpenSearch service plan", "plan "+model.PlanName.ValueString()) + return + } + + var parameters = ¶metersModel{} + if !(model.Parameters.IsNull() || model.Parameters.IsUnknown()) { + diags = model.Parameters.As(ctx, parameters, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + + // Generate API request body from model + payload, err := toUpdatePayload(&model, parameters) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Could not create API payload: %v", err)) + return + } + // Update existing instance + err = r.client.UpdateInstance(ctx, projectId, instanceId).UpdateInstancePayload(*payload).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", err.Error()) + return + } + wr, err := opensearch.UpdateInstanceWaitHandler(ctx, r.client, projectId, instanceId).SetTimeout(15 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Instance update waiting: %v", err)) + return + } + got, ok := wr.(*opensearch.Instance) + if !ok { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Wait result conversion, got %+v", got)) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(got, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields in update", err.Error()) + return + } + diags = resp.State.Set(ctx, model) + 
resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "opensearch instance updated") +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *instanceResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform + // Retrieve values from state + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + // Delete existing instance + err := r.client.DeleteInstance(ctx, projectId, instanceId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting instance", err.Error()) + return + } + _, err = opensearch.DeleteInstanceWaitHandler(ctx, r.client, projectId, instanceId).SetTimeout(15 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting instance", fmt.Sprintf("Instance deletion waiting: %v", err)) + return + } + tflog.Info(ctx, "opensearch instance deleted") +} + +// ImportState imports a resource into the Terraform state on success. +// The expected format of the resource import identifier is: project_id,instance_id +func (r *instanceResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + idParts := strings.Split(req.ID, core.Separator) + + if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { + resp.Diagnostics.AddError( + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format: [project_id],[instance_id] Got: %q", req.ID), + ) + return + } + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...) 
+ resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), idParts[1])...) + tflog.Info(ctx, "OpenSearch instance state imported") +} +func mapFields(instance *opensearch.Instance, model *Model) error { + if instance == nil { + return fmt.Errorf("response input is nil") + } + if model == nil { + return fmt.Errorf("model input is nil") + } + + var instanceId string + if model.InstanceId.ValueString() != "" { + instanceId = model.InstanceId.ValueString() + } else if instance.InstanceId != nil { + instanceId = *instance.InstanceId + } else { + return fmt.Errorf("instance id not present") + } + + idParts := []string{ + model.ProjectId.ValueString(), + instanceId, + } + model.Id = types.StringValue( + strings.Join(idParts, core.Separator), + ) + model.InstanceId = types.StringValue(instanceId) + model.PlanId = types.StringPointerValue(instance.PlanId) + model.CfGuid = types.StringPointerValue(instance.CfGuid) + model.CfSpaceGuid = types.StringPointerValue(instance.CfSpaceGuid) + model.DashboardUrl = types.StringPointerValue(instance.DashboardUrl) + model.ImageUrl = types.StringPointerValue(instance.ImageUrl) + model.Name = types.StringPointerValue(instance.Name) + model.OrganizationGuid = types.StringPointerValue(instance.OrganizationGuid) + if instance.Parameters == nil { + model.Parameters = types.ObjectNull(parametersTypes) + } else { + parameters, err := mapParameters(*instance.Parameters) + if err != nil { + return fmt.Errorf("mapping parameters: %w", err) + } + model.Parameters = parameters + } + return nil +} + +func mapParameters(params map[string]interface{}) (types.Object, error) { + attributes := map[string]attr.Value{} + for attribute := range parametersTypes { + valueInterface, ok := params[attribute] + if !ok { + // All fields are optional, so this is ok + // Set the value as nil, will be handled accordingly + valueInterface = nil + } + + var value attr.Value + switch parametersTypes[attribute].(type) { + default: + return 
types.ObjectNull(parametersTypes), fmt.Errorf("found unexpected attribute type '%T'", parametersTypes[attribute]) + case basetypes.StringType: + if valueInterface == nil { + value = types.StringNull() + } else { + valueString, ok := valueInterface.(string) + if !ok { + return types.ObjectNull(parametersTypes), fmt.Errorf("found attribute '%s' of type %T, failed to assert as string", attribute, valueInterface) + } + value = types.StringValue(valueString) + } + case basetypes.BoolType: + if valueInterface == nil { + value = types.BoolNull() + } else { + valueBool, ok := valueInterface.(bool) + if !ok { + return types.ObjectNull(parametersTypes), fmt.Errorf("found attribute '%s' of type %T, failed to assert as bool", attribute, valueInterface) + } + value = types.BoolValue(valueBool) + } + case basetypes.Int64Type: + if valueInterface == nil { + value = types.Int64Null() + } else { + // This may be int64, int32, int or float64 + // We try to assert all 4 + var valueInt64 int64 + switch temp := valueInterface.(type) { + default: + return types.ObjectNull(parametersTypes), fmt.Errorf("found attribute '%s' of type %T, failed to assert as int", attribute, valueInterface) + case int64: + valueInt64 = temp + case int32: + valueInt64 = int64(temp) + case int: + valueInt64 = int64(temp) + case float64: + valueInt64 = int64(temp) + } + value = types.Int64Value(valueInt64) + } + case basetypes.ListType: // Assumed to be a list of strings + if valueInterface == nil { + value = types.ListNull(types.StringType) + } else { + // This may be []string{} or []interface{} + // We try to assert all 2 + var valueList []attr.Value + switch temp := valueInterface.(type) { + default: + return types.ObjectNull(parametersTypes), fmt.Errorf("found attribute '%s' of type %T, failed to assert as array of interface", attribute, valueInterface) + case []string: + for _, x := range temp { + valueList = append(valueList, types.StringValue(x)) + } + case []interface{}: + for _, x := range temp { + 
xString, ok := x.(string) + if !ok { + return types.ObjectNull(parametersTypes), fmt.Errorf("found attribute '%s' with element '%s' of type %T, failed to assert as string", attribute, x, x) + } + valueList = append(valueList, types.StringValue(xString)) + } + } + temp2, diags := types.ListValue(types.StringType, valueList) + if diags.HasError() { + return types.ObjectNull(parametersTypes), fmt.Errorf("failed to map %s: %w", attribute, core.DiagsToError(diags)) + } + value = temp2 + } + } + attributes[attribute] = value + } + + output, diags := types.ObjectValue(parametersTypes, attributes) + if diags.HasError() { + return types.ObjectNull(parametersTypes), fmt.Errorf("failed to create object: %w", core.DiagsToError(diags)) + } + return output, nil +} + +func toCreatePayload(model *Model, parameters *parametersModel) (*opensearch.CreateInstancePayload, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + if parameters == nil { + return &opensearch.CreateInstancePayload{ + InstanceName: model.Name.ValueStringPointer(), + PlanId: model.PlanId.ValueStringPointer(), + }, nil + } + payloadParams := &opensearch.InstanceParameters{} + if parameters.SgwAcl.ValueString() != "" { + payloadParams.SgwAcl = parameters.SgwAcl.ValueStringPointer() + } + return &opensearch.CreateInstancePayload{ + InstanceName: model.Name.ValueStringPointer(), + Parameters: payloadParams, + PlanId: model.PlanId.ValueStringPointer(), + }, nil +} + +func toUpdatePayload(model *Model, parameters *parametersModel) (*opensearch.UpdateInstancePayload, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + + if parameters == nil { + return &opensearch.UpdateInstancePayload{ + PlanId: model.PlanId.ValueStringPointer(), + }, nil + } + return &opensearch.UpdateInstancePayload{ + Parameters: &opensearch.InstanceParameters{ + SgwAcl: parameters.SgwAcl.ValueStringPointer(), + }, + PlanId: model.PlanId.ValueStringPointer(), + }, nil +} + +func (r *instanceResource) 
loadPlanId(ctx context.Context, diags *diag.Diagnostics, model *Model) { + projectId := model.ProjectId.ValueString() + res, err := r.client.GetOfferings(ctx, projectId).Execute() + if err != nil { + diags.AddError("Failed to list OpenSearch offerings", err.Error()) + return + } + + version := model.Version.ValueString() + planName := model.PlanName.ValueString() + availableVersions := "" + availablePlanNames := "" + isValidVersion := false + for _, offer := range *res.Offerings { + if !strings.EqualFold(*offer.Version, version) { + availableVersions = fmt.Sprintf("%s\n- %s", availableVersions, *offer.Version) + continue + } + isValidVersion = true + + for _, plan := range *offer.Plans { + if plan.Name == nil { + continue + } + if strings.EqualFold(*plan.Name, planName) && plan.Id != nil { + model.PlanId = types.StringPointerValue(plan.Id) + return + } + availablePlanNames = fmt.Sprintf("%s\n- %s", availablePlanNames, *plan.Name) + } + } + + if !isValidVersion { + diags.AddError("Invalid version", fmt.Sprintf("Couldn't find version '%s', available versions are:%s", version, availableVersions)) + return + } + diags.AddError("Invalid plan_name", fmt.Sprintf("Couldn't find plan_name '%s' for version %s, available names are:%s", planName, version, availablePlanNames)) +} diff --git a/stackit/services/opensearch/instance/resource_test.go b/stackit/services/opensearch/instance/resource_test.go new file mode 100644 index 00000000..cdf4c25f --- /dev/null +++ b/stackit/services/opensearch/instance/resource_test.go @@ -0,0 +1,304 @@ +package opensearch + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/opensearch" +) + +func TestMapFields(t *testing.T) { + tests := []struct { + description string + input *opensearch.Instance + expected Model + isValid 
bool + }{ + { + "default_values", + &opensearch.Instance{}, + Model{ + Id: types.StringValue("pid,iid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + PlanId: types.StringNull(), + Name: types.StringNull(), + CfGuid: types.StringNull(), + CfSpaceGuid: types.StringNull(), + DashboardUrl: types.StringNull(), + ImageUrl: types.StringNull(), + OrganizationGuid: types.StringNull(), + Parameters: types.ObjectNull(parametersTypes), + }, + true, + }, + { + "simple_values", + &opensearch.Instance{ + PlanId: utils.Ptr("plan"), + CfGuid: utils.Ptr("cf"), + CfSpaceGuid: utils.Ptr("space"), + DashboardUrl: utils.Ptr("dashboard"), + ImageUrl: utils.Ptr("image"), + InstanceId: utils.Ptr("iid"), + Name: utils.Ptr("name"), + OrganizationGuid: utils.Ptr("org"), + Parameters: &map[string]interface{}{ + "sgw_acl": "acl", + }, + }, + Model{ + Id: types.StringValue("pid,iid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + PlanId: types.StringValue("plan"), + Name: types.StringValue("name"), + CfGuid: types.StringValue("cf"), + CfSpaceGuid: types.StringValue("space"), + DashboardUrl: types.StringValue("dashboard"), + ImageUrl: types.StringValue("image"), + OrganizationGuid: types.StringValue("org"), + Parameters: types.ObjectValueMust(parametersTypes, map[string]attr.Value{ + "sgw_acl": types.StringValue("acl"), + }), + }, + true, + }, + { + "nil_response", + nil, + Model{}, + false, + }, + { + "no_resource_id", + &opensearch.Instance{}, + Model{}, + false, + }, + { + "wrong_param_types_1", + &opensearch.Instance{ + Parameters: &map[string]interface{}{ + "sgw_acl": true, + }, + }, + Model{}, + false, + }, + { + "wrong_param_types_2", + &opensearch.Instance{ + Parameters: &map[string]interface{}{ + "sgw_acl": 1, + }, + }, + Model{}, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + state := &Model{ + ProjectId: tt.expected.ProjectId, + InstanceId: tt.expected.InstanceId, + } + 
err := mapFields(tt.input, state) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(state, &tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func TestToCreatePayload(t *testing.T) { + tests := []struct { + description string + input *Model + inputParameters *parametersModel + expected *opensearch.CreateInstancePayload + isValid bool + }{ + { + "default_values", + &Model{}, + ¶metersModel{}, + &opensearch.CreateInstancePayload{ + Parameters: &opensearch.InstanceParameters{}, + }, + true, + }, + { + "simple_values", + &Model{ + Name: types.StringValue("name"), + PlanId: types.StringValue("plan"), + }, + ¶metersModel{ + SgwAcl: types.StringValue("sgw"), + }, + &opensearch.CreateInstancePayload{ + InstanceName: utils.Ptr("name"), + Parameters: &opensearch.InstanceParameters{ + SgwAcl: utils.Ptr("sgw"), + }, + PlanId: utils.Ptr("plan"), + }, + true, + }, + { + "null_fields_and_int_conversions", + &Model{ + Name: types.StringValue(""), + PlanId: types.StringValue(""), + }, + ¶metersModel{ + SgwAcl: types.StringNull(), + }, + &opensearch.CreateInstancePayload{ + InstanceName: utils.Ptr(""), + Parameters: &opensearch.InstanceParameters{ + SgwAcl: nil, + }, + PlanId: utils.Ptr(""), + }, + true, + }, + { + "nil_model", + nil, + ¶metersModel{}, + nil, + false, + }, + { + "nil_parameters", + &Model{ + Name: types.StringValue("name"), + PlanId: types.StringValue("plan"), + }, + nil, + &opensearch.CreateInstancePayload{ + InstanceName: utils.Ptr("name"), + PlanId: utils.Ptr("plan"), + }, + true, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + output, err := toCreatePayload(tt.input, tt.inputParameters) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if 
tt.isValid { + diff := cmp.Diff(output, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func TestToUpdatePayload(t *testing.T) { + tests := []struct { + description string + input *Model + inputParameters *parametersModel + expected *opensearch.UpdateInstancePayload + isValid bool + }{ + { + "default_values", + &Model{}, + ¶metersModel{}, + &opensearch.UpdateInstancePayload{ + Parameters: &opensearch.InstanceParameters{}, + }, + true, + }, + { + "simple_values", + &Model{ + PlanId: types.StringValue("plan"), + }, + ¶metersModel{ + SgwAcl: types.StringValue("sgw"), + }, + &opensearch.UpdateInstancePayload{ + Parameters: &opensearch.InstanceParameters{ + SgwAcl: utils.Ptr("sgw"), + }, + PlanId: utils.Ptr("plan"), + }, + true, + }, + { + "null_fields_and_int_conversions", + &Model{ + PlanId: types.StringValue(""), + }, + ¶metersModel{ + SgwAcl: types.StringNull(), + }, + &opensearch.UpdateInstancePayload{ + Parameters: &opensearch.InstanceParameters{ + SgwAcl: nil, + }, + PlanId: utils.Ptr(""), + }, + true, + }, + { + "nil_model", + nil, + ¶metersModel{}, + nil, + false, + }, + { + "nil_parameters", + &Model{ + PlanId: types.StringValue("plan"), + }, + nil, + &opensearch.UpdateInstancePayload{ + PlanId: utils.Ptr("plan"), + }, + true, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + output, err := toUpdatePayload(tt.input, tt.inputParameters) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(output, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} diff --git a/stackit/services/opensearch/opensearch_acc_test.go b/stackit/services/opensearch/opensearch_acc_test.go new file mode 100644 index 00000000..a62c9ffa --- /dev/null +++ b/stackit/services/opensearch/opensearch_acc_test.go @@ -0,0 +1,263 @@ 
+package opensearch_test + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/opensearch" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/testutil" +) + +// Instance resource data +var instanceResource = map[string]string{ + "project_id": testutil.ProjectId, + "name": testutil.ResourceNameWithDateTime("opensearch"), + "plan_id": "9e4eac4b-b03d-4d7b-b01b-6d1224aa2d68", + "sgw_acl": "192.168.0.0/24", +} + +func resourceConfig() string { + return fmt.Sprintf(` + %s + + resource "stackit_opensearch_instance" "instance" { + project_id = "%s" + name = "%s" + plan_id = "%s" + } + + resource "stackit_opensearch_credentials" "credentials" { + project_id = stackit_opensearch_instance.instance.project_id + instance_id = stackit_opensearch_instance.instance.instance_id + } + `, + testutil.OpenSearchProviderConfig(), + instanceResource["project_id"], + instanceResource["name"], + instanceResource["plan_id"], + ) +} + +func resourceConfigUpdate() string { + return fmt.Sprintf(` + %s + + resource "stackit_opensearch_instance" "instance" { + project_id = "%s" + name = "%s" + plan_id = "%s" + parameters = { + sgw_acl = "%s" + } + } + + resource "stackit_opensearch_credentials" "credentials" { + project_id = stackit_opensearch_instance.instance.project_id + instance_id = stackit_opensearch_instance.instance.instance_id + } + `, + testutil.OpenSearchProviderConfig(), + instanceResource["project_id"], + instanceResource["name"], + instanceResource["plan_id"], + instanceResource["sgw_acl"], + ) +} + +func TestAccOpenSearchResource(t *testing.T) { + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: 
testutil.TestAccProtoV6ProviderFactories, + CheckDestroy: testAccCheckOpenSearchDestroy, + Steps: []resource.TestStep{ + + // Creation + { + Config: resourceConfig(), + Check: resource.ComposeAggregateTestCheckFunc( + // Instance data + resource.TestCheckResourceAttr("stackit_opensearch_instance.instance", "project_id", instanceResource["project_id"]), + resource.TestCheckResourceAttrSet("stackit_opensearch_instance.instance", "instance_id"), + resource.TestCheckResourceAttr("stackit_opensearch_instance.instance", "plan_id", instanceResource["plan_id"]), + resource.TestCheckResourceAttr("stackit_opensearch_instance.instance", "name", instanceResource["name"]), + resource.TestCheckResourceAttrSet("stackit_opensearch_instance.instance", "parameters.sgw_acl"), + + // Credentials data + resource.TestCheckResourceAttrPair( + "stackit_opensearch_credentials.credentials", "project_id", + "stackit_opensearch_instance.instance", "project_id", + ), + resource.TestCheckResourceAttrPair( + "stackit_opensearch_credentials.credentials", "instance_id", + "stackit_opensearch_instance.instance", "instance_id", + ), + resource.TestCheckResourceAttrSet("stackit_opensearch_credentials.credentials", "credentials_id"), + resource.TestCheckResourceAttrSet("stackit_opensearch_credentials.credentials", "host"), + ), + }, + // Data source + { + Config: fmt.Sprintf(` + %s + + data "stackit_opensearch_instance" "instance" { + project_id = stackit_opensearch_instance.instance.project_id + instance_id = stackit_opensearch_instance.instance.instance_id + } + + data "stackit_opensearch_credentials" "credentials" { + project_id = stackit_opensearch_credentials.credentials.project_id + instance_id = stackit_opensearch_credentials.credentials.instance_id + credentials_id = stackit_opensearch_credentials.credentials.credentials_id + }`, + resourceConfig(), + ), + Check: resource.ComposeAggregateTestCheckFunc( + // Instance data + 
resource.TestCheckResourceAttr("data.stackit_opensearch_instance.instance", "project_id", instanceResource["project_id"]), + + resource.TestCheckResourceAttrPair("stackit_opensearch_instance.instance", "instance_id", + "data.stackit_opensearch_instance.instance", "instance_id"), + + resource.TestCheckResourceAttrPair("stackit_opensearch_credentials.credentials", "credentials_id", + "data.stackit_opensearch_credentials.credentials", "credentials_id"), + + resource.TestCheckResourceAttr("data.stackit_opensearch_instance.instance", "plan_id", instanceResource["plan_id"]), + + resource.TestCheckResourceAttr("data.stackit_opensearch_instance.instance", "name", instanceResource["name"]), + resource.TestCheckResourceAttrSet("data.stackit_opensearch_instance.instance", "parameters.sgw_acl"), + + // Credentials data + resource.TestCheckResourceAttr("data.stackit_opensearch_credentials.credentials", "project_id", instanceResource["project_id"]), + resource.TestCheckResourceAttrSet("data.stackit_opensearch_credentials.credentials", "credentials_id"), + resource.TestCheckResourceAttrSet("data.stackit_opensearch_credentials.credentials", "host"), + resource.TestCheckResourceAttrSet("data.stackit_opensearch_credentials.credentials", "port"), + resource.TestCheckResourceAttrSet("data.stackit_opensearch_credentials.credentials", "uri"), + ), + }, + // Import + { + ResourceName: "stackit_opensearch_instance.instance", + ImportStateIdFunc: func(s *terraform.State) (string, error) { + r, ok := s.RootModule().Resources["stackit_opensearch_instance.instance"] + if !ok { + return "", fmt.Errorf("couldn't find resource stackit_opensearch_instance.instance") + } + instanceId, ok := r.Primary.Attributes["instance_id"] + if !ok { + return "", fmt.Errorf("couldn't find attribute instance_id") + } + + return fmt.Sprintf("%s,%s", testutil.ProjectId, instanceId), nil + }, + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "stackit_opensearch_credentials.credentials", + 
ImportStateIdFunc: func(s *terraform.State) (string, error) { + r, ok := s.RootModule().Resources["stackit_opensearch_credentials.credentials"] + if !ok { + return "", fmt.Errorf("couldn't find resource stackit_opensearch_credentials.credentials") + } + instanceId, ok := r.Primary.Attributes["instance_id"] + if !ok { + return "", fmt.Errorf("couldn't find attribute instance_id") + } + credentialsId, ok := r.Primary.Attributes["credentials_id"] + if !ok { + return "", fmt.Errorf("couldn't find attribute credentials_id") + } + return fmt.Sprintf("%s,%s,%s", testutil.ProjectId, instanceId, credentialsId), nil + }, + ImportState: true, + ImportStateVerify: true, + }, + // Update + { + Config: resourceConfigUpdate(), + Check: resource.ComposeAggregateTestCheckFunc( + // Instance data + resource.TestCheckResourceAttr("stackit_opensearch_instance.instance", "project_id", instanceResource["project_id"]), + resource.TestCheckResourceAttrSet("stackit_opensearch_instance.instance", "instance_id"), + resource.TestCheckResourceAttr("stackit_opensearch_instance.instance", "plan_id", instanceResource["plan_id"]), + resource.TestCheckResourceAttr("stackit_opensearch_instance.instance", "name", instanceResource["name"]), + resource.TestCheckResourceAttr("stackit_opensearch_instance.instance", "parameters.sgw_acl", instanceResource["sgw_acl"]), + ), + }, + // Deletion is done by the framework implicitly + }, + }) +} + +func testAccCheckOpenSearchDestroy(s *terraform.State) error { + ctx := context.Background() + var client *opensearch.APIClient + var err error + if testutil.OpenSearchCustomEndpoint == "" { + client, err = opensearch.NewAPIClient() + } else { + client, err = opensearch.NewAPIClient( + config.WithEndpoint(testutil.OpenSearchCustomEndpoint), + ) + } + if err != nil { + return fmt.Errorf("creating client: %w", err) + } + + instancesToDestroy := []string{} + for _, rs := range s.RootModule().Resources { + if rs.Type != "stackit_opensearch_instance" { + continue + } + // 
instance terraform ID: "[project_id],[instance_id]"
+		instanceId := strings.Split(rs.Primary.ID, core.Separator)[1]
+		instancesToDestroy = append(instancesToDestroy, instanceId)
+	}
+
+	instancesResp, err := client.GetInstances(ctx, testutil.ProjectId).Execute()
+	if err != nil {
+		return fmt.Errorf("getting instancesResp: %w", err)
+	}
+
+	instances := *instancesResp.Instances
+	for i := range instances {
+		if instances[i].InstanceId == nil {
+			continue
+		}
+		if utils.Contains(instancesToDestroy, *instances[i].InstanceId) {
+			if !checkInstanceDeleteSuccess(&instances[i]) {
+				err := client.DeleteInstanceExecute(ctx, testutil.ProjectId, *instances[i].InstanceId)
+				if err != nil {
+					return fmt.Errorf("destroying instance %s during CheckDestroy: %w", *instances[i].InstanceId, err)
+				}
+				_, err = opensearch.DeleteInstanceWaitHandler(ctx, client, testutil.ProjectId, *instances[i].InstanceId).WaitWithContext(ctx)
+				if err != nil {
+					return fmt.Errorf("destroying instance %s during CheckDestroy: waiting for deletion %w", *instances[i].InstanceId, err)
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// checkInstanceDeleteSuccess reports whether i's last operation was a successful deletion.
+func checkInstanceDeleteSuccess(i *opensearch.Instance) bool {
+	if *i.LastOperation.Type != opensearch.InstanceTypeDelete {
+		return false
+	}
+	if *i.LastOperation.State != opensearch.InstanceStateSuccess {
+		return false
+	}
+	// A "success" state may still carry a failure description.
+	if strings.Contains(*i.LastOperation.Description, "DeleteFailed") || strings.Contains(*i.LastOperation.Description, "failed") {
+		return false
+	}
+	return true
+}
diff --git a/stackit/services/postgresflex/instance/datasource.go b/stackit/services/postgresflex/instance/datasource.go
new file mode 100644
index 00000000..f8c493c0
--- /dev/null
+++ b/stackit/services/postgresflex/instance/datasource.go
@@ -0,0 +1,205 @@
+package postgresflex
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/hashicorp/terraform-plugin-framework/datasource"
+	
"github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" + + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/postgresflex" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &instanceDataSource{} +) + +// NewInstanceDataSource is a helper function to simplify the provider implementation. +func NewInstanceDataSource() datasource.DataSource { + return &instanceDataSource{} +} + +// instanceDataSource is the data source implementation. +type instanceDataSource struct { + client *postgresflex.APIClient +} + +// Metadata returns the resource type name. +func (r *instanceDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_postgresflex_instance" +} + +// Configure adds the provider configured client to the resource. +func (r *instanceDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. 
+ if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Data Source Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T", req.ProviderData)) + return + } + + var apiClient *postgresflex.APIClient + var err error + if providerData.PostgresFlexCustomEndpoint != "" { + apiClient, err = postgresflex.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.PostgresFlexCustomEndpoint), + ) + } else { + apiClient, err = postgresflex.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + + tflog.Info(ctx, "Postgresflex instance client configured") + r.client = apiClient +} + +// Schema defines the schema for the resource. +func (r *instanceDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + descriptions := map[string]string{ + "main": "PostgresFlex instance data source schema.", + "id": "Terraform's internal resource ID.", + "instance_id": "ID of the PostgresFlex instance.", + "project_id": "STACKIT project ID to which the instance is associated.", + "name": "Instance name.", + "acl": "The Access Control List (ACL) for the PostgresFlex instance.", + } + + resp.Schema = schema.Schema{ + Description: descriptions["main"], + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: descriptions["id"], + Computed: true, + }, + "instance_id": schema.StringAttribute{ + Description: descriptions["instance_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "project_id": schema.StringAttribute{ + Description: descriptions["project_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + 
validate.NoSeparator(), + }, + }, + "name": schema.StringAttribute{ + Description: descriptions["name"], + Computed: true, + }, + "acl": schema.ListAttribute{ + Description: descriptions["acl"], + ElementType: types.StringType, + Computed: true, + }, + "backup_schedule": schema.StringAttribute{ + Computed: true, + }, + "flavor": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Computed: true, + }, + "description": schema.StringAttribute{ + Computed: true, + }, + "cpu": schema.Int64Attribute{ + Computed: true, + }, + "ram": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "replicas": schema.Int64Attribute{ + Computed: true, + }, + "storage": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "class": schema.StringAttribute{ + Computed: true, + }, + "size": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "version": schema.StringAttribute{ + Computed: true, + }, + }, + } +} + +// Read refreshes the Terraform state with the latest data. +func (r *instanceDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var state Model + diags := req.Config.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + projectId := state.ProjectId.ValueString() + instanceId := state.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + instanceResp, err := r.client.GetInstance(ctx, projectId, instanceId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Unable to read instance", err.Error()) + return + } + + var flavor = &flavorModel{} + if !(state.Flavor.IsNull() || state.Flavor.IsUnknown()) { + diags = state.Flavor.As(ctx, flavor, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + var storage = &storageModel{} + if !(state.Storage.IsNull() || state.Storage.IsUnknown()) { + diags = state.Storage.As(ctx, storage, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + + err = mapFields(instanceResp, &state, flavor, storage) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Mapping fields", err.Error()) + return + } + // Set refreshed state + diags = resp.State.Set(ctx, state) + resp.Diagnostics.Append(diags...) 
+	// Fixed log message: this is the postgresflex package; all sibling
+	// messages in this file say "Postgresflex", not "Postgresql".
+	tflog.Info(ctx, "Postgresflex instance read")
+}
diff --git a/stackit/services/postgresflex/instance/resource.go b/stackit/services/postgresflex/instance/resource.go
new file mode 100644
index 00000000..6d874d7e
--- /dev/null
+++ b/stackit/services/postgresflex/instance/resource.go
@@ -0,0 +1,703 @@
+package postgresflex
+
+import (
+	"context"
+	"fmt"
+	"regexp"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
+	"github.com/hashicorp/terraform-plugin-framework/attr"
+	"github.com/hashicorp/terraform-plugin-framework/diag"
+	"github.com/hashicorp/terraform-plugin-framework/schema/validator"
+	"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
+	"github.com/hashicorp/terraform-plugin-log/tflog"
+	"github.com/stackitcloud/terraform-provider-stackit/stackit/conversion"
+	"github.com/stackitcloud/terraform-provider-stackit/stackit/core"
+	"github.com/stackitcloud/terraform-provider-stackit/stackit/validate"
+
+	"github.com/hashicorp/terraform-plugin-framework/path"
+	"github.com/hashicorp/terraform-plugin-framework/resource"
+	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
+	"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
+	"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+	"github.com/stackitcloud/stackit-sdk-go/core/config"
+	"github.com/stackitcloud/stackit-sdk-go/services/postgresflex"
+)
+
+// Ensure the implementation satisfies the expected interfaces.
+var ( + _ resource.Resource = &instanceResource{} + _ resource.ResourceWithConfigure = &instanceResource{} + _ resource.ResourceWithImportState = &instanceResource{} +) + +type Model struct { + Id types.String `tfsdk:"id"` // needed by TF + InstanceId types.String `tfsdk:"instance_id"` + ProjectId types.String `tfsdk:"project_id"` + Name types.String `tfsdk:"name"` + ACL types.List `tfsdk:"acl"` + BackupSchedule types.String `tfsdk:"backup_schedule"` + Flavor types.Object `tfsdk:"flavor"` + Replicas types.Int64 `tfsdk:"replicas"` + Storage types.Object `tfsdk:"storage"` + Version types.String `tfsdk:"version"` +} + +// Struct corresponding to Model.Flavor +type flavorModel struct { + Id types.String `tfsdk:"id"` + Description types.String `tfsdk:"description"` + CPU types.Int64 `tfsdk:"cpu"` + RAM types.Int64 `tfsdk:"ram"` +} + +// Types corresponding to flavorModel +var flavorTypes = map[string]attr.Type{ + "id": basetypes.StringType{}, + "description": basetypes.StringType{}, + "cpu": basetypes.Int64Type{}, + "ram": basetypes.Int64Type{}, +} + +// Struct corresponding to DataSourceModel.Storage +type storageModel struct { + Class types.String `tfsdk:"class"` + Size types.Int64 `tfsdk:"size"` +} + +// Types corresponding to storageModel +var storageTypes = map[string]attr.Type{ + "class": basetypes.StringType{}, + "size": basetypes.Int64Type{}, +} + +// NewInstanceResource is a helper function to simplify the provider implementation. +func NewInstanceResource() resource.Resource { + return &instanceResource{} +} + +// instanceResource is the resource implementation. +type instanceResource struct { + client *postgresflex.APIClient +} + +// Metadata returns the resource type name. +func (r *instanceResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_postgresflex_instance" +} + +// Configure adds the provider configured client to the resource. 
+func (r *instanceResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Data Source Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T", req.ProviderData)) + return + } + + var apiClient *postgresflex.APIClient + var err error + if providerData.PostgresFlexCustomEndpoint != "" { + apiClient, err = postgresflex.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.PostgresFlexCustomEndpoint), + ) + } else { + apiClient, err = postgresflex.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + + tflog.Info(ctx, "Postgresflex instance client configured") + r.client = apiClient +} + +// Schema defines the schema for the resource. 
+func (r *instanceResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + descriptions := map[string]string{ + "main": "PostgresFlex instance resource schema.", + "id": "Terraform's internal resource ID.", + "instance_id": "ID of the PostgresFlex instance.", + "project_id": "STACKIT project ID to which the instance is associated.", + "name": "Instance name.", + "acl": "The Access Control List (ACL) for the PostgresFlex instance.", + } + + resp.Schema = schema.Schema{ + Description: descriptions["main"], + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: descriptions["id"], + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "instance_id": schema.StringAttribute{ + Description: descriptions["instance_id"], + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "project_id": schema.StringAttribute{ + Description: descriptions["project_id"], + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "name": schema.StringAttribute{ + Description: descriptions["name"], + Required: true, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), + stringvalidator.RegexMatches( + regexp.MustCompile("^[a-z]([-a-z0-9]*[a-z0-9])?$"), + "must start with a letter, must have lower case letters, numbers or hyphens, and no hyphen at the end", + ), + }, + }, + "acl": schema.ListAttribute{ + Description: descriptions["acl"], + ElementType: types.StringType, + Required: true, + }, + "backup_schedule": schema.StringAttribute{ + Required: true, + }, + "flavor": schema.SingleNestedAttribute{ + Required: true, + Attributes: 
map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "description": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "cpu": schema.Int64Attribute{ + Required: true, + }, + "ram": schema.Int64Attribute{ + Required: true, + }, + }, + }, + "replicas": schema.Int64Attribute{ + Required: true, + }, + "storage": schema.SingleNestedAttribute{ + Required: true, + Attributes: map[string]schema.Attribute{ + "class": schema.StringAttribute{ + Required: true, + }, + "size": schema.Int64Attribute{ + Required: true, + }, + }, + }, + "version": schema.StringAttribute{ + Required: true, + }, + }, + } +} + +// Create creates the resource and sets the initial Terraform state. +func (r *instanceResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform + // Retrieve values from plan + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + + var acl []string + if !(model.ACL.IsNull() || model.ACL.IsUnknown()) { + diags = model.ACL.ElementsAs(ctx, &acl, false) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + var flavor = &flavorModel{} + if !(model.Flavor.IsNull() || model.Flavor.IsUnknown()) { + diags = model.Flavor.As(ctx, flavor, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + r.loadFlavorId(ctx, &resp.Diagnostics, &model, flavor) + if resp.Diagnostics.HasError() { + return + } + } + var storage = &storageModel{} + if !(model.Storage.IsNull() || model.Storage.IsUnknown()) { + diags = model.Storage.As(ctx, storage, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + + // Generate API request body from model + payload, err := toCreatePayload(&model, acl, flavor, storage) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Creating API payload: %v", err)) + return + } + // Create new instance + createResp, err := r.client.CreateInstance(ctx, projectId).CreateInstancePayload(*payload).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Calling API: %v", err)) + return + } + if createResp == nil || createResp.Id == nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", "Didn't get ID of created instance. 
An instance might have been created") + return + } + instanceId := *createResp.Id + ctx = tflog.SetField(ctx, "instance_id", instanceId) + wr, err := postgresflex.CreateInstanceWaitHandler(ctx, r.client, projectId, instanceId).SetTimeout(15 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Instance creation waiting: %v", err)) + return + } + got, ok := wr.(*postgresflex.InstanceResponse) + if !ok { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Wait result conversion, got %+v", got)) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(got, &model, flavor, storage) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + // Set state to fully populated data + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "Postgresflex instance created") +} + +// Read refreshes the Terraform state with the latest data. +func (r *instanceResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var state Model + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := state.ProjectId.ValueString() + instanceId := state.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + var flavor = &flavorModel{} + if !(state.Flavor.IsNull() || state.Flavor.IsUnknown()) { + diags = state.Flavor.As(ctx, flavor, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + } + var storage = &storageModel{} + if !(state.Storage.IsNull() || state.Storage.IsUnknown()) { + diags = state.Storage.As(ctx, storage, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + + instanceResp, err := r.client.GetInstance(ctx, projectId, instanceId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", err.Error()) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(instanceResp, &state, flavor, storage) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + // Set refreshed state + diags = resp.State.Set(ctx, state) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "Postgresflex instance read") +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *instanceResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform + // Retrieve values from plan + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + var acl []string + if !(model.ACL.IsNull() || model.ACL.IsUnknown()) { + diags = model.ACL.ElementsAs(ctx, &acl, false) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + var flavor = &flavorModel{} + if !(model.Flavor.IsNull() || model.Flavor.IsUnknown()) { + diags = model.Flavor.As(ctx, flavor, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + r.loadFlavorId(ctx, &resp.Diagnostics, &model, flavor) + if resp.Diagnostics.HasError() { + return + } + } + var storage = &storageModel{} + if !(model.Storage.IsNull() || model.Storage.IsUnknown()) { + diags = model.Storage.As(ctx, storage, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + + // Generate API request body from model + payload, err := toUpdatePayload(&model, acl, flavor, storage) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Could not create API payload: %v", err)) + return + } + // Update existing instance + _, err = r.client.UpdateInstance(ctx, projectId, instanceId).UpdateInstancePayload(*payload).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", err.Error()) + return + } + wr, err := postgresflex.UpdateInstanceWaitHandler(ctx, r.client, projectId, instanceId).SetTimeout(15 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Instance update waiting: %v", err)) + return + } + got, ok := wr.(*postgresflex.InstanceResponse) + if !ok { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Wait result conversion, got %+v", got)) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(got, &model, flavor, storage) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields in update", err.Error()) + return + } + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "Postgresflex instance updated") +} + +// Delete deletes the resource and removes the Terraform state on success. 
+func (r *instanceResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform + // Retrieve values from state + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + // Delete existing instance + err := r.client.DeleteInstance(ctx, projectId, instanceId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting instance", err.Error()) + return + } + _, err = postgresflex.DeleteInstanceWaitHandler(ctx, r.client, projectId, instanceId).SetTimeout(15 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting instance", fmt.Sprintf("Instance deletion waiting: %v", err)) + return + } + tflog.Info(ctx, "Postgresflex instance deleted") +} + +// ImportState imports a resource into the Terraform state on success. +// The expected format of the resource import identifier is: project_id,instance_id +func (r *instanceResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + idParts := strings.Split(req.ID, core.Separator) + + if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { + resp.Diagnostics.AddError( + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format: [project_id],[instance_id] Got: %q", req.ID), + ) + return + } + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), idParts[1])...) 
+ tflog.Info(ctx, "Postgresql instance state imported") +} + +func mapFields(resp *postgresflex.InstanceResponse, model *Model, flavor *flavorModel, storage *storageModel) error { + if resp == nil { + return fmt.Errorf("response input is nil") + } + if resp.Item == nil { + return fmt.Errorf("no instance provided") + } + if model == nil { + return fmt.Errorf("model input is nil") + } + instance := resp.Item + + var instanceId string + if model.InstanceId.ValueString() != "" { + instanceId = model.InstanceId.ValueString() + } else if instance.Id != nil { + instanceId = *instance.Id + } else { + return fmt.Errorf("instance id not present") + } + + var aclList basetypes.ListValue + var diags diag.Diagnostics + if instance.Acl == nil || instance.Acl.Items == nil { + aclList = types.ListNull(types.StringType) + } else { + acl := []attr.Value{} + for _, ip := range *instance.Acl.Items { + acl = append(acl, types.StringValue(ip)) + } + aclList, diags = types.ListValue(types.StringType, acl) + if diags.HasError() { + return fmt.Errorf("failed to map ACL: %w", core.DiagsToError(diags)) + } + } + + var flavorValues map[string]attr.Value + if instance.Flavor == nil { + flavorValues = map[string]attr.Value{ + "id": types.StringNull(), + "description": types.StringNull(), + "cpu": flavor.CPU, + "ram": flavor.RAM, + } + } else { + flavorValues = map[string]attr.Value{ + "id": types.StringValue(*instance.Flavor.Id), + "description": types.StringValue(*instance.Flavor.Description), + "cpu": types.Int64Value(int64(*instance.Flavor.Cpu)), + "ram": types.Int64Value(int64(*instance.Flavor.Memory)), + } + } + flavorObject, diags := types.ObjectValue(flavorTypes, flavorValues) + if diags.HasError() { + return fmt.Errorf("failed to create flavor: %w", core.DiagsToError(diags)) + } + + var storageValues map[string]attr.Value + if instance.Storage == nil { + storageValues = map[string]attr.Value{ + "class": storage.Class, + "size": storage.Size, + } + } else { + storageValues = 
map[string]attr.Value{ + "class": types.StringValue(*instance.Storage.Class), + "size": types.Int64Value(int64(*instance.Storage.Size)), + } + } + storageObject, diags := types.ObjectValue(storageTypes, storageValues) + if diags.HasError() { + return fmt.Errorf("failed to create storage: %w", core.DiagsToError(diags)) + } + + idParts := []string{ + model.ProjectId.ValueString(), + instanceId, + } + model.Id = types.StringValue( + strings.Join(idParts, core.Separator), + ) + model.InstanceId = types.StringValue(instanceId) + if instance.Name == nil { + model.Name = types.StringNull() + } else { + model.Name = types.StringValue(*instance.Name) + } + model.ACL = aclList + if instance.BackupSchedule == nil { + model.BackupSchedule = types.StringNull() + } else { + model.BackupSchedule = types.StringValue(*instance.BackupSchedule) + } + model.Flavor = flavorObject + if instance.Replicas == nil { + model.Replicas = types.Int64Null() + } else { + model.Replicas = types.Int64Value(int64(*instance.Replicas)) + } + model.Storage = storageObject + if instance.Version == nil { + model.Version = types.StringNull() + } else { + model.Version = types.StringValue(*instance.Version) + } + return nil +} + +func toCreatePayload(model *Model, acl []string, flavor *flavorModel, storage *storageModel) (*postgresflex.CreateInstancePayload, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + if acl == nil { + return nil, fmt.Errorf("nil acl") + } + if flavor == nil { + return nil, fmt.Errorf("nil flavor") + } + if storage == nil { + return nil, fmt.Errorf("nil storage") + } + + return &postgresflex.CreateInstancePayload{ + Acl: &postgresflex.InstanceAcl{ + Items: &acl, + }, + BackupSchedule: model.BackupSchedule.ValueStringPointer(), + FlavorId: flavor.Id.ValueStringPointer(), + Name: model.Name.ValueStringPointer(), + Replicas: conversion.ToPtrInt32(model.Replicas), + Storage: &postgresflex.InstanceStorage{ + Class: storage.Class.ValueStringPointer(), + Size: 
conversion.ToPtrInt32(storage.Size), + }, + Version: model.Version.ValueStringPointer(), + }, nil +} + +func toUpdatePayload(model *Model, acl []string, flavor *flavorModel, storage *storageModel) (*postgresflex.UpdateInstancePayload, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + if acl == nil { + return nil, fmt.Errorf("nil acl") + } + if flavor == nil { + return nil, fmt.Errorf("nil flavor") + } + if storage == nil { + return nil, fmt.Errorf("nil storage") + } + + return &postgresflex.UpdateInstancePayload{ + Acl: &postgresflex.InstanceAcl{ + Items: &acl, + }, + BackupSchedule: model.BackupSchedule.ValueStringPointer(), + FlavorId: flavor.Id.ValueStringPointer(), + Name: model.Name.ValueStringPointer(), + Replicas: conversion.ToPtrInt32(model.Replicas), + Storage: &postgresflex.InstanceStorage{ + Class: storage.Class.ValueStringPointer(), + Size: conversion.ToPtrInt32(storage.Size), + }, + Version: model.Version.ValueStringPointer(), + }, nil +} + +func (r *instanceResource) loadFlavorId(ctx context.Context, diags *diag.Diagnostics, model *Model, flavor *flavorModel) { + if model == nil { + diags.AddError("invalid model", "nil model") + return + } + if flavor == nil { + diags.AddError("invalid flavor", "nil flavor") + return + } + cpu := conversion.ToPtrInt32(flavor.CPU) + if cpu == nil { + diags.AddError("invalid flavor", "nil CPU") + return + } + ram := conversion.ToPtrInt32(flavor.RAM) + if ram == nil { + diags.AddError("invalid flavor", "nil RAM") + return + } + + projectId := model.ProjectId.ValueString() + res, err := r.client.GetFlavors(ctx, projectId).Execute() + if err != nil { + diags.AddError("failed to list postgresflex flavors", err.Error()) + return + } + + avl := "" + if res.Flavors == nil { + diags.AddError("no flavors", fmt.Sprintf("couldn't find flavors for id %s", flavor.Id.ValueString())) + return + } + for _, f := range *res.Flavors { + if f.Id == nil || f.Cpu == nil || f.Memory == nil { + continue + } + if *f.Cpu 
== *cpu && *f.Memory == *ram { + flavor.Id = types.StringValue(*f.Id) + break + } + avl = fmt.Sprintf("%s\n- %d CPU, %d GB RAM", avl, *f.Cpu, *f.Cpu) + } + if flavor.Id.ValueString() == "" { + diags.AddError("invalid flavor", fmt.Sprintf("couldn't find flavor.\navailable specs are:%s", avl)) + return + } +} diff --git a/stackit/services/postgresflex/instance/resource_test.go b/stackit/services/postgresflex/instance/resource_test.go new file mode 100644 index 00000000..a70c9bb5 --- /dev/null +++ b/stackit/services/postgresflex/instance/resource_test.go @@ -0,0 +1,509 @@ +package postgresflex + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/postgresflex" +) + +func TestMapFields(t *testing.T) { + tests := []struct { + description string + input *postgresflex.InstanceResponse + flavor *flavorModel + storage *storageModel + expected Model + isValid bool + }{ + { + "default_values", + &postgresflex.InstanceResponse{ + Item: &postgresflex.InstanceSingleInstance{}, + }, + &flavorModel{}, + &storageModel{}, + Model{ + Id: types.StringValue("pid,iid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Name: types.StringNull(), + ACL: types.ListNull(types.StringType), + BackupSchedule: types.StringNull(), + Flavor: types.ObjectValueMust(flavorTypes, map[string]attr.Value{ + "id": types.StringNull(), + "description": types.StringNull(), + "cpu": types.Int64Null(), + "ram": types.Int64Null(), + }), + Replicas: types.Int64Null(), + Storage: types.ObjectValueMust(storageTypes, map[string]attr.Value{ + "class": types.StringNull(), + "size": types.Int64Null(), + }), + Version: types.StringNull(), + }, + true, + }, + { + "simple_values", + &postgresflex.InstanceResponse{ + Item: &postgresflex.InstanceSingleInstance{ + Acl: 
&postgresflex.InstanceAcl{ + Items: &[]string{ + "ip1", + "ip2", + "", + }, + }, + BackupSchedule: utils.Ptr("schedule"), + Flavor: &postgresflex.InstanceFlavor{ + Cpu: utils.Ptr(int32(12)), + Description: utils.Ptr("description"), + Id: utils.Ptr("flavor_id"), + Memory: utils.Ptr(int32(34)), + }, + Id: utils.Ptr("iid"), + Name: utils.Ptr("name"), + Replicas: utils.Ptr(int32(56)), + Status: utils.Ptr("status"), + Storage: &postgresflex.InstanceStorage{ + Class: utils.Ptr("class"), + Size: utils.Ptr(int32(78)), + }, + Version: utils.Ptr("version"), + }, + }, + &flavorModel{}, + &storageModel{}, + Model{ + Id: types.StringValue("pid,iid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Name: types.StringValue("name"), + ACL: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("ip1"), + types.StringValue("ip2"), + types.StringValue(""), + }), + BackupSchedule: types.StringValue("schedule"), + Flavor: types.ObjectValueMust(flavorTypes, map[string]attr.Value{ + "id": types.StringValue("flavor_id"), + "description": types.StringValue("description"), + "cpu": types.Int64Value(12), + "ram": types.Int64Value(34), + }), + Replicas: types.Int64Value(56), + Storage: types.ObjectValueMust(storageTypes, map[string]attr.Value{ + "class": types.StringValue("class"), + "size": types.Int64Value(78), + }), + Version: types.StringValue("version"), + }, + true, + }, + { + "simple_values_no_flavor_and_storage", + &postgresflex.InstanceResponse{ + Item: &postgresflex.InstanceSingleInstance{ + Acl: &postgresflex.InstanceAcl{ + Items: &[]string{ + "ip1", + "ip2", + "", + }, + }, + BackupSchedule: utils.Ptr("schedule"), + Flavor: nil, + Id: utils.Ptr("iid"), + Name: utils.Ptr("name"), + Replicas: utils.Ptr(int32(56)), + Status: utils.Ptr("status"), + Storage: nil, + Version: utils.Ptr("version"), + }, + }, + &flavorModel{ + CPU: types.Int64Value(12), + RAM: types.Int64Value(34), + }, + &storageModel{ + Class: types.StringValue("class"), 
+ Size: types.Int64Value(78), + }, + Model{ + Id: types.StringValue("pid,iid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Name: types.StringValue("name"), + ACL: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("ip1"), + types.StringValue("ip2"), + types.StringValue(""), + }), + BackupSchedule: types.StringValue("schedule"), + Flavor: types.ObjectValueMust(flavorTypes, map[string]attr.Value{ + "id": types.StringNull(), + "description": types.StringNull(), + "cpu": types.Int64Value(12), + "ram": types.Int64Value(34), + }), + Replicas: types.Int64Value(56), + Storage: types.ObjectValueMust(storageTypes, map[string]attr.Value{ + "class": types.StringValue("class"), + "size": types.Int64Value(78), + }), + Version: types.StringValue("version"), + }, + true, + }, + { + "nil_response", + nil, + &flavorModel{}, + &storageModel{}, + Model{}, + false, + }, + { + "no_resource_id", + &postgresflex.InstanceResponse{}, + &flavorModel{}, + &storageModel{}, + Model{}, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + state := &Model{ + ProjectId: tt.expected.ProjectId, + InstanceId: tt.expected.InstanceId, + } + err := mapFields(tt.input, state, tt.flavor, tt.storage) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(state, &tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func TestToCreatePayload(t *testing.T) { + tests := []struct { + description string + input *Model + inputAcl []string + inputFlavor *flavorModel + inputStorage *storageModel + expected *postgresflex.CreateInstancePayload + isValid bool + }{ + { + "default_values", + &Model{}, + []string{}, + &flavorModel{}, + &storageModel{}, + &postgresflex.CreateInstancePayload{ + Acl: &postgresflex.InstanceAcl{ + Items: &[]string{}, + 
}, + Storage: &postgresflex.InstanceStorage{}, + }, + true, + }, + { + "simple_values", + &Model{ + BackupSchedule: types.StringValue("schedule"), + Name: types.StringValue("name"), + Replicas: types.Int64Value(12), + Version: types.StringValue("version"), + }, + []string{ + "ip_1", + "ip_2", + }, + &flavorModel{ + Id: types.StringValue("flavor_id"), + }, + &storageModel{ + Class: types.StringValue("class"), + Size: types.Int64Value(34), + }, + &postgresflex.CreateInstancePayload{ + Acl: &postgresflex.InstanceAcl{ + Items: &[]string{ + "ip_1", + "ip_2", + }, + }, + BackupSchedule: utils.Ptr("schedule"), + FlavorId: utils.Ptr("flavor_id"), + Name: utils.Ptr("name"), + Replicas: utils.Ptr(int32(12)), + Storage: &postgresflex.InstanceStorage{ + Class: utils.Ptr("class"), + Size: utils.Ptr(int32(34)), + }, + Version: utils.Ptr("version"), + }, + true, + }, + { + "null_fields_and_int_conversions", + &Model{ + BackupSchedule: types.StringNull(), + Name: types.StringNull(), + Replicas: types.Int64Value(2123456789), + Version: types.StringNull(), + }, + []string{ + "", + }, + &flavorModel{ + Id: types.StringNull(), + }, + &storageModel{ + Class: types.StringNull(), + Size: types.Int64Null(), + }, + &postgresflex.CreateInstancePayload{ + Acl: &postgresflex.InstanceAcl{ + Items: &[]string{ + "", + }, + }, + BackupSchedule: nil, + FlavorId: nil, + Name: nil, + Replicas: utils.Ptr(int32(2123456789)), + Storage: &postgresflex.InstanceStorage{ + Class: nil, + Size: nil, + }, + Version: nil, + }, + true, + }, + { + "nil_model", + nil, + []string{}, + &flavorModel{}, + &storageModel{}, + nil, + false, + }, + { + "nil_acl", + &Model{}, + nil, + &flavorModel{}, + &storageModel{}, + nil, + false, + }, + { + "nil_flavor", + &Model{}, + []string{}, + nil, + &storageModel{}, + nil, + false, + }, + { + "nil_storage", + &Model{}, + []string{}, + &flavorModel{}, + nil, + nil, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + output, err := 
toCreatePayload(tt.input, tt.inputAcl, tt.inputFlavor, tt.inputStorage) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(output, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func TestToUpdatePayload(t *testing.T) { + tests := []struct { + description string + input *Model + inputAcl []string + inputFlavor *flavorModel + inputStorage *storageModel + expected *postgresflex.UpdateInstancePayload + isValid bool + }{ + { + "default_values", + &Model{}, + []string{}, + &flavorModel{}, + &storageModel{}, + &postgresflex.UpdateInstancePayload{ + Acl: &postgresflex.InstanceAcl{ + Items: &[]string{}, + }, + Storage: &postgresflex.InstanceStorage{}, + }, + true, + }, + { + "simple_values", + &Model{ + BackupSchedule: types.StringValue("schedule"), + Name: types.StringValue("name"), + Replicas: types.Int64Value(12), + Version: types.StringValue("version"), + }, + []string{ + "ip_1", + "ip_2", + }, + &flavorModel{ + Id: types.StringValue("flavor_id"), + }, + &storageModel{ + Class: types.StringValue("class"), + Size: types.Int64Value(34), + }, + &postgresflex.UpdateInstancePayload{ + Acl: &postgresflex.InstanceAcl{ + Items: &[]string{ + "ip_1", + "ip_2", + }, + }, + BackupSchedule: utils.Ptr("schedule"), + FlavorId: utils.Ptr("flavor_id"), + Name: utils.Ptr("name"), + Replicas: utils.Ptr(int32(12)), + Storage: &postgresflex.InstanceStorage{ + Class: utils.Ptr("class"), + Size: utils.Ptr(int32(34)), + }, + Version: utils.Ptr("version"), + }, + true, + }, + { + "null_fields_and_int_conversions", + &Model{ + BackupSchedule: types.StringNull(), + Name: types.StringNull(), + Replicas: types.Int64Value(2123456789), + Version: types.StringNull(), + }, + []string{ + "", + }, + &flavorModel{ + Id: types.StringNull(), + }, + &storageModel{ + Class: types.StringNull(), + Size: types.Int64Null(), + 
}, + &postgresflex.UpdateInstancePayload{ + Acl: &postgresflex.InstanceAcl{ + Items: &[]string{ + "", + }, + }, + BackupSchedule: nil, + FlavorId: nil, + Name: nil, + Replicas: utils.Ptr(int32(2123456789)), + Storage: &postgresflex.InstanceStorage{ + Class: nil, + Size: nil, + }, + Version: nil, + }, + true, + }, + { + "nil_model", + nil, + []string{}, + &flavorModel{}, + &storageModel{}, + nil, + false, + }, + { + "nil_acl", + &Model{}, + nil, + &flavorModel{}, + &storageModel{}, + nil, + false, + }, + { + "nil_flavor", + &Model{}, + []string{}, + nil, + &storageModel{}, + nil, + false, + }, + { + "nil_storage", + &Model{}, + []string{}, + &flavorModel{}, + nil, + nil, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + output, err := toUpdatePayload(tt.input, tt.inputAcl, tt.inputFlavor, tt.inputStorage) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(output, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} diff --git a/stackit/services/postgresflex/postgresflex_acc_test.go b/stackit/services/postgresflex/postgresflex_acc_test.go new file mode 100644 index 00000000..420fa994 --- /dev/null +++ b/stackit/services/postgresflex/postgresflex_acc_test.go @@ -0,0 +1,324 @@ +package postgresflex_test + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/postgresflex" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + 
"github.com/stackitcloud/terraform-provider-stackit/stackit/testutil" +) + +// Instance resource data +var instanceResource = map[string]string{ + "project_id": testutil.ProjectId, + "name": fmt.Sprintf("tf-acc-%s", acctest.RandStringFromCharSet(7, acctest.CharSetAlphaNum)), + "acl": "192.168.0.0/16", + "backup_schedule": "00 16 * * *", + "backup_schedule_update": "00 12 * * *", + "flavor_cpu": "2", + "flavor_ram": "4", + "flavor_description": "Small, Compute optimized", + "replicas": "1", + "storage_class": "premium-perf12-stackit", + "storage_size": "5", + "version": "14", + "flavor_id": "2.4", +} + +// User resource data +var userResource = map[string]string{ + "username": fmt.Sprintf("tf-acc-user-%s", acctest.RandStringFromCharSet(7, acctest.CharSetAlpha)), + "role": "login", + "project_id": instanceResource["project_id"], +} + +func configResources() string { + return fmt.Sprintf(` + %s + + resource "stackit_postgresflex_instance" "instance" { + project_id = "%s" + name = "%s" + acl = ["%s"] + backup_schedule = "%s" + flavor = { + cpu = %s + ram = %s + } + replicas = %s + storage = { + class = "%s" + size = %s + } + version = "%s" + } + + resource "stackit_postgresflex_user" "user" { + project_id = stackit_postgresflex_instance.instance.project_id + instance_id = stackit_postgresflex_instance.instance.instance_id + username = "%s" + roles = ["%s"] + } + `, + testutil.PostgresFlexProviderConfig(), + instanceResource["project_id"], + instanceResource["name"], + instanceResource["acl"], + instanceResource["backup_schedule"], + instanceResource["flavor_cpu"], + instanceResource["flavor_ram"], + instanceResource["replicas"], + instanceResource["storage_class"], + instanceResource["storage_size"], + instanceResource["version"], + userResource["username"], + userResource["role"], + ) +} + +func TestAccPostgresFlexFlexResource(t *testing.T) { + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: testutil.TestAccProtoV6ProviderFactories, + CheckDestroy: 
testAccCheckPostgresFlexDestroy, + Steps: []resource.TestStep{ + // Creation + { + Config: configResources(), + Check: resource.ComposeAggregateTestCheckFunc( + // Instance + resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "project_id", instanceResource["project_id"]), + resource.TestCheckResourceAttrSet("stackit_postgresflex_instance.instance", "instance_id"), + resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "name", instanceResource["name"]), + resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "acl.#", "1"), + resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "acl.0", instanceResource["acl"]), + resource.TestCheckResourceAttrSet("stackit_postgresflex_instance.instance", "flavor.id"), + resource.TestCheckResourceAttrSet("stackit_postgresflex_instance.instance", "flavor.description"), + resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "backup_schedule", instanceResource["backup_schedule"]), + resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "flavor.cpu", instanceResource["flavor_cpu"]), + resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "flavor.ram", instanceResource["flavor_ram"]), + resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "replicas", instanceResource["replicas"]), + resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "storage.class", instanceResource["storage_class"]), + resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "storage.size", instanceResource["storage_size"]), + resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "version", instanceResource["version"]), + + // User + resource.TestCheckResourceAttrPair( + "stackit_postgresflex_user.user", "project_id", + "stackit_postgresflex_instance.instance", "project_id", + ), + resource.TestCheckResourceAttrPair( + "stackit_postgresflex_user.user", 
"instance_id", + "stackit_postgresflex_instance.instance", "instance_id", + ), + resource.TestCheckResourceAttrSet("stackit_postgresflex_user.user", "user_id"), + resource.TestCheckResourceAttrSet("stackit_postgresflex_user.user", "password"), + ), + }, + // data source + { + Config: fmt.Sprintf(` + %s + + data "stackit_postgresflex_instance" "instance" { + project_id = stackit_postgresflex_instance.instance.project_id + instance_id = stackit_postgresflex_instance.instance.instance_id + } + + data "stackit_postgresflex_user" "user" { + project_id = stackit_postgresflex_instance.instance.project_id + instance_id = stackit_postgresflex_instance.instance.instance_id + user_id = stackit_postgresflex_user.user.user_id + } + `, + configResources(), + ), + Check: resource.ComposeAggregateTestCheckFunc( + // Instance data + resource.TestCheckResourceAttr("data.stackit_postgresflex_instance.instance", "project_id", instanceResource["project_id"]), + resource.TestCheckResourceAttr("data.stackit_postgresflex_instance.instance", "name", instanceResource["name"]), + resource.TestCheckResourceAttrPair( + "data.stackit_postgresflex_instance.instance", "project_id", + "stackit_postgresflex_instance.instance", "project_id", + ), + resource.TestCheckResourceAttrPair( + "data.stackit_postgresflex_instance.instance", "instance_id", + "stackit_postgresflex_instance.instance", "instance_id", + ), + resource.TestCheckResourceAttrPair( + "data.stackit_postgresflex_user.user", "instance_id", + "stackit_postgresflex_user.user", "instance_id", + ), + + resource.TestCheckResourceAttr("data.stackit_postgresflex_instance.instance", "acl.#", "1"), + resource.TestCheckResourceAttr("data.stackit_postgresflex_instance.instance", "acl.0", instanceResource["acl"]), + resource.TestCheckResourceAttr("data.stackit_postgresflex_instance.instance", "backup_schedule", instanceResource["backup_schedule"]), + resource.TestCheckResourceAttr("data.stackit_postgresflex_instance.instance", "flavor.id", 
instanceResource["flavor_id"]), + resource.TestCheckResourceAttr("data.stackit_postgresflex_instance.instance", "flavor.description", instanceResource["flavor_description"]), + resource.TestCheckResourceAttr("data.stackit_postgresflex_instance.instance", "flavor.cpu", instanceResource["flavor_cpu"]), + resource.TestCheckResourceAttr("data.stackit_postgresflex_instance.instance", "flavor.ram", instanceResource["flavor_ram"]), + resource.TestCheckResourceAttr("data.stackit_postgresflex_instance.instance", "replicas", instanceResource["replicas"]), + + // User data + resource.TestCheckResourceAttr("data.stackit_postgresflex_user.user", "project_id", userResource["project_id"]), + resource.TestCheckResourceAttrSet("data.stackit_postgresflex_user.user", "user_id"), + resource.TestCheckResourceAttr("data.stackit_postgresflex_user.user", "username", userResource["username"]), + resource.TestCheckResourceAttr("data.stackit_postgresflex_user.user", "roles.#", "1"), + resource.TestCheckResourceAttr("data.stackit_postgresflex_user.user", "roles.0", userResource["role"]), + resource.TestCheckResourceAttrSet("data.stackit_postgresflex_user.user", "host"), + resource.TestCheckResourceAttrSet("data.stackit_postgresflex_user.user", "port"), + ), + }, + // Import + { + ResourceName: "stackit_postgresflex_instance.instance", + ImportStateIdFunc: func(s *terraform.State) (string, error) { + r, ok := s.RootModule().Resources["stackit_postgresflex_instance.instance"] + if !ok { + return "", fmt.Errorf("couldn't find resource stackit_postgresflex_instance.instance") + } + instanceId, ok := r.Primary.Attributes["instance_id"] + if !ok { + return "", fmt.Errorf("couldn't find attribute instance_id") + } + + return fmt.Sprintf("%s,%s", testutil.ProjectId, instanceId), nil + }, + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "stackit_postgresflex_user.user", + ImportStateIdFunc: func(s *terraform.State) (string, error) { + r, ok := 
s.RootModule().Resources["stackit_postgresflex_user.user"] + if !ok { + return "", fmt.Errorf("couldn't find resource stackit_postgresflex_user.user") + } + instanceId, ok := r.Primary.Attributes["instance_id"] + if !ok { + return "", fmt.Errorf("couldn't find attribute instance_id") + } + userId, ok := r.Primary.Attributes["user_id"] + if !ok { + return "", fmt.Errorf("couldn't find attribute user_id") + } + + return fmt.Sprintf("%s,%s,%s", testutil.ProjectId, instanceId, userId), nil + }, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"password"}, + }, + // Update + { + Config: fmt.Sprintf(` + %s + + resource "stackit_postgresflex_instance" "instance" { + project_id = "%s" + name = "%s" + acl = ["%s"] + backup_schedule = "%s" + flavor = { + cpu = %s + ram = %s + } + replicas = %s + storage = { + class = "%s" + size = %s + } + version = "%s" + } + `, + testutil.PostgresFlexProviderConfig(), + instanceResource["project_id"], + instanceResource["name"], + instanceResource["acl"], + instanceResource["backup_schedule_update"], + instanceResource["flavor_cpu"], + instanceResource["flavor_ram"], + instanceResource["replicas"], + instanceResource["storage_class"], + instanceResource["storage_size"], + instanceResource["version"], + ), + Check: resource.ComposeAggregateTestCheckFunc( + // Instance data + resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "project_id", instanceResource["project_id"]), + resource.TestCheckResourceAttrSet("stackit_postgresflex_instance.instance", "instance_id"), + resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "name", instanceResource["name"]), + resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "acl.#", "1"), + resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "acl.0", instanceResource["acl"]), + resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "backup_schedule", 
instanceResource["backup_schedule_update"]), + resource.TestCheckResourceAttrSet("stackit_postgresflex_instance.instance", "flavor.id"), + resource.TestCheckResourceAttrSet("stackit_postgresflex_instance.instance", "flavor.description"), + resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "flavor.cpu", instanceResource["flavor_cpu"]), + resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "flavor.ram", instanceResource["flavor_ram"]), + resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "replicas", instanceResource["replicas"]), + resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "storage.class", instanceResource["storage_class"]), + resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "storage.size", instanceResource["storage_size"]), + resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "version", instanceResource["version"]), + ), + }, + // Deletion is done by the framework implicitly + }, + }) +} + +func testAccCheckPostgresFlexDestroy(s *terraform.State) error { + ctx := context.Background() + var client *postgresflex.APIClient + var err error + if testutil.PostgresFlexCustomEndpoint == "" { + client, err = postgresflex.NewAPIClient() + } else { + client, err = postgresflex.NewAPIClient( + config.WithEndpoint(testutil.PostgresFlexCustomEndpoint), + ) + } + if err != nil { + return fmt.Errorf("creating client: %w", err) + } + + instancesToDestroy := []string{} + for _, rs := range s.RootModule().Resources { + if rs.Type != "stackit_postgresql_instance" { + continue + } + // instance terraform ID: = "[project_id],[instance_id]" + instanceId := strings.Split(rs.Primary.ID, core.Separator)[1] + instancesToDestroy = append(instancesToDestroy, instanceId) + } + + instancesResp, err := client.GetInstances(ctx, testutil.ProjectId).Execute() + if err != nil { + return fmt.Errorf("getting instancesResp: %w", err) + } + + items := 
*instancesResp.Items + for i := range items { + if items[i].Id == nil { + continue + } + if utils.Contains(instancesToDestroy, *items[i].Id) { + err := client.DeleteInstanceExecute(ctx, testutil.ProjectId, *items[i].Id) + if err != nil { + return fmt.Errorf("destroying instance %s during CheckDestroy: %w", *items[i].Id, err) + } + _, err = postgresflex.DeleteInstanceWaitHandler(ctx, client, testutil.ProjectId, *items[i].Id).WaitWithContext(ctx) + if err != nil { + return fmt.Errorf("destroying instance %s during CheckDestroy: waiting for deletion %w", *items[i].Id, err) + } + } + } + return nil +} diff --git a/stackit/services/postgresflex/user/datasource.go b/stackit/services/postgresflex/user/datasource.go new file mode 100644 index 00000000..b9e8f1f7 --- /dev/null +++ b/stackit/services/postgresflex/user/datasource.go @@ -0,0 +1,168 @@ +package postgresflex + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" + + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/postgresflex" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &userDataSource{} +) + +// NewUserDataSource is a helper function to simplify the provider implementation. +func NewUserDataSource() datasource.DataSource { + return &userDataSource{} +} + +// userDataSource is the data source implementation. +type userDataSource struct { + client *postgresflex.APIClient +} + +// Metadata returns the resource type name. 
+func (r *userDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_postgresflex_user" +} + +// Configure adds the provider configured client to the resource. +func (r *userDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Data Source Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T", req.ProviderData)) + return + } + + var apiClient *postgresflex.APIClient + var err error + if providerData.PostgresFlexCustomEndpoint != "" { + apiClient, err = postgresflex.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.PostgresFlexCustomEndpoint), + ) + } else { + apiClient, err = postgresflex.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + + tflog.Info(ctx, "Postgresflex user client configured") + r.client = apiClient +} + +// Schema defines the schema for the resource. 
+func (r *userDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + descriptions := map[string]string{ + "main": "PostgresFlex user data source schema.", + "id": "Terraform's internal resource ID.", + "user_id": "User ID.", + "instance_id": "ID of the PostgresFlex instance.", + "project_id": "STACKIT project ID to which the instance is associated.", + } + + resp.Schema = schema.Schema{ + Description: descriptions["main"], + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: descriptions["id"], + Computed: true, + }, + "user_id": schema.StringAttribute{ + Description: descriptions["user_id"], + Required: true, + Validators: []validator.String{ + validate.NoSeparator(), + }, + }, + "instance_id": schema.StringAttribute{ + Description: descriptions["instance_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "project_id": schema.StringAttribute{ + Description: descriptions["project_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "username": schema.StringAttribute{ + Computed: true, + }, + "roles": schema.SetAttribute{ + ElementType: types.StringType, + Computed: true, + }, + "password": schema.StringAttribute{ + Computed: true, + Sensitive: true, + }, + "host": schema.StringAttribute{ + Computed: true, + }, + "port": schema.Int64Attribute{ + Computed: true, + }, + }, + } +} + +// Read refreshes the Terraform state with the latest data. +func (r *userDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.Config.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + userId := model.UserId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + ctx = tflog.SetField(ctx, "user_id", userId) + + recordSetResp, err := r.client.GetUser(ctx, projectId, instanceId, userId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading user", err.Error()) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(recordSetResp, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + + // Set refreshed state + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "Postgresql user read") +} diff --git a/stackit/services/postgresflex/user/resource.go b/stackit/services/postgresflex/user/resource.go new file mode 100644 index 00000000..44eb9078 --- /dev/null +++ b/stackit/services/postgresflex/user/resource.go @@ -0,0 +1,431 @@ +package postgresflex + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/terraform-provider-stackit/stackit/conversion" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + 
// Interface compliance checks: fail at compile time if userResource stops
// satisfying the framework interfaces it must implement.
var (
	_ resource.Resource                = &userResource{}
	_ resource.ResourceWithConfigure   = &userResource{}
	_ resource.ResourceWithImportState = &userResource{}
)

// Model maps the PostgresFlex user schema (resource and data source) to Go types.
type Model struct {
	Id         types.String `tfsdk:"id"` // needed by TF
	UserId     types.String `tfsdk:"user_id"`
	InstanceId types.String `tfsdk:"instance_id"`
	ProjectId  types.String `tfsdk:"project_id"`
	Username   types.String `tfsdk:"username"`
	Roles      types.Set    `tfsdk:"roles"`
	Password   types.String `tfsdk:"password"`
	Host       types.String `tfsdk:"host"`
	Port       types.Int64  `tfsdk:"port"`
}

// NewUserResource is a helper function to simplify the provider implementation.
func NewUserResource() resource.Resource {
	return &userResource{}
}

// userResource is the resource implementation.
type userResource struct {
	// client is the PostgresFlex API client; populated in Configure.
	client *postgresflex.APIClient
}

// Metadata returns the resource type name.
func (r *userResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_postgresflex_user"
}

// Configure adds the provider configured client to the resource.
func (r *userResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
	// Prevent panic if the provider has not been configured.
	if req.ProviderData == nil {
		return
	}

	providerData, ok := req.ProviderData.(core.ProviderData)
	if !ok {
		resp.Diagnostics.AddError("Unexpected Data Source Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T", req.ProviderData))
		return
	}

	// A custom endpoint (e.g. for acceptance tests) takes precedence over the
	// region-derived default service URL.
	var apiClient *postgresflex.APIClient
	var err error
	if providerData.PostgresFlexCustomEndpoint != "" {
		apiClient, err = postgresflex.NewAPIClient(
			config.WithCustomAuth(providerData.RoundTripper),
			config.WithEndpoint(providerData.PostgresFlexCustomEndpoint),
		)
	} else {
		apiClient, err = postgresflex.NewAPIClient(
			config.WithCustomAuth(providerData.RoundTripper),
			config.WithRegion(providerData.Region),
		)
	}

	if err != nil {
		resp.Diagnostics.AddError("Could not Configure API Client", err.Error())
		return
	}

	tflog.Info(ctx, "Postgresflex user client configured")
	r.client = apiClient
}
PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "project_id": schema.StringAttribute{ + Description: descriptions["project_id"], + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "username": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "roles": schema.SetAttribute{ + ElementType: types.StringType, + Required: true, + PlanModifiers: []planmodifier.Set{ + setplanmodifier.RequiresReplace(), + }, + Validators: []validator.Set{ + setvalidator.ValueStringsAre( + stringvalidator.OneOf("login", "createdb"), + ), + }, + }, + "password": schema.StringAttribute{ + Computed: true, + Sensitive: true, + }, + "host": schema.StringAttribute{ + Computed: true, + }, + "port": schema.Int64Attribute{ + Computed: true, + }, + }, + } +} + +// Create creates the resource and sets the initial Terraform state. +func (r *userResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + var roles []string + if !(model.Roles.IsNull() || model.Roles.IsUnknown()) { + diags = model.Roles.ElementsAs(ctx, &roles, false) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + } + + // Generate API request body from model + payload, err := toCreatePayload(&model, roles) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating user", fmt.Sprintf("Creating API payload: %v", err)) + return + } + // Create new user + userResp, err := r.client.CreateUser(ctx, projectId, instanceId).CreateUserPayload(*payload).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating user", fmt.Sprintf("Calling API: %v", err)) + return + } + if userResp == nil || userResp.Item == nil || userResp.Item.Id == nil || *userResp.Item.Id == "" { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating user", "Didn't get ID of created user. A user might have been created") + return + } + userId := *userResp.Item.Id + ctx = tflog.SetField(ctx, "user_id", userId) + + // Map response body to schema and populate Computed attribute values + err = mapFieldsCreate(userResp, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + // Set state to fully populated data + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "Postgresflex user created") +} + +// Read refreshes the Terraform state with the latest data. +func (r *userResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + userId := model.UserId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + ctx = tflog.SetField(ctx, "user_id", userId) + + recordSetResp, err := r.client.GetUser(ctx, projectId, instanceId, userId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading user", err.Error()) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(recordSetResp, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + + // Set refreshed state + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "Postgresflex user read") +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *userResource) Update(_ context.Context, _ resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform + // Update shouldn't be called + resp.Diagnostics.AddError("Error updating user", "user can't be updated") +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *userResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform + // Retrieve values from plan + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + userId := model.UserId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + ctx = tflog.SetField(ctx, "user_id", userId) + + // Delete existing record set + err := r.client.DeleteUser(ctx, projectId, instanceId, userId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting user", err.Error()) + } + _, err = postgresflex.DeleteUserWaitHandler(ctx, r.client, projectId, instanceId, userId).SetTimeout(1 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting user", fmt.Sprintf("Instance deletion waiting: %v", err)) + return + } + tflog.Info(ctx, "Postgresflex user deleted") +} + +// ImportState imports a resource into the Terraform state on success. +// The expected format of the resource import identifier is: project_id,zone_id,record_set_id +func (r *userResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + idParts := strings.Split(req.ID, core.Separator) + if len(idParts) != 3 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" { + core.LogAndAddError(ctx, &resp.Diagnostics, + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format [project_id],[instance_id],[user_id], got %q", req.ID), + ) + return + } + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), idParts[1])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("user_id"), idParts[2])...) 
+ tflog.Info(ctx, "Postgresflex user state imported") +} + +func mapFieldsCreate(userResp *postgresflex.CreateUserResponse, model *Model) error { + if userResp == nil || userResp.Item == nil { + return fmt.Errorf("response is nil") + } + if model == nil { + return fmt.Errorf("model input is nil") + } + user := userResp.Item + + if user.Id == nil { + return fmt.Errorf("user id not present") + } + userId := *user.Id + idParts := []string{ + model.ProjectId.ValueString(), + model.InstanceId.ValueString(), + userId, + } + model.Id = types.StringValue( + strings.Join(idParts, core.Separator), + ) + model.UserId = types.StringValue(userId) + model.Username = types.StringPointerValue(user.Username) + + if user.Password == nil { + return fmt.Errorf("user password not present") + } + model.Password = types.StringValue(*user.Password) + + if user.Roles == nil { + model.Roles = types.SetNull(types.StringType) + } else { + roles := []attr.Value{} + for _, role := range *user.Roles { + roles = append(roles, types.StringValue(role)) + } + rolesSet, diags := types.SetValue(types.StringType, roles) + if diags.HasError() { + return fmt.Errorf("failed to map roles: %w", core.DiagsToError(diags)) + } + model.Roles = rolesSet + } + model.Host = types.StringPointerValue(user.Host) + model.Port = conversion.ToTypeInt64(user.Port) + return nil +} + +func mapFields(userResp *postgresflex.UserResponse, model *Model) error { + if userResp == nil || userResp.Item == nil { + return fmt.Errorf("response is nil") + } + if model == nil { + return fmt.Errorf("model input is nil") + } + user := userResp.Item + + var userId string + if model.UserId.ValueString() != "" { + userId = model.UserId.ValueString() + } else if user.Id != nil { + userId = *user.Id + } else { + return fmt.Errorf("user id not present") + } + idParts := []string{ + model.ProjectId.ValueString(), + model.InstanceId.ValueString(), + userId, + } + model.Id = types.StringValue( + strings.Join(idParts, core.Separator), + ) + 
model.UserId = types.StringValue(userId) + model.Username = types.StringPointerValue(user.Username) + + if user.Roles == nil { + model.Roles = types.SetNull(types.StringType) + } else { + roles := []attr.Value{} + for _, role := range *user.Roles { + roles = append(roles, types.StringValue(role)) + } + rolesSet, diags := types.SetValue(types.StringType, roles) + if diags.HasError() { + return fmt.Errorf("failed to map roles: %w", core.DiagsToError(diags)) + } + model.Roles = rolesSet + } + model.Host = types.StringPointerValue(user.Host) + model.Port = conversion.ToTypeInt64(user.Port) + return nil +} + +func toCreatePayload(model *Model, roles []string) (*postgresflex.CreateUserPayload, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + if roles == nil { + return nil, fmt.Errorf("nil roles") + } + + return &postgresflex.CreateUserPayload{ + Roles: &roles, + Username: model.Username.ValueStringPointer(), + }, nil +} diff --git a/stackit/services/postgresflex/user/resource_test.go b/stackit/services/postgresflex/user/resource_test.go new file mode 100644 index 00000000..56d6115d --- /dev/null +++ b/stackit/services/postgresflex/user/resource_test.go @@ -0,0 +1,359 @@ +package postgresflex + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/postgresflex" +) + +func TestMapFieldsCreate(t *testing.T) { + tests := []struct { + description string + input *postgresflex.CreateUserResponse + expected Model + isValid bool + }{ + { + "default_values", + &postgresflex.CreateUserResponse{ + Item: &postgresflex.InstanceUser{ + Id: utils.Ptr("uid"), + Password: utils.Ptr(""), + }, + }, + Model{ + Id: types.StringValue("pid,iid,uid"), + UserId: types.StringValue("uid"), + InstanceId: types.StringValue("iid"), + ProjectId: 
types.StringValue("pid"), + Username: types.StringNull(), + Roles: types.SetNull(types.StringType), + Password: types.StringValue(""), + Host: types.StringNull(), + Port: types.Int64Null(), + }, + true, + }, + { + "simple_values", + &postgresflex.CreateUserResponse{ + Item: &postgresflex.InstanceUser{ + Id: utils.Ptr("uid"), + Roles: &[]string{ + "role_1", + "role_2", + "", + }, + Username: utils.Ptr("username"), + Password: utils.Ptr("password"), + Host: utils.Ptr("host"), + Port: utils.Ptr(int32(1234)), + }, + }, + Model{ + Id: types.StringValue("pid,iid,uid"), + UserId: types.StringValue("uid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Username: types.StringValue("username"), + Roles: types.SetValueMust(types.StringType, []attr.Value{ + types.StringValue("role_1"), + types.StringValue("role_2"), + types.StringValue(""), + }), + Password: types.StringValue("password"), + Host: types.StringValue("host"), + Port: types.Int64Value(1234), + }, + true, + }, + { + "null_fields_and_int_conversions", + &postgresflex.CreateUserResponse{ + Item: &postgresflex.InstanceUser{ + Id: utils.Ptr("uid"), + Roles: &[]string{}, + Username: nil, + Password: utils.Ptr(""), + Host: nil, + Port: utils.Ptr(int32(2123456789)), + }, + }, + Model{ + Id: types.StringValue("pid,iid,uid"), + UserId: types.StringValue("uid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Username: types.StringNull(), + Roles: types.SetValueMust(types.StringType, []attr.Value{}), + Password: types.StringValue(""), + Host: types.StringNull(), + Port: types.Int64Value(2123456789), + }, + true, + }, + { + "nil_response", + nil, + Model{}, + false, + }, + { + "nil_response_2", + &postgresflex.CreateUserResponse{}, + Model{}, + false, + }, + { + "no_resource_id", + &postgresflex.CreateUserResponse{ + Item: &postgresflex.InstanceUser{}, + }, + Model{}, + false, + }, + { + "no_password", + &postgresflex.CreateUserResponse{ + Item: 
&postgresflex.InstanceUser{ + Id: utils.Ptr("uid"), + }, + }, + Model{}, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + state := &Model{ + ProjectId: tt.expected.ProjectId, + InstanceId: tt.expected.InstanceId, + } + err := mapFieldsCreate(tt.input, state) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(state, &tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func TestMapCreate(t *testing.T) { + tests := []struct { + description string + input *postgresflex.UserResponse + expected Model + isValid bool + }{ + { + "default_values", + &postgresflex.UserResponse{ + Item: &postgresflex.UserResponseUser{}, + }, + Model{ + Id: types.StringValue("pid,iid,uid"), + UserId: types.StringValue("uid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Username: types.StringNull(), + Roles: types.SetNull(types.StringType), + Host: types.StringNull(), + Port: types.Int64Null(), + }, + true, + }, + { + "simple_values", + &postgresflex.UserResponse{ + Item: &postgresflex.UserResponseUser{ + Roles: &[]string{ + "role_1", + "role_2", + "", + }, + Username: utils.Ptr("username"), + Host: utils.Ptr("host"), + Port: utils.Ptr(int32(1234)), + }, + }, + Model{ + Id: types.StringValue("pid,iid,uid"), + UserId: types.StringValue("uid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Username: types.StringValue("username"), + Roles: types.SetValueMust(types.StringType, []attr.Value{ + types.StringValue("role_1"), + types.StringValue("role_2"), + types.StringValue(""), + }), + Host: types.StringValue("host"), + Port: types.Int64Value(1234), + }, + true, + }, + { + "null_fields_and_int_conversions", + &postgresflex.UserResponse{ + Item: &postgresflex.UserResponseUser{ + Id: utils.Ptr("uid"), + 
Roles: &[]string{}, + Username: nil, + Host: nil, + Port: utils.Ptr(int32(2123456789)), + }, + }, + Model{ + Id: types.StringValue("pid,iid,uid"), + UserId: types.StringValue("uid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Username: types.StringNull(), + Roles: types.SetValueMust(types.StringType, []attr.Value{}), + Host: types.StringNull(), + Port: types.Int64Value(2123456789), + }, + true, + }, + { + "nil_response", + nil, + Model{}, + false, + }, + { + "nil_response_2", + &postgresflex.UserResponse{}, + Model{}, + false, + }, + { + "no_resource_id", + &postgresflex.UserResponse{ + Item: &postgresflex.UserResponseUser{}, + }, + Model{}, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + state := &Model{ + ProjectId: tt.expected.ProjectId, + InstanceId: tt.expected.InstanceId, + UserId: tt.expected.UserId, + } + err := mapFields(tt.input, state) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(state, &tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func TestToCreatePayload(t *testing.T) { + tests := []struct { + description string + input *Model + inputRoles []string + expected *postgresflex.CreateUserPayload + isValid bool + }{ + { + "default_values", + &Model{}, + []string{}, + &postgresflex.CreateUserPayload{ + Roles: &[]string{}, + Username: nil, + }, + true, + }, + { + "default_values", + &Model{ + Username: types.StringValue("username"), + }, + []string{ + "role_1", + "role_2", + }, + &postgresflex.CreateUserPayload{ + Roles: &[]string{ + "role_1", + "role_2", + }, + Username: utils.Ptr("username"), + }, + true, + }, + { + "null_fields_and_int_conversions", + &Model{ + Username: types.StringNull(), + }, + []string{ + "", + }, + &postgresflex.CreateUserPayload{ + Roles: &[]string{ + "", + 
}, + Username: nil, + }, + true, + }, + { + "nil_model", + nil, + []string{}, + nil, + false, + }, + { + "nil_roles", + &Model{}, + nil, + nil, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + output, err := toCreatePayload(tt.input, tt.inputRoles) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(output, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} diff --git a/stackit/services/postgresql/credentials/datasource.go b/stackit/services/postgresql/credentials/datasource.go new file mode 100644 index 00000000..dcd47d14 --- /dev/null +++ b/stackit/services/postgresql/credentials/datasource.go @@ -0,0 +1,178 @@ +package postgresql + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" + + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/postgresql" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &credentialsDataSource{} +) + +// NewCredentialsDataSource is a helper function to simplify the provider implementation. +func NewCredentialsDataSource() datasource.DataSource { + return &credentialsDataSource{} +} + +// credentialsDataSource is the data source implementation. +type credentialsDataSource struct { + client *postgresql.APIClient +} + +// Metadata returns the resource type name. 
+func (r *credentialsDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_postgresql_credentials" +} + +// Configure adds the provider configured client to the resource. +func (r *credentialsDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Data Source Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. Please report this issue to the provider developers.", req.ProviderData)) + return + } + + var apiClient *postgresql.APIClient + var err error + if providerData.PostgreSQLCustomEndpoint != "" { + apiClient, err = postgresql.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.PostgreSQLCustomEndpoint), + ) + } else { + apiClient, err = postgresql.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + + tflog.Info(ctx, "Postgresql zone client configured") + r.client = apiClient +} + +// Schema defines the schema for the resource. 
+func (r *credentialsDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + descriptions := map[string]string{ + "main": "PostgreSQL credentials data source schema.", + "id": "Terraform's internal resource identifier.", + "credentials_id": "The credentials ID.", + "instance_id": "ID of the PostgreSQL instance.", + "project_id": "STACKIT project ID to which the instance is associated.", + } + + resp.Schema = schema.Schema{ + Description: descriptions["main"], + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: descriptions["id"], + Computed: true, + }, + "credentials_id": schema.StringAttribute{ + Description: descriptions["credentials_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "instance_id": schema.StringAttribute{ + Description: descriptions["instance_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "project_id": schema.StringAttribute{ + Description: descriptions["project_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "host": schema.StringAttribute{ + Computed: true, + }, + "hosts": schema.ListAttribute{ + ElementType: types.StringType, + Computed: true, + }, + "http_api_uri": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "password": schema.StringAttribute{ + Computed: true, + Sensitive: true, + }, + "port": schema.Int64Attribute{ + Computed: true, + }, + "uri": schema.StringAttribute{ + Computed: true, + }, + "username": schema.StringAttribute{ + Computed: true, + }, + }, + } +} + +// Read refreshes the Terraform state with the latest data. 
+func (r *credentialsDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.Config.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + credentialsId := model.CredentialsId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + ctx = tflog.SetField(ctx, "credentials_id", credentialsId) + + recordSetResp, err := r.client.GetCredentials(ctx, projectId, instanceId, credentialsId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading credentials", err.Error()) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(recordSetResp, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + + // Set refreshed state + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) 
+ tflog.Info(ctx, "Postgresql credentials read") +} diff --git a/stackit/services/postgresql/credentials/resource.go b/stackit/services/postgresql/credentials/resource.go new file mode 100644 index 00000000..955ae5b0 --- /dev/null +++ b/stackit/services/postgresql/credentials/resource.go @@ -0,0 +1,371 @@ +package postgresql + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/terraform-provider-stackit/stackit/conversion" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/postgresql" +) + +// Ensure the implementation satisfies the expected interfaces. 
+var ( + _ resource.Resource = &credentialsResource{} + _ resource.ResourceWithConfigure = &credentialsResource{} + _ resource.ResourceWithImportState = &credentialsResource{} +) + +type Model struct { + Id types.String `tfsdk:"id"` // needed by TF + CredentialsId types.String `tfsdk:"credentials_id"` + InstanceId types.String `tfsdk:"instance_id"` + ProjectId types.String `tfsdk:"project_id"` + Host types.String `tfsdk:"host"` + Hosts types.List `tfsdk:"hosts"` + HttpAPIURI types.String `tfsdk:"http_api_uri"` + Name types.String `tfsdk:"name"` + Password types.String `tfsdk:"password"` + Port types.Int64 `tfsdk:"port"` + Uri types.String `tfsdk:"uri"` + Username types.String `tfsdk:"username"` +} + +// NewCredentialsResource is a helper function to simplify the provider implementation. +func NewCredentialsResource() resource.Resource { + return &credentialsResource{} +} + +// credentialsResource is the resource implementation. +type credentialsResource struct { + client *postgresql.APIClient +} + +// Metadata returns the resource type name. +func (r *credentialsResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_postgresql_credentials" +} + +// Configure adds the provider configured client to the resource. +func (r *credentialsResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Resource Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. 
Please report this issue to the provider developers.", req.ProviderData)) + return + } + + var apiClient *postgresql.APIClient + var err error + if providerData.PostgreSQLCustomEndpoint != "" { + apiClient, err = postgresql.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.PostgreSQLCustomEndpoint), + ) + } else { + apiClient, err = postgresql.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + + tflog.Info(ctx, "Postgresql zone client configured") + r.client = apiClient +} + +// Schema defines the schema for the resource. +func (r *credentialsResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + descriptions := map[string]string{ + "main": "PostgreSQL credentials resource schema.", + "id": "Terraform's internal resource identifier.", + "credentials_id": "The credentials ID.", + "instance_id": "ID of the PostgreSQL instance.", + "project_id": "STACKIT Project ID to which the instance is associated.", + } + + resp.Schema = schema.Schema{ + Description: descriptions["main"], + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: descriptions["id"], + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "credentials_id": schema.StringAttribute{ + Description: descriptions["credentials_id"], + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "instance_id": schema.StringAttribute{ + Description: descriptions["instance_id"], + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + 
Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "project_id": schema.StringAttribute{ + Description: descriptions["project_id"], + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "host": schema.StringAttribute{ + Computed: true, + }, + "hosts": schema.ListAttribute{ + ElementType: types.StringType, + Computed: true, + }, + "http_api_uri": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "password": schema.StringAttribute{ + Computed: true, + Sensitive: true, + }, + "port": schema.Int64Attribute{ + Computed: true, + }, + "uri": schema.StringAttribute{ + Computed: true, + }, + "username": schema.StringAttribute{ + Computed: true, + }, + }, + } +} + +// Create creates the resource and sets the initial Terraform state. +func (r *credentialsResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + // Create new recordset + credentialsResp, err := r.client.CreateCredentials(ctx, projectId, instanceId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating credentials", fmt.Sprintf("Calling API: %v", err)) + return + } + if credentialsResp.Id == nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating credentials", "Got empty credentials id") + return + } + credentialsId := *credentialsResp.Id + ctx = tflog.SetField(ctx, "credentials_id", credentialsId) + + wr, err := postgresql.CreateCredentialsWaitHandler(ctx, r.client, projectId, instanceId, credentialsId).SetTimeout(1 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating credentials", fmt.Sprintf("Instance creation waiting: %v", err)) + return + } + got, ok := wr.(*postgresql.CredentialsResponse) + if !ok { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating credentials", fmt.Sprintf("Wait result conversion, got %+v", got)) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(got, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "Postgresql credentials created") +} + +// Read refreshes the Terraform state with the latest data. +func (r *credentialsResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + credentialsId := model.CredentialsId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + ctx = tflog.SetField(ctx, "credentials_id", credentialsId) + + recordSetResp, err := r.client.GetCredentials(ctx, projectId, instanceId, credentialsId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading credentials", err.Error()) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(recordSetResp, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + + // Set refreshed state + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "Postgresql credentials read") +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *credentialsResource) Update(_ context.Context, _ resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform + // Update shouldn't be called + resp.Diagnostics.AddError("Error updating credentials", "credentials can't be updated") +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *credentialsResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + credentialsId := model.CredentialsId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + ctx = tflog.SetField(ctx, "credentials_id", credentialsId) + + // Delete existing record set + err := r.client.DeleteCredentials(ctx, projectId, instanceId, credentialsId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting credentials", err.Error()) + } + _, err = postgresql.DeleteCredentialsWaitHandler(ctx, r.client, projectId, instanceId, credentialsId).SetTimeout(1 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting credentials", fmt.Sprintf("Instance deletion waiting: %v", err)) + return + } + tflog.Info(ctx, "Postgresql credentials deleted") +} + +// ImportState imports a resource into the Terraform state on success. +// The expected format of the resource import identifier is: project_id,instance_id,credentials_id +func (r *credentialsResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + idParts := strings.Split(req.ID, core.Separator) + if len(idParts) != 3 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" { + core.LogAndAddError(ctx, &resp.Diagnostics, + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format [project_id],[instance_id],[credentials_id], got %q", req.ID), + ) + return + } + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), idParts[1])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("credentials_id"), idParts[2])...) 
+ tflog.Info(ctx, "Postgresql credentials state imported") +} + +func mapFields(credentialsResp *postgresql.CredentialsResponse, model *Model) error { + if credentialsResp == nil { + return fmt.Errorf("response input is nil") + } + if credentialsResp.Raw == nil { + return fmt.Errorf("response credentials raw is nil") + } + if model == nil { + return fmt.Errorf("model input is nil") + } + credentials := credentialsResp.Raw.Credentials + + var credentialsId string + if model.CredentialsId.ValueString() != "" { + credentialsId = model.CredentialsId.ValueString() + } else if credentialsResp.Id != nil { + credentialsId = *credentialsResp.Id + } else { + return fmt.Errorf("credentials id not present") + } + + idParts := []string{ + model.ProjectId.ValueString(), + model.InstanceId.ValueString(), + credentialsId, + } + model.Id = types.StringValue( + strings.Join(idParts, core.Separator), + ) + model.CredentialsId = types.StringValue(credentialsId) + model.Hosts = types.ListNull(types.StringType) + if credentials != nil { + if credentials.Hosts != nil { + var hosts []attr.Value + for _, host := range *credentials.Hosts { + hosts = append(hosts, types.StringValue(host)) + } + hostsList, diags := types.ListValue(types.StringType, hosts) + if diags.HasError() { + return fmt.Errorf("failed to map hosts: %w", core.DiagsToError(diags)) + } + model.Hosts = hostsList + } + model.Host = types.StringPointerValue(credentials.Host) + model.HttpAPIURI = types.StringPointerValue(credentials.HttpApiUri) + model.Name = types.StringPointerValue(credentials.Name) + model.Password = types.StringPointerValue(credentials.Password) + model.Port = conversion.ToTypeInt64(credentials.Port) + model.Uri = types.StringPointerValue(credentials.Uri) + model.Username = types.StringPointerValue(credentials.Username) + } + return nil +} diff --git a/stackit/services/postgresql/credentials/resource_test.go b/stackit/services/postgresql/credentials/resource_test.go new file mode 100644 index 
00000000..b9c9e9b9 --- /dev/null +++ b/stackit/services/postgresql/credentials/resource_test.go @@ -0,0 +1,156 @@ +package postgresql + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/postgresql" +) + +func TestMapFields(t *testing.T) { + tests := []struct { + description string + input *postgresql.CredentialsResponse + expected Model + isValid bool + }{ + { + "default_values", + &postgresql.CredentialsResponse{ + Id: utils.Ptr("cid"), + Raw: &postgresql.RawCredentials{}, + }, + Model{ + Id: types.StringValue("pid,iid,cid"), + CredentialsId: types.StringValue("cid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Host: types.StringNull(), + Hosts: types.ListNull(types.StringType), + HttpAPIURI: types.StringNull(), + Name: types.StringNull(), + Password: types.StringNull(), + Port: types.Int64Null(), + Uri: types.StringNull(), + Username: types.StringNull(), + }, + true, + }, + { + "simple_values", + &postgresql.CredentialsResponse{ + Id: utils.Ptr("cid"), + Raw: &postgresql.RawCredentials{ + Credentials: &postgresql.Credentials{ + Host: utils.Ptr("host"), + Hosts: &[]string{ + "host_1", + "", + }, + HttpApiUri: utils.Ptr("http"), + Name: utils.Ptr("name"), + Password: utils.Ptr("password"), + Port: utils.Ptr(int32(1234)), + Uri: utils.Ptr("uri"), + Username: utils.Ptr("username"), + }, + }, + }, + Model{ + Id: types.StringValue("pid,iid,cid"), + CredentialsId: types.StringValue("cid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Host: types.StringValue("host"), + Hosts: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("host_1"), + types.StringValue(""), + }), + HttpAPIURI: types.StringValue("http"), + Name: types.StringValue("name"), + Password: 
types.StringValue("password"), + Port: types.Int64Value(1234), + Uri: types.StringValue("uri"), + Username: types.StringValue("username"), + }, + true, + }, + { + "null_fields_and_int_conversions", + &postgresql.CredentialsResponse{ + Id: utils.Ptr("cid"), + Raw: &postgresql.RawCredentials{ + Credentials: &postgresql.Credentials{ + Host: utils.Ptr(""), + Hosts: &[]string{}, + HttpApiUri: nil, + Name: nil, + Password: nil, + Port: utils.Ptr(int32(2123456789)), + Uri: nil, + Username: nil, + }, + }, + }, + Model{ + Id: types.StringValue("pid,iid,cid"), + CredentialsId: types.StringValue("cid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Host: types.StringValue(""), + Hosts: types.ListValueMust(types.StringType, []attr.Value{}), + HttpAPIURI: types.StringNull(), + Name: types.StringNull(), + Password: types.StringNull(), + Port: types.Int64Value(2123456789), + Uri: types.StringNull(), + Username: types.StringNull(), + }, + true, + }, + { + "nil_response", + nil, + Model{}, + false, + }, + { + "no_resource_id", + &postgresql.CredentialsResponse{}, + Model{}, + false, + }, + { + "nil_raw_credentials", + &postgresql.CredentialsResponse{ + Id: utils.Ptr("cid"), + }, + Model{}, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + state := &Model{ + ProjectId: tt.expected.ProjectId, + InstanceId: tt.expected.InstanceId, + } + err := mapFields(tt.input, state) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(state, &tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} diff --git a/stackit/services/postgresql/instance/datasource.go b/stackit/services/postgresql/instance/datasource.go new file mode 100644 index 00000000..bdcb66ad --- /dev/null +++ b/stackit/services/postgresql/instance/datasource.go @@ -0,0 +1,198 @@ 
+package postgresql + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" + + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/postgresql" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &instanceDataSource{} +) + +// NewInstanceDataSource is a helper function to simplify the provider implementation. +func NewInstanceDataSource() datasource.DataSource { + return &instanceDataSource{} +} + +// instanceDataSource is the data source implementation. +type instanceDataSource struct { + client *postgresql.APIClient +} + +// Metadata returns the resource type name. +func (r *instanceDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_postgresql_instance" +} + +// Configure adds the provider configured client to the resource. +func (r *instanceDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Data Source Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. 
Please report this issue to the provider developers.", req.ProviderData)) + return + } + + var apiClient *postgresql.APIClient + var err error + if providerData.PostgreSQLCustomEndpoint != "" { + apiClient, err = postgresql.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.PostgreSQLCustomEndpoint), + ) + } else { + apiClient, err = postgresql.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + + tflog.Info(ctx, "Postgresql zone client configured") + r.client = apiClient +} + +// Schema defines the schema for the resource. +func (r *instanceDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + descriptions := map[string]string{ + "main": "PostgreSQL instance data source schema.", + "id": "Terraform's internal resource identifier.", + "instance_id": "ID of the PostgreSQL instance.", + "project_id": "STACKIT Project ID to which the instance is associated.", + "name": "Instance name.", + "version": "The service version.", + "plan_name": "The selected plan name.", + "plan_id": "The selected plan ID.", + } + + resp.Schema = schema.Schema{ + Description: descriptions["main"], + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: descriptions["id"], + Computed: true, + }, + "instance_id": schema.StringAttribute{ + Description: descriptions["instance_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "project_id": schema.StringAttribute{ + Description: descriptions["project_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "name": schema.StringAttribute{ + Description: descriptions["name"], + Computed: true, + }, + "version": 
schema.StringAttribute{ + Description: descriptions["version"], + Computed: true, + }, + "plan_name": schema.StringAttribute{ + Description: descriptions["plan_name"], + Computed: true, + }, + "plan_id": schema.StringAttribute{ + Description: descriptions["plan_id"], + Computed: true, + }, + "parameters": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "enable_monitoring": schema.BoolAttribute{ + Computed: true, + }, + "metrics_frequency": schema.Int64Attribute{ + Computed: true, + }, + "metrics_prefix": schema.StringAttribute{ + Computed: true, + }, + "monitoring_instance_id": schema.StringAttribute{ + Computed: true, + }, + "plugins": schema.ListAttribute{ + ElementType: types.StringType, + Computed: true, + }, + "sgw_acl": schema.StringAttribute{ + Computed: true, + }, + }, + Computed: true, + }, + "cf_guid": schema.StringAttribute{ + Computed: true, + }, + "cf_space_guid": schema.StringAttribute{ + Computed: true, + }, + "dashboard_url": schema.StringAttribute{ + Computed: true, + }, + "image_url": schema.StringAttribute{ + Computed: true, + }, + "organization_guid": schema.StringAttribute{ + Computed: true, + }, + }, + } +} + +// Read refreshes the Terraform state with the latest data. +func (r *instanceDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var state Model + diags := req.Config.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + projectId := state.ProjectId.ValueString() + instanceId := state.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + instanceResp, err := r.client.GetInstance(ctx, projectId, instanceId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Unable to read instance", err.Error()) + return + } + + err = mapFields(instanceResp, &state) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Mapping fields", err.Error()) + return + } + // Set refreshed state + diags = resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "Postgresql instance read") +} diff --git a/stackit/services/postgresql/instance/resource.go b/stackit/services/postgresql/instance/resource.go new file mode 100644 index 00000000..e42aea6d --- /dev/null +++ b/stackit/services/postgresql/instance/resource.go @@ -0,0 +1,704 @@ +package postgresql + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/postgresql" + 
"github.com/stackitcloud/terraform-provider-stackit/stackit/conversion" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ resource.Resource = &instanceResource{} + _ resource.ResourceWithConfigure = &instanceResource{} + _ resource.ResourceWithImportState = &instanceResource{} +) + +type Model struct { + Id types.String `tfsdk:"id"` // needed by TF + InstanceId types.String `tfsdk:"instance_id"` + ProjectId types.String `tfsdk:"project_id"` + CfGuid types.String `tfsdk:"cf_guid"` + CfSpaceGuid types.String `tfsdk:"cf_space_guid"` + DashboardUrl types.String `tfsdk:"dashboard_url"` + ImageUrl types.String `tfsdk:"image_url"` + Name types.String `tfsdk:"name"` + OrganizationGuid types.String `tfsdk:"organization_guid"` + Parameters types.Object `tfsdk:"parameters"` + Version types.String `tfsdk:"version"` + PlanName types.String `tfsdk:"plan_name"` + PlanId types.String `tfsdk:"plan_id"` +} + +// Struct corresponding to DataSourceModel.Parameters +type parametersModel struct { + EnableMonitoring types.Bool `tfsdk:"enable_monitoring"` + MetricsFrequency types.Int64 `tfsdk:"metrics_frequency"` + MetricsPrefix types.String `tfsdk:"metrics_prefix"` + MonitoringInstanceId types.String `tfsdk:"monitoring_instance_id"` + Plugins types.List `tfsdk:"plugins"` + SgwAcl types.String `tfsdk:"sgw_acl"` +} + +// Types corresponding to parametersModel +var parametersTypes = map[string]attr.Type{ + "enable_monitoring": basetypes.BoolType{}, + "metrics_frequency": basetypes.Int64Type{}, + "metrics_prefix": basetypes.StringType{}, + "monitoring_instance_id": basetypes.StringType{}, + "plugins": basetypes.ListType{ElemType: types.StringType}, + "sgw_acl": basetypes.StringType{}, +} + +// NewInstanceResource is a helper function to simplify the provider implementation. 
+func NewInstanceResource() resource.Resource { + return &instanceResource{} +} + +// instanceResource is the resource implementation. +type instanceResource struct { + client *postgresql.APIClient +} + +// Metadata returns the resource type name. +func (r *instanceResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_postgresql_instance" +} + +// Configure adds the provider configured client to the resource. +func (r *instanceResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Resource Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. Please report this issue to the provider developers.", req.ProviderData)) + return + } + + var apiClient *postgresql.APIClient + var err error + if providerData.PostgreSQLCustomEndpoint != "" { + apiClient, err = postgresql.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.PostgreSQLCustomEndpoint), + ) + } else { + apiClient, err = postgresql.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + + tflog.Info(ctx, "Postgresql zone client configured") + r.client = apiClient +} + +// Schema defines the schema for the resource. 
+func (r *instanceResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + descriptions := map[string]string{ + "main": "PostgreSQL instance resource schema.", + "id": "Terraform's internal resource ID.", + "instance_id": "ID of the PostgreSQL instance.", + "project_id": "STACKIT project ID to which the instance is associated.", + "name": "Instance name.", + "version": "The service version.", + "plan_name": "The selected plan name.", + "plan_id": "The selected plan ID.", + } + + resp.Schema = schema.Schema{ + Description: descriptions["main"], + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: descriptions["id"], + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "instance_id": schema.StringAttribute{ + Description: descriptions["instance_id"], + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "project_id": schema.StringAttribute{ + Description: descriptions["project_id"], + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "name": schema.StringAttribute{ + Description: descriptions["name"], + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), + }, + }, + "version": schema.StringAttribute{ + Description: descriptions["version"], + Required: true, + }, + "plan_name": schema.StringAttribute{ + Description: descriptions["plan_name"], + Required: true, + }, + "plan_id": schema.StringAttribute{ + Description: descriptions["plan_id"], + Computed: 
true, + }, + "parameters": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "enable_monitoring": schema.BoolAttribute{ + Optional: true, + }, + "metrics_frequency": schema.Int64Attribute{ + Optional: true, + }, + "metrics_prefix": schema.StringAttribute{ + Optional: true, + }, + "monitoring_instance_id": schema.StringAttribute{ + Optional: true, + }, + "plugins": schema.ListAttribute{ + ElementType: types.StringType, + Optional: true, + }, + "sgw_acl": schema.StringAttribute{ + Optional: true, + Computed: true, + }, + }, + Optional: true, + Computed: true, + }, + "cf_guid": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "cf_space_guid": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "dashboard_url": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "image_url": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "organization_guid": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + }, + } +} + +// Create creates the resource and sets the initial Terraform state. +func (r *instanceResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform + // Retrieve values from plan + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + + r.loadPlanId(ctx, &resp.Diagnostics, &model) + if diags.HasError() { + core.LogAndAddError(ctx, &diags, "Failed to load PostgreSQL service plan", "plan "+model.PlanName.ValueString()) + return + } + + var parameters = ¶metersModel{} + var parametersPlugins *[]string + if !(model.Parameters.IsNull() || model.Parameters.IsUnknown()) { + diags = model.Parameters.As(ctx, parameters, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + if !(parameters.Plugins.IsNull() || parameters.Plugins.IsUnknown()) { + var pp []types.String + var res []string + diags = parameters.Plugins.ElementsAs(ctx, &pp, false) + resp.Diagnostics.Append(diags...) + for _, v := range pp { + res = append(res, v.ValueString()) + } + parametersPlugins = &res + } + if resp.Diagnostics.HasError() { + return + } + } + + // Generate API request body from model + payload, err := toCreatePayload(&model, parameters, parametersPlugins) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Creating API payload: %v", err)) + return + } + // Create new instance + createResp, err := r.client.CreateInstance(ctx, projectId).CreateInstancePayload(*payload).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Calling API: %v", err)) + return + } + instanceId := *createResp.InstanceId + ctx = tflog.SetField(ctx, "instance_id", instanceId) + wr, err := postgresql.CreateInstanceWaitHandler(ctx, r.client, projectId, instanceId).SetTimeout(15 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Instance creation waiting: %v", err)) + return + } + got, ok := wr.(*postgresql.Instance) + if !ok { + 
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Wait result conversion, got %+v", got)) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(got, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + // Set state to fully populated data + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "Postgresql instance created") +} + +func toCreatePayload(model *Model, parameters *parametersModel, parametersPlugins *[]string) (*postgresql.CreateInstancePayload, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + + if parameters == nil { + return &postgresql.CreateInstancePayload{ + InstanceName: model.Name.ValueStringPointer(), + PlanId: model.PlanId.ValueStringPointer(), + }, nil + } + return &postgresql.CreateInstancePayload{ + InstanceName: model.Name.ValueStringPointer(), + Parameters: &postgresql.InstanceParameters{ + EnableMonitoring: parameters.EnableMonitoring.ValueBoolPointer(), + MetricsFrequency: conversion.ToPtrInt32(parameters.MetricsFrequency), + MetricsPrefix: parameters.MetricsPrefix.ValueStringPointer(), + MonitoringInstanceId: parameters.MonitoringInstanceId.ValueStringPointer(), + Plugins: parametersPlugins, + SgwAcl: parameters.SgwAcl.ValueStringPointer(), + }, + PlanId: model.PlanId.ValueStringPointer(), + }, nil +} + +// Read refreshes the Terraform state with the latest data. +func (r *instanceResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var state Model + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + projectId := state.ProjectId.ValueString() + instanceId := state.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + instanceResp, err := r.client.GetInstance(ctx, projectId, instanceId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instances", err.Error()) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(instanceResp, &state) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + // Set refreshed state + diags = resp.State.Set(ctx, state) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "Postgresql instance read") +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *instanceResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + r.loadPlanId(ctx, &resp.Diagnostics, &model) + if diags.HasError() { + core.LogAndAddError(ctx, &diags, "Failed to load PostgreSQL service plan", "plan "+model.PlanName.ValueString()) + return + } + + var parameters = ¶metersModel{} + var parametersPlugins *[]string + if !(model.Parameters.IsNull() || model.Parameters.IsUnknown()) { + diags = model.Parameters.As(ctx, parameters, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + if !(parameters.Plugins.IsNull() || parameters.Plugins.IsUnknown()) { + var pp []types.String + var res []string + diags = parameters.Plugins.ElementsAs(ctx, &pp, false) + resp.Diagnostics.Append(diags...) + for _, v := range pp { + res = append(res, v.ValueString()) + } + parametersPlugins = &res + } + } + + // Generate API request body from model + payload, err := toUpdatePayload(&model, parameters, parametersPlugins) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Could not create API payload: %v", err)) + return + } + // Update existing instance + err = r.client.UpdateInstance(ctx, projectId, instanceId).UpdateInstancePayload(*payload).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", err.Error()) + return + } + wr, err := postgresql.UpdateInstanceWaitHandler(ctx, r.client, projectId, instanceId).SetTimeout(15 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Instance update waiting: %v", err)) + return + } + got, ok := wr.(*postgresql.Instance) + if !ok { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Wait result conversion, got %+v", got)) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(got, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields in update", err.Error()) + return + } + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) 
+ tflog.Info(ctx, "Postgresql instance updated") +} + +func toUpdatePayload(model *Model, parameters *parametersModel, parametersPlugins *[]string) (*postgresql.UpdateInstancePayload, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + + if parameters == nil { + return &postgresql.UpdateInstancePayload{ + PlanId: model.PlanId.ValueStringPointer(), + }, nil + } + return &postgresql.UpdateInstancePayload{ + Parameters: &postgresql.InstanceParameters{ + EnableMonitoring: parameters.EnableMonitoring.ValueBoolPointer(), + MetricsFrequency: conversion.ToPtrInt32(parameters.MetricsFrequency), + MetricsPrefix: parameters.MetricsPrefix.ValueStringPointer(), + MonitoringInstanceId: parameters.MonitoringInstanceId.ValueStringPointer(), + Plugins: parametersPlugins, + SgwAcl: parameters.SgwAcl.ValueStringPointer(), + }, + PlanId: model.PlanId.ValueStringPointer(), + }, nil +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *instanceResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform + // Retrieve values from state + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + // Delete existing instance + err := r.client.DeleteInstance(ctx, projectId, instanceId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting instance", err.Error()) + return + } + _, err = postgresql.DeleteInstanceWaitHandler(ctx, r.client, projectId, instanceId).SetTimeout(15 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting instance", fmt.Sprintf("Instance deletion waiting: %v", err)) + return + } + tflog.Info(ctx, "Postgresql instance deleted") +} + +// ImportState imports a resource into the Terraform state on success. +// The expected format of the resource import identifier is: project_id,instance_id +func (r *instanceResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + idParts := strings.Split(req.ID, core.Separator) + + if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { + resp.Diagnostics.AddError( + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format: [project_id],[instance_id] Got: %q", req.ID), + ) + return + } + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), idParts[1])...) 
+ tflog.Info(ctx, "Postgresql instance state imported") +} + +func mapFields(instance *postgresql.Instance, model *Model) error { + if instance == nil { + return fmt.Errorf("response input is nil") + } + if model == nil { + return fmt.Errorf("model input is nil") + } + + var instanceId string + if model.InstanceId.ValueString() != "" { + instanceId = model.InstanceId.ValueString() + } else if instance.InstanceId != nil { + instanceId = *instance.InstanceId + } else { + return fmt.Errorf("instance id not present") + } + + idParts := []string{ + model.ProjectId.ValueString(), + instanceId, + } + model.Id = types.StringValue( + strings.Join(idParts, core.Separator), + ) + model.InstanceId = types.StringValue(instanceId) + model.PlanId = types.StringPointerValue(instance.PlanId) + model.CfGuid = types.StringPointerValue(instance.CfGuid) + model.CfSpaceGuid = types.StringPointerValue(instance.CfSpaceGuid) + model.DashboardUrl = types.StringPointerValue(instance.DashboardUrl) + model.ImageUrl = types.StringPointerValue(instance.ImageUrl) + model.Name = types.StringPointerValue(instance.Name) + model.OrganizationGuid = types.StringPointerValue(instance.OrganizationGuid) + + if instance.Parameters == nil { + model.Parameters = types.ObjectNull(parametersTypes) + } else { + parameters, err := mapParameters(*instance.Parameters) + if err != nil { + return fmt.Errorf("mapping parameters: %w", err) + } + model.Parameters = parameters + } + return nil +} + +func mapParameters(params map[string]interface{}) (types.Object, error) { + attributes := map[string]attr.Value{} + for attribute := range parametersTypes { + valueInterface, ok := params[attribute] + if !ok { + // All fields are optional, so this is ok + // Set the value as nil, will be handled accordingly + valueInterface = nil + } + + var value attr.Value + switch parametersTypes[attribute].(type) { + default: + return types.ObjectNull(parametersTypes), fmt.Errorf("found unexpected attribute type '%T'", 
parametersTypes[attribute]) + case basetypes.StringType: + if valueInterface == nil { + value = types.StringNull() + } else { + valueString, ok := valueInterface.(string) + if !ok { + return types.ObjectNull(parametersTypes), fmt.Errorf("found attribute '%s' of type %T, failed to assert as string", attribute, valueInterface) + } + value = types.StringValue(valueString) + } + case basetypes.BoolType: + if valueInterface == nil { + value = types.BoolNull() + } else { + valueBool, ok := valueInterface.(bool) + if !ok { + return types.ObjectNull(parametersTypes), fmt.Errorf("found attribute '%s' of type %T, failed to assert as bool", attribute, valueInterface) + } + value = types.BoolValue(valueBool) + } + case basetypes.Int64Type: + if valueInterface == nil { + value = types.Int64Null() + } else { + // This may be int64, int32, int or float64 + // We try to assert all 4 + var valueInt64 int64 + switch temp := valueInterface.(type) { + default: + return types.ObjectNull(parametersTypes), fmt.Errorf("found attribute '%s' of type %T, failed to assert as int", attribute, valueInterface) + case int64: + valueInt64 = temp + case int32: + valueInt64 = int64(temp) + case int: + valueInt64 = int64(temp) + case float64: + valueInt64 = int64(temp) + } + value = types.Int64Value(valueInt64) + } + case basetypes.ListType: // Assumed to be a list of strings + if valueInterface == nil { + value = types.ListNull(types.StringType) + } else { + // This may be []string{} or []interface{} + // We try to assert all 2 + var valueList []attr.Value + switch temp := valueInterface.(type) { + default: + return types.ObjectNull(parametersTypes), fmt.Errorf("found attribute '%s' of type %T, failed to assert as array of interface", attribute, valueInterface) + case []string: + for _, x := range temp { + valueList = append(valueList, types.StringValue(x)) + } + case []interface{}: + for _, x := range temp { + xString, ok := x.(string) + if !ok { + return types.ObjectNull(parametersTypes), 
fmt.Errorf("found attribute '%s' with element '%s' of type %T, failed to assert as string", attribute, x, x) + } + valueList = append(valueList, types.StringValue(xString)) + } + } + temp2, diags := types.ListValue(types.StringType, valueList) + if diags.HasError() { + return types.ObjectNull(parametersTypes), fmt.Errorf("failed to map %s: %w", attribute, core.DiagsToError(diags)) + } + value = temp2 + } + } + attributes[attribute] = value + } + + output, diags := types.ObjectValue(parametersTypes, attributes) + if diags.HasError() { + return types.ObjectNull(parametersTypes), fmt.Errorf("failed to create object: %w", core.DiagsToError(diags)) + } + return output, nil +} + +func (r *instanceResource) loadPlanId(ctx context.Context, diags *diag.Diagnostics, model *Model) { + projectId := model.ProjectId.ValueString() + res, err := r.client.GetOfferings(ctx, projectId).Execute() + if err != nil { + diags.AddError("Failed to list PostgreSQL offerings", err.Error()) + return + } + + version := model.Version.ValueString() + planName := model.PlanName.ValueString() + availableVersions := "" + availablePlanNames := "" + isValidVersion := false + for _, offer := range *res.Offerings { + if !strings.EqualFold(*offer.Version, version) { + availableVersions = fmt.Sprintf("%s\n- %s", availableVersions, *offer.Version) + continue + } + isValidVersion = true + + for _, plan := range *offer.Plans { + if plan.Name == nil { + continue + } + if strings.EqualFold(*plan.Name, planName) && plan.Id != nil { + model.PlanId = types.StringPointerValue(plan.Id) + return + } + availablePlanNames = fmt.Sprintf("%s\n- %s", availablePlanNames, *plan.Name) + } + } + + if !isValidVersion { + diags.AddError("Invalid version", fmt.Sprintf("Couldn't find version '%s', available versions are:%s", version, availableVersions)) + return + } + diags.AddError("Invalid plan_name", fmt.Sprintf("Couldn't find plan_name '%s' for version %s, available names are:%s", planName, version, availablePlanNames)) +} 
diff --git a/stackit/services/postgresql/instance/resource_test.go b/stackit/services/postgresql/instance/resource_test.go
new file mode 100644
index 00000000..d3c9cc76
--- /dev/null
+++ b/stackit/services/postgresql/instance/resource_test.go
@@ -0,0 +1,435 @@
package postgresql

import (
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/hashicorp/terraform-plugin-framework/attr"
	"github.com/hashicorp/terraform-plugin-framework/types"
	"github.com/stackitcloud/stackit-sdk-go/core/utils"
	"github.com/stackitcloud/stackit-sdk-go/services/postgresql"
)

// TestMapFields verifies that mapFields copies an API instance into the
// Terraform model, including the loosely-typed "parameters" map, and rejects
// nil input, a missing instance ID, and parameter values of the wrong type.
func TestMapFields(t *testing.T) {
	tests := []struct {
		description string
		input       *postgresql.Instance
		expected    Model
		isValid     bool
	}{
		{
			"default_values",
			&postgresql.Instance{},
			Model{
				Id:               types.StringValue("pid,iid"),
				InstanceId:       types.StringValue("iid"),
				ProjectId:        types.StringValue("pid"),
				PlanId:           types.StringNull(),
				Name:             types.StringNull(),
				CfGuid:           types.StringNull(),
				CfSpaceGuid:      types.StringNull(),
				DashboardUrl:     types.StringNull(),
				ImageUrl:         types.StringNull(),
				OrganizationGuid: types.StringNull(),
				Parameters:       types.ObjectNull(parametersTypes),
			},
			true,
		},
		{
			"simple_values",
			&postgresql.Instance{
				PlanId:           utils.Ptr("plan"),
				CfGuid:           utils.Ptr("cf"),
				CfSpaceGuid:      utils.Ptr("space"),
				DashboardUrl:     utils.Ptr("dashboard"),
				ImageUrl:         utils.Ptr("image"),
				InstanceId:       utils.Ptr("iid"),
				Name:             utils.Ptr("name"),
				OrganizationGuid: utils.Ptr("org"),
				Parameters: &map[string]interface{}{
					"enable_monitoring": true,
					"metrics_frequency": 1234,
					"plugins": []string{
						"plugin_1",
						"plugin_2",
						"",
					},
				},
			},
			Model{
				Id:               types.StringValue("pid,iid"),
				InstanceId:       types.StringValue("iid"),
				ProjectId:        types.StringValue("pid"),
				PlanId:           types.StringValue("plan"),
				Name:             types.StringValue("name"),
				CfGuid:           types.StringValue("cf"),
				CfSpaceGuid:      types.StringValue("space"),
				DashboardUrl:     types.StringValue("dashboard"),
				ImageUrl:         types.StringValue("image"),
				OrganizationGuid: types.StringValue("org"),
				Parameters: types.ObjectValueMust(parametersTypes, map[string]attr.Value{
					"enable_monitoring":      types.BoolValue(true),
					"metrics_frequency":      types.Int64Value(1234),
					"metrics_prefix":         types.StringNull(),
					"monitoring_instance_id": types.StringNull(),
					"plugins": types.ListValueMust(types.StringType, []attr.Value{
						types.StringValue("plugin_1"),
						types.StringValue("plugin_2"),
						types.StringValue(""),
					}),
					"sgw_acl": types.StringNull(),
				}),
			},
			true,
		},
		{
			"nil_response",
			nil,
			Model{},
			false,
		},
		{
			"no_resource_id",
			&postgresql.Instance{},
			Model{},
			false,
		},
		{
			"wrong_param_types_1",
			&postgresql.Instance{
				Parameters: &map[string]interface{}{
					"enable_monitoring": "true",
				},
			},
			Model{},
			false,
		},
		{
			"wrong_param_types_2",
			&postgresql.Instance{
				Parameters: &map[string]interface{}{
					"metrics_frequency": true,
				},
			},
			Model{},
			false,
		},
		{
			// NOTE(review): mapParameters accepts float64 for Int64 attributes
			// (it truncates via int64(temp)), so 12.34 would map without error
			// and this case would not fail as expected — confirm against the
			// actual mapParameters implementation.
			"wrong_param_types_3",
			&postgresql.Instance{
				Parameters: &map[string]interface{}{
					"metrics_frequency": 12.34,
				},
			},
			Model{},
			false,
		},
		{
			"wrong_param_types_4",
			&postgresql.Instance{
				Parameters: &map[string]interface{}{
					"plugins": "foo",
				},
			},
			Model{},
			false,
		},
		{
			"wrong_param_types_5",
			&postgresql.Instance{
				Parameters: &map[string]interface{}{
					"plugins": []bool{
						true,
					},
				},
			},
			Model{},
			false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.description, func(t *testing.T) {
			// Seed the model with the IDs mapFields expects to find in state.
			state := &Model{
				ProjectId:  tt.expected.ProjectId,
				InstanceId: tt.expected.InstanceId,
			}
			err := mapFields(tt.input, state)
			if !tt.isValid && err == nil {
				t.Fatalf("Should have failed")
			}
			if tt.isValid && err != nil {
				t.Fatalf("Should not have failed: %v", err)
			}
			if tt.isValid {
				diff := cmp.Diff(state, &tt.expected)
				if diff != "" {
					t.Fatalf("Data does not match: %s", diff)
				}
			}
		})
	}
}

// TestToCreatePayload verifies payload construction for instance creation,
// including nil parameters (name + plan only), nil plugin lists, int64→int32
// frequency conversion, and rejection of a nil model.
func TestToCreatePayload(t *testing.T) {
	tests := []struct {
		description            string
		input                  *Model
		inputParameters        *parametersModel
		inputParametersPlugins *[]string
		expected               *postgresql.CreateInstancePayload
		isValid                bool
	}{
		{
			"default_values",
			&Model{},
			&parametersModel{},
			&[]string{},
			&postgresql.CreateInstancePayload{
				Parameters: &postgresql.InstanceParameters{
					Plugins: &[]string{},
				},
			},
			true,
		},
		{
			"nil_values",
			&Model{},
			&parametersModel{},
			nil,
			&postgresql.CreateInstancePayload{
				Parameters: &postgresql.InstanceParameters{
					Plugins: nil,
				},
			},
			true,
		},
		{
			"simple_values",
			&Model{
				Name:   types.StringValue("name"),
				PlanId: types.StringValue("plan"),
			},
			&parametersModel{
				EnableMonitoring:     types.BoolValue(true),
				MetricsFrequency:     types.Int64Value(123),
				MetricsPrefix:        types.StringValue("prefix"),
				MonitoringInstanceId: types.StringValue("monitoring"),
				SgwAcl:               types.StringValue("sgw"),
			},
			&[]string{
				"plugin_1",
				"plugin_2",
			},
			&postgresql.CreateInstancePayload{
				InstanceName: utils.Ptr("name"),
				Parameters: &postgresql.InstanceParameters{
					EnableMonitoring:     utils.Ptr(true),
					MetricsFrequency:     utils.Ptr(int32(123)),
					MetricsPrefix:        utils.Ptr("prefix"),
					MonitoringInstanceId: utils.Ptr("monitoring"),
					Plugins: &[]string{
						"plugin_1",
						"plugin_2",
					},
					SgwAcl: utils.Ptr("sgw"),
				},
				PlanId: utils.Ptr("plan"),
			},
			true,
		},
		{
			"null_fields_and_int_conversions",
			&Model{
				Name:   types.StringValue(""),
				PlanId: types.StringValue(""),
			},
			&parametersModel{
				EnableMonitoring:     types.BoolNull(),
				MetricsFrequency:     types.Int64Value(2123456789),
				MetricsPrefix:        types.StringNull(),
				MonitoringInstanceId: types.StringNull(),
				SgwAcl:               types.StringNull(),
			},
			&[]string{
				"",
			},
			&postgresql.CreateInstancePayload{
				InstanceName: utils.Ptr(""),
				Parameters: &postgresql.InstanceParameters{
					EnableMonitoring: nil,
					MetricsFrequency: utils.Ptr(int32(2123456789)),
					MetricsPrefix:    nil,
					MonitoringInstanceId: nil,
					Plugins: &[]string{
						"",
					},
					SgwAcl: nil,
				},
				PlanId: utils.Ptr(""),
			},
			true,
		},
		{
			"nil_model",
			nil,
			&parametersModel{},
			&[]string{},
			nil,
			false,
		},
		{
			"nil_parameters",
			&Model{
				Name:   types.StringValue("name"),
				PlanId: types.StringValue("plan"),
			},
			nil,
			nil,
			&postgresql.CreateInstancePayload{
				InstanceName: utils.Ptr("name"),
				PlanId:       utils.Ptr("plan"),
			},
			true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.description, func(t *testing.T) {
			output, err := toCreatePayload(tt.input, tt.inputParameters, tt.inputParametersPlugins)
			if !tt.isValid && err == nil {
				t.Fatalf("Should have failed")
			}
			if tt.isValid && err != nil {
				t.Fatalf("Should not have failed: %v", err)
			}
			if tt.isValid {
				diff := cmp.Diff(output, tt.expected)
				if diff != "" {
					t.Fatalf("Data does not match: %s", diff)
				}
			}
		})
	}
}

// TestToUpdatePayload mirrors TestToCreatePayload for the update payload:
// nil parameters send only the plan ID; full parameters are passed through.
func TestToUpdatePayload(t *testing.T) {
	tests := []struct {
		description            string
		input                  *Model
		inputParameters        *parametersModel
		inputParametersPlugins *[]string
		expected               *postgresql.UpdateInstancePayload
		isValid                bool
	}{
		{
			"default_values",
			&Model{},
			&parametersModel{},
			&[]string{},
			&postgresql.UpdateInstancePayload{
				Parameters: &postgresql.InstanceParameters{
					Plugins: &[]string{},
				},
			},
			true,
		},
		{
			"simple_values",
			&Model{
				PlanId: types.StringValue("plan"),
			},
			&parametersModel{
				EnableMonitoring:     types.BoolValue(true),
				MetricsFrequency:     types.Int64Value(123),
				MetricsPrefix:        types.StringValue("prefix"),
				MonitoringInstanceId: types.StringValue("monitoring"),
				SgwAcl:               types.StringValue("sgw"),
			},
			&[]string{
				"plugin_1",
				"plugin_2",
			},
			&postgresql.UpdateInstancePayload{
				Parameters: &postgresql.InstanceParameters{
					EnableMonitoring:     utils.Ptr(true),
					MetricsFrequency:     utils.Ptr(int32(123)),
					MetricsPrefix:        utils.Ptr("prefix"),
					MonitoringInstanceId: utils.Ptr("monitoring"),
					Plugins: &[]string{
						"plugin_1",
						"plugin_2",
					},
					SgwAcl: utils.Ptr("sgw"),
				},
				PlanId: utils.Ptr("plan"),
			},
			true,
		},
		{
			"null_fields_and_int_conversions",
			&Model{
				PlanId: types.StringValue(""),
			},
			&parametersModel{
				EnableMonitoring:     types.BoolNull(),
				MetricsFrequency:     types.Int64Value(2123456789),
				MetricsPrefix:        types.StringNull(),
				MonitoringInstanceId: types.StringNull(),
				SgwAcl:               types.StringNull(),
			},
			&[]string{
				"",
			},
			&postgresql.UpdateInstancePayload{
				Parameters: &postgresql.InstanceParameters{
					EnableMonitoring: nil,
					MetricsFrequency: utils.Ptr(int32(2123456789)),
					MetricsPrefix:    nil,
					MonitoringInstanceId: nil,
					Plugins: &[]string{
						"",
					},
					SgwAcl: nil,
				},
				PlanId: utils.Ptr(""),
			},
			true,
		},
		{
			"nil_model",
			nil,
			&parametersModel{},
			&[]string{},
			nil,
			false,
		},
		{
			"nil_parameters",
			&Model{
				PlanId: types.StringValue("plan"),
			},
			nil,
			nil,
			&postgresql.UpdateInstancePayload{
				PlanId: utils.Ptr("plan"),
			},
			true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.description, func(t *testing.T) {
			output, err := toUpdatePayload(tt.input, tt.inputParameters, tt.inputParametersPlugins)
			if !tt.isValid && err == nil {
				t.Fatalf("Should have failed")
			}
			if tt.isValid && err != nil {
				t.Fatalf("Should not have failed: %v", err)
			}
			if tt.isValid {
				diff := cmp.Diff(output, tt.expected)
				if diff != "" {
					t.Fatalf("Data does not match: %s", diff)
				}
			}
		})
	}
}
diff --git a/stackit/services/postgresql/postgresql_acc_test.go b/stackit/services/postgresql/postgresql_acc_test.go
new file mode 100644
index 00000000..360d1155
--- /dev/null
+++ b/stackit/services/postgresql/postgresql_acc_test.go
@@ -0,0 +1,252 @@
package postgresql_test

import (
	"context"
	"fmt"
	"strings"
	"testing"

	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
	"github.com/hashicorp/terraform-plugin-testing/terraform"
	"github.com/stackitcloud/stackit-sdk-go/core/config"
	"github.com/stackitcloud/stackit-sdk-go/core/utils"
	"github.com/stackitcloud/stackit-sdk-go/services/postgresql"
	"github.com/stackitcloud/terraform-provider-stackit/stackit/core"
	"github.com/stackitcloud/terraform-provider-stackit/stackit/testutil"
)

// Instance resource data
// Fixture values shared by all acceptance-test steps below.
var instanceResource = map[string]string{
	"project_id":        testutil.ProjectId,
	"name":              testutil.ResourceNameWithDateTime("postgresql"),
	"plan_id":           "57d40175-0f4c-4bcc-b52d-cf5d2ee9f5a7",
	"sgw_acl":           "192.168.0.0/16",
	"metrics_frequency": "34",
	"plugins":           "foo-bar",
}

// resourceConfig renders the Terraform configuration under test: one
// PostgreSQL instance plus credentials bound to it. Several parameters are
// commented out in the HCL template and therefore not exercised.
func resourceConfig(acls, frequency, plugins string) string {
	return fmt.Sprintf(`
				%s

				resource "stackit_postgresql_instance" "instance" {
					project_id = "%s"
					name    = "%s"
					plan_id = "%s"
					parameters = {
						sgw_acl = "%s"
						plugins = ["%s"]
						# metrics_frequency = %s
						# metrics_prefix = "pre"
						# enable_monitoring = true
						# monitoring_instance_id = "b9e38481-4f3d-4a28-8ed0-43fd32c024c7"
					}
				}

				resource "stackit_postgresql_credentials" "credentials" {
					project_id = stackit_postgresql_instance.instance.project_id
					instance_id = stackit_postgresql_instance.instance.instance_id
				}
				`,
		testutil.PostgreSQLProviderConfig(),
		instanceResource["project_id"],
		instanceResource["name"],
		instanceResource["plan_id"],
		acls,
		plugins,
		frequency,
	)
}

// TestAccPostgreSQLResource runs the full lifecycle against a real project:
// create, read via data sources, import (instance and credentials), update.
func TestAccPostgreSQLResource(t *testing.T) {
	resource.Test(t, resource.TestCase{
		ProtoV6ProviderFactories: testutil.TestAccProtoV6ProviderFactories,
		CheckDestroy:             testAccCheckPostgreSQLDestroy,
		Steps: []resource.TestStep{

			// Creation
			{
				Config: resourceConfig(instanceResource["sgw_acl"], instanceResource["metrics_frequency"], instanceResource["plugins"]),
				Check: resource.ComposeAggregateTestCheckFunc(
					// Instance data
					resource.TestCheckResourceAttr("stackit_postgresql_instance.instance", "project_id", instanceResource["project_id"]),
					resource.TestCheckResourceAttrSet("stackit_postgresql_instance.instance", "instance_id"),
					resource.TestCheckResourceAttr("stackit_postgresql_instance.instance", "plan_id", instanceResource["plan_id"]),
					resource.TestCheckResourceAttr("stackit_postgresql_instance.instance", "name", instanceResource["name"]),
					resource.TestCheckResourceAttr("stackit_postgresql_instance.instance", "parameters.sgw_acl", instanceResource["sgw_acl"]),

					// Credentials data
					resource.TestCheckResourceAttrPair(
						"stackit_postgresql_credentials.credentials", "project_id",
						"stackit_postgresql_instance.instance", "project_id",
					),
					resource.TestCheckResourceAttrPair(
						"stackit_postgresql_credentials.credentials", "instance_id",
						"stackit_postgresql_instance.instance", "instance_id",
					),
					resource.TestCheckResourceAttrSet("stackit_postgresql_credentials.credentials", "credentials_id"),
					resource.TestCheckResourceAttrSet("stackit_postgresql_credentials.credentials", "host"),
				),
			},
			{ // Data source
				Config: fmt.Sprintf(`
					%s

					data "stackit_postgresql_instance" "instance" {
						project_id  = stackit_postgresql_instance.instance.project_id
						instance_id = stackit_postgresql_instance.instance.instance_id
					}

					data "stackit_postgresql_credentials" "credentials" {
						project_id     = stackit_postgresql_credentials.credentials.project_id
						instance_id    = stackit_postgresql_credentials.credentials.instance_id
						credentials_id = stackit_postgresql_credentials.credentials.credentials_id
					}`,
					resourceConfig(instanceResource["sgw_acl"], instanceResource["metrics_frequency"], instanceResource["plugins"]),
				),
				Check: resource.ComposeAggregateTestCheckFunc(
					// Instance data
					resource.TestCheckResourceAttr("data.stackit_postgresql_instance.instance", "project_id", instanceResource["project_id"]),

					resource.TestCheckResourceAttrPair("stackit_postgresql_instance.instance", "instance_id",
						"data.stackit_postgresql_instance.instance", "instance_id"),

					resource.TestCheckResourceAttrPair("stackit_postgresql_credentials.credentials", "credentials_id",
						"data.stackit_postgresql_credentials.credentials", "credentials_id"),

					resource.TestCheckResourceAttr("data.stackit_postgresql_instance.instance", "plan_id", instanceResource["plan_id"]),

					resource.TestCheckResourceAttr("data.stackit_postgresql_instance.instance", "name", instanceResource["name"]),
					resource.TestCheckResourceAttr("data.stackit_postgresql_instance.instance", "parameters.sgw_acl", instanceResource["sgw_acl"]),
					resource.TestCheckResourceAttr("data.stackit_postgresql_instance.instance", "parameters.plugins.#", "1"),
					resource.TestCheckResourceAttr("data.stackit_postgresql_instance.instance", "parameters.plugins.0", instanceResource["plugins"]),

					// Credentials data
					resource.TestCheckResourceAttr("data.stackit_postgresql_credentials.credentials", "project_id", instanceResource["project_id"]),
					resource.TestCheckResourceAttrSet("data.stackit_postgresql_credentials.credentials", "credentials_id"),
					resource.TestCheckResourceAttrSet("data.stackit_postgresql_credentials.credentials", "host"),
					resource.TestCheckResourceAttrSet("data.stackit_postgresql_credentials.credentials", "port"),
					resource.TestCheckResourceAttrSet("data.stackit_postgresql_credentials.credentials", "uri"),
				),
			},
			// Import
			{
				ResourceName: "stackit_postgresql_instance.instance",
				ImportStateIdFunc: func(s *terraform.State) (string, error) {
					// Build the "project_id,instance_id" import ID from live state.
					r, ok := s.RootModule().Resources["stackit_postgresql_instance.instance"]
					if !ok {
						return "", fmt.Errorf("couldn't find resource stackit_postgresql_instance.instance")
					}
					instanceId, ok := r.Primary.Attributes["instance_id"]
					if !ok {
						return "", fmt.Errorf("couldn't find attribute instance_id")
					}

					return fmt.Sprintf("%s,%s", testutil.ProjectId, instanceId), nil
				},
				ImportState:       true,
				ImportStateVerify: true,
			},
			{
				ResourceName: "stackit_postgresql_credentials.credentials",
				ImportStateIdFunc: func(s *terraform.State) (string, error) {
					// Credentials import ID is "project_id,instance_id,credentials_id".
					r, ok := s.RootModule().Resources["stackit_postgresql_credentials.credentials"]
					if !ok {
						return "", fmt.Errorf("couldn't find resource stackit_postgresql_credentials.credentials")
					}
					instanceId, ok := r.Primary.Attributes["instance_id"]
					if !ok {
						return "", fmt.Errorf("couldn't find attribute instance_id")
					}
					credentialsId, ok := r.Primary.Attributes["credentials_id"]
					if !ok {
						return "", fmt.Errorf("couldn't find attribute credentials_id")
					}

					return fmt.Sprintf("%s,%s,%s", testutil.ProjectId, instanceId, credentialsId), nil
				},
				ImportState:       true,
				ImportStateVerify: true,
			},
			// Update
			{
				Config: resourceConfig(instanceResource["sgw_acl"], fmt.Sprintf("%s0", instanceResource["metrics_frequency"]), fmt.Sprintf("%s-baz", instanceResource["plugins"])),
				Check: resource.ComposeAggregateTestCheckFunc(
					// Instance data
					resource.TestCheckResourceAttr("stackit_postgresql_instance.instance", "project_id", instanceResource["project_id"]),
					resource.TestCheckResourceAttrSet("stackit_postgresql_instance.instance", "instance_id"),
					resource.TestCheckResourceAttr("stackit_postgresql_instance.instance", "plan_id", instanceResource["plan_id"]),
					resource.TestCheckResourceAttr("stackit_postgresql_instance.instance", "name", instanceResource["name"]),
					resource.TestCheckResourceAttr("stackit_postgresql_instance.instance", "parameters.sgw_acl", instanceResource["sgw_acl"]),
					resource.TestCheckResourceAttr("stackit_postgresql_instance.instance", "parameters.plugins.0", fmt.Sprintf("%s-baz", instanceResource["plugins"])),
				),
			},
			// Deletion is done by the framework implicitly
		},
	})
}

// testAccCheckPostgreSQLDestroy verifies all test instances are gone after the
// test run. (Definition continues beyond this view.)
func testAccCheckPostgreSQLDestroy(s *terraform.State) error {
	ctx := context.Background()
	var client *postgresql.APIClient
	var err error
	if testutil.PostgreSQLCustomEndpoint == "" {
		client, err = postgresql.NewAPIClient()
	} else {
		client, err = postgresql.NewAPIClient(
			config.WithEndpoint(testutil.PostgreSQLCustomEndpoint),
		)
	}
	if err != 
nil { + return fmt.Errorf("creating client: %w", err) + } + + instancesToDestroy := []string{} + for _, rs := range s.RootModule().Resources { + if rs.Type != "stackit_postgresql_instance" { + continue + } + // instance terraform ID: "[project_id],[instance_id]" + instanceId := strings.Split(rs.Primary.ID, core.Separator)[1] + instancesToDestroy = append(instancesToDestroy, instanceId) + } + + instancesResp, err := client.GetInstances(ctx, testutil.ProjectId).Execute() + if err != nil { + return fmt.Errorf("getting instancesResp: %w", err) + } + + instances := *instancesResp.Instances + for i := range instances { + if instances[i].InstanceId == nil { + continue + } + if utils.Contains(instancesToDestroy, *instances[i].InstanceId) { + if !checkInstanceDeleteSuccess(&instances[i]) { + err := client.DeleteInstanceExecute(ctx, testutil.ProjectId, *instances[i].InstanceId) + if err != nil { + return fmt.Errorf("destroying instance %s during CheckDestroy: %w", *instances[i].InstanceId, err) + } + _, err = postgresql.DeleteInstanceWaitHandler(ctx, client, testutil.ProjectId, *instances[i].InstanceId).WaitWithContext(ctx) + if err != nil { + return fmt.Errorf("destroying instance %s during CheckDestroy: waiting for deletion %w", *instances[i].InstanceId, err) + } + } + } + } + return nil +} + +func checkInstanceDeleteSuccess(i *postgresql.Instance) bool { + if *i.LastOperation.Type != postgresql.InstanceTypeDelete { + return false + } + + if *i.LastOperation.Type == postgresql.InstanceTypeDelete { + if *i.LastOperation.State != postgresql.InstanceStateSuccess { + return false + } else if strings.Contains(*i.LastOperation.Description, "DeleteFailed") || strings.Contains(*i.LastOperation.Description, "failed") { + return false + } + } + return true +} diff --git a/stackit/services/rabbitmq/credentials/datasource.go b/stackit/services/rabbitmq/credentials/datasource.go new file mode 100644 index 00000000..b62685f8 --- /dev/null +++ 
b/stackit/services/rabbitmq/credentials/datasource.go @@ -0,0 +1,178 @@ +package rabbitmq + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" + + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/rabbitmq" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &credentialsDataSource{} +) + +// NewCredentialsDataSource is a helper function to simplify the provider implementation. +func NewCredentialsDataSource() datasource.DataSource { + return &credentialsDataSource{} +} + +// credentialsDataSource is the data source implementation. +type credentialsDataSource struct { + client *rabbitmq.APIClient +} + +// Metadata returns the resource type name. +func (r *credentialsDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_rabbitmq_credentials" +} + +// Configure adds the provider configured client to the resource. +func (r *credentialsDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Data Source Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. 
Please report this issue to the provider developers.", req.ProviderData)) + return + } + + var apiClient *rabbitmq.APIClient + var err error + if providerData.RabbitMQCustomEndpoint != "" { + apiClient, err = rabbitmq.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.RabbitMQCustomEndpoint), + ) + } else { + apiClient, err = rabbitmq.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + + tflog.Info(ctx, "RabbitMQ zone client configured") + r.client = apiClient +} + +// Schema defines the schema for the resource. +func (r *credentialsDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + descriptions := map[string]string{ + "main": "RabbitMQ credentials data source schema.", + "id": "Terraform's internal resource identifier.", + "credentials_id": "The credentials ID.", + "instance_id": "ID of the RabbitMQ instance.", + "project_id": "STACKIT project ID to which the instance is associated.", + } + + resp.Schema = schema.Schema{ + Description: descriptions["main"], + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: descriptions["id"], + Computed: true, + }, + "credentials_id": schema.StringAttribute{ + Description: descriptions["credentials_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "instance_id": schema.StringAttribute{ + Description: descriptions["instance_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "project_id": schema.StringAttribute{ + Description: descriptions["project_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "host": schema.StringAttribute{ + 
Computed: true, + }, + "hosts": schema.ListAttribute{ + ElementType: types.StringType, + Computed: true, + }, + "http_api_uri": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "password": schema.StringAttribute{ + Computed: true, + Sensitive: true, + }, + "port": schema.Int64Attribute{ + Computed: true, + }, + "uri": schema.StringAttribute{ + Computed: true, + }, + "username": schema.StringAttribute{ + Computed: true, + }, + }, + } +} + +// Read refreshes the Terraform state with the latest data. +func (r *credentialsDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.Config.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + credentialsId := model.CredentialsId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + ctx = tflog.SetField(ctx, "credentials_id", credentialsId) + + recordSetResp, err := r.client.GetCredentials(ctx, projectId, instanceId, credentialsId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading credentials", err.Error()) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(recordSetResp, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + + // Set refreshed state + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) 
+ tflog.Info(ctx, "RabbitMQ credentials read") +} diff --git a/stackit/services/rabbitmq/credentials/resource.go b/stackit/services/rabbitmq/credentials/resource.go new file mode 100644 index 00000000..27af5c96 --- /dev/null +++ b/stackit/services/rabbitmq/credentials/resource.go @@ -0,0 +1,371 @@ +package rabbitmq + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/terraform-provider-stackit/stackit/conversion" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/rabbitmq" +) + +// Ensure the implementation satisfies the expected interfaces. 
+var ( + _ resource.Resource = &rabbitMQCredentialsResource{} + _ resource.ResourceWithConfigure = &rabbitMQCredentialsResource{} + _ resource.ResourceWithImportState = &rabbitMQCredentialsResource{} +) + +type Model struct { + Id types.String `tfsdk:"id"` // needed by TF + CredentialsId types.String `tfsdk:"credentials_id"` + InstanceId types.String `tfsdk:"instance_id"` + ProjectId types.String `tfsdk:"project_id"` + Host types.String `tfsdk:"host"` + Hosts types.List `tfsdk:"hosts"` + HttpAPIURI types.String `tfsdk:"http_api_uri"` + Name types.String `tfsdk:"name"` + Password types.String `tfsdk:"password"` + Port types.Int64 `tfsdk:"port"` + Uri types.String `tfsdk:"uri"` + Username types.String `tfsdk:"username"` +} + +// NewCredentialsResource is a helper function to simplify the provider implementation. +func NewCredentialsResource() resource.Resource { + return &rabbitMQCredentialsResource{} +} + +// credentialsResource is the resource implementation. +type rabbitMQCredentialsResource struct { + client *rabbitmq.APIClient +} + +// Metadata returns the resource type name. +func (r *rabbitMQCredentialsResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_rabbitmq_credentials" +} + +// Configure adds the provider configured client to the resource. +func (r *rabbitMQCredentialsResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Resource Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. 
Please report this issue to the provider developers.", req.ProviderData)) + return + } + + var apiClient *rabbitmq.APIClient + var err error + if providerData.RabbitMQCustomEndpoint != "" { + apiClient, err = rabbitmq.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.RabbitMQCustomEndpoint), + ) + } else { + apiClient, err = rabbitmq.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + + tflog.Info(ctx, "RabbitMQ zone client configured") + r.client = apiClient +} + +// Schema defines the schema for the resource. +func (r *rabbitMQCredentialsResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + descriptions := map[string]string{ + "main": "RabbitMQ credentials resource schema.", + "id": "Terraform's internal resource identifier.", + "credentials_id": "The credentials ID.", + "instance_id": "ID of the RabbitMQ instance.", + "project_id": "STACKIT Project ID to which the instance is associated.", + } + + resp.Schema = schema.Schema{ + Description: descriptions["main"], + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: descriptions["id"], + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "credentials_id": schema.StringAttribute{ + Description: descriptions["credentials_id"], + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "instance_id": schema.StringAttribute{ + Description: descriptions["instance_id"], + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Validators: 
[]validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "project_id": schema.StringAttribute{ + Description: descriptions["project_id"], + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "host": schema.StringAttribute{ + Computed: true, + }, + "hosts": schema.ListAttribute{ + ElementType: types.StringType, + Computed: true, + }, + "http_api_uri": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "password": schema.StringAttribute{ + Computed: true, + Sensitive: true, + }, + "port": schema.Int64Attribute{ + Computed: true, + }, + "uri": schema.StringAttribute{ + Computed: true, + }, + "username": schema.StringAttribute{ + Computed: true, + }, + }, + } +} + +// Create creates the resource and sets the initial Terraform state. +func (r *rabbitMQCredentialsResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + // Create new recordset + credentialsResp, err := r.client.CreateCredentials(ctx, projectId, instanceId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating credentials", fmt.Sprintf("Calling API: %v", err)) + return + } + if credentialsResp.Id == nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating credentials", "Got empty credentials id") + return + } + credentialsId := *credentialsResp.Id + ctx = tflog.SetField(ctx, "credentials_id", credentialsId) + + wr, err := rabbitmq.CreateCredentialsWaitHandler(ctx, r.client, projectId, instanceId, credentialsId).SetTimeout(1 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating credentials", fmt.Sprintf("Instance creation waiting: %v", err)) + return + } + got, ok := wr.(*rabbitmq.CredentialsResponse) + if !ok { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating credentials", fmt.Sprintf("Wait result conversion, got %+v", got)) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(got, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "RabbitMQ credentials created") +} + +// Read refreshes the Terraform state with the latest data. +func (r *rabbitMQCredentialsResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + credentialsId := model.CredentialsId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + ctx = tflog.SetField(ctx, "credentials_id", credentialsId) + + recordSetResp, err := r.client.GetCredentials(ctx, projectId, instanceId, credentialsId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading credentials", err.Error()) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(recordSetResp, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + + // Set refreshed state + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "RabbitMQ credentials read") +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *rabbitMQCredentialsResource) Update(_ context.Context, _ resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform + // Update shouldn't be called + resp.Diagnostics.AddError("Error updating credentials", "credentials can't be updated") +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *rabbitMQCredentialsResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + credentialsId := model.CredentialsId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + ctx = tflog.SetField(ctx, "credentials_id", credentialsId) + + // Delete existing record set + err := r.client.DeleteCredentials(ctx, projectId, instanceId, credentialsId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting credentials", err.Error()) + } + _, err = rabbitmq.DeleteCredentialsWaitHandler(ctx, r.client, projectId, instanceId, credentialsId).SetTimeout(1 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting credentials", fmt.Sprintf("Instance deletion waiting: %v", err)) + return + } + tflog.Info(ctx, "RabbitMQ credentials deleted") +} + +// ImportState imports a resource into the Terraform state on success. +// The expected format of the resource import identifier is: project_id,instance_id,credentials_id +func (r *rabbitMQCredentialsResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + idParts := strings.Split(req.ID, core.Separator) + if len(idParts) != 3 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" { + core.LogAndAddError(ctx, &resp.Diagnostics, + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format [project_id],[instance_id],[credentials_id], got %q", req.ID), + ) + return + } + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), idParts[1])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("credentials_id"), idParts[2])...) 
+ tflog.Info(ctx, "RabbitMQ credentials state imported") +} + +func mapFields(credentialsResp *rabbitmq.CredentialsResponse, model *Model) error { + if credentialsResp == nil { + return fmt.Errorf("response input is nil") + } + if credentialsResp.Raw == nil { + return fmt.Errorf("response credentials raw is nil") + } + if model == nil { + return fmt.Errorf("model input is nil") + } + credentials := credentialsResp.Raw.Credentials + + var credentialsId string + if model.CredentialsId.ValueString() != "" { + credentialsId = model.CredentialsId.ValueString() + } else if credentialsResp.Id != nil { + credentialsId = *credentialsResp.Id + } else { + return fmt.Errorf("credentials id not present") + } + + idParts := []string{ + model.ProjectId.ValueString(), + model.InstanceId.ValueString(), + credentialsId, + } + model.Id = types.StringValue( + strings.Join(idParts, core.Separator), + ) + model.CredentialsId = types.StringValue(credentialsId) + model.Hosts = types.ListNull(types.StringType) + if credentials != nil { + if credentials.Hosts != nil { + var hosts []attr.Value + for _, host := range *credentials.Hosts { + hosts = append(hosts, types.StringValue(host)) + } + hostsList, diags := types.ListValue(types.StringType, hosts) + if diags.HasError() { + return fmt.Errorf("failed to map hosts: %w", core.DiagsToError(diags)) + } + model.Hosts = hostsList + } + model.Host = types.StringPointerValue(credentials.Host) + model.HttpAPIURI = types.StringPointerValue(credentials.HttpApiUri) + model.Name = types.StringPointerValue(credentials.Name) + model.Password = types.StringPointerValue(credentials.Password) + model.Port = conversion.ToTypeInt64(credentials.Port) + model.Uri = types.StringPointerValue(credentials.Uri) + model.Username = types.StringPointerValue(credentials.Username) + } + return nil +} diff --git a/stackit/services/rabbitmq/credentials/resource_test.go b/stackit/services/rabbitmq/credentials/resource_test.go new file mode 100644 index 00000000..02dac499 --- 
/dev/null +++ b/stackit/services/rabbitmq/credentials/resource_test.go @@ -0,0 +1,156 @@ +package rabbitmq + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/rabbitmq" +) + +func TestMapFields(t *testing.T) { + tests := []struct { + description string + input *rabbitmq.CredentialsResponse + expected Model + isValid bool + }{ + { + "default_values", + &rabbitmq.CredentialsResponse{ + Id: utils.Ptr("cid"), + Raw: &rabbitmq.RawCredentials{}, + }, + Model{ + Id: types.StringValue("pid,iid,cid"), + CredentialsId: types.StringValue("cid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Host: types.StringNull(), + Hosts: types.ListNull(types.StringType), + HttpAPIURI: types.StringNull(), + Name: types.StringNull(), + Password: types.StringNull(), + Port: types.Int64Null(), + Uri: types.StringNull(), + Username: types.StringNull(), + }, + true, + }, + { + "simple_values", + &rabbitmq.CredentialsResponse{ + Id: utils.Ptr("cid"), + Raw: &rabbitmq.RawCredentials{ + Credentials: &rabbitmq.Credentials{ + Host: utils.Ptr("host"), + Hosts: &[]string{ + "host_1", + "", + }, + HttpApiUri: utils.Ptr("http"), + Name: utils.Ptr("name"), + Password: utils.Ptr("password"), + Port: utils.Ptr(int32(1234)), + Uri: utils.Ptr("uri"), + Username: utils.Ptr("username"), + }, + }, + }, + Model{ + Id: types.StringValue("pid,iid,cid"), + CredentialsId: types.StringValue("cid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Host: types.StringValue("host"), + Hosts: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("host_1"), + types.StringValue(""), + }), + HttpAPIURI: types.StringValue("http"), + Name: types.StringValue("name"), + Password: types.StringValue("password"), + Port: 
types.Int64Value(1234), + Uri: types.StringValue("uri"), + Username: types.StringValue("username"), + }, + true, + }, + { + "null_fields_and_int_conversions", + &rabbitmq.CredentialsResponse{ + Id: utils.Ptr("cid"), + Raw: &rabbitmq.RawCredentials{ + Credentials: &rabbitmq.Credentials{ + Host: utils.Ptr(""), + Hosts: &[]string{}, + HttpApiUri: nil, + Name: nil, + Password: utils.Ptr(""), + Port: utils.Ptr(int32(2123456789)), + Uri: nil, + Username: utils.Ptr(""), + }, + }, + }, + Model{ + Id: types.StringValue("pid,iid,cid"), + CredentialsId: types.StringValue("cid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Host: types.StringValue(""), + Hosts: types.ListValueMust(types.StringType, []attr.Value{}), + HttpAPIURI: types.StringNull(), + Name: types.StringNull(), + Password: types.StringValue(""), + Port: types.Int64Value(2123456789), + Uri: types.StringNull(), + Username: types.StringValue(""), + }, + true, + }, + { + "nil_response", + nil, + Model{}, + false, + }, + { + "no_resource_id", + &rabbitmq.CredentialsResponse{}, + Model{}, + false, + }, + { + "nil_raw_credentials", + &rabbitmq.CredentialsResponse{ + Id: utils.Ptr("cid"), + }, + Model{}, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + model := &Model{ + ProjectId: tt.expected.ProjectId, + InstanceId: tt.expected.InstanceId, + } + err := mapFields(tt.input, model) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(model, &tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} diff --git a/stackit/services/rabbitmq/instance/datasource.go b/stackit/services/rabbitmq/instance/datasource.go new file mode 100644 index 00000000..01238fff --- /dev/null +++ b/stackit/services/rabbitmq/instance/datasource.go @@ -0,0 +1,181 @@ +package rabbitmq + +import ( + 
"context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" + + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/rabbitmq" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &instanceDataSource{} +) + +// NewInstanceDataSource is a helper function to simplify the provider implementation. +func NewInstanceDataSource() datasource.DataSource { + return &instanceDataSource{} +} + +// instanceDataSource is the data source implementation. +type instanceDataSource struct { + client *rabbitmq.APIClient +} + +// Metadata returns the resource type name. +func (r *instanceDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_rabbitmq_instance" +} + +// Configure adds the provider configured client to the resource. +func (r *instanceDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Data Source Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. 
Please report this issue to the provider developers.", req.ProviderData)) + return + } + + var apiClient *rabbitmq.APIClient + var err error + if providerData.RabbitMQCustomEndpoint != "" { + apiClient, err = rabbitmq.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.RabbitMQCustomEndpoint), + ) + } else { + apiClient, err = rabbitmq.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + + tflog.Info(ctx, "RabbitMQ zone client configured") + r.client = apiClient +} + +// Schema defines the schema for the resource. +func (r *instanceDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + descriptions := map[string]string{ + "main": "RabbitMQ instance data source schema.", + "id": "Terraform's internal resource identifier.", + "instance_id": "ID of the RabbitMQ instance.", + "project_id": "STACKIT Project ID to which the instance is associated.", + "name": "Instance name.", + "version": "The service version.", + "plan_name": "The selected plan name.", + "plan_id": "The selected plan ID.", + } + + resp.Schema = schema.Schema{ + Description: descriptions["main"], + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: descriptions["id"], + Computed: true, + }, + "instance_id": schema.StringAttribute{ + Description: descriptions["instance_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "project_id": schema.StringAttribute{ + Description: descriptions["project_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "name": schema.StringAttribute{ + Description: descriptions["name"], + Computed: true, + }, + "version": schema.StringAttribute{ + 
Description: descriptions["version"], + Computed: true, + }, + "plan_name": schema.StringAttribute{ + Description: descriptions["plan_name"], + Computed: true, + }, + "plan_id": schema.StringAttribute{ + Description: descriptions["plan_id"], + Computed: true, + }, + "parameters": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "sgw_acl": schema.StringAttribute{ + Computed: true, + }, + }, + Computed: true, + }, + "cf_guid": schema.StringAttribute{ + Computed: true, + }, + "cf_space_guid": schema.StringAttribute{ + Computed: true, + }, + "dashboard_url": schema.StringAttribute{ + Computed: true, + }, + "image_url": schema.StringAttribute{ + Computed: true, + }, + "organization_guid": schema.StringAttribute{ + Computed: true, + }, + }, + } +} + +// Read refreshes the Terraform state with the latest data. +func (r *instanceDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var state Model + diags := req.Config.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + projectId := state.ProjectId.ValueString() + instanceId := state.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + instanceResp, err := r.client.GetInstance(ctx, projectId, instanceId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Unable to read instance", err.Error()) + return + } + + err = mapFields(instanceResp, &state) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Mapping fields", err.Error()) + return + } + // Set refreshed state + diags = resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ tflog.Info(ctx, "RabbitMQ instance read") +} diff --git a/stackit/services/rabbitmq/instance/resource.go b/stackit/services/rabbitmq/instance/resource.go new file mode 100644 index 00000000..f98b05a9 --- /dev/null +++ b/stackit/services/rabbitmq/instance/resource.go @@ -0,0 +1,637 @@ +package rabbitmq + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/objectplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" + + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/rabbitmq" +) + +// Ensure the implementation satisfies the expected interfaces. 
+var ( + _ resource.Resource = &instanceResource{} + _ resource.ResourceWithConfigure = &instanceResource{} + _ resource.ResourceWithImportState = &instanceResource{} +) + +type Model struct { + Id types.String `tfsdk:"id"` // needed by TF + InstanceId types.String `tfsdk:"instance_id"` + ProjectId types.String `tfsdk:"project_id"` + CfGuid types.String `tfsdk:"cf_guid"` + CfSpaceGuid types.String `tfsdk:"cf_space_guid"` + DashboardUrl types.String `tfsdk:"dashboard_url"` + ImageUrl types.String `tfsdk:"image_url"` + Name types.String `tfsdk:"name"` + OrganizationGuid types.String `tfsdk:"organization_guid"` + Parameters types.Object `tfsdk:"parameters"` + Version types.String `tfsdk:"version"` + PlanName types.String `tfsdk:"plan_name"` + PlanId types.String `tfsdk:"plan_id"` +} + +// Struct corresponding to DataSourceModel.Parameters +type parametersModel struct { + SgwAcl types.String `tfsdk:"sgw_acl"` +} + +// Types corresponding to parametersModel +var parametersTypes = map[string]attr.Type{ + "sgw_acl": basetypes.StringType{}, +} + +// NewInstanceResource is a helper function to simplify the provider implementation. +func NewInstanceResource() resource.Resource { + return &instanceResource{} +} + +// instanceResource is the resource implementation. +type instanceResource struct { + client *rabbitmq.APIClient +} + +// Metadata returns the resource type name. +func (r *instanceResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_rabbitmq_instance" +} + +// Configure adds the provider configured client to the resource. +func (r *instanceResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. 
+ if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Resource Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. Please report this issue to the provider developers.", req.ProviderData)) + return + } + + var apiClient *rabbitmq.APIClient + var err error + if providerData.RabbitMQCustomEndpoint != "" { + apiClient, err = rabbitmq.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.RabbitMQCustomEndpoint), + ) + } else { + apiClient, err = rabbitmq.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + + tflog.Info(ctx, "rabbitmq zone client configured") + r.client = apiClient +} + +// Schema defines the schema for the resource. +func (r *instanceResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + descriptions := map[string]string{ + "main": "RabbitMQ instance resource schema.", + "id": "Terraform's internal resource ID.", + "instance_id": "ID of the RabbitMQ instance.", + "project_id": "STACKIT project ID to which the instance is associated.", + "name": "Instance name.", + "version": "The service version.", + "plan_name": "The selected plan name.", + "plan_id": "The selected plan ID.", + } + + resp.Schema = schema.Schema{ + Description: descriptions["main"], + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: descriptions["id"], + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "instance_id": schema.StringAttribute{ + Description: descriptions["instance_id"], + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Validators: 
[]validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "project_id": schema.StringAttribute{ + Description: descriptions["project_id"], + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "name": schema.StringAttribute{ + Description: descriptions["name"], + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), + }, + }, + "version": schema.StringAttribute{ + Description: descriptions["version"], + Required: true, + }, + "plan_name": schema.StringAttribute{ + Description: descriptions["plan_name"], + Required: true, + }, + "plan_id": schema.StringAttribute{ + Description: descriptions["plan_id"], + Computed: true, + }, + "parameters": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "sgw_acl": schema.StringAttribute{ + Optional: true, + Computed: true, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + }, + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Object{ + objectplanmodifier.UseStateForUnknown(), + }, + }, + "cf_guid": schema.StringAttribute{ + Computed: true, + }, + "cf_space_guid": schema.StringAttribute{ + Computed: true, + }, + "dashboard_url": schema.StringAttribute{ + Computed: true, + }, + "image_url": schema.StringAttribute{ + Computed: true, + }, + "organization_guid": schema.StringAttribute{ + Computed: true, + }, + }, + } +} + +// Create creates the resource and sets the initial Terraform state. 
+func (r *instanceResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + + r.loadPlanId(ctx, &resp.Diagnostics, &model) + if diags.HasError() { + core.LogAndAddError(ctx, &diags, "Failed to load RabbitMQ service plan", "plan "+model.PlanName.ValueString()) + return + } + + var parameters = ¶metersModel{} + if !(model.Parameters.IsNull() || model.Parameters.IsUnknown()) { + diags = model.Parameters.As(ctx, parameters, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + + // Generate API request body from model + payload, err := toCreatePayload(&model, parameters) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Creating API payload: %v", err)) + return + } + // Create new instance + createResp, err := r.client.CreateInstance(ctx, projectId).CreateInstancePayload(*payload).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Calling API: %v", err)) + return + } + instanceId := *createResp.InstanceId + ctx = tflog.SetField(ctx, "instance_id", instanceId) + wr, err := rabbitmq.CreateInstanceWaitHandler(ctx, r.client, projectId, instanceId).SetTimeout(15 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Instance creation waiting: %v", err)) + return + } + got, ok := wr.(*rabbitmq.Instance) + if !ok { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Wait result conversion, got %+v", got)) + return + } + + // Map response body 
to schema and populate Computed attribute values + err = mapFields(got, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + // Set state to fully populated data + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "rabbitmq instance created") +} + +func toCreatePayload(model *Model, parameters *parametersModel) (*rabbitmq.CreateInstancePayload, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + if parameters == nil { + return &rabbitmq.CreateInstancePayload{ + InstanceName: model.Name.ValueStringPointer(), + PlanId: model.PlanId.ValueStringPointer(), + }, nil + } + payloadParams := &rabbitmq.InstanceParameters{} + if parameters.SgwAcl.ValueString() != "" { + payloadParams.SgwAcl = parameters.SgwAcl.ValueStringPointer() + } + return &rabbitmq.CreateInstancePayload{ + InstanceName: model.Name.ValueStringPointer(), + Parameters: payloadParams, + PlanId: model.PlanId.ValueStringPointer(), + }, nil +} + +// Read refreshes the Terraform state with the latest data. +func (r *instanceResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var state Model + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + projectId := state.ProjectId.ValueString() + instanceId := state.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + instanceResp, err := r.client.GetInstance(ctx, projectId, instanceId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instances", err.Error()) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(instanceResp, &state) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + // Set refreshed state + diags = resp.State.Set(ctx, state) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "rabbitmq instance read") +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *instanceResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + r.loadPlanId(ctx, &resp.Diagnostics, &model) + if diags.HasError() { + core.LogAndAddError(ctx, &diags, "Failed to load RabbitMQ service plan", "plan "+model.PlanName.ValueString()) + return + } + + var parameters = ¶metersModel{} + if !(model.Parameters.IsNull() || model.Parameters.IsUnknown()) { + diags = model.Parameters.As(ctx, parameters, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + } + + // Generate API request body from model + payload, err := toUpdatePayload(&model, parameters) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Could not create API payload: %v", err)) + return + } + // Update existing instance + err = r.client.UpdateInstance(ctx, projectId, instanceId).UpdateInstancePayload(*payload).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", err.Error()) + return + } + wr, err := rabbitmq.UpdateInstanceWaitHandler(ctx, r.client, projectId, instanceId).SetTimeout(15 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Instance update waiting: %v", err)) + return + } + got, ok := wr.(*rabbitmq.Instance) + if !ok { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Wait result conversion, got %+v", got)) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(got, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields in update", err.Error()) + return + } + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "rabbitmq instance updated") +} + +func toUpdatePayload(model *Model, parameters *parametersModel) (*rabbitmq.UpdateInstancePayload, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + + if parameters == nil { + return &rabbitmq.UpdateInstancePayload{ + PlanId: model.PlanId.ValueStringPointer(), + }, nil + } + return &rabbitmq.UpdateInstancePayload{ + Parameters: &rabbitmq.InstanceParameters{ + SgwAcl: parameters.SgwAcl.ValueStringPointer(), + }, + PlanId: model.PlanId.ValueStringPointer(), + }, nil +} + +// Delete deletes the resource and removes the Terraform state on success. 
func (r *instanceResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform
	var model Model
	diags := req.State.Get(ctx, &model)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
	projectId := model.ProjectId.ValueString()
	instanceId := model.InstanceId.ValueString()
	ctx = tflog.SetField(ctx, "project_id", projectId)
	ctx = tflog.SetField(ctx, "instance_id", instanceId)

	// Delete existing instance
	err := r.client.DeleteInstance(ctx, projectId, instanceId).Execute()
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting instance", err.Error())
		return
	}
	// Deletion is asynchronous: wait (up to 15 minutes) until the instance is gone.
	_, err = rabbitmq.DeleteInstanceWaitHandler(ctx, r.client, projectId, instanceId).SetTimeout(15 * time.Minute).WaitWithContext(ctx)
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting instance", fmt.Sprintf("Instance deletion waiting: %v", err))
		return
	}
	tflog.Info(ctx, "rabbitmq instance deleted")
}

// ImportState imports a resource into the Terraform state on success.
// The expected format of the resource import identifier is: project_id,instance_id
func (r *instanceResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
	idParts := strings.Split(req.ID, core.Separator)

	// Exactly two non-empty parts are required: project_id and instance_id.
	if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" {
		resp.Diagnostics.AddError(
			"Unexpected Import Identifier",
			fmt.Sprintf("Expected import identifier with format: [project_id],[instance_id]  Got: %q", req.ID),
		)
		return
	}

	// Only the identifying attributes are set here; the follow-up Read fills
	// in the remaining (computed) attributes.
	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...)
	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), idParts[1])...)
	tflog.Info(ctx, "RabbitMQ instance state imported")
}

// mapFields copies the API instance response into the Terraform model,
// populating computed attributes. The instance ID is taken from the existing
// model when present (e.g. on Read) and from the response otherwise (e.g. on
// Create). Returns an error if either argument is nil or no instance ID can
// be determined.
func mapFields(instance *rabbitmq.Instance, model *Model) error {
	if instance == nil {
		return fmt.Errorf("response input is nil")
	}
	if model == nil {
		return fmt.Errorf("model input is nil")
	}

	var instanceId string
	if model.InstanceId.ValueString() != "" {
		instanceId = model.InstanceId.ValueString()
	} else if instance.InstanceId != nil {
		instanceId = *instance.InstanceId
	} else {
		return fmt.Errorf("instance id not present")
	}

	// Terraform's internal ID is "<project_id>,<instance_id>" (see ImportState).
	idParts := []string{
		model.ProjectId.ValueString(),
		instanceId,
	}
	model.Id = types.StringValue(
		strings.Join(idParts, core.Separator),
	)
	model.InstanceId = types.StringValue(instanceId)
	model.PlanId = types.StringPointerValue(instance.PlanId)
	model.CfGuid = types.StringPointerValue(instance.CfGuid)
	model.CfSpaceGuid = types.StringPointerValue(instance.CfSpaceGuid)
	model.DashboardUrl = types.StringPointerValue(instance.DashboardUrl)
	model.ImageUrl = types.StringPointerValue(instance.ImageUrl)
	model.Name = types.StringPointerValue(instance.Name)
	model.OrganizationGuid = types.StringPointerValue(instance.OrganizationGuid)

	if instance.Parameters == nil {
		model.Parameters = types.ObjectNull(parametersTypes)
	} else {
		parameters, err := mapParameters(*instance.Parameters)
		if err != nil {
			return fmt.Errorf("mapping parameters: %w", err)
		}
		model.Parameters = parameters
	}
	return nil
}

// mapParameters converts the untyped parameters map returned by the API into
// a typed Terraform object, driven by parametersTypes. Missing keys become
// null values; a value whose dynamic type does not match the declared
// attribute type yields an error. NOTE(review): only the string case is
// currently exercised by parametersTypes (sgw_acl); the bool/int/list cases
// appear to anticipate future attributes — confirm before relying on them.
func mapParameters(params map[string]interface{}) (types.Object, error) {
	attributes := map[string]attr.Value{}
	for attribute := range parametersTypes {
		valueInterface, ok := params[attribute]
		if !ok {
			// All fields are optional, so this is ok
			// Set the value as nil, will be handled accordingly
			valueInterface = nil
		}

		var value attr.Value
		switch parametersTypes[attribute].(type) {
		default:
			return types.ObjectNull(parametersTypes), fmt.Errorf("found unexpected attribute type '%T'", parametersTypes[attribute])
		case basetypes.StringType:
			if valueInterface == nil {
				value = types.StringNull()
			} else {
				valueString, ok := valueInterface.(string)
				if !ok {
					return types.ObjectNull(parametersTypes), fmt.Errorf("found attribute '%s' of type %T, failed to assert as string", attribute, valueInterface)
				}
				value = types.StringValue(valueString)
			}
		case basetypes.BoolType:
			if valueInterface == nil {
				value = types.BoolNull()
			} else {
				valueBool, ok := valueInterface.(bool)
				if !ok {
					return types.ObjectNull(parametersTypes), fmt.Errorf("found attribute '%s' of type %T, failed to assert as bool", attribute, valueInterface)
				}
				value = types.BoolValue(valueBool)
			}
		case basetypes.Int64Type:
			if valueInterface == nil {
				value = types.Int64Null()
			} else {
				// This may be int64, int32, int or float64
				// We try to assert all 4
				var valueInt64 int64
				switch temp := valueInterface.(type) {
				default:
					return types.ObjectNull(parametersTypes), fmt.Errorf("found attribute '%s' of type %T, failed to assert as int", attribute, valueInterface)
				case int64:
					valueInt64 = temp
				case int32:
					valueInt64 = int64(temp)
				case int:
					valueInt64 = int64(temp)
				case float64:
					valueInt64 = int64(temp)
				}
				value = types.Int64Value(valueInt64)
			}
		case basetypes.ListType: // Assumed to be a list of strings
			if valueInterface == nil {
				value = types.ListNull(types.StringType)
			} else {
				// This may be []string{} or []interface{}
				// We try to assert all 2
				var valueList []attr.Value
				switch temp := valueInterface.(type) {
				default:
					return types.ObjectNull(parametersTypes), fmt.Errorf("found attribute '%s' of type %T, failed to assert as array of interface", attribute, valueInterface)
				case []string:
					for _, x := range temp {
						valueList = append(valueList, types.StringValue(x))
					}
				case []interface{}:
					for _, x := range temp {
						xString, ok := x.(string)
						if !ok {
							return types.ObjectNull(parametersTypes), fmt.Errorf("found attribute '%s' with element '%s' of type %T, failed to assert as string", attribute, x, x)
						}
						valueList = append(valueList, types.StringValue(xString))
					}
				}
				temp2, diags := types.ListValue(types.StringType, valueList)
				if diags.HasError() {
					return types.ObjectNull(parametersTypes), fmt.Errorf("failed to map %s: %w", attribute, core.DiagsToError(diags))
				}
				value = temp2
			}
		}
		attributes[attribute] = value
	}

	output, diags := types.ObjectValue(parametersTypes, attributes)
	if diags.HasError() {
		return types.ObjectNull(parametersTypes), fmt.Errorf("failed to create object: %w", core.DiagsToError(diags))
	}
	return output, nil
}

// loadPlanId resolves the model's (version, plan_name) pair to a concrete plan
// ID by listing the project's RabbitMQ offerings, writing the result into
// model.PlanId. Matching is case-insensitive. On failure it records an error
// in diags listing the available versions or plan names, so callers must check
// diags afterwards.
func (r *instanceResource) loadPlanId(ctx context.Context, diags *diag.Diagnostics, model *Model) {
	projectId := model.ProjectId.ValueString()
	res, err := r.client.GetOfferings(ctx, projectId).Execute()
	if err != nil {
		diags.AddError("Failed to list RabbitMQ offerings", err.Error())
		return
	}

	version := model.Version.ValueString()
	planName := model.PlanName.ValueString()
	// Accumulated for the error messages below.
	availableVersions := ""
	availablePlanNames := ""
	isValidVersion := false
	for _, offer := range *res.Offerings {
		if !strings.EqualFold(*offer.Version, version) {
			availableVersions = fmt.Sprintf("%s\n- %s", availableVersions, *offer.Version)
			continue
		}
		isValidVersion = true

		for _, plan := range *offer.Plans {
			if plan.Name == nil {
				continue
			}
			if strings.EqualFold(*plan.Name, planName) && plan.Id != nil {
				model.PlanId = types.StringPointerValue(plan.Id)
				return
			}
			availablePlanNames = fmt.Sprintf("%s\n- %s", availablePlanNames, *plan.Name)
		}
	}

	if !isValidVersion {
		diags.AddError("Invalid version", fmt.Sprintf("Couldn't find version '%s', available versions are:%s", version, availableVersions))
		return
	}
	diags.AddError("Invalid plan_name", fmt.Sprintf("Couldn't find plan_name '%s' for version %s, available names are:%s", planName, version, availablePlanNames))
}
diff --git a/stackit/services/rabbitmq/instance/resource_test.go b/stackit/services/rabbitmq/instance/resource_test.go new file mode 100644 index 00000000..02bded5b --- /dev/null +++ b/stackit/services/rabbitmq/instance/resource_test.go @@ -0,0 +1,304 @@ +package rabbitmq + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/rabbitmq" +) + +func TestMapFields(t *testing.T) { + tests := []struct { + description string + input *rabbitmq.Instance + expected Model + isValid bool + }{ + { + "default_values", + &rabbitmq.Instance{}, + Model{ + Id: types.StringValue("pid,iid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + PlanId: types.StringNull(), + Name: types.StringNull(), + CfGuid: types.StringNull(), + CfSpaceGuid: types.StringNull(), + DashboardUrl: types.StringNull(), + ImageUrl: types.StringNull(), + OrganizationGuid: types.StringNull(), + Parameters: types.ObjectNull(parametersTypes), + }, + true, + }, + { + "simple_values", + &rabbitmq.Instance{ + PlanId: utils.Ptr("plan"), + CfGuid: utils.Ptr("cf"), + CfSpaceGuid: utils.Ptr("space"), + DashboardUrl: utils.Ptr("dashboard"), + ImageUrl: utils.Ptr("image"), + InstanceId: utils.Ptr("iid"), + Name: utils.Ptr("name"), + OrganizationGuid: utils.Ptr("org"), + Parameters: &map[string]interface{}{ + "sgw_acl": "acl", + }, + }, + Model{ + Id: types.StringValue("pid,iid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + PlanId: types.StringValue("plan"), + Name: types.StringValue("name"), + CfGuid: types.StringValue("cf"), + CfSpaceGuid: types.StringValue("space"), + DashboardUrl: types.StringValue("dashboard"), + ImageUrl: types.StringValue("image"), + OrganizationGuid: types.StringValue("org"), + Parameters: 
types.ObjectValueMust(parametersTypes, map[string]attr.Value{ + "sgw_acl": types.StringValue("acl"), + }), + }, + true, + }, + { + "nil_response", + nil, + Model{}, + false, + }, + { + "no_resource_id", + &rabbitmq.Instance{}, + Model{}, + false, + }, + { + "wrong_param_types_1", + &rabbitmq.Instance{ + Parameters: &map[string]interface{}{ + "sgw_acl": true, + }, + }, + Model{}, + false, + }, + { + "wrong_param_types_2", + &rabbitmq.Instance{ + Parameters: &map[string]interface{}{ + "sgw_acl": 1, + }, + }, + Model{}, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + state := &Model{ + ProjectId: tt.expected.ProjectId, + InstanceId: tt.expected.InstanceId, + } + err := mapFields(tt.input, state) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(state, &tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func TestToCreatePayload(t *testing.T) { + tests := []struct { + description string + input *Model + inputParameters *parametersModel + expected *rabbitmq.CreateInstancePayload + isValid bool + }{ + { + "default_values", + &Model{}, + ¶metersModel{}, + &rabbitmq.CreateInstancePayload{ + Parameters: &rabbitmq.InstanceParameters{}, + }, + true, + }, + { + "simple_values", + &Model{ + Name: types.StringValue("name"), + PlanId: types.StringValue("plan"), + }, + ¶metersModel{ + SgwAcl: types.StringValue("sgw"), + }, + &rabbitmq.CreateInstancePayload{ + InstanceName: utils.Ptr("name"), + Parameters: &rabbitmq.InstanceParameters{ + SgwAcl: utils.Ptr("sgw"), + }, + PlanId: utils.Ptr("plan"), + }, + true, + }, + { + "null_fields_and_int_conversions", + &Model{ + Name: types.StringValue(""), + PlanId: types.StringValue(""), + }, + ¶metersModel{ + SgwAcl: types.StringNull(), + }, + &rabbitmq.CreateInstancePayload{ + InstanceName: utils.Ptr(""), + 
Parameters: &rabbitmq.InstanceParameters{ + SgwAcl: nil, + }, + PlanId: utils.Ptr(""), + }, + true, + }, + { + "nil_model", + nil, + ¶metersModel{}, + nil, + false, + }, + { + "nil_parameters", + &Model{ + Name: types.StringValue("name"), + PlanId: types.StringValue("plan"), + }, + nil, + &rabbitmq.CreateInstancePayload{ + InstanceName: utils.Ptr("name"), + PlanId: utils.Ptr("plan"), + }, + true, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + output, err := toCreatePayload(tt.input, tt.inputParameters) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(output, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func TestToUpdatePayload(t *testing.T) { + tests := []struct { + description string + input *Model + inputParameters *parametersModel + expected *rabbitmq.UpdateInstancePayload + isValid bool + }{ + { + "default_values", + &Model{}, + ¶metersModel{}, + &rabbitmq.UpdateInstancePayload{ + Parameters: &rabbitmq.InstanceParameters{}, + }, + true, + }, + { + "simple_values", + &Model{ + PlanId: types.StringValue("plan"), + }, + ¶metersModel{ + SgwAcl: types.StringValue("sgw"), + }, + &rabbitmq.UpdateInstancePayload{ + Parameters: &rabbitmq.InstanceParameters{ + SgwAcl: utils.Ptr("sgw"), + }, + PlanId: utils.Ptr("plan"), + }, + true, + }, + { + "null_fields_and_int_conversions", + &Model{ + PlanId: types.StringValue(""), + }, + ¶metersModel{ + SgwAcl: types.StringNull(), + }, + &rabbitmq.UpdateInstancePayload{ + Parameters: &rabbitmq.InstanceParameters{ + SgwAcl: nil, + }, + PlanId: utils.Ptr(""), + }, + true, + }, + { + "nil_model", + nil, + ¶metersModel{}, + nil, + false, + }, + { + "nil_parameters", + &Model{ + PlanId: types.StringValue("plan"), + }, + nil, + &rabbitmq.UpdateInstancePayload{ + PlanId: utils.Ptr("plan"), + }, + true, + }, + } + 
for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + output, err := toUpdatePayload(tt.input, tt.inputParameters) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(output, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} diff --git a/stackit/services/rabbitmq/rabbitmq_acc_test.go b/stackit/services/rabbitmq/rabbitmq_acc_test.go new file mode 100644 index 00000000..6659d5b7 --- /dev/null +++ b/stackit/services/rabbitmq/rabbitmq_acc_test.go @@ -0,0 +1,286 @@ +package rabbitmq_test + +import ( + "context" + "fmt" + "regexp" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/rabbitmq" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/testutil" +) + +// Instance resource data +var instanceResource = map[string]string{ + "project_id": testutil.ProjectId, + "name": testutil.ResourceNameWithDateTime("rabbitmq"), + "plan_id": "7e1f8394-5dd5-40b1-8608-16b4344eb51b", + "sgw_acl_invalid": "1.2.3.4/4", + "sgw_acl_valid": "1.2.3.4/31", +} + +func resourceConfig(acls *string) string { + aclsLine := "" + if acls != nil { + aclsLine = fmt.Sprintf(`sgw_acl = %q`, *acls) + } + return fmt.Sprintf(` + %s + + resource "stackit_rabbitmq_instance" "instance" { + project_id = "%s" + name = "%s" + plan_id = "%s" + parameters = { + %s + metrics_frequency = "%s" + } + } + + %s + `, + testutil.RabbitMQProviderConfig(), + instanceResource["project_id"], + instanceResource["name"], + instanceResource["plan_id"], + aclsLine, + 
instanceResource["metrics_frequency"], + resourceConfigCredentials(), + ) +} + +func resourceConfigWithUpdate() string { + return fmt.Sprintf(` + %s + + resource "stackit_rabbitmq_instance" "instance" { + project_id = "%s" + name = "%s" + plan_id = "%s" + parameters = { + sgw_acl = "%s" + } + } + + %s + `, + testutil.RabbitMQProviderConfig(), + instanceResource["project_id"], + instanceResource["name"], + instanceResource["plan_id"], + instanceResource["sgw_acl_valid"], + resourceConfigCredentials(), + ) +} + +func resourceConfigCredentials() string { + return ` + resource "stackit_rabbitmq_credentials" "credentials" { + project_id = stackit_rabbitmq_instance.instance.project_id + instance_id = stackit_rabbitmq_instance.instance.instance_id + } + ` +} + +func TestAccRabbitMQResource(t *testing.T) { + acls := instanceResource["sgw_acl_invalid"] + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: testutil.TestAccProtoV6ProviderFactories, + CheckDestroy: testAccCheckRabbitMQDestroy, + Steps: []resource.TestStep{ + // Creation fail + { + Config: resourceConfig(&acls), + ExpectError: regexp.MustCompile(`.*sgw_acl is invalid.*`), + }, + // Creation + { + Config: resourceConfig(nil), + Check: resource.ComposeAggregateTestCheckFunc( + // Instance data + resource.TestCheckResourceAttr("stackit_rabbitmq_instance.instance", "project_id", instanceResource["project_id"]), + resource.TestCheckResourceAttrSet("stackit_rabbitmq_instance.instance", "instance_id"), + resource.TestCheckResourceAttr("stackit_rabbitmq_instance.instance", "plan_id", instanceResource["plan_id"]), + resource.TestCheckResourceAttr("stackit_rabbitmq_instance.instance", "name", instanceResource["name"]), + resource.TestCheckResourceAttrSet("stackit_rabbitmq_instance.instance", "parameters.sgw_acl"), + + // Credentials data + resource.TestCheckResourceAttrPair( + "stackit_rabbitmq_credentials.credentials", "project_id", + "stackit_rabbitmq_instance.instance", "project_id", + ), + 
resource.TestCheckResourceAttrPair( + "stackit_rabbitmq_credentials.credentials", "instance_id", + "stackit_rabbitmq_instance.instance", "instance_id", + ), + resource.TestCheckResourceAttrSet("stackit_rabbitmq_credentials.credentials", "credentials_id"), + resource.TestCheckResourceAttrSet("stackit_rabbitmq_credentials.credentials", "host"), + ), + }, + // data source + { + Config: fmt.Sprintf(` + %s + + data "stackit_rabbitmq_instance" "instance" { + project_id = stackit_rabbitmq_instance.instance.project_id + instance_id = stackit_rabbitmq_instance.instance.instance_id + } + + data "stackit_rabbitmq_credentials" "credentials" { + project_id = stackit_rabbitmq_credentials.credentials.project_id + instance_id = stackit_rabbitmq_credentials.credentials.instance_id + credentials_id = stackit_rabbitmq_credentials.credentials.credentials_id + }`, + resourceConfig(nil), + ), + Check: resource.ComposeAggregateTestCheckFunc( + // Instance data + resource.TestCheckResourceAttr("data.stackit_rabbitmq_instance.instance", "project_id", instanceResource["project_id"]), + + resource.TestCheckResourceAttrPair("stackit_rabbitmq_instance.instance", "instance_id", + "data.stackit_rabbitmq_credentials.credentials", "instance_id"), + + resource.TestCheckResourceAttrPair("data.stackit_rabbitmq_instance.instance", "instance_id", + "data.stackit_rabbitmq_credentials.credentials", "instance_id"), + + resource.TestCheckResourceAttr("data.stackit_rabbitmq_instance.instance", "plan_id", instanceResource["plan_id"]), + + resource.TestCheckResourceAttr("data.stackit_rabbitmq_instance.instance", "name", instanceResource["name"]), + resource.TestCheckResourceAttrSet("data.stackit_rabbitmq_instance.instance", "parameters.sgw_acl"), + + // Credentials data + resource.TestCheckResourceAttr("data.stackit_rabbitmq_credentials.credentials", "project_id", instanceResource["project_id"]), + resource.TestCheckResourceAttrSet("data.stackit_rabbitmq_credentials.credentials", "credentials_id"), + 
resource.TestCheckResourceAttrSet("data.stackit_rabbitmq_credentials.credentials", "host"), + resource.TestCheckResourceAttrSet("data.stackit_rabbitmq_credentials.credentials", "port"), + resource.TestCheckResourceAttrSet("data.stackit_rabbitmq_credentials.credentials", "uri"), + ), + }, + // Import + { + ResourceName: "stackit_rabbitmq_instance.instance", + ImportStateIdFunc: func(s *terraform.State) (string, error) { + r, ok := s.RootModule().Resources["stackit_rabbitmq_instance.instance"] + if !ok { + return "", fmt.Errorf("couldn't find resource stackit_rabbitmq_instance.instance") + } + instanceId, ok := r.Primary.Attributes["instance_id"] + if !ok { + return "", fmt.Errorf("couldn't find attribute instance_id") + } + + return fmt.Sprintf("%s,%s", testutil.ProjectId, instanceId), nil + }, + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "stackit_rabbitmq_credentials.credentials", + ImportStateIdFunc: func(s *terraform.State) (string, error) { + r, ok := s.RootModule().Resources["stackit_rabbitmq_credentials.credentials"] + if !ok { + return "", fmt.Errorf("couldn't find resource stackit_rabbitmq_credentials.credentials") + } + instanceId, ok := r.Primary.Attributes["instance_id"] + if !ok { + return "", fmt.Errorf("couldn't find attribute instance_id") + } + credentialsId, ok := r.Primary.Attributes["credentials_id"] + if !ok { + return "", fmt.Errorf("couldn't find attribute credentials_id") + } + + return fmt.Sprintf("%s,%s,%s", testutil.ProjectId, instanceId, credentialsId), nil + }, + ImportState: true, + ImportStateVerify: true, + }, + // Update + { + Config: resourceConfigWithUpdate(), + Check: resource.ComposeAggregateTestCheckFunc( + // Instance data + resource.TestCheckResourceAttr("stackit_rabbitmq_instance.instance", "project_id", instanceResource["project_id"]), + resource.TestCheckResourceAttrSet("stackit_rabbitmq_instance.instance", "instance_id"), + resource.TestCheckResourceAttr("stackit_rabbitmq_instance.instance", 
"plan_id", instanceResource["plan_id"]), + resource.TestCheckResourceAttr("stackit_rabbitmq_instance.instance", "name", instanceResource["name"]), + resource.TestCheckResourceAttr("stackit_rabbitmq_instance.instance", "parameters.sgw_acl", instanceResource["sgw_acl_valid"]), + ), + }, + // Deletion is done by the framework implicitly + }, + }) +} + +func testAccCheckRabbitMQDestroy(s *terraform.State) error { + ctx := context.Background() + var client *rabbitmq.APIClient + var err error + if testutil.RabbitMQCustomEndpoint == "" { + client, err = rabbitmq.NewAPIClient() + } else { + client, err = rabbitmq.NewAPIClient( + config.WithEndpoint(testutil.RabbitMQCustomEndpoint), + ) + } + if err != nil { + return fmt.Errorf("creating client: %w", err) + } + + instancesToDestroy := []string{} + for _, rs := range s.RootModule().Resources { + if rs.Type != "stackit_rabbitmq_instance" { + continue + } + // instance terraform ID: "[project_id],[instance_id]" + instanceId := strings.Split(rs.Primary.ID, core.Separator)[1] + instancesToDestroy = append(instancesToDestroy, instanceId) + } + + instancesResp, err := client.GetInstances(ctx, testutil.ProjectId).Execute() + if err != nil { + return fmt.Errorf("getting instancesResp: %w", err) + } + + instances := *instancesResp.Instances + for i := range instances { + if instances[i].InstanceId == nil { + continue + } + if utils.Contains(instancesToDestroy, *instances[i].InstanceId) { + if !checkInstanceDeleteSuccess(&instances[i]) { + err := client.DeleteInstanceExecute(ctx, testutil.ProjectId, *instances[i].InstanceId) + if err != nil { + return fmt.Errorf("destroying instance %s during CheckDestroy: %w", *instances[i].InstanceId, err) + } + _, err = rabbitmq.DeleteInstanceWaitHandler(ctx, client, testutil.ProjectId, *instances[i].InstanceId).WaitWithContext(ctx) + if err != nil { + return fmt.Errorf("destroying instance %s during CheckDestroy: waiting for deletion %w", *instances[i].InstanceId, err) + } + } + } + } + return nil 
+}
+
+// checkInstanceDeleteSuccess reports whether the instance's last operation
+// was a delete that completed successfully.
+func checkInstanceDeleteSuccess(i *rabbitmq.Instance) bool {
+	// Guard every pointer before dereferencing; the API may omit any of these fields.
+	if i == nil || i.LastOperation == nil || i.LastOperation.Type == nil || i.LastOperation.State == nil || i.LastOperation.Description == nil {
+		return false
+	}
+	if *i.LastOperation.Type != rabbitmq.InstanceTypeDelete {
+		return false
+	}
+	if *i.LastOperation.State != rabbitmq.InstanceStateSuccess {
+		return false
+	}
+	// The API may report a success state while the description still flags a failure.
+	return !strings.Contains(*i.LastOperation.Description, "DeleteFailed") && !strings.Contains(*i.LastOperation.Description, "failed")
+}
diff --git a/stackit/services/redis/credentials/datasource.go b/stackit/services/redis/credentials/datasource.go
new file mode 100644
index 00000000..38853a7e
--- /dev/null
+++ b/stackit/services/redis/credentials/datasource.go
@@ -0,0 +1,178 @@
+package redis
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/hashicorp/terraform-plugin-framework/datasource"
+	"github.com/hashicorp/terraform-plugin-framework/schema/validator"
+	"github.com/hashicorp/terraform-plugin-log/tflog"
+	"github.com/stackitcloud/terraform-provider-stackit/stackit/core"
+	"github.com/stackitcloud/terraform-provider-stackit/stackit/validate"
+
+	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+	"github.com/stackitcloud/stackit-sdk-go/core/config"
+	"github.com/stackitcloud/stackit-sdk-go/services/redis"
+)
+
+// Ensure the implementation satisfies the expected interfaces.
+var (
+	_ datasource.DataSource = &credentialsDataSource{}
+)
+
+// NewCredentialsDataSource is a helper function to simplify the provider implementation.
+func NewCredentialsDataSource() datasource.DataSource {
+	return &credentialsDataSource{}
+}
+
+// credentialsDataSource is the data source implementation.
+type credentialsDataSource struct {
+	client *redis.APIClient
+}
+
+// Metadata returns the resource type name.
+func (r *credentialsDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_redis_credentials" +} + +// Configure adds the provider configured client to the resource. +func (r *credentialsDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Data Source Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. Please report this issue to the provider developers.", req.ProviderData)) + return + } + + var apiClient *redis.APIClient + var err error + if providerData.RedisCustomEndpoint != "" { + apiClient, err = redis.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.RedisCustomEndpoint), + ) + } else { + apiClient, err = redis.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + + tflog.Info(ctx, "Redis zone client configured") + r.client = apiClient +} + +// Schema defines the schema for the resource. 
+func (r *credentialsDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + descriptions := map[string]string{ + "main": "Redis credentials data source schema.", + "id": "Terraform's internal resource identifier.", + "credentials_id": "The credentials ID.", + "instance_id": "ID of the Redis instance.", + "project_id": "STACKIT project ID to which the instance is associated.", + } + + resp.Schema = schema.Schema{ + Description: descriptions["main"], + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: descriptions["id"], + Computed: true, + }, + "credentials_id": schema.StringAttribute{ + Description: descriptions["credentials_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "instance_id": schema.StringAttribute{ + Description: descriptions["instance_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "project_id": schema.StringAttribute{ + Description: descriptions["project_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "host": schema.StringAttribute{ + Computed: true, + }, + "hosts": schema.ListAttribute{ + ElementType: types.StringType, + Computed: true, + }, + "http_api_uri": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "password": schema.StringAttribute{ + Computed: true, + Sensitive: true, + }, + "port": schema.Int64Attribute{ + Computed: true, + }, + "uri": schema.StringAttribute{ + Computed: true, + }, + "username": schema.StringAttribute{ + Computed: true, + }, + }, + } +} + +// Read refreshes the Terraform state with the latest data. 
+func (r *credentialsDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.Config.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + credentialsId := model.CredentialsId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + ctx = tflog.SetField(ctx, "credentials_id", credentialsId) + + recordSetResp, err := r.client.GetCredentials(ctx, projectId, instanceId, credentialsId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading credentials", err.Error()) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(recordSetResp, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + + // Set refreshed state + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) 
+ tflog.Info(ctx, "Redis credentials read") +} diff --git a/stackit/services/redis/credentials/resource.go b/stackit/services/redis/credentials/resource.go new file mode 100644 index 00000000..95ea13ad --- /dev/null +++ b/stackit/services/redis/credentials/resource.go @@ -0,0 +1,371 @@ +package redis + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/terraform-provider-stackit/stackit/conversion" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" + + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/redis" +) + +// Ensure the implementation satisfies the expected interfaces. 
+var ( + _ resource.Resource = &postgresCredentialsResource{} + _ resource.ResourceWithConfigure = &postgresCredentialsResource{} + _ resource.ResourceWithImportState = &postgresCredentialsResource{} +) + +type Model struct { + Id types.String `tfsdk:"id"` // needed by TF + CredentialsId types.String `tfsdk:"credentials_id"` + InstanceId types.String `tfsdk:"instance_id"` + ProjectId types.String `tfsdk:"project_id"` + Host types.String `tfsdk:"host"` + Hosts types.List `tfsdk:"hosts"` + HttpAPIURI types.String `tfsdk:"http_api_uri"` + Name types.String `tfsdk:"name"` + Password types.String `tfsdk:"password"` + Port types.Int64 `tfsdk:"port"` + Uri types.String `tfsdk:"uri"` + Username types.String `tfsdk:"username"` +} + +// NewCredentialsResource is a helper function to simplify the provider implementation. +func NewCredentialsResource() resource.Resource { + return &postgresCredentialsResource{} +} + +// credentialsResource is the resource implementation. +type postgresCredentialsResource struct { + client *redis.APIClient +} + +// Metadata returns the resource type name. +func (r *postgresCredentialsResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_redis_credentials" +} + +// Configure adds the provider configured client to the resource. +func (r *postgresCredentialsResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Resource Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. 
Please report this issue to the provider developers.", req.ProviderData)) + return + } + + var apiClient *redis.APIClient + var err error + if providerData.RedisCustomEndpoint != "" { + apiClient, err = redis.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.RedisCustomEndpoint), + ) + } else { + apiClient, err = redis.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + + tflog.Info(ctx, "Redis zone client configured") + r.client = apiClient +} + +// Schema defines the schema for the resource. +func (r *postgresCredentialsResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + descriptions := map[string]string{ + "main": "Redis credentials resource schema.", + "id": "Terraform's internal resource identifier.", + "credentials_id": "The credentials ID.", + "instance_id": "ID of the Redis instance.", + "project_id": "STACKIT Project ID to which the instance is associated.", + } + + resp.Schema = schema.Schema{ + Description: descriptions["main"], + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: descriptions["id"], + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "credentials_id": schema.StringAttribute{ + Description: descriptions["credentials_id"], + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "instance_id": schema.StringAttribute{ + Description: descriptions["instance_id"], + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + 
validate.UUID(), + validate.NoSeparator(), + }, + }, + "project_id": schema.StringAttribute{ + Description: descriptions["project_id"], + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "host": schema.StringAttribute{ + Computed: true, + }, + "hosts": schema.ListAttribute{ + ElementType: types.StringType, + Computed: true, + }, + "http_api_uri": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "password": schema.StringAttribute{ + Computed: true, + Sensitive: true, + }, + "port": schema.Int64Attribute{ + Computed: true, + }, + "uri": schema.StringAttribute{ + Computed: true, + }, + "username": schema.StringAttribute{ + Computed: true, + }, + }, + } +} + +// Create creates the resource and sets the initial Terraform state. +func (r *postgresCredentialsResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + // Create new recordset + credentialsResp, err := r.client.CreateCredentials(ctx, projectId, instanceId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating credentials", fmt.Sprintf("Calling API: %v", err)) + return + } + if credentialsResp.Id == nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating credentials", "Got empty credentials id") + return + } + credentialsId := *credentialsResp.Id + ctx = tflog.SetField(ctx, "credentials_id", credentialsId) + + wr, err := redis.CreateCredentialsWaitHandler(ctx, r.client, projectId, instanceId, credentialsId).SetTimeout(1 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating credentials", fmt.Sprintf("Instance creation waiting: %v", err)) + return + } + got, ok := wr.(*redis.CredentialsResponse) + if !ok { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating credentials", fmt.Sprintf("Wait result conversion, got %+v", got)) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(got, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "Redis credentials created") +} + +// Read refreshes the Terraform state with the latest data. +func (r *postgresCredentialsResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + credentialsId := model.CredentialsId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + ctx = tflog.SetField(ctx, "credentials_id", credentialsId) + + recordSetResp, err := r.client.GetCredentials(ctx, projectId, instanceId, credentialsId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading credentials", err.Error()) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(recordSetResp, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + + // Set refreshed state + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "Redis credentials read") +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *postgresCredentialsResource) Update(_ context.Context, _ resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform + // Update shouldn't be called + resp.Diagnostics.AddError("Error updating credentials", "credentials can't be updated") +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *postgresCredentialsResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	projectId := model.ProjectId.ValueString()
+	instanceId := model.InstanceId.ValueString()
+	credentialsId := model.CredentialsId.ValueString()
+	ctx = tflog.SetField(ctx, "project_id", projectId)
+	ctx = tflog.SetField(ctx, "instance_id", instanceId)
+	ctx = tflog.SetField(ctx, "credentials_id", credentialsId)
+	// Delete existing credentials; bail out on failure instead of waiting on a deletion that never started.
+	err := r.client.DeleteCredentials(ctx, projectId, instanceId, credentialsId).Execute()
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting credentials", err.Error())
+		return
+	}
+	_, err = redis.DeleteCredentialsWaitHandler(ctx, r.client, projectId, instanceId, credentialsId).SetTimeout(1 * time.Minute).WaitWithContext(ctx)
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting credentials", fmt.Sprintf("Instance deletion waiting: %v", err))
+		return
+	}
+	tflog.Info(ctx, "Redis credentials deleted")
+}
+
+// ImportState imports a resource into the Terraform state on success.
+// The expected format of the resource import identifier is: project_id,instance_id,credentials_id
+func (r *postgresCredentialsResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
+	idParts := strings.Split(req.ID, core.Separator)
+	if len(idParts) != 3 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" {
+		core.LogAndAddError(ctx, &resp.Diagnostics,
+			"Unexpected Import Identifier",
+			fmt.Sprintf("Expected import identifier with format [project_id],[instance_id],[credentials_id], got %q", req.ID),
+		)
+		return
+	}
+
+	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...)
+	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), idParts[1])...)
+	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("credentials_id"), idParts[2])...)
+ tflog.Info(ctx, "Redis credentials state imported") +} + +func mapFields(credentialsResp *redis.CredentialsResponse, model *Model) error { + if credentialsResp == nil { + return fmt.Errorf("response input is nil") + } + if credentialsResp.Raw == nil { + return fmt.Errorf("response credentials raw is nil") + } + if model == nil { + return fmt.Errorf("model input is nil") + } + credentials := credentialsResp.Raw.Credentials + + var credentialsId string + if model.CredentialsId.ValueString() != "" { + credentialsId = model.CredentialsId.ValueString() + } else if credentialsResp.Id != nil { + credentialsId = *credentialsResp.Id + } else { + return fmt.Errorf("credentials id not present") + } + + idParts := []string{ + model.ProjectId.ValueString(), + model.InstanceId.ValueString(), + credentialsId, + } + model.Id = types.StringValue( + strings.Join(idParts, core.Separator), + ) + model.CredentialsId = types.StringValue(credentialsId) + model.Hosts = types.ListNull(types.StringType) + if credentials != nil { + if credentials.Hosts != nil { + var hosts []attr.Value + for _, host := range *credentials.Hosts { + hosts = append(hosts, types.StringValue(host)) + } + hostsList, diags := types.ListValue(types.StringType, hosts) + if diags.HasError() { + return fmt.Errorf("failed to map hosts: %w", core.DiagsToError(diags)) + } + model.Hosts = hostsList + } + model.Host = types.StringPointerValue(credentials.Host) + model.HttpAPIURI = types.StringPointerValue(credentials.HttpApiUri) + model.Name = types.StringPointerValue(credentials.Name) + model.Password = types.StringPointerValue(credentials.Password) + model.Port = conversion.ToTypeInt64(credentials.Port) + model.Uri = types.StringPointerValue(credentials.Uri) + model.Username = types.StringPointerValue(credentials.Username) + } + return nil +} diff --git a/stackit/services/redis/credentials/resource_test.go b/stackit/services/redis/credentials/resource_test.go new file mode 100644 index 00000000..04bc3a5c --- /dev/null 
+++ b/stackit/services/redis/credentials/resource_test.go @@ -0,0 +1,156 @@ +package redis + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/redis" +) + +func TestMapFields(t *testing.T) { + tests := []struct { + description string + input *redis.CredentialsResponse + expected Model + isValid bool + }{ + { + "default_values", + &redis.CredentialsResponse{ + Id: utils.Ptr("cid"), + Raw: &redis.RawCredentials{}, + }, + Model{ + Id: types.StringValue("pid,iid,cid"), + CredentialsId: types.StringValue("cid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Host: types.StringNull(), + Hosts: types.ListNull(types.StringType), + HttpAPIURI: types.StringNull(), + Name: types.StringNull(), + Password: types.StringNull(), + Port: types.Int64Null(), + Uri: types.StringNull(), + Username: types.StringNull(), + }, + true, + }, + { + "simple_values", + &redis.CredentialsResponse{ + Id: utils.Ptr("cid"), + Raw: &redis.RawCredentials{ + Credentials: &redis.Credentials{ + Host: utils.Ptr("host"), + Hosts: &[]string{ + "host_1", + "", + }, + HttpApiUri: utils.Ptr("http"), + Name: utils.Ptr("name"), + Password: utils.Ptr("password"), + Port: utils.Ptr(int32(1234)), + Uri: utils.Ptr("uri"), + Username: utils.Ptr("username"), + }, + }, + }, + Model{ + Id: types.StringValue("pid,iid,cid"), + CredentialsId: types.StringValue("cid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Host: types.StringValue("host"), + Hosts: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("host_1"), + types.StringValue(""), + }), + HttpAPIURI: types.StringValue("http"), + Name: types.StringValue("name"), + Password: types.StringValue("password"), + Port: types.Int64Value(1234), + Uri: 
types.StringValue("uri"), + Username: types.StringValue("username"), + }, + true, + }, + { + "null_fields_and_int_conversions", + &redis.CredentialsResponse{ + Id: utils.Ptr("cid"), + Raw: &redis.RawCredentials{ + Credentials: &redis.Credentials{ + Host: utils.Ptr(""), + Hosts: &[]string{}, + HttpApiUri: nil, + Name: nil, + Password: utils.Ptr(""), + Port: utils.Ptr(int32(2123456789)), + Uri: nil, + Username: utils.Ptr(""), + }, + }, + }, + Model{ + Id: types.StringValue("pid,iid,cid"), + CredentialsId: types.StringValue("cid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Host: types.StringValue(""), + Hosts: types.ListValueMust(types.StringType, []attr.Value{}), + HttpAPIURI: types.StringNull(), + Name: types.StringNull(), + Password: types.StringValue(""), + Port: types.Int64Value(2123456789), + Uri: types.StringNull(), + Username: types.StringValue(""), + }, + true, + }, + { + "nil_response", + nil, + Model{}, + false, + }, + { + "no_resource_id", + &redis.CredentialsResponse{}, + Model{}, + false, + }, + { + "nil_raw_credentials", + &redis.CredentialsResponse{ + Id: utils.Ptr("cid"), + }, + Model{}, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + model := &Model{ + ProjectId: tt.expected.ProjectId, + InstanceId: tt.expected.InstanceId, + } + err := mapFields(tt.input, model) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(model, &tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} diff --git a/stackit/services/redis/instance/datasource.go b/stackit/services/redis/instance/datasource.go new file mode 100644 index 00000000..e8aa7267 --- /dev/null +++ b/stackit/services/redis/instance/datasource.go @@ -0,0 +1,181 @@ +package redis + +import ( + "context" + "fmt" + + 
"github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" + + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/redis" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &instanceDataSource{} +) + +// NewInstanceDataSource is a helper function to simplify the provider implementation. +func NewInstanceDataSource() datasource.DataSource { + return &instanceDataSource{} +} + +// instanceDataSource is the data source implementation. +type instanceDataSource struct { + client *redis.APIClient +} + +// Metadata returns the resource type name. +func (r *instanceDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_redis_instance" +} + +// Configure adds the provider configured client to the resource. +func (r *instanceDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Data Source Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. 
Please report this issue to the provider developers.", req.ProviderData)) + return + } + + var apiClient *redis.APIClient + var err error + if providerData.RedisCustomEndpoint != "" { + apiClient, err = redis.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.RedisCustomEndpoint), + ) + } else { + apiClient, err = redis.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + + tflog.Info(ctx, "Redis zone client configured") + r.client = apiClient +} + +// Schema defines the schema for the resource. +func (r *instanceDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + descriptions := map[string]string{ + "main": "Redis instance data source schema.", + "id": "Terraform's internal resource identifier.", + "instance_id": "ID of the Redis instance.", + "project_id": "STACKIT Project ID to which the instance is associated.", + "name": "Instance name.", + "version": "The service version.", + "plan_name": "The selected plan name.", + "plan_id": "The selected plan ID.", + } + + resp.Schema = schema.Schema{ + Description: descriptions["main"], + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: descriptions["id"], + Computed: true, + }, + "instance_id": schema.StringAttribute{ + Description: descriptions["instance_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "project_id": schema.StringAttribute{ + Description: descriptions["project_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "name": schema.StringAttribute{ + Description: descriptions["name"], + Computed: true, + }, + "version": schema.StringAttribute{ + Description: 
descriptions["version"], + Computed: true, + }, + "plan_name": schema.StringAttribute{ + Description: descriptions["plan_name"], + Computed: true, + }, + "plan_id": schema.StringAttribute{ + Description: descriptions["plan_id"], + Computed: true, + }, + "parameters": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "sgw_acl": schema.StringAttribute{ + Computed: true, + }, + }, + Computed: true, + }, + "cf_guid": schema.StringAttribute{ + Computed: true, + }, + "cf_space_guid": schema.StringAttribute{ + Computed: true, + }, + "dashboard_url": schema.StringAttribute{ + Computed: true, + }, + "image_url": schema.StringAttribute{ + Computed: true, + }, + "organization_guid": schema.StringAttribute{ + Computed: true, + }, + }, + } +} + +// Read refreshes the Terraform state with the latest data. +func (r *instanceDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var state Model + diags := req.Config.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + projectId := state.ProjectId.ValueString() + instanceId := state.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + instanceResp, err := r.client.GetInstance(ctx, projectId, instanceId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Unable to read instance", err.Error()) + return + } + + err = mapFields(instanceResp, &state) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Mapping fields", err.Error()) + return + } + // Set refreshed state + diags = resp.State.Set(ctx, state) + resp.Diagnostics.Append(diags...) 
+ tflog.Info(ctx, "Redis instance read") +} diff --git a/stackit/services/redis/instance/resource.go b/stackit/services/redis/instance/resource.go new file mode 100644 index 00000000..00202171 --- /dev/null +++ b/stackit/services/redis/instance/resource.go @@ -0,0 +1,634 @@ +package redis + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/objectplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" + + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/redis" +) + +// Ensure the implementation satisfies the expected interfaces. 
var (
	_ resource.Resource                = &instanceResource{}
	_ resource.ResourceWithConfigure   = &instanceResource{}
	_ resource.ResourceWithImportState = &instanceResource{}
)

// Model is the Terraform schema model shared by the redis instance resource
// and data source. Field names map 1:1 to schema attributes via tfsdk tags.
type Model struct {
	Id               types.String `tfsdk:"id"` // needed by TF
	InstanceId       types.String `tfsdk:"instance_id"`
	ProjectId        types.String `tfsdk:"project_id"`
	CfGuid           types.String `tfsdk:"cf_guid"`
	CfSpaceGuid      types.String `tfsdk:"cf_space_guid"`
	DashboardUrl     types.String `tfsdk:"dashboard_url"`
	ImageUrl         types.String `tfsdk:"image_url"`
	Name             types.String `tfsdk:"name"`
	OrganizationGuid types.String `tfsdk:"organization_guid"`
	Parameters       types.Object `tfsdk:"parameters"`
	Version          types.String `tfsdk:"version"`
	PlanName         types.String `tfsdk:"plan_name"`
	PlanId           types.String `tfsdk:"plan_id"`
}

// Struct corresponding to DataSourceModel.Parameters
type parametersModel struct {
	SgwAcl types.String `tfsdk:"sgw_acl"`
}

// Types corresponding to parametersModel; used when building/validating the
// "parameters" types.Object (see mapParameters).
var parametersTypes = map[string]attr.Type{
	"sgw_acl": basetypes.StringType{},
}

// NewInstanceResource is a helper function to simplify the provider implementation.
func NewInstanceResource() resource.Resource {
	return &instanceResource{}
}

// instanceResource is the resource implementation.
// The API client is populated in Configure and used by all CRUD methods.
type instanceResource struct {
	client *redis.APIClient
}

// Metadata returns the resource type name
// (provider type name + "_redis_instance").
func (r *instanceResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_redis_instance"
}

// Configure adds the provider configured client to the resource.
func (r *instanceResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
	// Prevent panic if the provider has not been configured.
	if req.ProviderData == nil {
		return
	}

	providerData, ok := req.ProviderData.(core.ProviderData)
	if !ok {
		resp.Diagnostics.AddError("Unexpected Resource Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. Please report this issue to the provider developers.", req.ProviderData))
		return
	}

	// A custom endpoint, if set, takes precedence over the configured region.
	var apiClient *redis.APIClient
	var err error
	if providerData.RedisCustomEndpoint != "" {
		apiClient, err = redis.NewAPIClient(
			config.WithCustomAuth(providerData.RoundTripper),
			config.WithEndpoint(providerData.RedisCustomEndpoint),
		)
	} else {
		apiClient, err = redis.NewAPIClient(
			config.WithCustomAuth(providerData.RoundTripper),
			config.WithRegion(providerData.Region),
		)
	}

	if err != nil {
		resp.Diagnostics.AddError("Could not Configure API Client", err.Error())
		return
	}

	tflog.Info(ctx, "redis client configured")
	r.client = apiClient
}

// Schema defines the schema for the resource.
func (r *instanceResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
	descriptions := map[string]string{
		"main":        "Redis instance resource schema.",
		"id":          "Terraform's internal resource ID.",
		"instance_id": "ID of the Redis instance.",
		"project_id":  "STACKIT project ID to which the instance is associated.",
		"name":        "Instance name.",
		"version":     "The service version.",
		"plan_name":   "The selected plan name.",
		"plan_id":     "The selected plan ID.",
	}

	resp.Schema = schema.Schema{
		Description: descriptions["main"],
		Attributes: map[string]schema.Attribute{
			"id": schema.StringAttribute{
				Description: descriptions["id"],
				Computed:    true,
			},
			// instance_id is assigned by the API; keep the known value across
			// plans so it is not shown as "(known after apply)".
			"instance_id": schema.StringAttribute{
				Description: descriptions["instance_id"],
				Computed:    true,
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.UseStateForUnknown(),
				},
				Validators: []validator.String{
					validate.UUID(),
					validate.NoSeparator(),
				},
			},
			// Changing project_id or name forces replacement of the instance.
			"project_id": schema.StringAttribute{
				Description: descriptions["project_id"],
				Required:    true,
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.RequiresReplace(),
					stringplanmodifier.UseStateForUnknown(),
				},
				Validators: []validator.String{
					validate.UUID(),
					validate.NoSeparator(),
				},
			},
			"name": schema.StringAttribute{
				Description: descriptions["name"],
				Required:    true,
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.RequiresReplace(),
					stringplanmodifier.UseStateForUnknown(),
				},
				Validators: []validator.String{
					stringvalidator.LengthAtLeast(1),
				},
			},
			// version + plan_name are user input; plan_id is resolved from
			// them against the service offerings (see loadPlanId).
			"version": schema.StringAttribute{
				Description: descriptions["version"],
				Required:    true,
			},
			"plan_name": schema.StringAttribute{
				Description: descriptions["plan_name"],
				Required:    true,
			},
			"plan_id": schema.StringAttribute{
				Description: descriptions["plan_id"],
				Computed:    true,
			},
			"parameters": schema.SingleNestedAttribute{
				Attributes: map[string]schema.Attribute{
					"sgw_acl": schema.StringAttribute{
						Optional: true,
						Computed: true,
						Validators: []validator.String{
							stringvalidator.LengthAtLeast(1),
						},
						PlanModifiers: []planmodifier.String{
							stringplanmodifier.UseStateForUnknown(),
						},
					},
				},
				Optional: true,
				Computed: true,
				PlanModifiers: []planmodifier.Object{
					objectplanmodifier.UseStateForUnknown(),
				},
			},
			"cf_guid": schema.StringAttribute{
				Computed: true,
			},
			"cf_space_guid": schema.StringAttribute{
				Computed: true,
			},
			"dashboard_url": schema.StringAttribute{
				Computed: true,
			},
			"image_url": schema.StringAttribute{
				Computed: true,
			},
			"organization_guid": schema.StringAttribute{
				Computed: true,
			},
		},
	}
}

// Create creates the resource and sets the initial Terraform state.
+func (r *instanceResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + + r.loadPlanId(ctx, &resp.Diagnostics, &model) + if diags.HasError() { + core.LogAndAddError(ctx, &diags, "Failed to load Redis service plan", "plan "+model.PlanName.ValueString()) + return + } + + var parameters = ¶metersModel{} + if !(model.Parameters.IsNull() || model.Parameters.IsUnknown()) { + diags = model.Parameters.As(ctx, parameters, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + + // Generate API request body from model + payload, err := toCreatePayload(&model, parameters) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Creating API payload: %v", err)) + return + } + // Create new instance + createResp, err := r.client.CreateInstance(ctx, projectId).CreateInstancePayload(*payload).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Calling API: %v", err)) + return + } + instanceId := *createResp.InstanceId + ctx = tflog.SetField(ctx, "instance_id", instanceId) + wr, err := redis.CreateInstanceWaitHandler(ctx, r.client, projectId, instanceId).SetTimeout(15 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Instance creation waiting: %v", err)) + return + } + got, ok := wr.(*redis.Instance) + if !ok { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Wait result conversion, got %+v", got)) + return + } + + // Map response body to schema 
and populate Computed attribute values + err = mapFields(got, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + // Set state to fully populated data + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "redis instance created") +} + +func toCreatePayload(model *Model, parameters *parametersModel) (*redis.CreateInstancePayload, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + if parameters == nil { + return &redis.CreateInstancePayload{ + InstanceName: model.Name.ValueStringPointer(), + PlanId: model.PlanId.ValueStringPointer(), + }, nil + } + payloadParams := &redis.InstanceParameters{} + if parameters.SgwAcl.ValueString() != "" { + payloadParams.SgwAcl = parameters.SgwAcl.ValueStringPointer() + } + return &redis.CreateInstancePayload{ + InstanceName: model.Name.ValueStringPointer(), + Parameters: payloadParams, + PlanId: model.PlanId.ValueStringPointer(), + }, nil +} + +// Read refreshes the Terraform state with the latest data. +func (r *instanceResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var state Model + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + projectId := state.ProjectId.ValueString() + instanceId := state.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + instanceResp, err := r.client.GetInstance(ctx, projectId, instanceId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instances", err.Error()) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(instanceResp, &state) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + // Set refreshed state + diags = resp.State.Set(ctx, state) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "redis instance read") +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *instanceResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + r.loadPlanId(ctx, &resp.Diagnostics, &model) + if diags.HasError() { + core.LogAndAddError(ctx, &diags, "Failed to load Redis service plan", "plan "+model.PlanName.ValueString()) + return + } + + var parameters = ¶metersModel{} + if !(model.Parameters.IsNull() || model.Parameters.IsUnknown()) { + diags = model.Parameters.As(ctx, parameters, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + } + + // Generate API request body from model + payload, err := toUpdatePayload(&model, parameters) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Could not create API payload: %v", err)) + return + } + // Update existing instance + err = r.client.UpdateInstance(ctx, projectId, instanceId).UpdateInstancePayload(*payload).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", err.Error()) + return + } + wr, err := redis.UpdateInstanceWaitHandler(ctx, r.client, projectId, instanceId).SetTimeout(15 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Instance update waiting: %v", err)) + return + } + got, ok := wr.(*redis.Instance) + if !ok { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Wait result conversion, got %+v", got)) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(got, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields in update", err.Error()) + return + } + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "redis instance updated") +} + +func toUpdatePayload(model *Model, parameters *parametersModel) (*redis.UpdateInstancePayload, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + + if parameters == nil { + return &redis.UpdateInstancePayload{ + PlanId: model.PlanId.ValueStringPointer(), + }, nil + } + return &redis.UpdateInstancePayload{ + Parameters: &redis.InstanceParameters{ + SgwAcl: parameters.SgwAcl.ValueStringPointer(), + }, + PlanId: model.PlanId.ValueStringPointer(), + }, nil +} + +// Delete deletes the resource and removes the Terraform state on success. 
+func (r *instanceResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + // Delete existing instance + err := r.client.DeleteInstance(ctx, projectId, instanceId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting instance", err.Error()) + return + } + _, err = redis.DeleteInstanceWaitHandler(ctx, r.client, projectId, instanceId).SetTimeout(15 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting instance", fmt.Sprintf("Instance deletion waiting: %v", err)) + return + } + tflog.Info(ctx, "redis instance deleted") +} + +// ImportState imports a resource into the Terraform state on success. +// The expected format of the resource import identifier is: project_id,instance_id +func (r *instanceResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + idParts := strings.Split(req.ID, core.Separator) + + if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { + resp.Diagnostics.AddError( + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format: [project_id],[instance_id] Got: %q", req.ID), + ) + return + } + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), idParts[1])...) 
+ tflog.Info(ctx, "Redis instance state imported") +} + +func mapFields(instance *redis.Instance, model *Model) error { + if instance == nil { + return fmt.Errorf("response input is nil") + } + if model == nil { + return fmt.Errorf("model input is nil") + } + + var instanceId string + if model.InstanceId.ValueString() != "" { + instanceId = model.InstanceId.ValueString() + } else if instance.InstanceId != nil { + instanceId = *instance.InstanceId + } else { + return fmt.Errorf("instance id not present") + } + + idParts := []string{ + model.ProjectId.ValueString(), + instanceId, + } + model.Id = types.StringValue( + strings.Join(idParts, core.Separator), + ) + model.InstanceId = types.StringValue(instanceId) + model.PlanId = types.StringPointerValue(instance.PlanId) + model.CfGuid = types.StringPointerValue(instance.CfGuid) + model.CfSpaceGuid = types.StringPointerValue(instance.CfSpaceGuid) + model.DashboardUrl = types.StringPointerValue(instance.DashboardUrl) + model.ImageUrl = types.StringPointerValue(instance.ImageUrl) + model.Name = types.StringPointerValue(instance.Name) + model.OrganizationGuid = types.StringPointerValue(instance.OrganizationGuid) + + if instance.Parameters == nil { + model.Parameters = types.ObjectNull(parametersTypes) + } else { + parameters, err := mapParameters(*instance.Parameters) + if err != nil { + return fmt.Errorf("mapping parameters: %w", err) + } + model.Parameters = parameters + } + return nil +} + +func mapParameters(params map[string]interface{}) (types.Object, error) { + attributes := map[string]attr.Value{} + for attribute := range parametersTypes { + valueInterface, ok := params[attribute] + if !ok { + // All fields are optional, so this is ok + // Set the value as nil, will be handled accordingly + valueInterface = nil + } + + var value attr.Value + switch parametersTypes[attribute].(type) { + default: + return types.ObjectNull(parametersTypes), fmt.Errorf("found unexpected attribute type '%T'", parametersTypes[attribute]) 
+ case basetypes.StringType: + if valueInterface == nil { + value = types.StringNull() + } else { + valueString, ok := valueInterface.(string) + if !ok { + return types.ObjectNull(parametersTypes), fmt.Errorf("found attribute '%s' of type %T, failed to assert as string", attribute, valueInterface) + } + value = types.StringValue(valueString) + } + case basetypes.BoolType: + if valueInterface == nil { + value = types.BoolNull() + } else { + valueBool, ok := valueInterface.(bool) + if !ok { + return types.ObjectNull(parametersTypes), fmt.Errorf("found attribute '%s' of type %T, failed to assert as bool", attribute, valueInterface) + } + value = types.BoolValue(valueBool) + } + case basetypes.Int64Type: + if valueInterface == nil { + value = types.Int64Null() + } else { + // This may be int64, int32, int or float64 + // We try to assert all 4 + var valueInt64 int64 + switch temp := valueInterface.(type) { + default: + return types.ObjectNull(parametersTypes), fmt.Errorf("found attribute '%s' of type %T, failed to assert as int", attribute, valueInterface) + case int64: + valueInt64 = temp + case int32: + valueInt64 = int64(temp) + case int: + valueInt64 = int64(temp) + case float64: + valueInt64 = int64(temp) + } + value = types.Int64Value(valueInt64) + } + case basetypes.ListType: // Assumed to be a list of strings + if valueInterface == nil { + value = types.ListNull(types.StringType) + } else { + // This may be []string{} or []interface{} + // We try to assert all 2 + var valueList []attr.Value + switch temp := valueInterface.(type) { + default: + return types.ObjectNull(parametersTypes), fmt.Errorf("found attribute '%s' of type %T, failed to assert as array of interface", attribute, valueInterface) + case []string: + for _, x := range temp { + valueList = append(valueList, types.StringValue(x)) + } + case []interface{}: + for _, x := range temp { + xString, ok := x.(string) + if !ok { + return types.ObjectNull(parametersTypes), fmt.Errorf("found attribute '%s' 
with element '%s' of type %T, failed to assert as string", attribute, x, x) + } + valueList = append(valueList, types.StringValue(xString)) + } + } + temp2, diags := types.ListValue(types.StringType, valueList) + if diags.HasError() { + return types.ObjectNull(parametersTypes), fmt.Errorf("failed to map %s: %w", attribute, core.DiagsToError(diags)) + } + value = temp2 + } + } + attributes[attribute] = value + } + + output, diags := types.ObjectValue(parametersTypes, attributes) + if diags.HasError() { + return types.ObjectNull(parametersTypes), fmt.Errorf("failed to create object: %w", core.DiagsToError(diags)) + } + return output, nil +} + +func (r *instanceResource) loadPlanId(ctx context.Context, diags *diag.Diagnostics, model *Model) { + projectId := model.ProjectId.ValueString() + res, err := r.client.GetOfferings(ctx, projectId).Execute() + if err != nil { + diags.AddError("Failed to list Redis offerings", err.Error()) + return + } + + version := model.Version.ValueString() + planName := model.PlanName.ValueString() + availableVersions := "" + availablePlanNames := "" + isValidVersion := false + for _, offer := range *res.Offerings { + if !strings.EqualFold(*offer.Version, version) { + availableVersions = fmt.Sprintf("%s\n- %s", availableVersions, *offer.Version) + continue + } + isValidVersion = true + + for _, plan := range *offer.Plans { + if plan.Name == nil { + continue + } + if strings.EqualFold(*plan.Name, planName) && plan.Id != nil { + model.PlanId = types.StringPointerValue(plan.Id) + return + } + availablePlanNames = fmt.Sprintf("%s\n- %s", availablePlanNames, *plan.Name) + } + } + + if !isValidVersion { + diags.AddError("Invalid version", fmt.Sprintf("Couldn't find version '%s', available versions are:%s", version, availableVersions)) + return + } + diags.AddError("Invalid plan_name", fmt.Sprintf("Couldn't find plan_name '%s' for version %s, available names are:%s", planName, version, availablePlanNames)) +} diff --git 
a/stackit/services/redis/instance/resource_test.go b/stackit/services/redis/instance/resource_test.go new file mode 100644 index 00000000..5f6884e1 --- /dev/null +++ b/stackit/services/redis/instance/resource_test.go @@ -0,0 +1,304 @@ +package redis + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/redis" +) + +func TestMapFields(t *testing.T) { + tests := []struct { + description string + input *redis.Instance + expected Model + isValid bool + }{ + { + "default_values", + &redis.Instance{}, + Model{ + Id: types.StringValue("pid,iid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + PlanId: types.StringNull(), + Name: types.StringNull(), + CfGuid: types.StringNull(), + CfSpaceGuid: types.StringNull(), + DashboardUrl: types.StringNull(), + ImageUrl: types.StringNull(), + OrganizationGuid: types.StringNull(), + Parameters: types.ObjectNull(parametersTypes), + }, + true, + }, + { + "simple_values", + &redis.Instance{ + PlanId: utils.Ptr("plan"), + CfGuid: utils.Ptr("cf"), + CfSpaceGuid: utils.Ptr("space"), + DashboardUrl: utils.Ptr("dashboard"), + ImageUrl: utils.Ptr("image"), + InstanceId: utils.Ptr("iid"), + Name: utils.Ptr("name"), + OrganizationGuid: utils.Ptr("org"), + Parameters: &map[string]interface{}{ + "sgw_acl": "acl", + }, + }, + Model{ + Id: types.StringValue("pid,iid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + PlanId: types.StringValue("plan"), + Name: types.StringValue("name"), + CfGuid: types.StringValue("cf"), + CfSpaceGuid: types.StringValue("space"), + DashboardUrl: types.StringValue("dashboard"), + ImageUrl: types.StringValue("image"), + OrganizationGuid: types.StringValue("org"), + Parameters: types.ObjectValueMust(parametersTypes, 
map[string]attr.Value{ + "sgw_acl": types.StringValue("acl"), + }), + }, + true, + }, + { + "nil_response", + nil, + Model{}, + false, + }, + { + "no_resource_id", + &redis.Instance{}, + Model{}, + false, + }, + { + "wrong_param_types_1", + &redis.Instance{ + Parameters: &map[string]interface{}{ + "sgw_acl": true, + }, + }, + Model{}, + false, + }, + { + "wrong_param_types_2", + &redis.Instance{ + Parameters: &map[string]interface{}{ + "sgw_acl": 1, + }, + }, + Model{}, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + state := &Model{ + ProjectId: tt.expected.ProjectId, + InstanceId: tt.expected.InstanceId, + } + err := mapFields(tt.input, state) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(state, &tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func TestToCreatePayload(t *testing.T) { + tests := []struct { + description string + input *Model + inputParameters *parametersModel + expected *redis.CreateInstancePayload + isValid bool + }{ + { + "default_values", + &Model{}, + ¶metersModel{}, + &redis.CreateInstancePayload{ + Parameters: &redis.InstanceParameters{}, + }, + true, + }, + { + "simple_values", + &Model{ + Name: types.StringValue("name"), + PlanId: types.StringValue("plan"), + }, + ¶metersModel{ + SgwAcl: types.StringValue("sgw"), + }, + &redis.CreateInstancePayload{ + InstanceName: utils.Ptr("name"), + Parameters: &redis.InstanceParameters{ + SgwAcl: utils.Ptr("sgw"), + }, + PlanId: utils.Ptr("plan"), + }, + true, + }, + { + "null_fields_and_int_conversions", + &Model{ + Name: types.StringValue(""), + PlanId: types.StringValue(""), + }, + ¶metersModel{ + SgwAcl: types.StringNull(), + }, + &redis.CreateInstancePayload{ + InstanceName: utils.Ptr(""), + Parameters: &redis.InstanceParameters{ + SgwAcl: nil, + }, + PlanId: 
utils.Ptr(""), + }, + true, + }, + { + "nil_model", + nil, + ¶metersModel{}, + nil, + false, + }, + { + "nil_parameters", + &Model{ + Name: types.StringValue("name"), + PlanId: types.StringValue("plan"), + }, + nil, + &redis.CreateInstancePayload{ + InstanceName: utils.Ptr("name"), + PlanId: utils.Ptr("plan"), + }, + true, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + output, err := toCreatePayload(tt.input, tt.inputParameters) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(output, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func TestToUpdatePayload(t *testing.T) { + tests := []struct { + description string + input *Model + inputParameters *parametersModel + expected *redis.UpdateInstancePayload + isValid bool + }{ + { + "default_values", + &Model{}, + ¶metersModel{}, + &redis.UpdateInstancePayload{ + Parameters: &redis.InstanceParameters{}, + }, + true, + }, + { + "simple_values", + &Model{ + PlanId: types.StringValue("plan"), + }, + ¶metersModel{ + SgwAcl: types.StringValue("sgw"), + }, + &redis.UpdateInstancePayload{ + Parameters: &redis.InstanceParameters{ + SgwAcl: utils.Ptr("sgw"), + }, + PlanId: utils.Ptr("plan"), + }, + true, + }, + { + "null_fields_and_int_conversions", + &Model{ + PlanId: types.StringValue(""), + }, + ¶metersModel{ + SgwAcl: types.StringNull(), + }, + &redis.UpdateInstancePayload{ + Parameters: &redis.InstanceParameters{ + SgwAcl: nil, + }, + PlanId: utils.Ptr(""), + }, + true, + }, + { + "nil_model", + nil, + ¶metersModel{}, + nil, + false, + }, + { + "nil_parameters", + &Model{ + PlanId: types.StringValue("plan"), + }, + nil, + &redis.UpdateInstancePayload{ + PlanId: utils.Ptr("plan"), + }, + true, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + output, err := 
toUpdatePayload(tt.input, tt.inputParameters) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(output, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} diff --git a/stackit/services/redis/redis_acc_test.go b/stackit/services/redis/redis_acc_test.go new file mode 100644 index 00000000..d2c28a96 --- /dev/null +++ b/stackit/services/redis/redis_acc_test.go @@ -0,0 +1,286 @@ +package redis_test + +import ( + "context" + "fmt" + "regexp" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/redis" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/testutil" +) + +// Instance resource data +var instanceResource = map[string]string{ + "project_id": testutil.ProjectId, + "name": testutil.ResourceNameWithDateTime("redis"), + "plan_id": "7e1f8394-5dd5-40b1-8608-16b4344eb51b", + "sgw_acl_invalid": "1.2.3.4/4", + "sgw_acl_valid": "1.2.3.4/31", +} + +func resourceConfig(acls *string) string { + aclsLine := "" + if acls != nil { + aclsLine = fmt.Sprintf(`sgw_acl = %q`, *acls) + } + return fmt.Sprintf(` + %s + + resource "stackit_redis_instance" "instance" { + project_id = "%s" + name = "%s" + plan_id = "%s" + parameters = { + %s + metrics_frequency = "%s" + } + } + + %s + `, + testutil.RedisProviderConfig(), + instanceResource["project_id"], + instanceResource["name"], + instanceResource["plan_id"], + aclsLine, + instanceResource["metrics_frequency"], + resourceConfigCredentials(), + ) +} + +func resourceConfigWithUpdate() string { + return fmt.Sprintf(` + %s + + 
resource "stackit_redis_instance" "instance" { + project_id = "%s" + name = "%s" + plan_id = "%s" + parameters = { + sgw_acl = "%s" + } + } + + %s + `, + testutil.RedisProviderConfig(), + instanceResource["project_id"], + instanceResource["name"], + instanceResource["plan_id"], + instanceResource["sgw_acl_valid"], + resourceConfigCredentials(), + ) +} + +func resourceConfigCredentials() string { + return ` + resource "stackit_redis_credentials" "credentials" { + project_id = stackit_redis_instance.instance.project_id + instance_id = stackit_redis_instance.instance.instance_id + } + ` +} + +func TestAccRedisResource(t *testing.T) { + acls := instanceResource["sgw_acl_invalid"] + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: testutil.TestAccProtoV6ProviderFactories, + CheckDestroy: testAccCheckRedisDestroy, + Steps: []resource.TestStep{ + // Creation fail + { + Config: resourceConfig(&acls), + ExpectError: regexp.MustCompile(`.*sgw_acl is invalid.*`), + }, + // Creation + { + Config: resourceConfig(nil), + Check: resource.ComposeAggregateTestCheckFunc( + // Instance data + resource.TestCheckResourceAttr("stackit_redis_instance.instance", "project_id", instanceResource["project_id"]), + resource.TestCheckResourceAttrSet("stackit_redis_instance.instance", "instance_id"), + resource.TestCheckResourceAttr("stackit_redis_instance.instance", "plan_id", instanceResource["plan_id"]), + resource.TestCheckResourceAttr("stackit_redis_instance.instance", "name", instanceResource["name"]), + resource.TestCheckResourceAttrSet("stackit_redis_instance.instance", "parameters.sgw_acl"), + + // Credentials data + resource.TestCheckResourceAttrPair( + "stackit_redis_credentials.credentials", "project_id", + "stackit_redis_instance.instance", "project_id", + ), + resource.TestCheckResourceAttrPair( + "stackit_redis_credentials.credentials", "instance_id", + "stackit_redis_instance.instance", "instance_id", + ), + 
resource.TestCheckResourceAttrSet("stackit_redis_credentials.credentials", "credentials_id"), + resource.TestCheckResourceAttrSet("stackit_redis_credentials.credentials", "host"), + ), + }, + // data source + { + Config: fmt.Sprintf(` + %s + + data "stackit_redis_instance" "instance" { + project_id = stackit_redis_instance.instance.project_id + instance_id = stackit_redis_instance.instance.instance_id + } + + data "stackit_redis_credentials" "credentials" { + project_id = stackit_redis_credentials.credentials.project_id + instance_id = stackit_redis_credentials.credentials.instance_id + credentials_id = stackit_redis_credentials.credentials.credentials_id + }`, + resourceConfig(nil), + ), + Check: resource.ComposeAggregateTestCheckFunc( + // Instance data + resource.TestCheckResourceAttr("data.stackit_redis_instance.instance", "project_id", instanceResource["project_id"]), + + resource.TestCheckResourceAttrPair("stackit_redis_instance.instance", "instance_id", + "data.stackit_redis_credentials.credentials", "instance_id"), + + resource.TestCheckResourceAttrPair("data.stackit_redis_instance.instance", "instance_id", + "data.stackit_redis_credentials.credentials", "instance_id"), + + resource.TestCheckResourceAttr("data.stackit_redis_instance.instance", "plan_id", instanceResource["plan_id"]), + + resource.TestCheckResourceAttr("data.stackit_redis_instance.instance", "name", instanceResource["name"]), + resource.TestCheckResourceAttrSet("data.stackit_redis_instance.instance", "parameters.sgw_acl"), + + // Credentials data + resource.TestCheckResourceAttr("data.stackit_redis_credentials.credentials", "project_id", instanceResource["project_id"]), + resource.TestCheckResourceAttrSet("data.stackit_redis_credentials.credentials", "credentials_id"), + resource.TestCheckResourceAttrSet("data.stackit_redis_credentials.credentials", "host"), + resource.TestCheckResourceAttrSet("data.stackit_redis_credentials.credentials", "port"), + 
resource.TestCheckResourceAttrSet("data.stackit_redis_credentials.credentials", "uri"), + ), + }, + // Import + { + ResourceName: "stackit_redis_instance.instance", + ImportStateIdFunc: func(s *terraform.State) (string, error) { + r, ok := s.RootModule().Resources["stackit_redis_instance.instance"] + if !ok { + return "", fmt.Errorf("couldn't find resource stackit_redis_instance.instance") + } + instanceId, ok := r.Primary.Attributes["instance_id"] + if !ok { + return "", fmt.Errorf("couldn't find attribute instance_id") + } + + return fmt.Sprintf("%s,%s", testutil.ProjectId, instanceId), nil + }, + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "stackit_redis_credentials.credentials", + ImportStateIdFunc: func(s *terraform.State) (string, error) { + r, ok := s.RootModule().Resources["stackit_redis_credentials.credentials"] + if !ok { + return "", fmt.Errorf("couldn't find resource stackit_redis_credentials.credentials") + } + instanceId, ok := r.Primary.Attributes["instance_id"] + if !ok { + return "", fmt.Errorf("couldn't find attribute instance_id") + } + credentialsId, ok := r.Primary.Attributes["credentials_id"] + if !ok { + return "", fmt.Errorf("couldn't find attribute credentials_id") + } + + return fmt.Sprintf("%s,%s,%s", testutil.ProjectId, instanceId, credentialsId), nil + }, + ImportState: true, + ImportStateVerify: true, + }, + // Update + { + Config: resourceConfigWithUpdate(), + Check: resource.ComposeAggregateTestCheckFunc( + // Instance data + resource.TestCheckResourceAttr("stackit_redis_instance.instance", "project_id", instanceResource["project_id"]), + resource.TestCheckResourceAttrSet("stackit_redis_instance.instance", "instance_id"), + resource.TestCheckResourceAttr("stackit_redis_instance.instance", "plan_id", instanceResource["plan_id"]), + resource.TestCheckResourceAttr("stackit_redis_instance.instance", "name", instanceResource["name"]), + resource.TestCheckResourceAttr("stackit_redis_instance.instance", 
"parameters.sgw_acl", instanceResource["sgw_acl_valid"]), + ), + }, + // Deletion is done by the framework implicitly + }, + }) +} + +func checkInstanceDeleteSuccess(i *redis.Instance) bool { + if *i.LastOperation.Type != redis.InstanceTypeDelete { + return false + } + + if *i.LastOperation.Type == redis.InstanceTypeDelete { + if *i.LastOperation.State != redis.InstanceStateSuccess { + return false + } else if strings.Contains(*i.LastOperation.Description, "DeleteFailed") || strings.Contains(*i.LastOperation.Description, "failed") { + return false + } + } + return true +} + +func testAccCheckRedisDestroy(s *terraform.State) error { + ctx := context.Background() + var client *redis.APIClient + var err error + if testutil.RedisCustomEndpoint == "" { + client, err = redis.NewAPIClient() + } else { + client, err = redis.NewAPIClient( + config.WithEndpoint(testutil.RedisCustomEndpoint), + ) + } + if err != nil { + return fmt.Errorf("creating client: %w", err) + } + + instancesToDestroy := []string{} + for _, rs := range s.RootModule().Resources { + if rs.Type != "stackit_redis_instance" { + continue + } + // instance terraform ID: "[project_id],[instance_id]" + instanceId := strings.Split(rs.Primary.ID, core.Separator)[1] + instancesToDestroy = append(instancesToDestroy, instanceId) + } + + instancesResp, err := client.GetInstances(ctx, testutil.ProjectId).Execute() + if err != nil { + return fmt.Errorf("getting instancesResp: %w", err) + } + + instances := *instancesResp.Instances + for i := range instances { + if instances[i].InstanceId == nil { + continue + } + if utils.Contains(instancesToDestroy, *instances[i].InstanceId) { + if !checkInstanceDeleteSuccess(&instances[i]) { + err := client.DeleteInstanceExecute(ctx, testutil.ProjectId, *instances[i].InstanceId) + if err != nil { + return fmt.Errorf("destroying instance %s during CheckDestroy: %w", *instances[i].InstanceId, err) + } + _, err = redis.DeleteInstanceWaitHandler(ctx, client, testutil.ProjectId, 
*instances[i].InstanceId).WaitWithContext(ctx) + if err != nil { + return fmt.Errorf("destroying instance %s during CheckDestroy: waiting for deletion %w", *instances[i].InstanceId, err) + } + } + } + } + return nil +} diff --git a/stackit/services/resourcemanager/project/datasource.go b/stackit/services/resourcemanager/project/datasource.go new file mode 100644 index 00000000..2c84c0c1 --- /dev/null +++ b/stackit/services/resourcemanager/project/datasource.go @@ -0,0 +1,216 @@ +package project + +import ( + "context" + "fmt" + "regexp" + + "github.com/hashicorp/terraform-plugin-framework-validators/mapvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/stackitcloud/terraform-provider-stackit/stackit/conversion" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/resourcemanager" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &projectDataSource{} +) + +type ProjectData struct { + Id types.String `tfsdk:"id"` // needed by TF + ContainerId types.String `tfsdk:"container_id"` + ContainerParentId types.String `tfsdk:"parent_container_id"` + Name types.String `tfsdk:"name"` + Labels types.Map `tfsdk:"labels"` +} + +// NewProjectDataSource is a helper function to simplify the provider implementation. 
+func NewProjectDataSource() datasource.DataSource { + return &projectDataSource{} +} + +// projectDataSource is the data source implementation. +type projectDataSource struct { + client *resourcemanager.APIClient +} + +// Metadata returns the data source type name. +func (d *projectDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_resourcemanager_project" +} + +func (d *projectDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + var apiClient *resourcemanager.APIClient + var err error + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Data Source Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. Please report this issue to the provider developers.", req.ProviderData)) + return + } + + if providerData.ResourceManagerCustomEndpoint != "" { + apiClient, err = resourcemanager.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithServiceAccountEmail(providerData.ServiceAccountEmail), + config.WithEndpoint(providerData.ResourceManagerCustomEndpoint), + ) + } else { + apiClient, err = resourcemanager.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithServiceAccountEmail(providerData.ServiceAccountEmail), + config.WithRegion(providerData.Region), + ) + } + if err != nil { + resp.Diagnostics.AddError( + "Could not Configure API Client", + err.Error(), + ) + return + } + + tflog.Info(ctx, "Resource Manager project client configured") + d.client = apiClient +} + +// Schema defines the schema for the data source. 
+func (d *projectDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + descriptions := map[string]string{ + "main": "Resource Manager project data source schema.", + "id": "Terraform's internal unique identifier of the project, equivalent to the container ID", + "container_id": "Project container ID.", + "parent_container_id": "Parent container ID", + "name": "Project name.", + "labels": `Labels are key-value string pairs which can be attached to a resource container. A label key must match the regex [A-ZÄÜÖa-zäüöß0-9_-]{1,64}. A label value must match the regex ^$|[A-ZÄÜÖa-zäüöß0-9_-]{1,64}`, + } + + resp.Schema = schema.Schema{ + Description: descriptions["main"], + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: descriptions["id"], + Computed: true, + }, + "container_id": schema.StringAttribute{ + Description: descriptions["container_id"], + Required: true, + Validators: []validator.String{ + validate.NoSeparator(), + }, + }, + "parent_container_id": schema.StringAttribute{ + Description: descriptions["parent_container_id"], + Computed: true, + Validators: []validator.String{ + validate.NoSeparator(), + }, + }, + "name": schema.StringAttribute{ + Description: descriptions["name"], + Computed: true, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), + stringvalidator.LengthAtMost(63), + }, + }, + "labels": schema.MapAttribute{ + Description: descriptions["labels"], + ElementType: types.StringType, + Computed: true, + Validators: []validator.Map{ + mapvalidator.KeysAre( + stringvalidator.RegexMatches( + regexp.MustCompile(`[A-ZÄÜÖa-zäüöß0-9_-]{1,64}`), + "must match expression"), + ), + mapvalidator.ValueStringsAre( + stringvalidator.RegexMatches( + regexp.MustCompile(`[A-ZÄÜÖa-zäüöß0-9_-]{1,64}`), + "must match expression"), + ), + }, + }, + }, + } +} + +// Read refreshes the Terraform state with the latest data. 
+func (d *projectDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var state ProjectData + diags := req.Config.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + containerId := state.ContainerId.ValueString() + ctx = tflog.SetField(ctx, "project_id", containerId) + + projectResp, err := d.client.GetProject(ctx, containerId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Unable to Read Project", err.Error()) + return + } + + err = mapDataFields(ctx, projectResp, &state) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Mapping fields", err.Error()) + return + } + diags = resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + tflog.Info(ctx, "Resource Manager project read") +} + +func mapDataFields(ctx context.Context, projectResp *resourcemanager.ProjectResponseWithParents, model *ProjectData) (err error) { + if projectResp == nil { + return fmt.Errorf("response input is nil") + } + if model == nil { + return fmt.Errorf("model input is nil") + } + + var containerId string + if model.ContainerId.ValueString() != "" { + containerId = model.ContainerId.ValueString() + } else if projectResp.ContainerId != nil { + containerId = *projectResp.ContainerId + } else { + return fmt.Errorf("container id not present") + } + + var labels basetypes.MapValue + if projectResp.Labels != nil { + labels, err = conversion.ToTerraformStringMap(ctx, *projectResp.Labels) + if err != nil { + return fmt.Errorf("converting to StringValue map: %w", err) + } + } else { + labels = types.MapNull(types.StringType) + } + + model.Id = types.StringValue(containerId) + model.ContainerId = types.StringValue(containerId) + model.ContainerParentId = types.StringPointerValue(projectResp.Parent.ContainerId) + model.Name = 
types.StringPointerValue(projectResp.Name) + model.Labels = labels + return nil +} diff --git a/stackit/services/resourcemanager/project/resource.go b/stackit/services/resourcemanager/project/resource.go new file mode 100644 index 00000000..219ad006 --- /dev/null +++ b/stackit/services/resourcemanager/project/resource.go @@ -0,0 +1,434 @@ +package project + +import ( + "context" + "fmt" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-framework-validators/mapvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/stackitcloud/terraform-provider-stackit/stackit/conversion" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" + + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/resourcemanager" +) + +// Ensure the implementation satisfies the expected interfaces. 
+var ( + _ resource.Resource = &projectResource{} + _ resource.ResourceWithConfigure = &projectResource{} + _ resource.ResourceWithImportState = &projectResource{} +) + +const ( + projectOwner = "project.owner" +) + +type Model struct { + Id types.String `tfsdk:"id"` // needed by TF + ContainerId types.String `tfsdk:"container_id"` + ContainerParentId types.String `tfsdk:"parent_container_id"` + Name types.String `tfsdk:"name"` + Labels types.Map `tfsdk:"labels"` + OwnerEmail types.String `tfsdk:"owner_email"` +} + +// NewProjectResource is a helper function to simplify the provider implementation. +func NewProjectResource() resource.Resource { + return &projectResource{} +} + +// projectResource is the resource implementation. +type projectResource struct { + client *resourcemanager.APIClient +} + +// Metadata returns the resource type name. +func (r *projectResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_resourcemanager_project" +} + +// Configure adds the provider configured client to the resource. +func (r *projectResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Resource Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. 
Please report this issue to the provider developers.", req.ProviderData)) + return + } + + var apiClient *resourcemanager.APIClient + var err error + if providerData.ResourceManagerCustomEndpoint != "" { + ctx = tflog.SetField(ctx, "resourcemanager_custom_endpoint", providerData.ResourceManagerCustomEndpoint) + apiClient, err = resourcemanager.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithServiceAccountEmail(providerData.ServiceAccountEmail), + config.WithEndpoint(providerData.ResourceManagerCustomEndpoint), + ) + } else { + apiClient, err = resourcemanager.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithServiceAccountEmail(providerData.ServiceAccountEmail), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + + tflog.Info(ctx, "Resource Manager project client configured") + r.client = apiClient +} + +// Schema defines the schema for the resource. +func (r *projectResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + descriptions := map[string]string{ + "main": "Resource Manager project resource schema.", + "id": "Terraform's internal unique identifier of the project, equivalent to the container ID", + "container_id": "Project container ID. Globally unique, user-friendly identifier.", + "parent_container_id": "Parent container ID", + "name": "Project name.", + "labels": "Labels are key-value string pairs which can be attached to a resource container. A label key must match the regex [A-ZÄÜÖa-zäüöß0-9_-]{1,64}. A label value must match the regex ^$|[A-ZÄÜÖa-zäüöß0-9_-]{1,64}", + "owner_email": "Email address of the owner of the project. This value is only considered during creation. 
Changing it afterwards will have no effect.", + } + + resp.Schema = schema.Schema{ + Description: descriptions["main"], + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: descriptions["id"], + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "container_id": schema.StringAttribute{ + Description: descriptions["container_id"], + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + validate.NoSeparator(), + }, + }, + "parent_container_id": schema.StringAttribute{ + Description: descriptions["parent_container_id"], + Required: true, + Validators: []validator.String{ + validate.NoSeparator(), + }, + }, + "name": schema.StringAttribute{ + Description: descriptions["name"], + Required: true, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), + stringvalidator.LengthAtMost(63), + }, + }, + "labels": schema.MapAttribute{ + Description: descriptions["labels"], + ElementType: types.StringType, + Optional: true, + Validators: []validator.Map{ + mapvalidator.KeysAre( + stringvalidator.RegexMatches( + regexp.MustCompile(`[A-ZÄÜÖa-zäüöß0-9_-]{1,64}`), + "must match expression"), + ), + mapvalidator.ValueStringsAre( + stringvalidator.RegexMatches( + regexp.MustCompile(`[A-ZÄÜÖa-zäüöß0-9_-]{1,64}`), + "must match expression"), + ), + }, + }, + "owner_email": schema.StringAttribute{ + Description: descriptions["owner_email"], + Required: true, + }, + }, + } +} + +// Create creates the resource and sets the initial Terraform state. +func (r *projectResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + containerId := model.ContainerId.ValueString() + ctx = tflog.SetField(ctx, "project_container_id", containerId) + + serviceAccountEmail := r.client.GetConfig().ServiceAccountEmail + if serviceAccountEmail == "" { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating project", "The service account e-mail cannot be empty: set it in the provider configuration or through the STACKIT_SERVICE_ACCOUNT_EMAIL or in your credentials file (default filepath is ~/stackit/.credentials.json)") + return + } + + // Generate API request body from model + payload, err := toCreatePayload(&model, serviceAccountEmail) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating project", fmt.Sprintf("Creating API payload: %v", err)) + return + } + // Create new project + createResp, err := r.client.CreateProject(ctx).CreateProjectPayload(*payload).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating project", fmt.Sprintf("Calling API: %v", err)) + return + } + respContainerId := *createResp.ContainerId + if respContainerId == "" { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating project", "API didn't return project id") + return + } + + // If the request has not been processed yet and the containerId doesnt exist, + // the waiter will fail with authentication error, so wait some time before checking the creation + wr, err := resourcemanager.CreateProjectWaitHandler(ctx, r.client, respContainerId).SetSleepBeforeWait(1 * time.Minute).SetTimeout(10 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating project", fmt.Sprintf("Instance creation waiting: %v", err)) + return + } + got, ok := wr.(*resourcemanager.ProjectResponseWithParents) + if !ok { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating project", fmt.Sprintf("Wait result conversion, got %+v", wr)) + return + } + + // Map response 
body to schema and populate Computed attribute values + err = mapFields(ctx, got, &model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + // Set state to fully populated data + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "Resource Manager project created") +} + +// Read refreshes the Terraform state with the latest data. +func (r *projectResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var state = &Model{} + diags := req.State.Get(ctx, state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + containerId := state.ContainerId.ValueString() + ctx = tflog.SetField(ctx, "container_id", containerId) + + projectResp, err := r.client.GetProject(ctx, containerId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading project", err.Error()) + return + } + + // Map response body to schema and populate Computed attribute values + err = mapFields(ctx, projectResp, state) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + // Set refreshed state + diags = resp.State.Set(ctx, *state) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "Resource Manager project read") +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *projectResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform + // Retrieve values from plan + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + containerId := model.ContainerId.ValueString() + ctx = tflog.SetField(ctx, "container_id", containerId) + + // Generate API request body from model + payload, err := toUpdatePayload(&model) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating project", fmt.Sprintf("Could not create API payload: %v", err)) + return + } + // Update existing project + _, err = r.client.UpdateProject(ctx, containerId).UpdateProjectPayload(*payload).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating project", err.Error()) + return + } + + diags = resp.State.Set(ctx, &model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "Resource Manager project updated") +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *projectResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform + // Retrieve values from state + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + containerId := model.ContainerId.ValueString() + ctx = tflog.SetField(ctx, "container_id", containerId) + + // Delete existing project + err := r.client.DeleteProject(ctx, containerId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting project", err.Error()) + return + } + + _, err = resourcemanager.DeleteProjectWaitHandler(ctx, r.client, containerId).SetTimeout(10 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting project", fmt.Sprintf("Instance deletion waiting: %v", err)) + return + } + + tflog.Info(ctx, "Resource Manager project deleted") +} + +// ImportState imports a resource into the Terraform state on success. 
+// The expected format of the resource import identifier is: container_id +func (r *projectResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + idParts := strings.Split(req.ID, core.Separator) + if len(idParts) != 1 || idParts[0] == "" { + resp.Diagnostics.AddError( + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format: [container_id] Got: %q", req.ID), + ) + return + } + + ctx = tflog.SetField(ctx, "container_id", req.ID) + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("container_id"), req.ID)...) + tflog.Info(ctx, "Resource Manager Project state imported") +} + +func mapFields(ctx context.Context, projectResp *resourcemanager.ProjectResponseWithParents, model *Model) (err error) { + if projectResp == nil { + return fmt.Errorf("response input is nil") + } + if model == nil { + return fmt.Errorf("model input is nil") + } + + var containerId string + if model.ContainerId.ValueString() != "" { + containerId = model.ContainerId.ValueString() + } else if projectResp.ContainerId != nil { + containerId = *projectResp.ContainerId + } else { + return fmt.Errorf("container id not present") + } + + var labels basetypes.MapValue + if projectResp.Labels != nil && len(*projectResp.Labels) != 0 { + labels, err = conversion.ToTerraformStringMap(ctx, *projectResp.Labels) + if err != nil { + return fmt.Errorf("converting to StringValue map: %w", err) + } + } else { + labels = types.MapNull(types.StringType) + } + + model.Id = types.StringValue(containerId) + model.ContainerId = types.StringValue(containerId) + if projectResp.Parent != nil { + model.ContainerParentId = types.StringPointerValue(projectResp.Parent.ContainerId) + } else { + model.ContainerParentId = types.StringNull() + } + model.Name = types.StringPointerValue(projectResp.Name) + model.Labels = labels + return nil +} + +func toCreatePayload(model *Model, serviceAccountEmail string) 
(*resourcemanager.CreateProjectPayload, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + + owner := projectOwner + serviceAccountSubject := serviceAccountEmail + members := []resourcemanager.ProjectMember{ + { + Subject: &serviceAccountSubject, + Role: &owner, + }, + } + + ownerSubject := model.OwnerEmail.ValueString() + if ownerSubject != "" && ownerSubject != serviceAccountSubject { + members = append(members, + resourcemanager.ProjectMember{ + Subject: &ownerSubject, + Role: &owner, + }) + } + + modelLabels := model.Labels.Elements() + labels, err := conversion.ToOptStringMap(modelLabels) + if err != nil { + return nil, fmt.Errorf("converting to GO map: %w", err) + } + + return &resourcemanager.CreateProjectPayload{ + ContainerParentId: model.ContainerParentId.ValueStringPointer(), + Labels: labels, + Members: &members, + Name: model.Name.ValueStringPointer(), + }, nil +} + +func toUpdatePayload(model *Model) (*resourcemanager.UpdateProjectPayload, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + + modelLabels := model.Labels.Elements() + labels, err := conversion.ToOptStringMap(modelLabels) + if err != nil { + return nil, fmt.Errorf("converting to GO map: %w", err) + } + + return &resourcemanager.UpdateProjectPayload{ + ContainerParentId: model.ContainerParentId.ValueStringPointer(), + Name: model.Name.ValueStringPointer(), + Labels: labels, + }, nil +} diff --git a/stackit/services/resourcemanager/project/resource_test.go b/stackit/services/resourcemanager/project/resource_test.go new file mode 100644 index 00000000..5c65ce25 --- /dev/null +++ b/stackit/services/resourcemanager/project/resource_test.go @@ -0,0 +1,278 @@ +package project + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/resourcemanager" + 
"github.com/stackitcloud/terraform-provider-stackit/stackit/conversion" +) + +func TestMapFields(t *testing.T) { + tests := []struct { + description string + input *resourcemanager.ProjectResponseWithParents + expected Model + expectedLabels *map[string]string + isValid bool + }{ + { + "default_ok", + &resourcemanager.ProjectResponseWithParents{ + ContainerId: utils.Ptr("cid"), + }, + Model{ + Id: types.StringValue("cid"), + ContainerId: types.StringValue("cid"), + ContainerParentId: types.StringNull(), + Name: types.StringNull(), + }, + nil, + true, + }, + { + "values_ok", + &resourcemanager.ProjectResponseWithParents{ + ContainerId: utils.Ptr("cid"), + Labels: &map[string]string{ + "label1": "ref1", + "label2": "ref2", + }, + Parent: &resourcemanager.Parent{ + ContainerId: utils.Ptr("pid"), + }, + Name: utils.Ptr("name"), + }, + Model{ + Id: types.StringValue("cid"), + ContainerId: types.StringValue("cid"), + ContainerParentId: types.StringValue("pid"), + Name: types.StringValue("name"), + }, + &map[string]string{ + "label1": "ref1", + "label2": "ref2", + }, + true, + }, + { + "response_nil_fail", + nil, + Model{}, + nil, + false, + }, + { + "no_resource_id", + &resourcemanager.ProjectResponseWithParents{}, + Model{}, + nil, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + if tt.expectedLabels == nil { + tt.expected.Labels = types.MapNull(types.StringType) + } else { + convertedLabels, err := conversion.ToTerraformStringMap(context.Background(), *tt.expectedLabels) + if err != nil { + t.Fatalf("Error converting to terraform string map: %v", err) + } + tt.expected.Labels = convertedLabels + } + state := &Model{ + ContainerId: tt.expected.ContainerId, + } + + err := mapFields(context.Background(), tt.input, state) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(state, &tt.expected) 
+ if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func TestToCreatePayload(t *testing.T) { + tests := []struct { + description string + input *Model + inputLabels *map[string]string + expected *resourcemanager.CreateProjectPayload + isValid bool + }{ + { + "default_ok", + &Model{}, + nil, + &resourcemanager.CreateProjectPayload{ + ContainerParentId: nil, + Labels: nil, + Members: &[]resourcemanager.ProjectMember{ + { + Role: utils.Ptr(projectOwner), + Subject: utils.Ptr("service_account_email"), + }, + }, + Name: nil, + }, + true, + }, + { + "mapping_with_conversions_ok", + &Model{ + ContainerParentId: types.StringValue("pid"), + Name: types.StringValue("name"), + OwnerEmail: types.StringValue("owner_email"), + }, + &map[string]string{ + "label1": "1", + "label2": "2", + }, + &resourcemanager.CreateProjectPayload{ + ContainerParentId: utils.Ptr("pid"), + Labels: &map[string]string{ + "label1": "1", + "label2": "2", + }, + Members: &[]resourcemanager.ProjectMember{ + { + Role: utils.Ptr(projectOwner), + Subject: utils.Ptr("service_account_email"), + }, + { + Role: utils.Ptr(projectOwner), + Subject: utils.Ptr("owner_email"), + }, + }, + Name: utils.Ptr("name"), + }, + true, + }, + { + "nil_model", + nil, + nil, + nil, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + if tt.input != nil { + if tt.inputLabels == nil { + tt.input.Labels = types.MapNull(types.StringType) + } else { + convertedLabels, err := conversion.ToTerraformStringMap(context.Background(), *tt.inputLabels) + if err != nil { + t.Fatalf("Error converting to terraform string map: %v", err) + } + tt.input.Labels = convertedLabels + } + } + output, err := toCreatePayload(tt.input, "service_account_email") + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(output, tt.expected) + if diff != "" 
{ + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func TestToUpdatePayload(t *testing.T) { + tests := []struct { + description string + input *Model + inputLabels *map[string]string + expected *resourcemanager.UpdateProjectPayload + isValid bool + }{ + { + "default_ok", + &Model{}, + nil, + &resourcemanager.UpdateProjectPayload{ + ContainerParentId: nil, + Labels: nil, + Name: nil, + }, + true, + }, + { + "mapping_with_conversions_ok", + &Model{ + ContainerParentId: types.StringValue("pid"), + Name: types.StringValue("name"), + OwnerEmail: types.StringValue("owner_email"), + }, + &map[string]string{ + "label1": "1", + "label2": "2", + }, + &resourcemanager.UpdateProjectPayload{ + ContainerParentId: utils.Ptr("pid"), + Labels: &map[string]string{ + "label1": "1", + "label2": "2", + }, + Name: utils.Ptr("name"), + }, + true, + }, + { + "nil_model", + nil, + nil, + nil, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + if tt.input != nil { + if tt.inputLabels == nil { + tt.input.Labels = types.MapNull(types.StringType) + } else { + convertedLabels, err := conversion.ToTerraformStringMap(context.Background(), *tt.inputLabels) + if err != nil { + t.Fatalf("Error converting to terraform string map: %v", err) + } + tt.input.Labels = convertedLabels + } + } + output, err := toUpdatePayload(tt.input) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(output, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} diff --git a/stackit/services/resourcemanager/resourcemanager_acc_test.go b/stackit/services/resourcemanager/resourcemanager_acc_test.go new file mode 100644 index 00000000..39872e01 --- /dev/null +++ b/stackit/services/resourcemanager/resourcemanager_acc_test.go @@ -0,0 +1,170 @@ +package resourcemanager_test + +import ( + 
"context" + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/resourcemanager" + "github.com/stackitcloud/terraform-provider-stackit/stackit/testutil" +) + +// Project resource data +var projectResource = map[string]string{ + "name": fmt.Sprintf("acc-pj-%s", acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum)), + "parent_container_id": testutil.TestProjectParentContainerID, + "billing_reference": "TEST-REF", + "new_label": "a-label", +} + +func resourceConfig(name, label string) string { + return fmt.Sprintf(` + %s + + resource "stackit_resourcemanager_project" "project" { + parent_container_id = "%s" + name = "%s" + labels = { + "billing_reference" = "%s" + %s + } + owner_email = "%s" + } + `, + testutil.ResourceManagerProviderConfig(), + projectResource["parent_container_id"], + name, + projectResource["billing_reference"], + label, + testutil.TestProjectServiceAccountEmail, + ) +} + +func TestAccResourceManagerResource(t *testing.T) { + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: testutil.TestAccProtoV6ProviderFactories, + CheckDestroy: testAccCheckResourceManagerDestroy, + Steps: []resource.TestStep{ + // Creation + { + Config: resourceConfig(projectResource["name"], ""), + Check: resource.ComposeAggregateTestCheckFunc( + // Project data + resource.TestCheckResourceAttrSet("stackit_resourcemanager_project.project", "container_id"), + resource.TestCheckResourceAttr("stackit_resourcemanager_project.project", "name", projectResource["name"]), + resource.TestCheckResourceAttr("stackit_resourcemanager_project.project", "parent_container_id", projectResource["parent_container_id"]), + 
resource.TestCheckResourceAttr("stackit_resourcemanager_project.project", "labels.%", "1"), + resource.TestCheckResourceAttr("stackit_resourcemanager_project.project", "labels.billing_reference", projectResource["billing_reference"]), + ), + }, + // Data source + { + Config: fmt.Sprintf(` + %s + + data "stackit_resourcemanager_project" "project" { + container_id = stackit_resourcemanager_project.project.container_id + }`, + resourceConfig(projectResource["name"], ""), + ), + Check: resource.ComposeAggregateTestCheckFunc( + // Project data + resource.TestCheckResourceAttrSet("data.stackit_resourcemanager_project.project", "id"), + resource.TestCheckResourceAttrSet("data.stackit_resourcemanager_project.project", "container_id"), + resource.TestCheckResourceAttr("data.stackit_resourcemanager_project.project", "name", projectResource["name"]), + resource.TestCheckResourceAttrSet("data.stackit_resourcemanager_project.project", "parent_container_id"), + resource.TestCheckResourceAttr("data.stackit_resourcemanager_project.project", "labels.%", "1"), + resource.TestCheckResourceAttr("data.stackit_resourcemanager_project.project", "labels.billing_reference", projectResource["billing_reference"]), + resource.TestCheckResourceAttrPair("data.stackit_resourcemanager_project.project", "project_id", + "stackit_resourcemanager_project.project", "project_id"), + ), + }, + // Import + { + ResourceName: "stackit_resourcemanager_project.project", + ImportStateIdFunc: func(s *terraform.State) (string, error) { + r, ok := s.RootModule().Resources["stackit_resourcemanager_project.project"] + if !ok { + return "", fmt.Errorf("couldn't find resource stackit_resourcemanager_project.project") + } + containerId, ok := r.Primary.Attributes["container_id"] + if !ok { + return "", fmt.Errorf("couldn't find attribute container_id") + } + + return containerId, nil + }, + ImportState: true, + ImportStateVerify: true, + // The owner_email attributes don't exist in the + // API, therefore there is no 
value for it during import. + ImportStateVerifyIgnore: []string{"owner_email"}, + }, + // Update + { + Config: resourceConfig(fmt.Sprintf("%s-new", projectResource["name"]), "new_label='a-label'"), + Check: resource.ComposeAggregateTestCheckFunc( + // Project data + resource.TestCheckResourceAttrSet("stackit_resourcemanager_project.project", "container_id"), + resource.TestCheckResourceAttr("stackit_resourcemanager_project.project", "name", fmt.Sprintf("%s-new", projectResource["name"])), + resource.TestCheckResourceAttr("stackit_resourcemanager_project.project", "parent_container_id", projectResource["parent_container_id"]), + resource.TestCheckResourceAttr("stackit_resourcemanager_project.project", "labels.%", "2"), + resource.TestCheckResourceAttr("stackit_resourcemanager_project.project", "labels.billing_reference", projectResource["billing_reference"]), + resource.TestCheckResourceAttr("stackit_resourcemanager_project.project", "labels.new_label", projectResource["new_label"]), + ), + }, + // Deletion is done by the framework implicitly + }, + }) +} + +func testAccCheckResourceManagerDestroy(s *terraform.State) error { + ctx := context.Background() + var client *resourcemanager.APIClient + var err error + if testutil.ResourceManagerCustomEndpoint == "" { + client, err = resourcemanager.NewAPIClient() + } else { + client, err = resourcemanager.NewAPIClient( + config.WithEndpoint(testutil.ResourceManagerCustomEndpoint), + ) + } + if err != nil { + return fmt.Errorf("creating client: %w", err) + } + + projectsToDestroy := []string{} + for _, rs := range s.RootModule().Resources { + if rs.Type != "stackit_resourcemanager_project" { + continue + } + // project terraform ID: "[container_id]" + containerId := rs.Primary.ID + projectsToDestroy = append(projectsToDestroy, containerId) + } + + projectsResp, err := client.GetProjects(ctx).ContainerParentId(projectResource["parent_container_id"]).Execute() + if err != nil { + return fmt.Errorf("getting projectsResp: %w", 
err) + } + + items := *projectsResp.Items + for i := range items { + if utils.Contains(projectsToDestroy, *items[i].ContainerId) { + err := client.DeleteProjectExecute(ctx, *items[i].ContainerId) + if err != nil { + return fmt.Errorf("destroying project %s during CheckDestroy: %w", *items[i].ContainerId, err) + } + _, err = resourcemanager.DeleteProjectWaitHandler(ctx, client, *items[i].ContainerId).WaitWithContext(ctx) + if err != nil { + return fmt.Errorf("destroying project %s during CheckDestroy: waiting for deletion %w", *items[i].ContainerId, err) + } + } + } + return nil +} diff --git a/stackit/services/ske/cluster/datasource.go b/stackit/services/ske/cluster/datasource.go new file mode 100644 index 00000000..11428782 --- /dev/null +++ b/stackit/services/ske/cluster/datasource.go @@ -0,0 +1,319 @@ +package ske + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/ske" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &clusterDataSource{} +) + +// NewClusterDataSource is a helper function to simplify the provider implementation. +func NewClusterDataSource() datasource.DataSource { + return &clusterDataSource{} +} + +// clusterDataSource is the data source implementation. +type clusterDataSource struct { + client *ske.APIClient +} + +// Metadata returns the resource type name. 
+func (r *clusterDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_ske_cluster" +} + +// Configure adds the provider configured client to the resource. +func (r *clusterDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Data Source Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. Please report this issue to the provider developers.", req.ProviderData)) + return + } + + var apiClient *ske.APIClient + var err error + if providerData.SKECustomEndpoint != "" { + apiClient, err = ske.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.SKECustomEndpoint), + ) + } else { + apiClient, err = ske.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + + tflog.Info(ctx, "SKE client configured") + r.client = apiClient +} +func (r *clusterDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "SKE Cluster data source schema.", + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: "Terraform's internal resource ID.", + Computed: true, + }, + "project_id": schema.StringAttribute{ + Description: "STACKIT project ID to which the cluster is associated.", + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "name": schema.StringAttribute{ + Description: "The cluster name.", + Required: true, + 
}, + "kubernetes_version": schema.StringAttribute{ + Description: "Kubernetes version.", + Computed: true, + }, + "kubernetes_version_used": schema.StringAttribute{ + Description: "Full Kubernetes version used. For example, if `1.22` was selected, this value may result to `1.22.15`", + Computed: true, + }, + "allow_privileged_containers": schema.BoolAttribute{ + Description: "DEPRECATED as of Kubernetes 1.25+\n Flag to specify if privileged mode for containers is enabled or not.\nThis should be used with care since it also disables a couple of other features like the use of some volume type (e.g. PVCs).", + DeprecationMessage: "Please remove this flag from your configuration when using Kubernetes version 1.25+.", + Computed: true, + }, + + "node_pools": schema.ListNestedAttribute{ + Description: "One or more `node_pool` block as defined below.", + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Description: "Specifies the name of the node pool.", + Computed: true, + }, + "machine_type": schema.StringAttribute{ + Description: "The machine type.", + Computed: true, + }, + "os_name": schema.StringAttribute{ + Description: "The name of the OS image.", + Computed: true, + }, + "os_version": schema.StringAttribute{ + Description: "The OS image version.", + Computed: true, + }, + "minimum": schema.Int64Attribute{ + Description: "Minimum number of nodes in the pool.", + Computed: true, + }, + + "maximum": schema.Int64Attribute{ + Description: "Maximum number of nodes in the pool.", + Computed: true, + }, + + "max_surge": schema.Int64Attribute{ + Description: "The maximum number of nodes upgraded simultaneously.", + Computed: true, + }, + "max_unavailable": schema.Int64Attribute{ + Description: "The maximum number of nodes unavailable during upgraded.", + Computed: true, + }, + "volume_type": schema.StringAttribute{ + Description: "Specifies the volume type.", + Computed: true, + }, 
+ "volume_size": schema.Int64Attribute{ + Description: "The volume size in GB.", + Computed: true, + }, + "labels": schema.MapAttribute{ + Description: "Labels to add to each node.", + Computed: true, + ElementType: types.StringType, + }, + "taints": schema.ListNestedAttribute{ + Description: "Specifies a taint list as defined below.", + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "effect": schema.StringAttribute{ + Description: "The taint effect.", + Computed: true, + }, + "key": schema.StringAttribute{ + Description: "Taint key to be applied to a node.", + Computed: true, + }, + "value": schema.StringAttribute{ + Description: "Taint value corresponding to the taint key.", + Computed: true, + }, + }, + }, + }, + "cri": schema.StringAttribute{ + Description: "Specifies the container runtime.", + Computed: true, + }, + "availability_zones": schema.ListAttribute{ + Description: "Specify a list of availability zones.", + ElementType: types.StringType, + Computed: true, + }, + }, + }, + }, + "maintenance": schema.SingleNestedAttribute{ + Description: "A single maintenance block as defined below", + Computed: true, + Attributes: map[string]schema.Attribute{ + "enable_kubernetes_version_updates": schema.BoolAttribute{ + Description: "Flag to enable/disable auto-updates of the Kubernetes version.", + Computed: true, + }, + "enable_machine_image_version_updates": schema.BoolAttribute{ + Description: "Flag to enable/disable auto-updates of the OS image version.", + Computed: true, + }, + "start": schema.StringAttribute{ + Description: "Date time for maintenance window start.", + Computed: true, + }, + "end": schema.StringAttribute{ + Description: "Date time for maintenance window end.", + Computed: true, + }, + }, + }, + + "hibernations": schema.ListNestedAttribute{ + Description: "One or more hibernation block as defined below.", + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: 
map[string]schema.Attribute{ + "start": schema.StringAttribute{ + Description: "Start time of cluster hibernation in crontab syntax.", + Computed: true, + }, + "end": schema.StringAttribute{ + Description: "End time of hibernation, in crontab syntax.", + Computed: true, + }, + "timezone": schema.StringAttribute{ + Description: "Timezone name corresponding to a file in the IANA Time Zone database.", + Computed: true, + }, + }, + }, + }, + + "extensions": schema.SingleNestedAttribute{ + Description: "A single extensions block as defined below", + Computed: true, + Attributes: map[string]schema.Attribute{ + "argus": schema.SingleNestedAttribute{ + Description: "A single argus block as defined below", + Computed: true, + Attributes: map[string]schema.Attribute{ + "enabled": schema.BoolAttribute{ + Description: "Flag to enable/disable argus extensions.", + Computed: true, + }, + "argus_instance_id": schema.StringAttribute{ + Description: "Instance ID of argus", + Computed: true, + }, + }, + }, + "acl": schema.SingleNestedAttribute{ + Description: "Cluster access control configuration", + Computed: true, + Attributes: map[string]schema.Attribute{ + "enabled": schema.BoolAttribute{ + Description: "Is ACL enabled?", + Computed: true, + }, + "allowed_cidrs": schema.ListAttribute{ + Description: "Specify a list of CIDRs to whitelist", + Computed: true, + ElementType: types.StringType, + }, + }, + }, + }, + }, + "kube_config": schema.StringAttribute{ + Description: "Kube config file used for connecting to the cluster", + Sensitive: true, + Computed: true, + }, + }, + } +} + +// Read refreshes the Terraform state with the latest data. +func (r *clusterDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var state Cluster + diags := req.Config.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + projectId := state.ProjectId.ValueString() + name := state.Name.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "name", name) + clusterResp, err := r.client.GetCluster(ctx, projectId, name).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, fmt.Sprintf("Unable to read cluster, project_id = %s, name = %s", projectId, name), err.Error()) + return + } + + err = mapFields(ctx, clusterResp, &state) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Mapping fields", err.Error()) + return + } + r.getCredential(ctx, &diags, &state) + // Set refreshed state + diags = resp.State.Set(ctx, state) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "SKE cluster read") +} + +func (r *clusterDataSource) getCredential(ctx context.Context, diags *diag.Diagnostics, model *Cluster) { + c := r.client + res, err := c.GetCredentials(ctx, model.ProjectId.ValueString(), model.Name.ValueString()).Execute() + if err != nil { + diags.AddError("failed fetching cluster credentials for data source", err.Error()) + return + } + model.KubeConfig = types.StringPointerValue(res.Kubeconfig) +} diff --git a/stackit/services/ske/cluster/resource.go b/stackit/services/ske/cluster/resource.go new file mode 100644 index 00000000..c4cd65de --- /dev/null +++ b/stackit/services/ske/cluster/resource.go @@ -0,0 +1,1170 @@ +package ske + +import ( + "context" + "fmt" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + 
"github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64default" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/mapplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/objectplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/ske" + "github.com/stackitcloud/terraform-provider-stackit/stackit/conversion" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" + "golang.org/x/mod/semver" +) + +const ( + DefaultOSName = "flatcar" + DefaultCRI = "containerd" + DefaultVolumeType = "storage_premium_perf1" + DefaultVolumeSizeGB int64 = 20 + VersionStateSupported = "supported" + VersionStatePreview = "preview" + VersionStateDeprecated = "deprecated" +) + +// Ensure the implementation satisfies the expected interfaces. 
// Compile-time checks that clusterResource implements the required
// Terraform plugin framework interfaces.
var (
	_ resource.Resource                = &clusterResource{}
	_ resource.ResourceWithConfigure   = &clusterResource{}
	_ resource.ResourceWithImportState = &clusterResource{}
)

// Cluster is the Terraform model for an SKE cluster; field tags map to the
// attribute names declared in the resource/data source schema.
type Cluster struct {
	Id                        types.String `tfsdk:"id"` // needed by TF
	ProjectId                 types.String `tfsdk:"project_id"`
	Name                      types.String `tfsdk:"name"`
	KubernetesVersion         types.String `tfsdk:"kubernetes_version"`
	KubernetesVersionUsed     types.String `tfsdk:"kubernetes_version_used"`
	AllowPrivilegedContainers types.Bool   `tfsdk:"allow_privileged_containers"`
	NodePools                 []NodePool   `tfsdk:"node_pools"`
	Maintenance               types.Object `tfsdk:"maintenance"`
	Hibernations              []Hibernation `tfsdk:"hibernations"`
	Extensions                *Extensions  `tfsdk:"extensions"`
	KubeConfig                types.String `tfsdk:"kube_config"`
}

// NodePool models a single entry of the cluster's node_pools list.
type NodePool struct {
	Name              types.String `tfsdk:"name"`
	MachineType       types.String `tfsdk:"machine_type"`
	OSName            types.String `tfsdk:"os_name"`
	OSVersion         types.String `tfsdk:"os_version"`
	Minimum           types.Int64  `tfsdk:"minimum"`
	Maximum           types.Int64  `tfsdk:"maximum"`
	MaxSurge          types.Int64  `tfsdk:"max_surge"`
	MaxUnavailable    types.Int64  `tfsdk:"max_unavailable"`
	VolumeType        types.String `tfsdk:"volume_type"`
	VolumeSize        types.Int64  `tfsdk:"volume_size"`
	Labels            types.Map    `tfsdk:"labels"`
	Taints            []Taint      `tfsdk:"taints"`
	CRI               types.String `tfsdk:"cri"`
	AvailabilityZones types.List   `tfsdk:"availability_zones"`
}

// Taint models a Kubernetes node taint inside a node pool.
type Taint struct {
	Effect types.String `tfsdk:"effect"`
	Key    types.String `tfsdk:"key"`
	Value  types.String `tfsdk:"value"`
}

// Maintenance models the cluster maintenance window configuration.
type Maintenance struct {
	EnableKubernetesVersionUpdates   types.Bool   `tfsdk:"enable_kubernetes_version_updates"`
	EnableMachineImageVersionUpdates types.Bool   `tfsdk:"enable_machine_image_version_updates"`
	Start                            types.String `tfsdk:"start"`
	End                              types.String `tfsdk:"end"`
}

// maintenanceTypes declares the attribute types of the maintenance object,
// matching the Maintenance struct above (needed for types.Object conversion).
var maintenanceTypes = map[string]attr.Type{
	"enable_kubernetes_version_updates":    basetypes.BoolType{},
	"enable_machine_image_version_updates": basetypes.BoolType{},
+ "start": basetypes.StringType{}, + "end": basetypes.StringType{}, +} + +type Hibernation struct { + Start types.String `tfsdk:"start"` + End types.String `tfsdk:"end"` + Timezone types.String `tfsdk:"timezone"` +} + +type Extensions struct { + Argus *ArgusExtension `tfsdk:"argus"` + ACL *ACL `tfsdk:"acl"` +} + +type ACL struct { + Enabled types.Bool `tfsdk:"enabled"` + AllowedCIDRs types.List `tfsdk:"allowed_cidrs"` +} + +type ArgusExtension struct { + Enabled types.Bool `tfsdk:"enabled"` + ArgusInstanceId types.String `tfsdk:"argus_instance_id"` +} + +// NewClusterResource is a helper function to simplify the provider implementation. +func NewClusterResource() resource.Resource { + return &clusterResource{} +} + +// clusterResource is the resource implementation. +type clusterResource struct { + client *ske.APIClient +} + +// Metadata returns the resource type name. +func (r *clusterResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_ske_cluster" +} + +// Configure adds the provider configured client to the resource. +func (r *clusterResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Resource Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. 
Please report this issue to the provider developers.", req.ProviderData)) + return + } + + var apiClient *ske.APIClient + var err error + if providerData.SKECustomEndpoint != "" { + apiClient, err = ske.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.SKECustomEndpoint), + ) + } else { + apiClient, err = ske.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + + tflog.Info(ctx, "SKE cluster client configured") + r.client = apiClient +} + +// Schema defines the schema for the resource. +func (r *clusterResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "SKE Cluster Resource schema.", + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: "Terraform's internal resource ID.", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "project_id": schema.StringAttribute{ + Description: "STACKIT project ID to which the cluster is associated.", + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "name": schema.StringAttribute{ + Description: "The cluster name.", + Required: true, + Validators: []validator.String{ + stringvalidator.RegexMatches( + regexp.MustCompile(`^[a-z0-9][a-z0-9-]{0,10}$`), + "must start with a letter, must have lower case letters, numbers or hyphens, no hyphen at the end and less than 11 characters.", + ), + validate.NoSeparator(), + }, + }, + "kubernetes_version": schema.StringAttribute{ + Description: "Kubernetes version. Must only contain major and minor version (e.g. 
1.22)", + Required: true, + Validators: []validator.String{ + validate.SemanticMinorVersion(), + }, + }, + "kubernetes_version_used": schema.StringAttribute{ + Description: "Full Kubernetes version used. For example, if 1.22 was selected, this value may result to 1.22.15", + Computed: true, + }, + "allow_privileged_containers": schema.BoolAttribute{ + Description: "Flag to specify if privileged mode for containers is enabled or not.\nThis should be used with care since it also disables a couple of other features like the use of some volume type (e.g. PVCs).\nDeprecated as of Kubernetes 1.25 and later", + Optional: true, + }, + "node_pools": schema.ListNestedAttribute{ + Description: "One or more `node_pool` block as defined below.", + Required: true, + Validators: []validator.List{ + listvalidator.SizeAtLeast(1), + listvalidator.SizeAtMost(10), + }, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Description: "Specifies the name of the node pool.", + Required: true, + }, + "machine_type": schema.StringAttribute{ + Description: "The machine type.", + Required: true, + }, + "availability_zones": schema.ListAttribute{ + Description: "Specify a list of availability zones. E.g. 
`eu01-m`", + Required: true, + ElementType: types.StringType, + }, + "minimum": schema.Int64Attribute{ + Description: "Minimum number of nodes in the pool.", + Required: true, + Validators: []validator.Int64{ + int64validator.AtLeast(1), + int64validator.AtMost(100), + }, + }, + "maximum": schema.Int64Attribute{ + Description: "Maximum number of nodes in the pool.", + Required: true, + Validators: []validator.Int64{ + int64validator.AtLeast(1), + int64validator.AtMost(100), + }, + }, + "max_surge": schema.Int64Attribute{ + Description: "Maximum number of additional VMs that are created during an update.", + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Int64{ + int64planmodifier.UseStateForUnknown(), + }, + Validators: []validator.Int64{ + int64validator.AtLeast(1), + int64validator.AtMost(10), + }, + }, + "max_unavailable": schema.Int64Attribute{ + Description: "Maximum number of VMs that that can be unavailable during an update.", + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Int64{ + int64planmodifier.UseStateForUnknown(), + }, + }, + "os_name": schema.StringAttribute{ + Description: "The name of the OS image. E.g. `flatcar`.", + Optional: true, + Computed: true, + Default: stringdefault.StaticString(DefaultOSName), + }, + "os_version": schema.StringAttribute{ + Description: "The OS image version.", + Required: true, + }, + "volume_type": schema.StringAttribute{ + Description: "Specifies the volume type. E.g. `storage_premium_perf1`.", + Optional: true, + Computed: true, + Default: stringdefault.StaticString(DefaultVolumeType), + }, + "volume_size": schema.Int64Attribute{ + Description: "The volume size in GB. E.g. 
`20`", + Optional: true, + Computed: true, + Default: int64default.StaticInt64(DefaultVolumeSizeGB), + }, + "labels": schema.MapAttribute{ + Description: "Labels to add to each node.", + Optional: true, + Computed: true, + ElementType: types.StringType, + PlanModifiers: []planmodifier.Map{ + mapplanmodifier.UseStateForUnknown(), + }, + }, + "taints": schema.ListNestedAttribute{ + Description: "Specifies a taint list as defined below.", + Optional: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "effect": schema.StringAttribute{ + Description: "The taint effect. E.g `PreferNoSchedule`.", + Required: true, + }, + "key": schema.StringAttribute{ + Description: "Taint key to be applied to a node.", + Required: true, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), + }, + }, + "value": schema.StringAttribute{ + Description: "Taint value corresponding to the taint key.", + Optional: true, + }, + }, + }, + }, + "cri": schema.StringAttribute{ + Description: "Specifies the container runtime. E.g. `containerd`", + Optional: true, + Computed: true, + Default: stringdefault.StaticString(DefaultCRI), + }, + }, + }, + }, + "maintenance": schema.SingleNestedAttribute{ + Description: "A single maintenance block as defined below.", + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Object{ + objectplanmodifier.UseStateForUnknown(), + }, + Attributes: map[string]schema.Attribute{ + "enable_kubernetes_version_updates": schema.BoolAttribute{ + Description: "Flag to enable/disable auto-updates of the Kubernetes version.", + Required: true, + }, + "enable_machine_image_version_updates": schema.BoolAttribute{ + Description: "Flag to enable/disable auto-updates of the OS image version.", + Required: true, + }, + "start": schema.StringAttribute{ + Description: "Time for maintenance window start. E.g. 
`01:23:45Z`, `05:00:00+02:00`.", + Required: true, + Validators: []validator.String{ + stringvalidator.RegexMatches( + regexp.MustCompile(`^(((\d{2}:\d{2}:\d{2}(?:\.\d+)?))(Z|[\+-]\d{2}:\d{2})?)$`), + "must be a full-time as defined by RFC3339, Section 5.6. E.g. `01:23:45Z`, `05:00:00+02:00`", + ), + }, + }, + "end": schema.StringAttribute{ + Description: "Time for maintenance window end. E.g. `01:23:45Z`, `05:00:00+02:00`.", + Required: true, + Validators: []validator.String{ + stringvalidator.RegexMatches( + regexp.MustCompile(`^(((\d{2}:\d{2}:\d{2}(?:\.\d+)?))(Z|[\+-]\d{2}:\d{2})?)$`), + "must be a full-time as defined by RFC3339, Section 5.6. E.g. `01:23:45Z`, `05:00:00+02:00`", + ), + }, + }, + }, + }, + "hibernations": schema.ListNestedAttribute{ + Description: "One or more hibernation block as defined below.", + Optional: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "start": schema.StringAttribute{ + Description: "Start time of cluster hibernation in crontab syntax. E.g. `0 18 * * *` for starting everyday at 6pm.", + Required: true, + }, + "end": schema.StringAttribute{ + Description: "End time of hibernation in crontab syntax. E.g. `0 8 * * *` for waking up the cluster at 8am.", + Required: true, + }, + "timezone": schema.StringAttribute{ + Description: "Timezone name corresponding to a file in the IANA Time Zone database. i.e. 
`Europe/Berlin`.", + Optional: true, + }, + }, + }, + }, + "extensions": schema.SingleNestedAttribute{ + Description: "A single extensions block as defined below.", + Optional: true, + PlanModifiers: []planmodifier.Object{ + objectplanmodifier.UseStateForUnknown(), + }, + Attributes: map[string]schema.Attribute{ + "argus": schema.SingleNestedAttribute{ + Description: "A single argus block as defined below.", + Optional: true, + Attributes: map[string]schema.Attribute{ + "enabled": schema.BoolAttribute{ + Description: "Flag to enable/disable Argus extensions.", + Required: true, + }, + "argus_instance_id": schema.StringAttribute{ + Description: "Argus instance ID to choose which Argus instance is used. Required when enabled is set to `true`.", + Optional: true, + }, + }, + }, + "acl": schema.SingleNestedAttribute{ + Description: "Cluster access control configuration.", + Optional: true, + Attributes: map[string]schema.Attribute{ + "enabled": schema.BoolAttribute{ + Description: "Is ACL enabled?", + Required: true, + }, + "allowed_cidrs": schema.ListAttribute{ + Description: "Specify a list of CIDRs to whitelist.", + Required: true, + ElementType: types.StringType, + }, + }, + }, + }, + }, + "kube_config": schema.StringAttribute{ + Description: "Kube config file used for connecting to the cluster", + Sensitive: true, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + }, + } +} + +func (r *clusterResource) ValidateConfig(ctx context.Context, req resource.ValidateConfigRequest, resp *resource.ValidateConfigResponse) { + var model Cluster + diags := req.Config.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + diags = checkAllowPrivilegedContainers(model.AllowPrivilegedContainers, model.KubernetesVersion) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } +} + +func checkAllowPrivilegedContainers(allowPrivilegeContainers types.Bool, kubernetesVersion types.String) diag.Diagnostics { + var diags diag.Diagnostics + + if kubernetesVersion.IsNull() { + diags.AddError("'Kubernetes version' missing", "This field is required") + return diags + } + comparison := semver.Compare(fmt.Sprintf("v%s", kubernetesVersion.ValueString()), "v1.25") + if comparison < 0 { + if allowPrivilegeContainers.IsNull() { + diags.AddError("'Allow privilege containers' missing", "This field is required for Kubernetes prior to 1.25") + } + } else { + if !allowPrivilegeContainers.IsNull() { + diags.AddError("'Allow privilege containers' deprecated", "This field is deprecated as of Kubernetes 1.25 and later. Please remove this field") + } + } + + return diags +} + +// Create creates the resource and sets the initial Terraform state. +func (r *clusterResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform + var model Cluster + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + clusterName := model.Name.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "name", clusterName) + + availableVersions := r.loadAvaiableVersions(ctx, &resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + + r.createOrUpdateCluster(ctx, &resp.Diagnostics, &model, availableVersions) + if resp.Diagnostics.HasError() { + return + } + + // handle credential + r.getCredential(ctx, &resp.Diagnostics, &model) + if resp.Diagnostics.HasError() { + return + } + + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + tflog.Info(ctx, "SKE cluster created") +} + +// loadAvaiableVersions fetches the Kubernetes versions currently offered by the SKE API. +// On failure it records a diagnostic and returns nil. +func (r *clusterResource) loadAvaiableVersions(ctx context.Context, diags *diag.Diagnostics) []ske.KubernetesVersion { + c := r.client + res, err := c.GetOptions(ctx).Execute() + if err != nil { + diags.AddError("Failed loading cluster available versions: getting cluster options", err.Error()) + return nil + } + + if res.KubernetesVersions == nil { + // err is guaranteed nil on this path (the err != nil branch already returned), + // so calling err.Error() here would panic with a nil pointer dereference. + diags.AddError("Failed loading cluster available versions: nil kubernetesVersions", "API response contained no kubernetesVersions") + return nil + } + + return *res.KubernetesVersions +} + +func (r *clusterResource) createOrUpdateCluster(ctx context.Context, diags *diag.Diagnostics, model *Cluster, availableVersions []ske.KubernetesVersion) { + // cluster vars + projectId := model.ProjectId.ValueString() + name := model.Name.ValueString() + kubernetes, hasDeprecatedVersion, err := toKubernetesPayload(model, availableVersions) + if err != nil { + diags.AddError("Failed to create cluster config payload", err.Error()) + return + } + if hasDeprecatedVersion { + warningMessage := fmt.Sprintf("Using deprecated kubernetes version %s", *kubernetes.Version) + diags.AddWarning(warningMessage, "") + } + nodePools := toNodepoolsPayload(ctx, model) + maintenance, err := toMaintenancePayload(ctx, model) + if err != nil { + diags.AddError("Failed to create maintenance payload", err.Error()) + return + } + hibernations := toHibernationsPayload(model) + extensions, err := toExtensionsPayload(ctx, model) + if err != nil { + diags.AddError("Failed to create extension payload", err.Error()) + return + } + + payload := ske.CreateOrUpdateClusterPayload{ + Extensions: extensions, + Hibernation: hibernations, + Kubernetes: kubernetes, + Maintenance: maintenance, + Nodepools: &nodePools, + } + _, err = r.client.CreateOrUpdateCluster(ctx, projectId, name).CreateOrUpdateClusterPayload(payload).Execute() + if err != nil { + diags.AddError("failed during SKE create/update", err.Error()) + 
return + } + + wr, err := ske.CreateOrUpdateClusterWaitHandler(ctx, r.client, projectId, name).SetTimeout(30 * time.Minute).WaitWithContext(ctx) + if err != nil { + diags.AddError("Error creating cluster", fmt.Sprintf("Cluster creation waiting: %v", err)) + return + } + got, ok := wr.(*ske.ClusterResponse) + if !ok { + diags.AddError("Error creating cluster", fmt.Sprintf("Wait result conversion, got %+v", got)) + return + } + err = mapFields(ctx, got, model) + if err != nil { + diags.AddError("Mapping cluster fields", err.Error()) + return + } +} + +// getCredential fetches the cluster kubeconfig and stores it in the model. +func (r *clusterResource) getCredential(ctx context.Context, diags *diag.Diagnostics, model *Cluster) { + c := r.client + res, err := c.GetCredentials(ctx, model.ProjectId.ValueString(), model.Name.ValueString()).Execute() + if err != nil { + diags.AddError("failed fetching cluster credentials", err.Error()) + return + } + model.KubeConfig = types.StringPointerValue(res.Kubeconfig) +} + +// toNodepoolsPayload converts the Terraform node pool models into SKE API node pool payloads. +func toNodepoolsPayload(ctx context.Context, m *Cluster) []ske.Nodepool { + cnps := []ske.Nodepool{} + for i := range m.NodePools { + // taints + ts := []ske.Taint{} + nodePool := m.NodePools[i] + for _, v := range nodePool.Taints { + t := ske.Taint{ + Effect: v.Effect.ValueStringPointer(), + Key: v.Key.ValueStringPointer(), + Value: v.Value.ValueStringPointer(), + } + ts = append(ts, t) + } + + // labels + var ls *map[string]string + if nodePool.Labels.IsNull() { + ls = nil + } else { + lsm := map[string]string{} + for k, v := range nodePool.Labels.Elements() { + nv, err := conversion.ToString(ctx, v) + if err != nil { + lsm[k] = "" + continue + } + lsm[k] = nv + } + ls = &lsm + } + + // zones + zs := []string{} + for _, v := range nodePool.AvailabilityZones.Elements() { + if v.IsNull() || v.IsUnknown() { + continue + } + // use the caller-provided ctx (was context.TODO(), which discards cancellation) + s, err := conversion.ToString(ctx, v) + if err != nil { + continue + } + zs = append(zs, s) + } + + cn := ske.CRI{ + Name: nodePool.CRI.ValueStringPointer(), + } + cnp := ske.Nodepool{ + Name: 
nodePool.Name.ValueStringPointer(), + Minimum: conversion.ToPtrInt32(nodePool.Minimum), + Maximum: conversion.ToPtrInt32(nodePool.Maximum), + MaxSurge: conversion.ToPtrInt32(nodePool.MaxSurge), + MaxUnavailable: conversion.ToPtrInt32(nodePool.MaxUnavailable), + Machine: &ske.Machine{ + Type: nodePool.MachineType.ValueStringPointer(), + Image: &ske.Image{ + Name: nodePool.OSName.ValueStringPointer(), + Version: nodePool.OSVersion.ValueStringPointer(), + }, + }, + Volume: &ske.Volume{ + Type: nodePool.VolumeType.ValueStringPointer(), + Size: conversion.ToPtrInt32(nodePool.VolumeSize), + }, + Taints: &ts, + Cri: &cn, + Labels: ls, + AvailabilityZones: &zs, + } + cnps = append(cnps, cnp) + } + return cnps +} + +func toHibernationsPayload(m *Cluster) *ske.Hibernation { + scs := []ske.HibernationSchedule{} + for _, h := range m.Hibernations { + sc := ske.HibernationSchedule{ + Start: h.Start.ValueStringPointer(), + End: h.End.ValueStringPointer(), + } + if !h.Timezone.IsNull() && !h.Timezone.IsUnknown() { + tz := h.Timezone.ValueString() + sc.Timezone = &tz + } + scs = append(scs, sc) + } + + if len(scs) == 0 { + return nil + } + + return &ske.Hibernation{ + Schedules: &scs, + } +} + +func toExtensionsPayload(ctx context.Context, m *Cluster) (*ske.Extension, error) { + if m.Extensions == nil { + return nil, nil + } + ex := &ske.Extension{} + if m.Extensions.Argus != nil { + ex.Argus = &ske.Argus{ + Enabled: m.Extensions.Argus.Enabled.ValueBoolPointer(), + ArgusInstanceId: m.Extensions.Argus.ArgusInstanceId.ValueStringPointer(), + } + } + if m.Extensions.ACL != nil { + cidrs := []string{} + diags := m.Extensions.ACL.AllowedCIDRs.ElementsAs(ctx, &cidrs, true) + if diags.HasError() { + return nil, fmt.Errorf("error in extension object converion %v", diags.Errors()) + } + ex.Acl = &ske.ACL{ + Enabled: m.Extensions.ACL.Enabled.ValueBoolPointer(), + AllowedCidrs: &cidrs, + } + } + return ex, nil +} + +func toMaintenancePayload(ctx context.Context, m *Cluster) 
(*ske.Maintenance, error) { + if m.Maintenance.IsNull() || m.Maintenance.IsUnknown() { + return nil, nil + } + + maintenance := Maintenance{} + diags := m.Maintenance.As(ctx, &maintenance, basetypes.ObjectAsOptions{}) + if diags.HasError() { + return nil, fmt.Errorf("error in maintenance object conversion %v", diags.Errors()) + } + + var timeWindowStart *string + if !(maintenance.Start.IsNull() || maintenance.Start.IsUnknown()) { + // API expects RFC3339 datetime + timeWindowStart = utils.Ptr( + fmt.Sprintf("0000-01-01T%s", maintenance.Start.ValueString()), + ) + } + + var timeWindowEnd *string + if !(maintenance.End.IsNull() || maintenance.End.IsUnknown()) { + // API expects RFC3339 datetime + timeWindowEnd = utils.Ptr( + fmt.Sprintf("0000-01-01T%s", maintenance.End.ValueString()), + ) + } + + return &ske.Maintenance{ + AutoUpdate: &ske.MaintenanceAutoUpdate{ + KubernetesVersion: maintenance.EnableKubernetesVersionUpdates.ValueBoolPointer(), + MachineImageVersion: maintenance.EnableMachineImageVersionUpdates.ValueBoolPointer(), + }, + TimeWindow: &ske.TimeWindow{ + Start: timeWindowStart, + End: timeWindowEnd, + }, + }, nil +} + +func mapFields(ctx context.Context, cl *ske.ClusterResponse, m *Cluster) error { + if cl == nil { + return fmt.Errorf("response input is nil") + } + if m == nil { + return fmt.Errorf("model input is nil") + } + + var name string + if m.Name.ValueString() != "" { + name = m.Name.ValueString() + } else if cl.Name != nil { + name = *cl.Name + } else { + return fmt.Errorf("name not present") + } + m.Name = types.StringValue(name) + idParts := []string{ + m.ProjectId.ValueString(), + name, + } + m.Id = types.StringValue( + strings.Join(idParts, core.Separator), + ) + + if cl.Kubernetes != nil { + // The k8s version returned by the API includes the patch version, while we only support major and minor in the kubernetes_version field + // This prevents inconsistent state by automatic updates to the patch version in the API + versionPreffixed := 
"v" + *cl.Kubernetes.Version + majorMinorVersionPreffixed := semver.MajorMinor(versionPreffixed) + majorMinorVersion, _ := strings.CutPrefix(majorMinorVersionPreffixed, "v") + m.KubernetesVersion = types.StringPointerValue(utils.Ptr(majorMinorVersion)) + m.KubernetesVersionUsed = types.StringPointerValue(cl.Kubernetes.Version) + m.AllowPrivilegedContainers = types.BoolPointerValue(cl.Kubernetes.AllowPrivilegedContainers) + } + if cl.Nodepools == nil { + m.NodePools = []NodePool{} + } else { + nodepools := *cl.Nodepools + m.NodePools = []NodePool{} + for i := range nodepools { + np := nodepools[i] + + // guard nil Machine/Volume before dereferencing: the API may omit them + maimna := types.StringNull() + maimver := types.StringNull() + mt := types.StringNull() + if np.Machine != nil { + mt = types.StringPointerValue(np.Machine.Type) + if np.Machine.Image != nil { + maimna = types.StringPointerValue(np.Machine.Image.Name) + maimver = types.StringPointerValue(np.Machine.Image.Version) + } + } + vt := types.StringNull() + vs := types.Int64Null() + if np.Volume != nil { + vt = types.StringPointerValue(np.Volume.Type) + vs = conversion.ToTypeInt64(np.Volume.Size) + } + crin := types.StringNull() + if np.Cri != nil { + crin = types.StringPointerValue(np.Cri.Name) + } + n := NodePool{ + Name: types.StringPointerValue(np.Name), + MachineType: mt, + OSName: maimna, + OSVersion: maimver, + Minimum: conversion.ToTypeInt64(np.Minimum), + Maximum: conversion.ToTypeInt64(np.Maximum), + MaxSurge: conversion.ToTypeInt64(np.MaxSurge), + MaxUnavailable: conversion.ToTypeInt64(np.MaxUnavailable), + VolumeType: vt, + VolumeSize: vs, + Labels: types.MapNull(types.StringType), + Taints: nil, + CRI: crin, + AvailabilityZones: types.ListNull(types.StringType), + } + if np.Labels != nil { + elems := map[string]attr.Value{} + for k, v := range *np.Labels { + elems[k] = types.StringValue(v) + } + n.Labels = types.MapValueMust(types.StringType, elems) + } + if np.Taints != nil { + for _, v := range *np.Taints { + if n.Taints == nil { + n.Taints = []Taint{} + } + n.Taints = append(n.Taints, Taint{ + Effect: types.StringPointerValue(v.Effect), + 
Key: types.StringPointerValue(v.Key), + Value: types.StringPointerValue(v.Value), + }) + } + } + if np.AvailabilityZones == nil { + n.AvailabilityZones = types.ListNull(types.StringType) + } else { + elems := []attr.Value{} + for _, v := range *np.AvailabilityZones { + elems = append(elems, types.StringValue(v)) + } + n.AvailabilityZones = types.ListValueMust(types.StringType, elems) + } + m.NodePools = append(m.NodePools, n) + } + } + + err := mapMaintenance(ctx, cl, m) + if err != nil { + return err + } + mapHibernations(cl, m) + mapExtensions(cl, m) + return nil +} + +func mapHibernations(cl *ske.ClusterResponse, m *Cluster) { + if cl.Hibernation == nil || cl.Hibernation.Schedules == nil { + return + } + + m.Hibernations = []Hibernation{} + for _, h := range *cl.Hibernation.Schedules { + m.Hibernations = append(m.Hibernations, Hibernation{ + Start: types.StringPointerValue(h.Start), + End: types.StringPointerValue(h.End), + Timezone: types.StringPointerValue(h.Timezone), + }) + } +} + +func mapMaintenance(ctx context.Context, cl *ske.ClusterResponse, m *Cluster) error { + // Aligned with SKE team that a flattened data structure is fine, because not extensions are planned. 
+ if cl.Maintenance == nil { + m.Maintenance = types.ObjectNull(map[string]attr.Type{}) + return nil + } + ekvu := types.BoolNull() + if cl.Maintenance.AutoUpdate.KubernetesVersion != nil { + ekvu = types.BoolValue(*cl.Maintenance.AutoUpdate.KubernetesVersion) + } + emvu := types.BoolNull() + if cl.Maintenance.AutoUpdate.KubernetesVersion != nil { + emvu = types.BoolValue(*cl.Maintenance.AutoUpdate.MachineImageVersion) + } + startTime, endTime, err := getMaintenanceTimes(ctx, cl, m) + if err != nil { + return fmt.Errorf("failed to get maintenance times: %w", err) + } + maintenanceValues := map[string]attr.Value{ + "enable_kubernetes_version_updates": ekvu, + "enable_machine_image_version_updates": emvu, + "start": types.StringValue(startTime), + "end": types.StringValue(endTime), + } + maintenanceObject, diags := types.ObjectValue(maintenanceTypes, maintenanceValues) + if diags.HasError() { + return fmt.Errorf("failed to create flavor: %w", core.DiagsToError(diags)) + } + m.Maintenance = maintenanceObject + return nil +} + +func getMaintenanceTimes(ctx context.Context, cl *ske.ClusterResponse, m *Cluster) (startTime, endTime string, err error) { + startTimeAPI, err := time.Parse(time.RFC3339, *cl.Maintenance.TimeWindow.Start) + if err != nil { + return "", "", fmt.Errorf("failed to parse start time '%s' from API response as RFC3339 datetime: %w", *cl.Maintenance.TimeWindow.Start, err) + } + endTimeAPI, err := time.Parse(time.RFC3339, *cl.Maintenance.TimeWindow.End) + if err != nil { + return "", "", fmt.Errorf("failed to parse end time '%s' from API response as RFC3339 datetime: %w", *cl.Maintenance.TimeWindow.End, err) + } + + if m.Maintenance.IsNull() || m.Maintenance.IsUnknown() { + return startTimeAPI.Format("15:04:05Z07:00"), endTimeAPI.Format("15:04:05Z07:00"), nil + } + + maintenance := &Maintenance{} + diags := m.Maintenance.As(ctx, maintenance, basetypes.ObjectAsOptions{}) + if diags.HasError() { + return "", "", fmt.Errorf("error in maintenance object 
conversion %w", core.DiagsToError(diags.Errors())) + } + + if maintenance.Start.IsNull() || maintenance.Start.IsUnknown() { + startTime = startTimeAPI.Format("15:04:05Z07:00") + } else { + startTimeTF, err := time.Parse("15:04:05Z07:00", maintenance.Start.ValueString()) + if err != nil { + return "", "", fmt.Errorf("failed to parse start time '%s' from TF config as RFC time: %w", maintenance.Start.ValueString(), err) + } + if startTimeAPI.Format("15:04:05Z07:00") != startTimeTF.Format("15:04:05Z07:00") { + return "", "", fmt.Errorf("start time '%v' from API response doesn't match start time '%v' from TF config", *cl.Maintenance.TimeWindow.Start, maintenance.Start.ValueString()) + } + startTime = maintenance.Start.ValueString() + } + + if maintenance.End.IsNull() || maintenance.End.IsUnknown() { + endTime = endTimeAPI.Format("15:04:05Z07:00") + } else { + endTimeTF, err := time.Parse("15:04:05Z07:00", maintenance.End.ValueString()) + if err != nil { + return "", "", fmt.Errorf("failed to parse end time '%s' from TF config as RFC time: %w", maintenance.End.ValueString(), err) + } + if endTimeAPI.Format("15:04:05Z07:00") != endTimeTF.Format("15:04:05Z07:00") { + return "", "", fmt.Errorf("end time '%v' from API response doesn't match end time '%v' from TF config", *cl.Maintenance.TimeWindow.End, maintenance.End.ValueString()) + } + endTime = maintenance.End.ValueString() + } + + return startTime, endTime, nil +} + +func mapExtensions(cl *ske.ClusterResponse, m *Cluster) { + if cl.Extensions == nil || (cl.Extensions.Argus == nil && cl.Extensions.Acl == nil) { + return + } + if m.Extensions == nil { + m.Extensions = &Extensions{} + } + if cl.Extensions.Argus != nil { + m.Extensions.Argus = &ArgusExtension{ + Enabled: types.BoolPointerValue(cl.Extensions.Argus.Enabled), + ArgusInstanceId: types.StringPointerValue(cl.Extensions.Argus.ArgusInstanceId), + } + } + + if cl.Extensions.Acl != nil { + cidr := []attr.Value{} + if cl.Extensions.Acl.AllowedCidrs != nil { + for _, v 
:= range *cl.Extensions.Acl.AllowedCidrs { + cidr = append(cidr, types.StringValue(v)) + } + } + m.Extensions.ACL = &ACL{ + Enabled: types.BoolPointerValue(cl.Extensions.Acl.Enabled), + AllowedCIDRs: types.ListValueMust(types.StringType, cidr), + } + } +} + +func toKubernetesPayload(m *Cluster, availableVersions []ske.KubernetesVersion) (kubernetesPayload *ske.Kubernetes, hasDeprecatedVersion bool, err error) { + versionUsed, hasDeprecatedVersion, err := latestMatchingVersion(availableVersions, m.KubernetesVersion.ValueStringPointer()) + if err != nil { + return nil, false, fmt.Errorf("getting latest matching kubernetes version: %w", err) + } + + k := &ske.Kubernetes{ + Version: versionUsed, + AllowPrivilegedContainers: m.AllowPrivilegedContainers.ValueBoolPointer(), + } + return k, hasDeprecatedVersion, nil +} + +func latestMatchingVersion(availableVersions []ske.KubernetesVersion, providedVersion *string) (version *string, deprecated bool, err error) { + deprecated = false + + if availableVersions == nil { + return nil, false, fmt.Errorf("nil available kubernetes versions") + } + + if providedVersion == nil { + return nil, false, fmt.Errorf("provided version is nil") + } + + providedVersionPrefixed := "v" + *providedVersion + + if !semver.IsValid(providedVersionPrefixed) { + return nil, false, fmt.Errorf("provided version is invalid") + } + + var versionUsed *string + // Get the higher available version that matches the major and minor version provided by the user + for _, v := range availableVersions { + if v.State == nil || v.Version == nil { + continue + } + vPreffixed := "v" + *v.Version + if semver.MajorMinor(vPreffixed) == semver.MajorMinor(providedVersionPrefixed) && + (semver.Compare(vPreffixed, providedVersionPrefixed) == 1 || semver.Compare(vPreffixed, providedVersionPrefixed) == 0) { + versionUsed = v.Version + + if strings.EqualFold(*v.State, VersionStateDeprecated) { + deprecated = true + } else { + deprecated = false + } + } + } + + // Throwing 
error if we could not match the version with the available versions + if versionUsed == nil { + return nil, false, fmt.Errorf("provided version is not one of the available kubernetes versions") + } + + return versionUsed, deprecated, nil +} + +func (r *clusterResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var state Cluster + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := state.ProjectId.ValueString() + name := state.Name.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "name", name) + + clResp, err := r.client.GetCluster(ctx, projectId, name).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, fmt.Sprintf("Unable to read cluster, project_id = %s, name = %s", projectId, name), err.Error()) + return + } + + err = mapFields(ctx, clResp, &state) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error()) + return + } + diags = resp.State.Set(ctx, state) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "SKE cluster read") +} + +func (r *clusterResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform + var model Cluster + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + clName := model.Name.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "name", clName) + + availableVersions := r.loadAvaiableVersions(ctx, &resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + + r.createOrUpdateCluster(ctx, &resp.Diagnostics, &model, availableVersions) + if resp.Diagnostics.HasError() { + return + } + + // handle credential + r.getCredential(ctx, &resp.Diagnostics, &model) + if resp.Diagnostics.HasError() { + return + } + + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "SKE cluster updated") +} + +func (r *clusterResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform + var model Cluster + resp.Diagnostics.Append(req.State.Get(ctx, &model)...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + name := model.Name.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "name", name) + + c := r.client + _, err := c.DeleteCluster(ctx, projectId, name).Execute() + if err != nil { + resp.Diagnostics.AddError("failed deleting cluster", err.Error()) + return + } + _, err = ske.DeleteClusterWaitHandler(ctx, r.client, projectId, name).SetTimeout(15 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting cluster", fmt.Sprintf("Cluster deletion waiting: %v", err)) + return + } + tflog.Info(ctx, "SKE cluster deleted") +} + +// ImportState imports a resource into the Terraform state on success. 
+// The expected format of the resource import identifier is: project_id,name +func (r *clusterResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + idParts := strings.Split(req.ID, core.Separator) + + if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { + resp.Diagnostics.AddError( + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format: [project_id],[name] Got: %q", req.ID), + ) + return + } + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("name"), idParts[1])...) + tflog.Info(ctx, "SKE cluster state imported") +} diff --git a/stackit/services/ske/cluster/resource_test.go b/stackit/services/ske/cluster/resource_test.go new file mode 100644 index 00000000..5a56eddc --- /dev/null +++ b/stackit/services/ske/cluster/resource_test.go @@ -0,0 +1,635 @@ +package ske + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/ske" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" +) + +func TestMapFields(t *testing.T) { + cs := ske.ClusterStatusState("OK") + tests := []struct { + description string + input *ske.ClusterResponse + expected Cluster + isValid bool + }{ + { + "default_values", + &ske.ClusterResponse{ + Name: utils.Ptr("name"), + }, + Cluster{ + Id: types.StringValue("pid,name"), + ProjectId: types.StringValue("pid"), + Name: types.StringValue("name"), + KubernetesVersion: types.StringNull(), + AllowPrivilegedContainers: types.BoolNull(), + NodePools: []NodePool{}, + Maintenance: types.ObjectNull(map[string]attr.Type{}), + Hibernations: nil, + Extensions: nil, + KubeConfig: types.StringNull(), + }, + 
true, + }, + { + "simple_values", + &ske.ClusterResponse{ + Extensions: &ske.Extension{ + Acl: &ske.ACL{ + AllowedCidrs: &[]string{"cidr1"}, + Enabled: utils.Ptr(true), + }, + Argus: &ske.Argus{ + ArgusInstanceId: utils.Ptr("aid"), + Enabled: utils.Ptr(true), + }, + }, + Hibernation: &ske.Hibernation{ + Schedules: &[]ske.HibernationSchedule{ + { + End: utils.Ptr("2"), + Start: utils.Ptr("1"), + Timezone: utils.Ptr("CET"), + }, + }, + }, + Kubernetes: &ske.Kubernetes{ + AllowPrivilegedContainers: utils.Ptr(true), + Version: utils.Ptr("1.2.3"), + }, + Maintenance: &ske.Maintenance{ + AutoUpdate: &ske.MaintenanceAutoUpdate{ + KubernetesVersion: utils.Ptr(true), + MachineImageVersion: utils.Ptr(true), + }, + TimeWindow: &ske.TimeWindow{ + Start: utils.Ptr("0000-01-02T03:04:05+06:00"), + End: utils.Ptr("0010-11-12T13:14:15Z"), + }, + }, + Name: utils.Ptr("name"), + Nodepools: &[]ske.Nodepool{ + { + AvailabilityZones: &[]string{"z1", "z2"}, + Cri: &ske.CRI{ + Name: utils.Ptr("cri"), + }, + Labels: &map[string]string{"k": "v"}, + Machine: &ske.Machine{ + Image: &ske.Image{ + Name: utils.Ptr("os"), + Version: utils.Ptr("os-ver"), + }, + Type: utils.Ptr("B"), + }, + MaxSurge: utils.Ptr(int32(3)), + MaxUnavailable: nil, + Maximum: utils.Ptr(int32(5)), + Minimum: utils.Ptr(int32(1)), + Name: utils.Ptr("node"), + Taints: &[]ske.Taint{ + { + Effect: utils.Ptr("effect"), + Key: utils.Ptr("key"), + Value: utils.Ptr("value"), + }, + }, + Volume: &ske.Volume{ + Size: utils.Ptr(int32(3)), + Type: utils.Ptr("type"), + }, + }, + }, + Status: &ske.ClusterStatus{ + Aggregated: &cs, + Error: nil, + Hibernated: nil, + }, + }, + Cluster{ + Id: types.StringValue("pid,name"), + ProjectId: types.StringValue("pid"), + Name: types.StringValue("name"), + KubernetesVersion: types.StringValue("1.2"), + KubernetesVersionUsed: types.StringValue("1.2.3"), + AllowPrivilegedContainers: types.BoolValue(true), + + NodePools: []NodePool{ + { + Name: types.StringValue("node"), + MachineType: 
types.StringValue("B"), + OSName: types.StringValue("os"), + OSVersion: types.StringValue("os-ver"), + Minimum: types.Int64Value(1), + Maximum: types.Int64Value(5), + MaxSurge: types.Int64Value(3), + MaxUnavailable: types.Int64Null(), + VolumeType: types.StringValue("type"), + VolumeSize: types.Int64Value(3), + Labels: types.MapValueMust(types.StringType, map[string]attr.Value{"k": types.StringValue("v")}), + Taints: []Taint{ + { + Effect: types.StringValue("effect"), + Key: types.StringValue("key"), + Value: types.StringValue("value"), + }, + }, + CRI: types.StringValue("cri"), + AvailabilityZones: types.ListValueMust(types.StringType, []attr.Value{types.StringValue("z1"), types.StringValue("z2")}), + }, + }, + Maintenance: types.ObjectValueMust(maintenanceTypes, map[string]attr.Value{ + "enable_kubernetes_version_updates": types.BoolValue(true), + "enable_machine_image_version_updates": types.BoolValue(true), + "start": types.StringValue("03:04:05+06:00"), + "end": types.StringValue("13:14:15Z"), + }), + Hibernations: []Hibernation{ + { + Start: types.StringValue("1"), + End: types.StringValue("2"), + Timezone: types.StringValue("CET"), + }, + }, + Extensions: &Extensions{ + Argus: &ArgusExtension{ + Enabled: types.BoolValue(true), + ArgusInstanceId: types.StringValue("aid"), + }, + ACL: &ACL{ + Enabled: types.BoolValue(true), + AllowedCIDRs: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("cidr1"), + }), + }, + }, + KubeConfig: types.StringNull(), + }, + true, + }, + { + "nil_response", + nil, + Cluster{}, + false, + }, + { + "no_resource_id", + &ske.ClusterResponse{}, + Cluster{}, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + state := &Cluster{ + ProjectId: tt.expected.ProjectId, + } + err := mapFields(context.Background(), tt.input, state) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } 
+ if tt.isValid { + diff := cmp.Diff(state, &tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func TestLatestMatchingVersion(t *testing.T) { + tests := []struct { + description string + availableVersions []ske.KubernetesVersion + providedVersion *string + expectedVersionUsed *string + expectedHasDeprecatedVersion bool + isValid bool + }{ + { + "available_version", + []ske.KubernetesVersion{ + { + Version: utils.Ptr("1.20.0"), + State: utils.Ptr(VersionStateSupported), + }, + { + Version: utils.Ptr("1.20.1"), + State: utils.Ptr(VersionStateSupported), + }, + { + Version: utils.Ptr("1.20.2"), + State: utils.Ptr(VersionStateSupported), + }, + { + Version: utils.Ptr("1.19.0"), + State: utils.Ptr(VersionStateSupported), + }, + }, + utils.Ptr("1.20"), + utils.Ptr("1.20.2"), + false, + true, + }, + { + "available_version_no_patch", + []ske.KubernetesVersion{ + { + Version: utils.Ptr("1.20.0"), + State: utils.Ptr(VersionStateSupported), + }, + { + Version: utils.Ptr("1.19.0"), + State: utils.Ptr(VersionStateSupported), + }, + }, + utils.Ptr("1.20"), + utils.Ptr("1.20.0"), + false, + true, + }, + { + "deprecated_version", + []ske.KubernetesVersion{ + { + Version: utils.Ptr("1.20.0"), + State: utils.Ptr(VersionStateSupported), + }, + { + Version: utils.Ptr("1.19.0"), + State: utils.Ptr(VersionStateDeprecated), + }, + }, + utils.Ptr("1.19"), + utils.Ptr("1.19.0"), + true, + true, + }, + { + "deprecated_version_not_selected", + []ske.KubernetesVersion{ + { + Version: utils.Ptr("1.20.0"), + State: utils.Ptr(VersionStateSupported), + }, + { + Version: utils.Ptr("1.19.0"), + State: utils.Ptr(VersionStateDeprecated), + }, + }, + utils.Ptr("1.20"), + utils.Ptr("1.20.0"), + false, + true, + }, + { + "preview_version", + []ske.KubernetesVersion{ + { + Version: utils.Ptr("1.20.0"), + State: utils.Ptr(VersionStatePreview), + }, + { + Version: utils.Ptr("1.19.0"), + State: utils.Ptr(VersionStateSupported), + }, + }, + utils.Ptr("1.20"), + 
utils.Ptr("1.20.0"), + false, + true, + }, + { + "no_matching_available_versions", + []ske.KubernetesVersion{ + { + Version: utils.Ptr("1.20.0"), + State: utils.Ptr(VersionStateSupported), + }, + { + Version: utils.Ptr("1.19.0"), + State: utils.Ptr(VersionStateSupported), + }, + }, + utils.Ptr("1.21"), + nil, + false, + false, + }, + { + "no_available_version", + []ske.KubernetesVersion{}, + utils.Ptr("1.20"), + nil, + false, + false, + }, + { + "nil_available_version", + nil, + utils.Ptr("1.20"), + nil, + false, + false, + }, + { + "empty_provided_version", + []ske.KubernetesVersion{ + { + Version: utils.Ptr("1.20.0"), + State: utils.Ptr(VersionStateSupported), + }, + { + Version: utils.Ptr("1.19.0"), + State: utils.Ptr(VersionStateSupported), + }, + }, + utils.Ptr(""), + nil, + false, + false, + }, + { + "nil_provided_version", + []ske.KubernetesVersion{ + { + Version: utils.Ptr("1.20.0"), + State: utils.Ptr(VersionStateSupported), + }, + { + Version: utils.Ptr("1.19.0"), + State: utils.Ptr(VersionStateSupported), + }, + }, + nil, + nil, + false, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + versionUsed, hasDeprecatedVersion, err := latestMatchingVersion(tt.availableVersions, tt.providedVersion) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + if *versionUsed != *tt.expectedVersionUsed { + t.Fatalf("Used version does not match: expecting %s, got %s", *tt.expectedVersionUsed, *versionUsed) + } + if tt.expectedHasDeprecatedVersion != hasDeprecatedVersion { + t.Fatalf("hasDeprecatedVersion flag is wrong: expecting %t, got %t", tt.expectedHasDeprecatedVersion, hasDeprecatedVersion) + } + } + }) + } +} +func TestGetMaintenanceTimes(t *testing.T) { + tests := []struct { + description string + startAPI string + startTF *string + endAPI string + endTF *string + isValid bool + startExpected string + 
endExpected string + }{ + { + description: "base", + startAPI: "0001-02-03T04:05:06+07:08", + endAPI: "0011-12-13T14:15:16+17:18", + isValid: true, + startExpected: "04:05:06+07:08", + endExpected: "14:15:16+17:18", + }, + { + description: "base_utc", + startAPI: "0001-02-03T04:05:06Z", + endAPI: "0011-12-13T14:15:16Z", + isValid: true, + startExpected: "04:05:06Z", + endExpected: "14:15:16Z", + }, + { + description: "api_wrong_format_1", + startAPI: "T04:05:06+07:08", + endAPI: "0011-12-13T14:15:16+17:18", + isValid: false, + }, + { + description: "api_wrong_format_2", + startAPI: "0001-02-03T04:05:06+07:08", + endAPI: "14:15:16+17:18", + isValid: false, + }, + { + description: "tf_state_filled_in_1", + startAPI: "0001-02-03T04:05:06+07:08", + startTF: utils.Ptr("04:05:06+07:08"), + endAPI: "0011-12-13T14:15:16+17:18", + endTF: utils.Ptr("14:15:16+17:18"), + isValid: true, + startExpected: "04:05:06+07:08", + endExpected: "14:15:16+17:18", + }, + { + description: "tf_state_filled_in_2", + startAPI: "0001-02-03T04:05:06Z", + startTF: utils.Ptr("04:05:06+00:00"), + endAPI: "0011-12-13T14:15:16Z", + endTF: utils.Ptr("14:15:16+00:00"), + isValid: true, + startExpected: "04:05:06+00:00", + endExpected: "14:15:16+00:00", + }, + { + description: "tf_state_filled_in_3", + startAPI: "0001-02-03T04:05:06+00:00", + startTF: utils.Ptr("04:05:06Z"), + endAPI: "0011-12-13T14:15:16+00:00", + endTF: utils.Ptr("14:15:16Z"), + isValid: true, + startExpected: "04:05:06Z", + endExpected: "14:15:16Z", + }, + { + description: "tf_state_doesnt_match_1", + startAPI: "0001-02-03T04:05:06+07:08", + startTF: utils.Ptr("00:00:00+07:08"), + endAPI: "0011-12-13T14:15:16+17:18", + endTF: utils.Ptr("14:15:16+17:18"), + isValid: false, + }, + { + description: "tf_state_doesnt_match_2", + startAPI: "0001-02-03T04:05:06+07:08", + startTF: utils.Ptr("04:05:06+07:08"), + endAPI: "0011-12-13T14:15:16+17:18", + endTF: utils.Ptr("00:00:00+17:18"), + isValid: false, + }, + { + description: 
"tf_state_doesnt_match_3", + startAPI: "0001-02-03T04:05:06+07:08", + startTF: utils.Ptr("04:05:06Z"), + endAPI: "0011-12-13T14:15:16+17:18", + endTF: utils.Ptr("14:15:16+17:18"), + isValid: false, + }, + { + description: "tf_state_doesnt_match_4", + startAPI: "0001-02-03T04:05:06+07:08", + startTF: utils.Ptr("04:05:06+07:08"), + endAPI: "0011-12-13T14:15:16+17:18", + endTF: utils.Ptr("14:15:16Z"), + isValid: false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + apiResponse := &ske.ClusterResponse{ + Maintenance: &ske.Maintenance{ + TimeWindow: &ske.TimeWindow{ + Start: utils.Ptr(tt.startAPI), + End: utils.Ptr(tt.endAPI), + }, + }, + } + + maintenanceValues := map[string]attr.Value{ + "enable_kubernetes_version_updates": types.BoolNull(), + "enable_machine_image_version_updates": types.BoolNull(), + "start": types.StringPointerValue(tt.startTF), + "end": types.StringPointerValue(tt.endTF), + } + maintenanceObject, diags := types.ObjectValue(maintenanceTypes, maintenanceValues) + if diags.HasError() { + t.Fatalf("failed to create flavor: %v", core.DiagsToError(diags)) + } + tfState := &Cluster{ + Maintenance: maintenanceObject, + } + + start, end, err := getMaintenanceTimes(context.Background(), apiResponse, tfState) + + if err != nil { + if tt.isValid { + t.Errorf("getMaintenanceTimes failed on valid input: %v", err) + } + return + } + if !tt.isValid { + t.Fatalf("getMaintenanceTimes didn't fail on invalid input") + } + if tt.startExpected != start { + t.Errorf("extected start '%s', got '%s'", tt.startExpected, start) + } + if tt.endExpected != end { + t.Errorf("extected end '%s', got '%s'", tt.endExpected, end) + } + }) + } +} + +func TestCheckAllowPrivilegedContainers(t *testing.T) { + tests := []struct { + description string + kubernetesVersion *string + allowPrivilegeContainers *bool + isValid bool + }{ + { + description: "null_version_1", + kubernetesVersion: nil, + allowPrivilegeContainers: nil, + isValid: false, + }, + { 
+ description: "null_version_2", + kubernetesVersion: nil, + allowPrivilegeContainers: utils.Ptr(false), + isValid: false, + }, + { + description: "flag_required_1", + kubernetesVersion: utils.Ptr("0.999.999"), + allowPrivilegeContainers: nil, + isValid: false, + }, + { + description: "flag_required_2", + kubernetesVersion: utils.Ptr("0.999.999"), + allowPrivilegeContainers: utils.Ptr(false), + isValid: true, + }, + { + description: "flag_required_3", + kubernetesVersion: utils.Ptr("1.24.999"), + allowPrivilegeContainers: nil, + isValid: false, + }, + { + description: "flag_required_4", + kubernetesVersion: utils.Ptr("1.24.999"), + allowPrivilegeContainers: utils.Ptr(false), + isValid: true, + }, + { + description: "flag_deprecated_1", + kubernetesVersion: utils.Ptr("1.25"), + allowPrivilegeContainers: nil, + isValid: true, + }, + { + description: "flag_deprecated_2", + kubernetesVersion: utils.Ptr("1.25"), + allowPrivilegeContainers: utils.Ptr(false), + isValid: false, + }, + { + description: "flag_deprecated_3", + kubernetesVersion: utils.Ptr("2.0.0"), + allowPrivilegeContainers: nil, + isValid: true, + }, + { + description: "flag_deprecated_4", + kubernetesVersion: utils.Ptr("2.0.0"), + allowPrivilegeContainers: utils.Ptr(false), + isValid: false, + }, + } + + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + diags := checkAllowPrivilegedContainers( + types.BoolPointerValue(tt.allowPrivilegeContainers), + types.StringPointerValue(tt.kubernetesVersion), + ) + + if tt.isValid && diags.HasError() { + t.Errorf("checkAllowPrivilegedContainers failed on valid input: %v", core.DiagsToError(diags)) + } + if !tt.isValid && !diags.HasError() { + t.Errorf("checkAllowPrivilegedContainers didn't fail on valid input") + } + }) + } +} diff --git a/stackit/services/ske/project/datasource.go b/stackit/services/ske/project/datasource.go new file mode 100644 index 00000000..0723d5ce --- /dev/null +++ b/stackit/services/ske/project/datasource.go @@ -0,0 
+1,115 @@ +package ske + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/ske" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &projectDataSource{} +) + +// NewProjectDataSource is a helper function to simplify the provider implementation. +func NewProjectDataSource() datasource.DataSource { + return &projectDataSource{} +} + +// projectDataSource is the data source implementation. +type projectDataSource struct { + client *ske.APIClient +} + +// Metadata returns the resource type name. +func (r *projectDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_ske_project" +} + +// Configure adds the provider configured client to the resource. +func (r *projectDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Data Source Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. 
Please report this issue to the provider developers.", req.ProviderData)) + return + } + + var apiClient *ske.APIClient + var err error + if providerData.SKECustomEndpoint != "" { + apiClient, err = ske.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.SKECustomEndpoint), + ) + } else { + apiClient, err = ske.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + + tflog.Info(ctx, "SKE client configured") + r.client = apiClient +} + +// Schema defines the schema for the resource. +func (r *projectDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: "Terraform's internal resource ID.", + Computed: true, + }, + "project_id": schema.StringAttribute{ + Description: "STACKIT Project ID in which the kubernetes project is enabled.", + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + }, + } +} + +// Read refreshes the Terraform state with the latest data. +func (r *projectDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var state Model + diags := req.Config.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + projectId := state.ProjectId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + _, err := r.client.GetProject(ctx, projectId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Unable to read project", err.Error()) + return + } + state.Id = types.StringValue(projectId) + state.ProjectId = types.StringValue(projectId) + diags = resp.State.Set(ctx, state) + resp.Diagnostics.Append(diags...) + tflog.Info(ctx, "SKE project read") +} diff --git a/stackit/services/ske/project/resource.go b/stackit/services/ske/project/resource.go new file mode 100644 index 00000000..90d975e2 --- /dev/null +++ b/stackit/services/ske/project/resource.go @@ -0,0 +1,210 @@ +package ske + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/ske" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/validate" +) + +// Ensure the implementation satisfies the expected interfaces. 
+var ( + _ resource.Resource = &projectResource{} + _ resource.ResourceWithConfigure = &projectResource{} + _ resource.ResourceWithImportState = &projectResource{} +) + +type Model struct { + Id types.String `tfsdk:"id"` + ProjectId types.String `tfsdk:"project_id"` +} + +// NewProjectResource is a helper function to simplify the provider implementation. +func NewProjectResource() resource.Resource { + return &projectResource{} +} + +// projectResource is the resource implementation. +type projectResource struct { + client *ske.APIClient +} + +// Metadata returns the resource type name. +func (r *projectResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_ske_project" +} + +// Configure adds the provider configured client to the resource. +func (r *projectResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + resp.Diagnostics.AddError("Unexpected Resource Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. 
Please report this issue to the provider developers.", req.ProviderData)) + return + } + + var apiClient *ske.APIClient + var err error + if providerData.SKECustomEndpoint != "" { + apiClient, err = ske.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.SKECustomEndpoint), + ) + } else { + apiClient, err = ske.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + resp.Diagnostics.AddError("Could not Configure API Client", err.Error()) + return + } + + tflog.Info(ctx, "SKE project client configured") + r.client = apiClient +} + +// Schema returns the Terraform schema structure +func (r *projectResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: "Terraform's internal resource ID.", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "project_id": schema.StringAttribute{ + Description: "STACKIT Project ID in which the kubernetes project is enabled.", + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + } +} + +// Create creates the resource and sets the initial Terraform state. +func (r *projectResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + resp.Diagnostics.Append(req.Plan.Get(ctx, &model)...) 
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ projectId := model.ProjectId.ValueString()
+ _, err := r.client.CreateProject(ctx, projectId).Execute()
+ if err != nil {
+ resp.Diagnostics.AddError("failed during SKE project creation", err.Error())
+ return
+ }
+
+ model.Id = types.StringValue(projectId)
+ wr, err := ske.CreateProjectWaitHandler(ctx, r.client, projectId).SetTimeout(5 * time.Minute).WaitWithContext(ctx)
+ if err != nil {
+ resp.Diagnostics.AddError("Error creating project", fmt.Sprintf("Project creation waiting: %v", err))
+ return
+ }
+ got, ok := wr.(*ske.ProjectResponse)
+ if !ok {
+ resp.Diagnostics.AddError("Error creating project", fmt.Sprintf("Wait result conversion, got %+v", got))
+ return
+ }
+ diags := resp.State.Set(ctx, model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ tflog.Info(ctx, "SKE project created")
+}
+
+// Read refreshes the Terraform state with the latest data.
+func (r *projectResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform
+ var model Model
+ diags := req.State.Get(ctx, &model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ projectId := model.ProjectId.ValueString()
+ // read
+ _, err := r.client.GetProject(ctx, projectId).Execute()
+ if err != nil {
+ resp.Diagnostics.AddError("failed during SKE project read", err.Error())
+ return
+ }
+ model.Id = types.StringValue(projectId)
+ model.ProjectId = types.StringValue(projectId)
+ diags = resp.State.Set(ctx, model)
+ resp.Diagnostics.Append(diags...)
+ tflog.Info(ctx, "SKE project read")
+}
+
+// Update updates the resource and sets the updated Terraform state on success.
+func (r *projectResource) Update(_ context.Context, _ resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform
+ // Update shouldn't be called
+ resp.Diagnostics.AddError("Error updating project", "SKE projects can't be updated")
+}
+
+// Delete deletes the resource and removes the Terraform state on success.
+func (r *projectResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform
+ var model Model
+ resp.Diagnostics.Append(req.State.Get(ctx, &model)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ projectId := model.ProjectId.ValueString()
+ ctx = tflog.SetField(ctx, "project_id", projectId)
+
+ c := r.client
+ _, err := c.DeleteProject(ctx, projectId).Execute()
+ if err != nil {
+ resp.Diagnostics.AddError("failed deleting project", err.Error())
+ return
+ }
+ _, err = ske.DeleteProjectWaitHandler(ctx, r.client, projectId).SetTimeout(10 * time.Minute).WaitWithContext(ctx)
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting project", fmt.Sprintf("Project deletion waiting: %v", err))
+ return
+ }
+ tflog.Info(ctx, "SKE project deleted")
+}
+
+// ImportState imports a resource into the Terraform state on success.
+// The expected format of the resource import identifier is: project_id
+func (r *projectResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { // nolint:gocritic // function signature required by Terraform
+ idParts := strings.Split(req.ID, core.Separator)
+ if len(idParts) != 1 || idParts[0] == "" {
+ resp.Diagnostics.AddError(
+ "Unexpected Import Identifier",
+ fmt.Sprintf("Expected import identifier with format: [project_id] Got: %q", req.ID),
+ )
+ return
+ }
+ resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...)
+ tflog.Info(ctx, "SKE project state imported") +} diff --git a/stackit/services/ske/ske_acc_test.go b/stackit/services/ske/ske_acc_test.go new file mode 100644 index 00000000..d71e9760 --- /dev/null +++ b/stackit/services/ske/ske_acc_test.go @@ -0,0 +1,541 @@ +package ske_test + +import ( + "context" + "fmt" + "net/http" + "testing" + "time" + + "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/ske" + "github.com/stackitcloud/terraform-provider-stackit/stackit/testutil" +) + +var projectResource = map[string]string{ + "project_id": testutil.ProjectId, +} + +var clusterResource = map[string]string{ + "project_id": testutil.ProjectId, + "name": fmt.Sprintf("cl-%s", acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum)), + "name_min": fmt.Sprintf("cl-min-%s", acctest.RandStringFromCharSet(3, acctest.CharSetAlphaNum)), + "kubernetes_version": "1.24", + "kubernetes_version_used": "1.24.16", + "kubernetes_version_new": "1.25", + "kubernetes_version_used_new": "1.25.12", + "allowPrivilegedContainers": "true", + "nodepool_name": "np-acc-test", + "nodepool_name_min": "np-acc-min-test", + "nodepool_machine_type": "b1.2", + "nodepool_os_version": "3510.2.5", + "nodepool_os_version_min": "3510.2.5", + "nodepool_os_name": "flatcar", + "nodepool_minimum": "2", + "nodepool_maximum": "3", + "nodepool_max_surge": "1", + "nodepool_max_unavailable": "1", + "nodepool_volume_size": "20", + "nodepool_volume_type": "storage_premium_perf0", + "nodepool_zone": "eu01-3", + "nodepool_cri": "containerd", + "nodepool_label_key": "key", + "nodepool_label_value": "value", + "nodepool_taints_effect": "PreferNoSchedule", + "nodepool_taints_key": "tkey", + "nodepool_taints_value": "tvalue", + 
"extensions_acl_enabled": "true", + "extensions_acl_cidrs": "192.168.0.0/24", + "extensions_argus_enabled": "false", + "extensions_argus_instance_id": "aaaaaaaa-1111-2222-3333-444444444444", // A not-existing Argus ID let the creation time-out. + "hibernations_start": "0 16 * * *", + "hibernations_end": "0 18 * * *", + "hibernations_timezone": "Europe/Berlin", + "maintenance_enable_kubernetes_version_updates": "true", + "maintenance_enable_machine_image_version_updates": "true", + "maintenance_start": "01:23:45Z", + "maintenance_end": "05:00:00+02:00", +} + +func getConfig(version string, apc *bool, maintenanceEnd *string) string { + apcConfig := "" + if apc != nil { + if *apc { + apcConfig = "allow_privileged_containers = true" + } else { + apcConfig = "allow_privileged_containers = false" + } + } + maintenanceEndTF := clusterResource["maintenance_end"] + if maintenanceEnd != nil { + maintenanceEndTF = *maintenanceEnd + } + return fmt.Sprintf(` + %s + + resource "stackit_ske_project" "project" { + project_id = "%s" + } + + resource "stackit_ske_cluster" "cluster" { + project_id = stackit_ske_project.project.project_id + name = "%s" + kubernetes_version = "%s" + %s + node_pools = [{ + name = "%s" + machine_type = "%s" + minimum = "%s" + maximum = "%s" + max_surge = "%s" + max_unavailable = "%s" + os_name = "%s" + os_version = "%s" + volume_size = "%s" + volume_type = "%s" + cri = "%s" + availability_zones = ["%s"] + labels = { + %s = "%s" + } + taints = [{ + effect = "%s" + key = "%s" + value = "%s" + }] + }] + extensions = { + acl = { + enabled = %s + allowed_cidrs = ["%s"] + } + argus = { + enabled = %s + argus_instance_id = "%s" + } + } + hibernations = [{ + start = "%s" + end = "%s" + timezone = "%s" + }] + maintenance = { + enable_kubernetes_version_updates = %s + enable_machine_image_version_updates = %s + start = "%s" + end = "%s" + } + } + + resource "stackit_ske_cluster" "cluster_min" { + project_id = stackit_ske_project.project.project_id + name = "%s" + 
kubernetes_version = "%s" + node_pools = [{ + name = "%s" + machine_type = "%s" + os_version = "%s" + minimum = "%s" + maximum = "%s" + availability_zones = ["%s"] + }] + } + `, + testutil.SKEProviderConfig(), + projectResource["project_id"], + clusterResource["name"], + version, + apcConfig, + clusterResource["nodepool_name"], + clusterResource["nodepool_machine_type"], + clusterResource["nodepool_minimum"], + clusterResource["nodepool_maximum"], + clusterResource["nodepool_max_surge"], + clusterResource["nodepool_max_unavailable"], + clusterResource["nodepool_os_name"], + clusterResource["nodepool_os_version"], + clusterResource["nodepool_volume_size"], + clusterResource["nodepool_volume_type"], + clusterResource["nodepool_cri"], + clusterResource["nodepool_zone"], + clusterResource["nodepool_label_key"], + clusterResource["nodepool_label_value"], + clusterResource["nodepool_taints_effect"], + clusterResource["nodepool_taints_key"], + clusterResource["nodepool_taints_value"], + clusterResource["extensions_acl_enabled"], + clusterResource["extensions_acl_cidrs"], + clusterResource["extensions_argus_enabled"], + clusterResource["extensions_argus_instance_id"], + clusterResource["hibernations_start"], + clusterResource["hibernations_end"], + clusterResource["hibernations_timezone"], + clusterResource["maintenance_enable_kubernetes_version_updates"], + clusterResource["maintenance_enable_machine_image_version_updates"], + clusterResource["maintenance_start"], + maintenanceEndTF, + + // Minimal + clusterResource["name_min"], + clusterResource["kubernetes_version_new"], + clusterResource["nodepool_name_min"], + clusterResource["nodepool_machine_type"], + clusterResource["nodepool_os_version_min"], + clusterResource["nodepool_minimum"], + clusterResource["nodepool_maximum"], + clusterResource["nodepool_zone"], + ) +} + +func TestAccSKE(t *testing.T) { + resource.ParallelTest(t, resource.TestCase{ + ProtoV6ProviderFactories: testutil.TestAccProtoV6ProviderFactories, + 
CheckDestroy: testAccCheckSKEDestroy, + Steps: []resource.TestStep{ + + // 1) Creation + { + Config: getConfig(clusterResource["kubernetes_version"], utils.Ptr(true), nil), + Check: resource.ComposeAggregateTestCheckFunc( + // project data + resource.TestCheckResourceAttr("stackit_ske_project.project", "project_id", projectResource["project_id"]), + // cluster data + resource.TestCheckResourceAttrPair( + "stackit_ske_project.project", "project_id", + "stackit_ske_cluster.cluster", "project_id", + ), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "name", clusterResource["name"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "kubernetes_version", clusterResource["kubernetes_version"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "kubernetes_version_used", clusterResource["kubernetes_version_used"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "allow_privileged_containers", clusterResource["allowPrivilegedContainers"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.name", clusterResource["nodepool_name"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.availability_zones.#", "1"), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.availability_zones.0", clusterResource["nodepool_zone"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.os_name", clusterResource["nodepool_os_name"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.os_version", clusterResource["nodepool_os_version"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.machine_type", clusterResource["nodepool_machine_type"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.minimum", clusterResource["nodepool_minimum"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.maximum", 
clusterResource["nodepool_maximum"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.max_surge", clusterResource["nodepool_max_surge"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.max_unavailable", clusterResource["nodepool_max_unavailable"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.volume_type", clusterResource["nodepool_volume_type"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.volume_size", clusterResource["nodepool_volume_size"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", fmt.Sprintf("node_pools.0.labels.%s", clusterResource["nodepool_label_key"]), clusterResource["nodepool_label_value"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.taints.#", "1"), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.taints.0.effect", clusterResource["nodepool_taints_effect"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.taints.0.key", clusterResource["nodepool_taints_key"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.taints.0.value", clusterResource["nodepool_taints_value"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.cri", clusterResource["nodepool_cri"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "extensions.acl.enabled", clusterResource["extensions_acl_enabled"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "extensions.acl.allowed_cidrs.#", "1"), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "extensions.acl.allowed_cidrs.0", clusterResource["extensions_acl_cidrs"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "extensions.argus.enabled", clusterResource["extensions_argus_enabled"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", 
"extensions.argus.argus_instance_id", clusterResource["extensions_argus_instance_id"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "hibernations.#", "1"), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "hibernations.0.start", clusterResource["hibernations_start"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "hibernations.0.end", clusterResource["hibernations_end"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "hibernations.0.timezone", clusterResource["hibernations_timezone"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.enable_kubernetes_version_updates", clusterResource["maintenance_enable_kubernetes_version_updates"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.enable_machine_image_version_updates", clusterResource["maintenance_enable_machine_image_version_updates"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.start", clusterResource["maintenance_start"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.end", clusterResource["maintenance_end"]), + + resource.TestCheckResourceAttrSet("stackit_ske_cluster.cluster", "kube_config"), + + // Minimal cluster + resource.TestCheckResourceAttrPair( + "stackit_ske_project.project", "project_id", + "stackit_ske_cluster.cluster_min", "project_id", + ), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster_min", "name", clusterResource["name_min"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster_min", "kubernetes_version", clusterResource["kubernetes_version_new"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster_min", "kubernetes_version_used", clusterResource["kubernetes_version_used_new"]), + resource.TestCheckNoResourceAttr("stackit_ske_cluster.cluster_min", "allow_privileged_containers"), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster_min", "node_pools.0.name", 
clusterResource["nodepool_name_min"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster_min", "node_pools.0.availability_zones.#", "1"), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster_min", "node_pools.0.availability_zones.0", clusterResource["nodepool_zone"]), + resource.TestCheckResourceAttrSet("stackit_ske_cluster.cluster_min", "node_pools.0.os_name"), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster_min", "node_pools.0.os_version", clusterResource["nodepool_os_version_min"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster_min", "node_pools.0.machine_type", clusterResource["nodepool_machine_type"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster_min", "node_pools.0.minimum", clusterResource["nodepool_minimum"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster_min", "node_pools.0.maximum", clusterResource["nodepool_maximum"]), + resource.TestCheckResourceAttrSet("stackit_ske_cluster.cluster_min", "node_pools.0.max_surge"), + resource.TestCheckResourceAttrSet("stackit_ske_cluster.cluster_min", "node_pools.0.max_unavailable"), + resource.TestCheckResourceAttrSet("stackit_ske_cluster.cluster_min", "node_pools.0.volume_type"), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.volume_size", clusterResource["nodepool_volume_size"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster_min", "node_pools.0.labels.%", "0"), + resource.TestCheckNoResourceAttr("stackit_ske_cluster.cluster_min", "node_pools.0.taints"), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster_min", "node_pools.0.cri", clusterResource["nodepool_cri"]), + resource.TestCheckNoResourceAttr("stackit_ske_cluster.cluster_min", "extensions"), + resource.TestCheckNoResourceAttr("stackit_ske_cluster.cluster_min", "hibernations"), + resource.TestCheckResourceAttrSet("stackit_ske_cluster.cluster_min", "maintenance.enable_kubernetes_version_updates"), + 
resource.TestCheckResourceAttrSet("stackit_ske_cluster.cluster_min", "maintenance.enable_machine_image_version_updates"), + resource.TestCheckResourceAttrSet("stackit_ske_cluster.cluster_min", "maintenance.start"), + resource.TestCheckResourceAttrSet("stackit_ske_cluster.cluster_min", "maintenance.end"), + resource.TestCheckResourceAttrSet("stackit_ske_cluster.cluster_min", "kube_config"), + ), + }, + // 2) Data source + { + Config: fmt.Sprintf(` + %s + + data "stackit_ske_project" "project" { + project_id = "%s" + depends_on = [stackit_ske_project.project] + } + + data "stackit_ske_cluster" "cluster" { + project_id = "%s" + name = "%s" + depends_on = [stackit_ske_cluster.cluster] + } + + data "stackit_ske_cluster" "cluster_min" { + project_id = "%s" + name = "%s" + depends_on = [stackit_ske_cluster.cluster_min] + } + + `, + getConfig(clusterResource["kubernetes_version"], utils.Ptr(true), nil), + projectResource["project_id"], + clusterResource["project_id"], + clusterResource["name"], + clusterResource["project_id"], + clusterResource["name_min"], + ), + Check: resource.ComposeAggregateTestCheckFunc( + // project data + resource.TestCheckResourceAttr("data.stackit_ske_project.project", "id", projectResource["project_id"]), + + // cluster data + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "id", fmt.Sprintf("%s,%s", + clusterResource["project_id"], + clusterResource["name"], + )), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "project_id", clusterResource["project_id"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "name", clusterResource["name"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "kubernetes_version", clusterResource["kubernetes_version"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "kubernetes_version_used", clusterResource["kubernetes_version_used"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", 
"allow_privileged_containers", clusterResource["allowPrivilegedContainers"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.name", clusterResource["nodepool_name"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.availability_zones.#", "1"), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.availability_zones.0", clusterResource["nodepool_zone"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.os_name", clusterResource["nodepool_os_name"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.os_version", clusterResource["nodepool_os_version"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.machine_type", clusterResource["nodepool_machine_type"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.minimum", clusterResource["nodepool_minimum"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.maximum", clusterResource["nodepool_maximum"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.max_surge", clusterResource["nodepool_max_surge"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.max_unavailable", clusterResource["nodepool_max_unavailable"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.volume_type", clusterResource["nodepool_volume_type"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.volume_size", clusterResource["nodepool_volume_size"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", fmt.Sprintf("node_pools.0.labels.%s", clusterResource["nodepool_label_key"]), clusterResource["nodepool_label_value"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.taints.#", "1"), + 
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.taints.0.effect", clusterResource["nodepool_taints_effect"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.taints.0.key", clusterResource["nodepool_taints_key"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.taints.0.value", clusterResource["nodepool_taints_value"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.cri", clusterResource["nodepool_cri"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "extensions.acl.enabled", clusterResource["extensions_acl_enabled"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "extensions.acl.allowed_cidrs.#", "1"), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "extensions.acl.allowed_cidrs.0", clusterResource["extensions_acl_cidrs"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "hibernations.#", "1"), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "hibernations.0.start", clusterResource["hibernations_start"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "hibernations.0.end", clusterResource["hibernations_end"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "hibernations.0.timezone", clusterResource["hibernations_timezone"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "hibernations.0.end", clusterResource["hibernations_end"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "maintenance.enable_kubernetes_version_updates", clusterResource["maintenance_enable_kubernetes_version_updates"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "maintenance.enable_machine_image_version_updates", clusterResource["maintenance_enable_machine_image_version_updates"]), + 
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "maintenance.start", clusterResource["maintenance_start"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "maintenance.end", clusterResource["maintenance_end"]), + + resource.TestCheckResourceAttrSet("data.stackit_ske_cluster.cluster", "kube_config"), + + // Minimal cluster + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster_min", "name", clusterResource["name_min"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster_min", "kubernetes_version", clusterResource["kubernetes_version_new"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster_min", "kubernetes_version_used", clusterResource["kubernetes_version_used_new"]), + resource.TestCheckNoResourceAttr("data.stackit_ske_cluster.cluster_min", "allow_privileged_containers"), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster_min", "node_pools.0.name", clusterResource["nodepool_name_min"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster_min", "node_pools.0.availability_zones.#", "1"), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster_min", "node_pools.0.availability_zones.0", clusterResource["nodepool_zone"]), + resource.TestCheckResourceAttrSet("data.stackit_ske_cluster.cluster_min", "node_pools.0.os_name"), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster_min", "node_pools.0.os_version", clusterResource["nodepool_os_version_min"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster_min", "node_pools.0.machine_type", clusterResource["nodepool_machine_type"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster_min", "node_pools.0.minimum", clusterResource["nodepool_minimum"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster_min", "node_pools.0.maximum", clusterResource["nodepool_maximum"]), + 
resource.TestCheckResourceAttrSet("data.stackit_ske_cluster.cluster_min", "node_pools.0.max_surge"), + resource.TestCheckResourceAttrSet("data.stackit_ske_cluster.cluster_min", "node_pools.0.max_unavailable"), + resource.TestCheckResourceAttrSet("data.stackit_ske_cluster.cluster_min", "node_pools.0.volume_type"), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.volume_size", clusterResource["nodepool_volume_size"]), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster_min", "node_pools.0.labels.%", "0"), + resource.TestCheckNoResourceAttr("data.stackit_ske_cluster.cluster_min", "node_pools.0.taints"), + resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster_min", "node_pools.0.cri", clusterResource["nodepool_cri"]), + resource.TestCheckNoResourceAttr("data.stackit_ske_cluster.cluster_min", "extensions"), + resource.TestCheckNoResourceAttr("data.stackit_ske_cluster.cluster_min", "hibernations"), + resource.TestCheckResourceAttrSet("data.stackit_ske_cluster.cluster_min", "maintenance.enable_kubernetes_version_updates"), + resource.TestCheckResourceAttrSet("data.stackit_ske_cluster.cluster_min", "maintenance.enable_machine_image_version_updates"), + resource.TestCheckResourceAttrSet("data.stackit_ske_cluster.cluster_min", "maintenance.start"), + resource.TestCheckResourceAttrSet("data.stackit_ske_cluster.cluster_min", "maintenance.end"), + resource.TestCheckResourceAttrSet("data.stackit_ske_cluster.cluster_min", "kube_config"), + ), + }, + // 3) Import project + { + ResourceName: "stackit_ske_project.project", + ImportStateIdFunc: func(s *terraform.State) (string, error) { + _, ok := s.RootModule().Resources["stackit_ske_project.project"] + if !ok { + return "", fmt.Errorf("couldn't find resource stackit_ske_project.project") + } + return testutil.ProjectId, nil + }, + ImportState: true, + ImportStateVerify: true, + }, + // 4) Import cluster + { + ResourceName: "stackit_ske_cluster.cluster", + 
ImportStateIdFunc: func(s *terraform.State) (string, error) { + r, ok := s.RootModule().Resources["stackit_ske_cluster.cluster"] + if !ok { + return "", fmt.Errorf("couldn't find resource stackit_ske_cluster.cluster") + } + _, ok = r.Primary.Attributes["project_id"] + if !ok { + return "", fmt.Errorf("couldn't find attribute project_id") + } + name, ok := r.Primary.Attributes["name"] + if !ok { + return "", fmt.Errorf("couldn't find attribute name") + } + return fmt.Sprintf("%s,%s", testutil.ProjectId, name), nil + }, + ImportState: true, + ImportStateVerify: true, + // The fields are not provided in the SKE API when disabled, although set actively. + ImportStateVerifyIgnore: []string{"kube_config", "extensions.argus.%", "extensions.argus.argus_instance_id", "extensions.argus.enabled", "extensions.acl.enabled", "extensions.acl.allowed_cidrs", "extensions.acl.allowed_cidrs.#", "extensions.acl.%"}, + }, + // 5) Import minimal cluster + { + ResourceName: "stackit_ske_cluster.cluster_min", + ImportStateIdFunc: func(s *terraform.State) (string, error) { + r, ok := s.RootModule().Resources["stackit_ske_cluster.cluster_min"] + if !ok { + return "", fmt.Errorf("couldn't find resource stackit_ske_cluster.cluster_min") + } + _, ok = r.Primary.Attributes["project_id"] + if !ok { + return "", fmt.Errorf("couldn't find attribute project_id") + } + name, ok := r.Primary.Attributes["name"] + if !ok { + return "", fmt.Errorf("couldn't find attribute name") + } + return fmt.Sprintf("%s,%s", testutil.ProjectId, name), nil + }, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"kube_config"}, + }, + // 6) Update kubernetes version and maximum + { + Config: getConfig("1.25.12", nil, utils.Ptr("03:03:03+00:00")), + Check: resource.ComposeAggregateTestCheckFunc( + // cluster data + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "project_id", clusterResource["project_id"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", 
"name", clusterResource["name"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "kubernetes_version", "1.25"), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "kubernetes_version_used", "1.25.12"), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.name", clusterResource["nodepool_name"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.availability_zones.#", "1"), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.availability_zones.0", clusterResource["nodepool_zone"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.os_name", clusterResource["nodepool_os_name"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.os_version", clusterResource["nodepool_os_version"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.machine_type", clusterResource["nodepool_machine_type"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.minimum", clusterResource["nodepool_minimum"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.maximum", clusterResource["nodepool_maximum"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.max_surge", clusterResource["nodepool_max_surge"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.max_unavailable", clusterResource["nodepool_max_unavailable"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.volume_type", clusterResource["nodepool_volume_type"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.volume_size", clusterResource["nodepool_volume_size"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", fmt.Sprintf("node_pools.0.labels.%s", clusterResource["nodepool_label_key"]), clusterResource["nodepool_label_value"]), + 
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.taints.#", "1"), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.taints.0.effect", clusterResource["nodepool_taints_effect"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.taints.0.key", clusterResource["nodepool_taints_key"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.taints.0.value", clusterResource["nodepool_taints_value"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.cri", clusterResource["nodepool_cri"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "extensions.acl.enabled", clusterResource["extensions_acl_enabled"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "extensions.acl.allowed_cidrs.#", "1"), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "extensions.acl.allowed_cidrs.0", clusterResource["extensions_acl_cidrs"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "extensions.argus.enabled", clusterResource["extensions_argus_enabled"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "extensions.argus.argus_instance_id", clusterResource["extensions_argus_instance_id"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "hibernations.#", "1"), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "hibernations.0.start", clusterResource["hibernations_start"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "hibernations.0.end", clusterResource["hibernations_end"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "hibernations.0.timezone", clusterResource["hibernations_timezone"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.enable_kubernetes_version_updates", clusterResource["maintenance_enable_kubernetes_version_updates"]), + 
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.enable_machine_image_version_updates", clusterResource["maintenance_enable_machine_image_version_updates"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.start", clusterResource["maintenance_start"]), + resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.end", "03:03:03+00:00"), + + resource.TestCheckResourceAttrSet("stackit_ske_cluster.cluster", "kube_config"), + ), + }, + // Deletion is done by the framework implicitly + }, + }) +} + +func testAccCheckSKEDestroy(s *terraform.State) error { + ctx := context.Background() + var client *ske.APIClient + var err error + if testutil.SKECustomEndpoint == "" { + client, err = ske.NewAPIClient() + } else { + client, err = ske.NewAPIClient( + config.WithEndpoint(testutil.SKECustomEndpoint), + ) + } + if err != nil { + return fmt.Errorf("creating client: %w", err) + } + + projectsToDestroy := []string{} + for _, rs := range s.RootModule().Resources { + if rs.Type != "stackit_ske_project" { + continue + } + projectsToDestroy = append(projectsToDestroy, rs.Primary.ID) + } + for _, projectId := range projectsToDestroy { + _, err := client.GetProject(ctx, projectId).Execute() + if err != nil { + oapiErr, ok := err.(*ske.GenericOpenAPIError) //nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped + if !ok { + return fmt.Errorf("could not convert error to GenericOpenApiError in acc test destruction, %w", err) + } + if oapiErr.StatusCode() == http.StatusNotFound || oapiErr.StatusCode() == http.StatusForbidden { + // Already gone + continue + } + return fmt.Errorf("getting project: %w", err) + } + + _, err = client.DeleteProjectExecute(ctx, projectId) + if err != nil { + return fmt.Errorf("destroying project %s during CheckDestroy: %w", projectId, err) + } + _, err = ske.DeleteProjectWaitHandler(ctx, client, projectId).SetTimeout(15 * 
time.Minute).WaitWithContext(ctx) + if err != nil { + return fmt.Errorf("destroying project %s during CheckDestroy: waiting for deletion %w", projectId, err) + } + } + return nil +} diff --git a/stackit/testutil/sdk_credentials_invalid.json b/stackit/testutil/sdk_credentials_invalid.json new file mode 100644 index 00000000..db8f7a0c --- /dev/null +++ b/stackit/testutil/sdk_credentials_invalid.json @@ -0,0 +1 @@ +"not json" \ No newline at end of file diff --git a/stackit/testutil/sdk_credentials_valid.json b/stackit/testutil/sdk_credentials_valid.json new file mode 100644 index 00000000..7c5aa2c0 --- /dev/null +++ b/stackit/testutil/sdk_credentials_valid.json @@ -0,0 +1,3 @@ +{ + "STACKIT_SERVICE_ACCOUNT_TOKEN": "foo_token" +} \ No newline at end of file diff --git a/stackit/testutil/testutil.go b/stackit/testutil/testutil.go new file mode 100644 index 00000000..4033b853 --- /dev/null +++ b/stackit/testutil/testutil.go @@ -0,0 +1,268 @@ +package testutil + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-framework/providerserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + + "github.com/stackitcloud/terraform-provider-stackit/stackit" +) + +const ( + // Default location of credentials JSON + credentialsFilePath = ".stackit/credentials.json" //nolint:gosec // linter false positive +) + +var ( + // TestAccProtoV6ProviderFactories is used to instantiate a provider during + // acceptance testing. The factory function will be invoked for every Terraform + // CLI command executed to create a provider server to which the CLI can + // reattach. 
+ TestAccProtoV6ProviderFactories = map[string]func() (tfprotov6.ProviderServer, error){ + "stackit": providerserver.NewProtocol6WithError(stackit.New("test-version")()), + } + + // ProjectId is the id of project used for tests + ProjectId = os.Getenv("TF_ACC_PROJECT_ID") + // TestProjectParentContainerID is the container id of the organization under which projects are created as part of the resource-manager acceptance tests + TestProjectParentContainerID = os.Getenv("TF_ACC_TEST_PROJECT_PARENT_CONTAINER_ID") + // TestProjectServiceAccountEmail is the e-mail of a service account with admin permissions on the organization under which projects are created as part of the resource-manager acceptance tests + TestProjectServiceAccountEmail = os.Getenv("TF_ACC_TEST_PROJECT_SERVICE_ACCOUNT_EMAIL") + + ArgusCustomEndpoint = os.Getenv("TF_ACC_ARGUS_CUSTOM_ENDPOINT") + DnsCustomEndpoint = os.Getenv("TF_ACC_DNS_CUSTOM_ENDPOINT") + LogMeCustomEndpoint = os.Getenv("TF_ACC_LOGME_CUSTOM_ENDPOINT") + MariaDBCustomEndpoint = os.Getenv("TF_ACC_MARIADB_CUSTOM_ENDPOINT") + OpenSearchCustomEndpoint = os.Getenv("TF_ACC_OPENSEARCH_CUSTOM_ENDPOINT") + PostgreSQLCustomEndpoint = os.Getenv("TF_ACC_POSTGRESQL_CUSTOM_ENDPOINT") + PostgresFlexCustomEndpoint = os.Getenv("TF_ACC_POSTGRESFLEX_CUSTOM_ENDPOINT") + RabbitMQCustomEndpoint = os.Getenv("TF_ACC_RABBITMQ_CUSTOM_ENDPOINT") + RedisCustomEndpoint = os.Getenv("TF_ACC_REDIS_CUSTOM_ENDPOINT") + ResourceManagerCustomEndpoint = os.Getenv("TF_ACC_RESOURCEMANAGER_CUSTOM_ENDPOINT") + SKECustomEndpoint = os.Getenv("TF_ACC_SKE_CUSTOM_ENDPOINT") +) + +// Provider config helper functions + +func ArgusProviderConfig() string { + if ArgusCustomEndpoint == "" { + return `provider "stackit" {}` + } + return fmt.Sprintf(` + provider "stackit" { + argus_custom_endpoint = "%s" + }`, + ArgusCustomEndpoint, + ) +} + +func DnsProviderConfig() string { + if DnsCustomEndpoint == "" { + return `provider "stackit" {}` + } + return fmt.Sprintf(` + provider "stackit" { 
+ dns_custom_endpoint = "%s" + }`, + DnsCustomEndpoint, + ) +} + +func LogMeProviderConfig() string { + if LogMeCustomEndpoint == "" { + return ` + provider "stackit" { + region = "eu01" + }` + } + return fmt.Sprintf(` + provider "stackit" { + logme_custom_endpoint = "%s" + }`, + LogMeCustomEndpoint, + ) +} + +func MariaDBProviderConfig() string { + if MariaDBCustomEndpoint == "" { + return ` + provider "stackit" { + region = "eu01" + }` + } + return fmt.Sprintf(` + provider "stackit" { + mariadb_custom_endpoint = "%s" + }`, + MariaDBCustomEndpoint, + ) +} + +func OpenSearchProviderConfig() string { + if OpenSearchCustomEndpoint == "" { + return ` + provider "stackit" { + region = "eu01" + }` + } + return fmt.Sprintf(` + provider "stackit" { + opensearch_custom_endpoint = "%s" + }`, + OpenSearchCustomEndpoint, + ) +} + +func PostgreSQLProviderConfig() string { + if PostgreSQLCustomEndpoint == "" { + return ` + provider "stackit" { + region = "eu01" + }` + } + return fmt.Sprintf(` + provider "stackit" { + postgresql_custom_endpoint = "%s" + }`, + PostgreSQLCustomEndpoint, + ) +} + +func PostgresFlexProviderConfig() string { + if PostgresFlexCustomEndpoint == "" { + return ` + provider "stackit" { + region = "eu01" + }` + } + return fmt.Sprintf(` + provider "stackit" { + postgresflex_custom_endpoint = "%s" + }`, + PostgresFlexCustomEndpoint, + ) +} + +func RabbitMQProviderConfig() string { + if RabbitMQCustomEndpoint == "" { + return ` + provider "stackit" { + region = "eu01" + }` + } + return fmt.Sprintf(` + provider "stackit" { + rabbitmq_custom_endpoint = "%s" + }`, + RabbitMQCustomEndpoint, + ) +} + +func RedisProviderConfig() string { + if RedisCustomEndpoint == "" { + return ` + provider "stackit" { + region = "eu01" + }` + } + return fmt.Sprintf(` + provider "stackit" { + redis_custom_endpoint = "%s" + }`, + RedisCustomEndpoint, + ) +} + +func ResourceManagerProviderConfig() string { + token := getTestProjectServiceAccountToken("") + if 
ResourceManagerCustomEndpoint == "" { + return fmt.Sprintf(` + provider "stackit" { + service_account_email = "%s" + service_account_token = "%s" + }`, + TestProjectServiceAccountEmail, + token, + ) + } + return fmt.Sprintf(` + provider "stackit" { + resourcemanager_custom_endpoint = "%s" + service_account_email = "%s" + service_account_token = "%s" + }`, + ResourceManagerCustomEndpoint, + TestProjectServiceAccountEmail, + token, + ) +} + +func SKEProviderConfig() string { + if SKECustomEndpoint == "" { + return ` + provider "stackit" { + region = "eu01" + }` + } + return fmt.Sprintf(` + provider "stackit" { + ske_custom_endpoint = "%s" + }`, + SKECustomEndpoint, + ) +} + +func ResourceNameWithDateTime(name string) string { + dateTime := time.Now().Format(time.RFC3339) + // Remove timezone to have a smaller datetime + dateTimeTrimmed, _, _ := strings.Cut(dateTime, "+") + return fmt.Sprintf("tf-acc-%s-%s", name, dateTimeTrimmed) +} + +func getTestProjectServiceAccountToken(path string) string { + var err error + token, tokenSet := os.LookupEnv("TF_ACC_TEST_PROJECT_SERVICE_ACCOUNT_TOKEN") + if !tokenSet || token == "" { + token, err = readTestTokenFromCredentialsFile(path) + if err != nil { + return "" + } + } + return token +} + +func readTestTokenFromCredentialsFile(path string) (string, error) { + if path == "" { + customPath, customPathSet := os.LookupEnv("STACKIT_CREDENTIALS_PATH") + if !customPathSet || customPath == "" { + path = credentialsFilePath + home, err := os.UserHomeDir() + if err != nil { + return "", fmt.Errorf("getting home directory: %w", err) + } + path = filepath.Join(home, path) + } else { + path = customPath + } + } + + credentialsRaw, err := os.ReadFile(path) + if err != nil { + return "", fmt.Errorf("opening file: %w", err) + } + + var credentials struct { + TF_ACC_TEST_PROJECT_SERVICE_ACCOUNT_TOKEN string `json:"TF_ACC_TEST_PROJECT_SERVICE_ACCOUNT_TOKEN"` + } + err = json.Unmarshal(credentialsRaw, &credentials) + if err != nil { + return 
"", fmt.Errorf("unmarshalling credentials: %w", err) + } + return credentials.TF_ACC_TEST_PROJECT_SERVICE_ACCOUNT_TOKEN, nil +} diff --git a/stackit/validate/validate.go b/stackit/validate/validate.go new file mode 100644 index 00000000..7aab712a --- /dev/null +++ b/stackit/validate/validate.go @@ -0,0 +1,85 @@ +package validate + +import ( + "context" + "fmt" + "net" + "regexp" + "strings" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/stackitcloud/terraform-provider-stackit/stackit/core" +) + +type Validator struct { + description string + markdownDescription string + validate ValidationFn +} + +type ValidationFn func(context.Context, validator.StringRequest, *validator.StringResponse) + +var _ = validator.String(&Validator{}) + +func (v *Validator) Description(_ context.Context) string { + return v.description +} + +func (v *Validator) MarkdownDescription(_ context.Context) string { + return v.markdownDescription +} + +func (v *Validator) ValidateString(ctx context.Context, req validator.StringRequest, resp *validator.StringResponse) { // nolint:gocritic // function signature required by Terraform + if req.ConfigValue.IsUnknown() || req.ConfigValue.IsNull() { + return + } + v.validate(ctx, req, resp) +} + +func UUID() *Validator { + return &Validator{ + description: "validate string is UUID", + validate: func(ctx context.Context, req validator.StringRequest, resp *validator.StringResponse) { + if _, err := uuid.Parse(req.ConfigValue.ValueString()); err != nil { + resp.Diagnostics.AddError("not a valid UUID", err.Error()) + } + }, + } +} + +func IP() *Validator { + return &Validator{ + description: "validate string is IP address", + validate: func(ctx context.Context, req validator.StringRequest, resp *validator.StringResponse) { + if net.ParseIP(req.ConfigValue.ValueString()) == nil { + resp.Diagnostics.AddError("not a valid IP address", "") + } + }, + } +} + +func NoSeparator() *Validator { + 
return &Validator{ + description: "validate string does not contain internal separator", + validate: func(ctx context.Context, req validator.StringRequest, resp *validator.StringResponse) { + if strings.Contains(req.ConfigValue.ValueString(), core.Separator) { + resp.Diagnostics.AddError("Invalid character found.", fmt.Sprintf("The string should not contain a '%s'", core.Separator)) + } + }, + } +} + +func SemanticMinorVersion() *Validator { + return &Validator{ + description: "validate string is a semantic version containing only major and minor version", + validate: func(ctx context.Context, req validator.StringRequest, resp *validator.StringResponse) { + exp := `^\d+\.\d+?$` + r := regexp.MustCompile(exp) + version := req.ConfigValue.ValueString() + if !r.MatchString(version) { + resp.Diagnostics.AddError("Invalid version.", "The version should be a valid semantic version only containing major and minor version. The version should not contain a leading `v`. Got "+version) + } + }, + } +} diff --git a/stackit/validate/validate_test.go b/stackit/validate/validate_test.go new file mode 100644 index 00000000..39bdfaf9 --- /dev/null +++ b/stackit/validate/validate_test.go @@ -0,0 +1,210 @@ +package validate + +import ( + "context" + "testing" + + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func TestUUID(t *testing.T) { + tests := []struct { + description string + input string + isValid bool + }{ + { + "ok", + "cae27bba-c43d-498a-861e-d11d241c4ff8", + true, + }, + { + "too short", + "a-b-c-d", + false, + }, + { + "Empty", + "", + false, + }, + { + "not UUID", + "www-541-%", + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + r := validator.StringResponse{} + UUID().ValidateString(context.Background(), validator.StringRequest{ + ConfigValue: types.StringValue(tt.input), + }, &r) + + if !tt.isValid && !r.Diagnostics.HasError() { + t.Fatalf("Should have failed") + } + 
if tt.isValid && r.Diagnostics.HasError() { + t.Fatalf("Should not have failed: %v", r.Diagnostics.Errors()) + } + }) + } +} + +func TestIP(t *testing.T) { + tests := []struct { + description string + input string + isValid bool + }{ + { + "ok IP4", + "111.222.111.222", + true, + }, + { + "ok IP6", + "2001:0db8:85a3:08d3::0370:7344", + true, + }, + { + "too short", + "0.1.2", + false, + }, + { + "Empty", + "", + false, + }, + { + "Not an IP", + "for-sure-not-an-IP", + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + r := validator.StringResponse{} + IP().ValidateString(context.Background(), validator.StringRequest{ + ConfigValue: types.StringValue(tt.input), + }, &r) + + if !tt.isValid && !r.Diagnostics.HasError() { + t.Fatalf("Should have failed") + } + if tt.isValid && r.Diagnostics.HasError() { + t.Fatalf("Should not have failed: %v", r.Diagnostics.Errors()) + } + }) + } +} + +func TestNoSeparator(t *testing.T) { + tests := []struct { + description string + input string + isValid bool + }{ + { + "ok", + "ABCD", + true, + }, + { + "ok-2", + "#$%&/()=.;-", + true, + }, + { + "Empty", + "", + true, + }, + { + "not ok", + "ab,", + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + r := validator.StringResponse{} + NoSeparator().ValidateString(context.Background(), validator.StringRequest{ + ConfigValue: types.StringValue(tt.input), + }, &r) + + if !tt.isValid && !r.Diagnostics.HasError() { + t.Fatalf("Should have failed") + } + if tt.isValid && r.Diagnostics.HasError() { + t.Fatalf("Should not have failed: %v", r.Diagnostics.Errors()) + } + }) + } +} + +func TestSemanticMinorVersion(t *testing.T) { + tests := []struct { + description string + input string + isValid bool + }{ + { + "ok", + "1.20", + true, + }, + { + "ok-2", + "1.3", + true, + }, + { + "ok-3", + "10.1", + true, + }, + { + "Empty", + "", + false, + }, + { + "not ok", + "afssfdfs", + false, + }, + { + "not 
ok-major-version", + "1", + false, + }, + { + "not ok-patch-version", + "1.20.1", + false, + }, + { + "not ok-version", + "v1.20.1", + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + r := validator.StringResponse{} + SemanticMinorVersion().ValidateString(context.Background(), validator.StringRequest{ + ConfigValue: types.StringValue(tt.input), + }, &r) + + if !tt.isValid && !r.Diagnostics.HasError() { + t.Fatalf("Should have failed") + } + if tt.isValid && r.Diagnostics.HasError() { + t.Fatalf("Should not have failed: %v", r.Diagnostics.Errors()) + } + }) + } +} diff --git a/website/docs/index.html.markdown b/website/docs/index.html.markdown new file mode 100644 index 00000000..f42b57b2 --- /dev/null +++ b/website/docs/index.html.markdown @@ -0,0 +1,29 @@ +The STACKIT provider is the official Terraform provider to integrate all the resources developed by STACKIT. + +## Authentication + +Before you can start using the client, you will need to create a STACKIT Service Account in your project and assign it the appropriate permissions (i.e. `project.owner`). + +After the service account has been created, you can authenticate to the client using the Token flow. + +### Token flow +There are multiple ways to provide the token to the Terraform provider: +- Pass it to the provider directly: +``` +provider "stackit" { + service_account_token = "[TOKEN]" +} +``` + +- Set it in an environment variable: +```bash +export STACKIT_SERVICE_ACCOUNT_TOKEN="[TOKEN]" +``` + +- Create a file `~/.stackit/credentials.json` with the content: +```json +{ + "STACKIT_SERVICE_ACCOUNT_TOKEN": "[TOKEN]" +} +``` +> To read from another location, either pass the file path to the provider using the variable `credentials_path`, or set the environment variable `STACKIT_CREDENTIALS_PATH` as the file path. \ No newline at end of file