Compare commits

..

No commits in common. "alpha" and "v0.2.0-alpha" have entirely different histories.

40 changed files with 146 additions and 978 deletions

View file

@ -94,11 +94,6 @@ runs:
fi fi
echo "::endgroup::" echo "::endgroup::"
# Install latest version of Terraform
- uses: hashicorp/setup-terraform@v4
with:
terraform_wrapper: false
- name: Setup JAVA - name: Setup JAVA
uses: actions/setup-java@v5 uses: actions/setup-java@v5
with: with:

View file

@ -1 +0,0 @@
# acceptance test action

View file

@ -1,168 +0,0 @@
name: CleanUp Project Resources
description: "Acceptance Testing CleanUp"
# Composite action: installs the STACKIT CLI, authenticates with a service
# account, then lists (and optionally deletes) SQL Server Flex and
# PostgreSQL Flex instances whose names start with `tf_resource_prefix`.
inputs:
  project_id:
    description: "STACKIT project ID for tests"
    required: true
  region:
    # NOTE(review): not referenced by any step below — kept for
    # interface compatibility with existing callers; confirm intent.
    description: "STACKIT region for tests"
    default: 'eu01'
    required: true
  tf_resource_prefix:
    description: "prefix in resource names"
    default: 'tf-acc-'
    required: true
  service_account_json_content:
    description: "STACKIT service account JSON file contents (plain JSON)"
    required: true
    default: ''
  service_account_json_content_b64:
    description: "STACKIT service account JSON file contents (base64 encoded)"
    required: true
    default: ''
  list_only:
    description: "only list resources, DO NOT delete"
    required: true
    default: 'true'
  log_level:
    description: "Log Level"
    required: true
    default: 'warning'
outputs:
  cli-version:
    description: "stackit cli version"
    value: ${{ steps.stackit_version.outputs.version }}
runs:
  using: "composite"
  steps:
    - name: Install needed tools
      shell: bash
      run: |
        echo "::group::apt install"
        set -e
        # With `set -e`, `cmd; if [ $? -ne 0 ]` never reaches the check
        # (the shell exits on the failing command first), so the captured
        # logs were silently lost. Test the command directly instead and
        # dump the logs before failing.
        if ! apt-get -y -qq update >apt_update.log 2>apt_update_err.log; then
          cat apt_update.log apt_update_err.log
          exit 1
        fi
        if ! apt-get -y -qq install curl gnupg jq >apt_get.log 2>apt_get_err.log; then
          cat apt_get.log apt_get_err.log
          exit 1
        fi
        echo "::endgroup::"
        echo "::group::apt add source"
        curl https://packages.stackit.cloud/keys/key.gpg | gpg --dearmor -o /usr/share/keyrings/stackit.gpg
        echo "deb [signed-by=/usr/share/keyrings/stackit.gpg] https://packages.stackit.cloud/apt/cli stackit main" | tee -a /etc/apt/sources.list.d/stackit.list
        echo "::endgroup::"
        echo "::group::apt install stackit cli"
        if ! apt-get -y -qq update >apt_update.log 2>apt_update_err.log; then
          cat apt_update.log apt_update_err.log
          exit 1
        fi
        if ! apt-get -y -qq install stackit >apt_get.log 2>apt_get_err.log; then
          cat apt_get.log apt_get_err.log
          exit 1
        fi
        echo "::endgroup::"
    - name: Check stackit cli version
      id: stackit_version
      shell: bash
      run: |
        set -e
        VERSION=$(stackit --version | grep "Version:" | cut -d " " -f 2)
        echo "stackit cli version: ${VERSION}"
        echo "version=${VERSION}" >> $GITHUB_OUTPUT
    - name: Creating service_account file from json input
      if: inputs.service_account_json_content != ''
      shell: bash
      # Pass the secret via the environment instead of interpolating
      # `${{ }}` into the script body: inline expansion allows shell
      # injection and embeds the secret in the rendered script.
      env:
        SVC_ACC_JSON: ${{ inputs.service_account_json_content }}
      run: |
        echo "::group::create service account file"
        set -e
        set -o pipefail
        echo "${SVC_ACC_JSON}" > .svc_acc.json
        echo "::endgroup::"
    - name: Creating service_account file from base64 json input
      if: inputs.service_account_json_content_b64 != ''
      shell: bash
      env:
        SVC_ACC_JSON_B64: ${{ inputs.service_account_json_content_b64 }}
      run: |
        echo "::group::create service account file"
        set -e
        set -o pipefail
        echo "${SVC_ACC_JSON_B64}" | base64 -d > .svc_acc.json
        echo "::endgroup::"
    - name: Check service account file exists
      shell: bash
      run: |
        set -e
        if [[ ! -s .svc_acc.json ]]; then
          echo "ERROR: service account file missing or empty"
          exit 1
        fi
    - name: Retrieve resources
      shell: bash
      run: |
        echo "::group::retrieve resources"
        set -e
        echo "authenticating api"
        # Exported for the CLI; the explicit --service-account-key-path
        # below makes the auth call independent of the env var as well.
        STACKIT_SERVICE_ACCOUNT_KEY_PATH="${PWD}/.svc_acc.json"
        export STACKIT_SERVICE_ACCOUNT_KEY_PATH
        stackit auth activate-service-account --service-account-key-path .svc_acc.json
        echo "SQL Server Flex resources:"
        stackit --verbosity ${{ inputs.log_level }} --project-id "${{ inputs.project_id }}" beta sqlserverflex instance list --output-format json | jq -r '.[] | select(.name | startswith("${{ inputs.tf_resource_prefix }}"))'
        echo "PostgreSQL Flex resources:"
        stackit --verbosity ${{ inputs.log_level }} --project-id "${{ inputs.project_id }}" postgresflex instance list --output-format json | jq -r '.[] | select(.name | startswith("${{ inputs.tf_resource_prefix }}"))'
        echo "::endgroup::"
    - name: Delete SQL Server Flex resources
      if: ${{ inputs.list_only != 'true' }}
      shell: bash
      run: |
        echo "::group::delete SQL Server Flex resources"
        set -e
        stackit --verbosity ${{ inputs.log_level }} auth activate-service-account --service-account-key-path .svc_acc.json
        # project_id quoted for consistency with the list step above.
        for s in $(stackit --verbosity ${{ inputs.log_level }} --project-id "${{ inputs.project_id }}" beta sqlserverflex instance list --output-format json | jq -r '.[] | select(.name | startswith("${{ inputs.tf_resource_prefix }}")) | .id'); do stackit --verbosity ${{ inputs.log_level }} -y --project-id "${{ inputs.project_id }}" beta sqlserverflex instance delete "$s"; done
        echo "::endgroup::"
    - name: Skip Delete SQL Server Flex resources
      if: ${{ inputs.list_only == 'true' }}
      shell: bash
      run: |
        set -e
        echo "Skip deleting: list only mode"
    - name: Delete PostgreSQL Flex resources
      if: ${{ inputs.list_only != 'true' }}
      shell: bash
      run: |
        echo "::group::delete PostgreSQL Flex resources"
        set -e
        stackit auth activate-service-account --service-account-key-path .svc_acc.json
        for s in $(stackit --verbosity ${{ inputs.log_level }} --project-id "${{ inputs.project_id }}" postgresflex instance list --output-format json | jq -r '.[] | select(.name | startswith("${{ inputs.tf_resource_prefix }}")) | .id'); do stackit --verbosity ${{ inputs.log_level }} -y --project-id "${{ inputs.project_id }}" postgresflex instance delete "$s"; done
        echo "::endgroup::"
    - name: Skip Delete PostgreSQL Flex resources
      if: ${{ inputs.list_only == 'true' }}
      shell: bash
      run: |
        set -e
        echo "Skip deleting: list only mode"

View file

@ -28,7 +28,7 @@ jobs:
config: config:
if: ${{ github.event_name != 'schedule' }} if: ${{ github.event_name != 'schedule' }}
name: Check GoReleaser config name: Check GoReleaser config
runs-on: stackit-docker runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v6 uses: actions/checkout@v6
@ -40,7 +40,7 @@ jobs:
prepare: prepare:
name: Prepare GO cache name: Prepare GO cache
runs-on: stackit-docker runs-on: ubuntu-latest
permissions: permissions:
actions: read # Required to identify workflow run. actions: read # Required to identify workflow run.
checks: write # Required to add status summary. checks: write # Required to add status summary.
@ -102,7 +102,7 @@ jobs:
needs: needs:
- config - config
- prepare - prepare
runs-on: stackit-docker runs-on: ubuntu-latest
permissions: permissions:
actions: read # Required to identify workflow run. actions: read # Required to identify workflow run.
checks: write # Required to add status summary. checks: write # Required to add status summary.
@ -185,7 +185,7 @@ jobs:
testing: testing:
name: CI run tests name: CI run tests
runs-on: stackit-docker runs-on: ubuntu-latest
needs: needs:
- config - config
- prepare - prepare
@ -278,7 +278,7 @@ jobs:
main: main:
if: ${{ github.event_name != 'schedule' }} if: ${{ github.event_name != 'schedule' }}
name: CI run build and linting name: CI run build and linting
runs-on: stackit-docker runs-on: ubuntu-latest
needs: needs:
- config - config
- prepare - prepare
@ -329,7 +329,7 @@ jobs:
code_coverage: code_coverage:
name: "Code coverage report" name: "Code coverage report"
if: github.event_name == 'pull_request' # Do not run when workflow is triggered by push to main branch if: github.event_name == 'pull_request' # Do not run when workflow is triggered by push to main branch
runs-on: stackit-docker runs-on: ubuntu-latest
needs: needs:
- main - main
- prepare - prepare

View file

@ -1,45 +0,0 @@
name: TF Acceptance Test CleanUp
# Manually triggered cleanup of leftover acceptance-test resources.
# Defaults to list-only mode so nothing is deleted unless explicitly asked.
on:
  workflow_dispatch:
    inputs:
      list_only:
        description: "only list resources"
        type: boolean
        default: true
        required: true
      res_prefix:
        description: "resource name prefix"
        type: string
        default: 'tf-acc-'
        required: true
      log_level:
        description: 'Log Level'
        required: true
        default: 'warning'
        type: choice
        options:
          - info
          - warning
          - debug
          - error
jobs:
  clean:
    name: Clean up
    runs-on: stackit-docker
    steps:
      - name: Checkout
        uses: actions/checkout@v6
      - name: Clean
        uses: ./.github/actions/clean_up
        with:
          project_id: ${{ vars.TF_ACC_PROJECT_ID }}
          region: 'eu01'
          tf_resource_prefix: ${{ inputs.res_prefix }}
          service_account_json_content_b64: "${{ secrets.TF_ACC_SERVICE_ACCOUNT_JSON_B64 }}"
          # Boolean dispatch input is coerced to the string 'true'/'false'
          # when passed to the composite action, matching its comparisons.
          list_only: ${{ inputs.list_only }}
          log_level: ${{ inputs.log_level }}

View file

@ -4,10 +4,9 @@ run-name: Publish by @${{ github.actor }}
on: on:
workflow_dispatch: workflow_dispatch:
push: push:
tags: tags:
- 'v*' - 'v0.*'
env: env:
GO_VERSION: "1.25" GO_VERSION: "1.25"
@ -17,6 +16,7 @@ env:
jobs: jobs:
config: config:
name: Check GoReleaser config name: Check GoReleaser config
if: github.event_name == 'push' && contains(github.ref, 'refs/tags/')
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
@ -29,12 +29,13 @@ jobs:
publish: publish:
name: "Publish provider" name: "Publish provider"
if: github.event_name == 'push' && contains(github.ref, 'refs/tags/')
needs: config needs: config
runs-on: ubuntu-latest runs-on: ubuntu-latest
permissions: permissions:
actions: read # Required to identify workflow run. actions: read # Required to identify workflow run.
checks: write # Required to add status summary. checks: write # Required to add status summary.
contents: write # Required to checkout repository. contents: read # Required to checkout repository.
pull-requests: write # Required to add PR comment. pull-requests: write # Required to add PR comment.
steps: steps:
- name: Install needed tools - name: Install needed tools
@ -44,8 +45,6 @@ jobs:
- name: Checkout - name: Checkout
uses: actions/checkout@v6 uses: actions/checkout@v6
with:
fetch-tags: true
- name: Setup Go - name: Setup Go
uses: actions/setup-go@v6 uses: actions/setup-go@v6
@ -83,7 +82,7 @@ jobs:
gpg --import ~/private.key.pem gpg --import ~/private.key.pem
rm ~/private.key.pem rm ~/private.key.pem
- name: Run GoReleaser - name: Run GoReleaser with SNAPSHOT
if: github.event_name == 'workflow_dispatch' if: github.event_name == 'workflow_dispatch'
id: goreleaser id: goreleaser
env: env:
@ -91,8 +90,7 @@ jobs:
GPG_FINGERPRINT: ${{ secrets.GPG_FINGERPRINT }} GPG_FINGERPRINT: ${{ secrets.GPG_FINGERPRINT }}
uses: goreleaser/goreleaser-action@v7 uses: goreleaser/goreleaser-action@v7
with: with:
# args: release --skip publish --clean --snapshot args: release --skip publish --clean --snapshot
args: release --skip publish --clean
- name: Run GoReleaser - name: Run GoReleaser
if: github.event_name != 'workflow_dispatch' if: github.event_name != 'workflow_dispatch'
@ -108,15 +106,9 @@ jobs:
run: | run: |
echo "${{ secrets.PUBLIC_KEY_PEM }}" >public_key.pem echo "${{ secrets.PUBLIC_KEY_PEM }}" >public_key.pem
- name: Determine version
id: get_version
run: |
set -e
VERSION=$(jq -r .version < dist/metadata.json)
echo "version=${VERSION}" >> "$GITHUB_OUTPUT"
- name: Prepare provider directory structure - name: Prepare provider directory structure
run: | run: |
VERSION=$(jq -r .version < dist/metadata.json)
go run generator/main.go \ go run generator/main.go \
publish \ publish \
--namespace=mhenselin \ --namespace=mhenselin \
@ -125,7 +117,7 @@ jobs:
--domain=tfregistry.sysops.stackit.rocks \ --domain=tfregistry.sysops.stackit.rocks \
--gpgFingerprint="${{ secrets.GPG_FINGERPRINT }}" \ --gpgFingerprint="${{ secrets.GPG_FINGERPRINT }}" \
--gpgPubKeyFile=public_key.pem \ --gpgPubKeyFile=public_key.pem \
--version=${{ steps.get_version.outputs.version }} --version=${VERSION}
- name: Prepare documentation nav file - name: Prepare documentation nav file
run: | run: |
@ -150,7 +142,6 @@ jobs:
run: | run: |
set -e set -e
ssh -o StrictHostKeyChecking=no ubuntu@${{ vars.DOCS_SERVER_IP }} 'rm -rf /srv/www/docs' ssh -o StrictHostKeyChecking=no ubuntu@${{ vars.DOCS_SERVER_IP }} 'rm -rf /srv/www/docs'
echo "${{ steps.get_version.outputs.version }}" >docs/_version.txt echo "${{ github.ref_name }}" >docs/_version.txt
# echo "${{ github.ref_name }}" >docs/_version.txt
scp -o StrictHostKeyChecking=no -r docs ubuntu@${{ vars.DOCS_SERVER_IP }}:/srv/www/ scp -o StrictHostKeyChecking=no -r docs ubuntu@${{ vars.DOCS_SERVER_IP }}:/srv/www/
scp -o StrictHostKeyChecking=no nav.md ubuntu@${{ vars.DOCS_SERVER_IP }}:/srv/www/ scp -o StrictHostKeyChecking=no nav.md ubuntu@${{ vars.DOCS_SERVER_IP }}:/srv/www/

View file

@ -16,14 +16,14 @@ permissions:
jobs: jobs:
goreleaser: goreleaser:
runs-on: stackit-docker runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v6 - uses: actions/checkout@v6
with: with:
# Allow goreleaser to access older tag information. # Allow goreleaser to access older tag information.
fetch-depth: 0 fetch-depth: 0
- uses: actions/setup-go@v6 - uses: https://code.forgejo.org/actions/setup-go@v6
with: with:
go-version-file: "go.mod" go-version-file: "go.mod"
cache: true cache: true

View file

@ -8,13 +8,13 @@ on:
jobs: jobs:
renovate: renovate:
name: Renovate name: Renovate
runs-on: stackit-docker runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v6 uses: actions/checkout@v6
- name: Self-hosted Renovate - name: Self-hosted Renovate
uses: renovatebot/github-action@v46.1.5 uses: renovatebot/github-action@v46.1.4
with: with:
configurationFile: .github/renovate.json configurationFile: .github/renovate.json
# token: ${{ secrets.RENOVATE_TOKEN }} # token: ${{ secrets.RENOVATE_TOKEN }}

View file

@ -20,7 +20,7 @@ permissions:
jobs: jobs:
stale: stale:
name: "Stale" name: "Stale"
runs-on: stackit-docker runs-on: ubuntu-latest
timeout-minutes: 10 timeout-minutes: 10
steps: steps:
- name: "Mark old PRs as stale" - name: "Mark old PRs as stale"

View file

@ -13,19 +13,17 @@ on:
inputs: inputs:
enable_debug: enable_debug:
description: "enable terraform debug logs" description: "enable terraform debug logs"
type: boolean default: 'false'
default: false
required: true required: true
test_timeout_string: test_timeout_string:
description: "string that determines the timeout (default: 45m)" description: "string that determines the timeout (default: 45m)"
type: string
default: '90m' default: '90m'
required: true required: true
jobs: jobs:
acc_test: acc_test:
name: Acceptance Tests name: Acceptance Tests
runs-on: stackit-docker runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v6 uses: actions/checkout@v6
@ -59,3 +57,4 @@ jobs:
tf_acc_kek_key_ring_id: ${{ vars.TF_ACC_KEK_KEY_RING_ID }} tf_acc_kek_key_ring_id: ${{ vars.TF_ACC_KEK_KEY_RING_ID }}
tf_acc_kek_key_version: ${{ vars.TF_ACC_KEK_KEY_VERSION }} tf_acc_kek_key_version: ${{ vars.TF_ACC_KEK_KEY_VERSION }}
tf_acc_kek_service_account: ${{ vars.TF_ACC_KEK_SERVICE_ACCOUNT }} tf_acc_kek_service_account: ${{ vars.TF_ACC_KEK_SERVICE_ACCOUNT }}
tf_debug: ${{ inputs.enable_debug }}

116
README.md
View file

@ -19,7 +19,7 @@ terraform {
required_providers { required_providers {
stackitprivatepreview = { stackitprivatepreview = {
source = "tfregistry.sysops.stackit.rocks/mhenselin/stackitprivatepreview" source = "tfregistry.sysops.stackit.rocks/mhenselin/stackitprivatepreview"
version = ">= 0.1.0" version = "= 0.0.5-alpha"
} }
} }
} }
@ -38,6 +38,7 @@ Check one of the examples in the [examples](examples/) folder.
To authenticate, you will need a [service account](https://docs.stackit.cloud/platform/access-and-identity/service-accounts/). Create it in the [STACKIT Portal](https://portal.stackit.cloud/) and assign the necessary permissions to it, e.g. `project.owner`. There are multiple ways to authenticate: To authenticate, you will need a [service account](https://docs.stackit.cloud/platform/access-and-identity/service-accounts/). Create it in the [STACKIT Portal](https://portal.stackit.cloud/) and assign the necessary permissions to it, e.g. `project.owner`. There are multiple ways to authenticate:
- Key flow (recommended) - Key flow (recommended)
- Token flow (is scheduled for deprecation and will be removed on December 17, 2025.)
When setting up authentication, the provider will always try to use the key flow first and search for credentials in several locations, following a specific order: When setting up authentication, the provider will always try to use the key flow first and search for credentials in several locations, following a specific order:
@ -51,6 +52,7 @@ When setting up authentication, the provider will always try to use the key flow
```json ```json
{ {
"STACKIT_SERVICE_ACCOUNT_TOKEN": "foo_token",
"STACKIT_SERVICE_ACCOUNT_KEY_PATH": "path/to/sa_key.json" "STACKIT_SERVICE_ACCOUNT_KEY_PATH": "path/to/sa_key.json"
} }
``` ```
@ -69,41 +71,35 @@ To configure the key flow, follow this steps:
1. Create a service account key: 1. Create a service account key:
- Use the [STACKIT Portal](https://portal.stackit.cloud/): go to the `Service Accounts` tab, choose a `Service Account` and go to `Service Account Keys` to create a key. For more details, see [Create a service account key](https://docs.stackit.cloud/platform/access-and-identity/service-accounts/how-tos/manage-service-account-keys/) - Use the [STACKIT Portal](https://portal.stackit.cloud/): go to the `Service Accounts` tab, choose a `Service Account` and go to `Service Account Keys` to create a key. For more details, see [Create a service account key](https://docs.stackit.cloud/platform/access-and-identity/service-accounts/how-tos/manage-service-account-keys/)
2. Save the content of the service account key by copying it and saving it in a JSON file. 2. Save the content of the service account key by copying it and saving it in a JSON file.
The expected format of the service account key is a **JSON** with the following structure: The expected format of the service account key is a **JSON** with the following structure:
```json ```json
{ {
"id": "uuid", "id": "uuid",
"publicKey": "public key", "publicKey": "public key",
"createdAt": "2023-08-24T14:15:22Z", "createdAt": "2023-08-24T14:15:22Z",
"validUntil": "2023-08-24T14:15:22Z", "validUntil": "2023-08-24T14:15:22Z",
"keyType": "USER_MANAGED", "keyType": "USER_MANAGED",
"keyOrigin": "USER_PROVIDED", "keyOrigin": "USER_PROVIDED",
"keyAlgorithm": "RSA_2048", "keyAlgorithm": "RSA_2048",
"active": true, "active": true,
"credentials": { "credentials": {
"kid": "string", "kid": "string",
"iss": "my-sa@sa.stackit.cloud", "iss": "my-sa@sa.stackit.cloud",
"sub": "uuid", "sub": "uuid",
"aud": "string", "aud": "string",
(optional) "privateKey": "private key when generated by the SA service" (optional) "privateKey": "private key when generated by the SA service"
} }
} }
``` ```
3. Configure the service account key for authentication in the provider by following one of the alternatives below: 3. Configure the service account key for authentication in the provider by following one of the alternatives below:
- setting the fields in the provider block: `service_account_key` or `service_account_key_path` - setting the fields in the provider block: `service_account_key` or `service_account_key_path`
```hcl
provider "stackitprivatepreview" {
default_region = "eu01"
service_account_key_path = "../service_account.json"
}
```
- setting the environment variable: `STACKIT_SERVICE_ACCOUNT_KEY_PATH` or `STACKIT_SERVICE_ACCOUNT_KEY` - setting the environment variable: `STACKIT_SERVICE_ACCOUNT_KEY_PATH` or `STACKIT_SERVICE_ACCOUNT_KEY`
- ensure the set the service account key in `STACKIT_SERVICE_ACCOUNT_KEY` is correctly formatted. Use e.g. - ensure the set the service account key in `STACKIT_SERVICE_ACCOUNT_KEY` is correctly formatted. Use e.g.
`$ export STACKIT_SERVICE_ACCOUNT_KEY=$(cat ./service-account-key.json)` `$ export STACKIT_SERVICE_ACCOUNT_KEY=$(cat ./service-account-key.json)`
@ -115,6 +111,16 @@ To configure the key flow, follow this steps:
> - setting the environment variable: `STACKIT_PRIVATE_KEY_PATH` or `STACKIT_PRIVATE_KEY` > - setting the environment variable: `STACKIT_PRIVATE_KEY_PATH` or `STACKIT_PRIVATE_KEY`
> - setting `STACKIT_PRIVATE_KEY_PATH` in the credentials file (see above) > - setting `STACKIT_PRIVATE_KEY_PATH` in the credentials file (see above)
### Token flow
> Is scheduled for deprecation and will be removed on December 17, 2025.
Using this flow is less secure since the token is long-lived. You can provide the token in several ways:
1. Setting the field `service_account_token` in the provider
2. Setting the environment variable `STACKIT_SERVICE_ACCOUNT_TOKEN`
3. Setting it in the credentials file (see above)
## Backend configuration ## Backend configuration
To keep track of your terraform state, you can configure an [S3 backend](https://developer.hashicorp.com/terraform/language/settings/backends/s3) using [STACKIT Object Storage](https://docs.stackit.cloud/products/storage/object-storage). To keep track of your terraform state, you can configure an [S3 backend](https://developer.hashicorp.com/terraform/language/settings/backends/s3) using [STACKIT Object Storage](https://docs.stackit.cloud/products/storage/object-storage).
@ -144,6 +150,62 @@ terraform {
Note: AWS specific checks must be skipped as they do not work on STACKIT. For details on what those validations do, see [here](https://developer.hashicorp.com/terraform/language/settings/backends/s3#configuration). Note: AWS specific checks must be skipped as they do not work on STACKIT. For details on what those validations do, see [here](https://developer.hashicorp.com/terraform/language/settings/backends/s3#configuration).
## Opting into Beta Resources
To use beta resources in the STACKIT Terraform provider, follow these steps:
1. **Provider Configuration Option**
Set the `enable_beta_resources` option in the provider configuration. This is a boolean attribute that can be either `true` or `false`.
```hcl
provider "stackit" {
default_region = "eu01"
enable_beta_resources = true
}
```
2. **Environment Variable**
Set the `STACKIT_TF_ENABLE_BETA_RESOURCES` environment variable to `"true"` or `"false"`. Other values will be ignored and will produce a warning.
```sh
export STACKIT_TF_ENABLE_BETA_RESOURCES=true
```
> **Note**: The environment variable takes precedence over the provider configuration option. This means that if the `STACKIT_TF_ENABLE_BETA_RESOURCES` environment variable is set to a valid value (`"true"` or `"false"`), it will override the `enable_beta_resources` option specified in the provider configuration.
For more details, please refer to the [beta resources configuration guide](https://registry.terraform.io/providers/stackitcloud/stackit/latest/docs/guides/opting_into_beta_resources).
## Opting into Experiments
Experiments are features that are even less mature and stable than Beta Resources. While there is some assumed stability in beta resources, will have to expect breaking changes while using experimental resources. Experimental Resources do not come with any support or warranty.
To enable experiments set the experiments field in the provider definition:
```hcl
provider "stackit" {
default_region = "eu01"
experiments = ["iam", "routing-tables", "network"]
}
```
### Available Experiments
#### `iam`
Enables IAM management features in the Terraform provider. The underlying IAM API is expected to undergo a redesign in the future, which leads to it being considered experimental.
#### `routing-tables`
This feature enables experimental routing table capabilities in the Terraform Provider, available only to designated SNAs at this time.
#### `network`
The `stackit_network` provides the fields `region` and `routing_table_id` when the experiment flag `network` is set.
The underlying API is not stable yet and could change in the future.
If you don't need these fields, don't set the experiment flag `network`, to use the stable api.
## Acceptance Tests ## Acceptance Tests
> [!WARNING] > [!WARNING]

View file

@ -1,38 +0,0 @@
# Test fixture: key rings + AES-256-GCM software keys used by the alpha
# acceptance setup for KMS-encrypted database instances.
resource "stackit_kms_keyring" "mshalpha-keyring" {
project_id = var.project_id
display_name = "msh-alpha-tests"
description = "This is a test keyring for private endpoints"
}
# Symmetric encrypt/decrypt key, software-protected, SNA access scope.
resource "stackit_kms_key" "mshalpha-key01" {
project_id = var.project_id
keyring_id = stackit_kms_keyring.mshalpha-keyring.keyring_id
display_name = "mshalpha-key01"
protection = "software"
algorithm = "aes_256_gcm"
purpose = "symmetric_encrypt_decrypt"
access_scope = "SNA"
}
# Expose the key id for dependent configurations/tests.
output "keyid" {
value = stackit_kms_key.mshalpha-key01.key_id
}
# NOTE(review): the two comments around the resources below look like
# fragments pasted from a `terraform plan` destroy preview rather than
# authored documentation — worth cleaning up at the source.
# (because stackit_kms_key.key001 is not in configuration)
resource "stackit_kms_key" "key001" {
access_scope = "SNA"
algorithm = "aes_256_gcm"
display_name = "msh-key-sna01"
keyring_id = stackit_kms_keyring.keyring001.keyring_id
project_id = var.project_id
protection = "software"
purpose = "symmetric_encrypt_decrypt"
}
# stackit_kms_keyring.keyring001 will be destroyed
# (because stackit_kms_keyring.keyring001 is not in configuration)
resource "stackit_kms_keyring" "keyring001" {
description = "This is a test keyring for private endpoints"
display_name = "msh-keyring-sna01"
project_id = var.project_id
}

View file

@ -1,96 +0,0 @@
# PostgreSQL Flex (alpha) acceptance fixtures: one encrypted SNA-scoped
# instance and one unencrypted public instance, plus users and databases.
# Flavor lookup: 2 vCPU / 4 GB single node on premium-perf2 storage.
data "stackitprivatepreview_postgresflexalpha_flavor" "pgsql_flavor" {
project_id = var.project_id
region = "eu01"
cpu = 2
ram = 4
node_type = "Single"
storage_class = "premium-perf2-stackit"
}
# Encrypted instance: customer-managed KEK from the KMS fixtures,
# reachable only via the SNA access scope.
resource "stackitprivatepreview_postgresflexalpha_instance" "msh-alpha-sna-enc" {
project_id = var.project_id
name = "msh-alpha-sna-enc"
backup_schedule = "0 0 * * *"
retention_days = 45
flavor_id = data.stackitprivatepreview_postgresflexalpha_flavor.pgsql_flavor.flavor_id
replicas = 1
storage = {
performance_class = "premium-perf2-stackit"
size = 10
}
encryption = {
kek_key_id = stackit_kms_key.mshalpha-key01.key_id
kek_key_ring_id = stackit_kms_keyring.mshalpha-keyring.keyring_id
kek_key_version = 1
service_account = var.sa_email
}
network = {
acl = ["0.0.0.0/0", "193.148.160.0/19", "170.85.2.177/32"]
access_scope = "SNA"
}
version = 17
}
# NOTE(review): resource label says "nosna-noenc" but the instance name is
# "msh-alpha-nosna-enc" — the name suggests encryption while the resource
# has none; confirm which was intended.
resource "stackitprivatepreview_postgresflexalpha_instance" "msh-alpha-nosna-noenc" {
project_id = var.project_id
name = "msh-alpha-nosna-enc"
backup_schedule = "0 0 * * *"
retention_days = 45
flavor_id = data.stackitprivatepreview_postgresflexalpha_flavor.pgsql_flavor.flavor_id
replicas = 1
storage = {
performance_class = "premium-perf2-stackit"
size = 10
}
network = {
acl = ["0.0.0.0/0", "193.148.160.0/19", "170.85.2.177/32"]
access_scope = "PUBLIC"
}
version = 16
}
# Admin user on the encrypted instance (createdb + login).
resource "stackitprivatepreview_postgresflexalpha_user" "ptlsdbadminuser" {
project_id = var.project_id
instance_id = stackitprivatepreview_postgresflexalpha_instance.msh-alpha-sna-enc.instance_id
name = var.db_admin_username
roles = ["createdb", "login"]
# roles = ["createdb", "login", "createrole"]
}
# Regular login-only user on the same instance.
resource "stackitprivatepreview_postgresflexalpha_user" "ptlsdbuser" {
project_id = var.project_id
instance_id = stackitprivatepreview_postgresflexalpha_instance.msh-alpha-sna-enc.instance_id
name = var.db_username
roles = ["login"]
# roles = ["createdb", "login", "createrole"]
}
# Five databases (<db_name>0 .. <db_name>4) owned by the admin user; the
# depends_on ensures the owner exists before database creation.
resource "stackitprivatepreview_postgresflexalpha_database" "example" {
count = 5
depends_on = [stackitprivatepreview_postgresflexalpha_user.ptlsdbadminuser]
project_id = var.project_id
instance_id = stackitprivatepreview_postgresflexalpha_instance.msh-alpha-sna-enc.instance_id
name = "${var.db_name}${count.index}"
owner = var.db_admin_username
}
# Retained alternative: read an existing instance by id instead.
# data "stackitprivatepreview_postgresflexalpha_instance" "datapsql" {
# project_id = var.project_id
# instance_id = var.instance_id
# region = "eu01"
# }
# output "psql_instance_id" {
# value = data.stackitprivatepreview_postgresflexalpha_instance.datapsql.instance_id
# }
# Generated password for the regular user; marked sensitive so it is
# redacted from plan/apply output.
output "psql_user_password" {
value = stackitprivatepreview_postgresflexalpha_user.ptlsdbuser.password
sensitive = true
}
# output "psql_user_conn" {
# value = stackitprivatepreview_postgresflexalpha_user.ptlsdbuser.connection_string
# sensitive = true
# }

View file

@ -1,24 +0,0 @@
# Provider requirements for the alpha test configuration: the public
# STACKIT provider plus the private-preview provider from the internal
# registry.
terraform {
required_providers {
stackit = {
source = "registry.terraform.io/stackitcloud/stackit"
version = "~> 0.70"
}
stackitprivatepreview = {
source = "tfregistry.sysops.stackit.rocks/mhenselin/stackitprivatepreview"
version = ">=0.1.0"
}
}
}
# Both providers authenticate with the same service account key; the path
# is relative to the working directory this config is applied from.
provider "stackit" {
default_region = "eu01"
enable_beta_resources = true
service_account_key_path = "../service_account.json"
}
provider "stackitprivatepreview" {
default_region = "eu01"
service_account_key_path = "../service_account.json"
}

View file

@ -1,101 +0,0 @@
# SQL Server Flex (beta) acceptance fixtures: one encrypted SNA-scoped
# instance with two users. The KMS keyring/key below were moved to
# variables (var.key_id / var.keyring_id) and kept here commented out.
# resource "stackit_kms_keyring" "keyring001" {
# project_id = var.project_id
# display_name = "msh-keyring-sna01"
# description = "This is a test keyring for private endpoints"
# }
#
# resource "stackit_kms_key" "key001" {
# project_id = var.project_id
# keyring_id = stackit_kms_keyring.keyring001.keyring_id
# display_name = "msh-key-sna01"
# protection = "software"
# algorithm = "aes_256_gcm"
# purpose = "symmetric_encrypt_decrypt"
# access_scope = "SNA"
# }
# Flavor lookup: 4 vCPU / 16 GB single node on premium-perf2 storage.
data "stackitprivatepreview_sqlserverflexbeta_flavor" "sqlserver_flavor" {
project_id = var.project_id
region = "eu01"
cpu = 4
ram = 16
node_type = "Single"
storage_class = "premium-perf2-stackit"
}
# Encrypted SQL Server 2022 instance, SNA access scope, KEK supplied via
# variables.
resource "stackitprivatepreview_sqlserverflexbeta_instance" "msh-sna-001" {
project_id = var.project_id
name = "msh-sna-001"
backup_schedule = "0 3 * * *"
retention_days = 31
flavor_id = data.stackitprivatepreview_sqlserverflexbeta_flavor.sqlserver_flavor.flavor_id
storage = {
class = "premium-perf2-stackit"
size = 50
}
version = 2022
encryption = {
kek_key_version = 1
kek_key_id = var.key_id
kek_key_ring_id = var.keyring_id
service_account = var.sa_email
}
network = {
acl = ["0.0.0.0/0", "193.148.160.0/19"]
access_scope = "SNA"
}
}
# Retained alternative: a public-scope, unencrypted instance variant.
#resource "stackitprivatepreview_sqlserverflexbeta_instance" "msh-nosna-001" {
# project_id = var.project_id
# name = "msh-nosna-001"
# backup_schedule = "0 3 * * *"
# retention_days = 31
# flavor_id = data.stackitprivatepreview_sqlserverflexbeta_flavor.sqlserver_flavor.flavor_id
# storage = {
# class = "premium-perf2-stackit"
# size = 50
# }
# version = 2022
# # encryption = {
# # #key_id = stackit_kms_key.key.key_id
# # #keyring_id = stackit_kms_keyring.keyring.keyring_id
# # #key_version = 1
# # #key_id = var.key_id
# # # key with scope public
# # key_id = "fe039bcf-8d7b-431a-801d-9e81371a6b7b"
# # keyring_id = var.keyring_id
# # key_version = var.key_version
# # service_account = var.sa_email
# # }
# network = {
# acl = ["0.0.0.0/0", "193.148.160.0/19"]
# access_scope = "PUBLIC"
# }
#}
# data "stackitprivatepreview_sqlserverflexbeta_instance" "test" {
# project_id = var.project_id
# instance_id = var.instance_id
# region = "eu01"
# }
# output "test" {
# value = data.stackitprivatepreview_sqlserverflexbeta_instance.test
# }
# Admin user with the STACKIT LoginManager server role.
resource "stackitprivatepreview_sqlserverflexbeta_user" "ptlsdbadminuser" {
project_id = var.project_id
instance_id = stackitprivatepreview_sqlserverflexbeta_instance.msh-sna-001.instance_id
username = var.db_admin_username
#roles = ["##STACKIT_LoginManager##", "##STACKIT_DatabaseManager##"]
roles = ["##STACKIT_LoginManager##"]
}
# Second user — currently granted the same role set as the admin user.
resource "stackitprivatepreview_sqlserverflexbeta_user" "ptlsdbuser" {
project_id = var.project_id
instance_id = stackitprivatepreview_sqlserverflexbeta_instance.msh-sna-001.instance_id
username = var.db_username
roles = ["##STACKIT_LoginManager##"]
}

View file

@ -1,57 +0,0 @@
# Standalone KMS fixtures: two keyring/key pairs with identical settings
# (software protection, AES-256-GCM, SNA scope) differing only in names.
# NOTE(review): the pairs look near-duplicated — confirm both are needed.
resource "stackit_kms_keyring" "keyring001" {
project_id = var.project_id
display_name = "msh-keyring-sna01"
description = "This is a test keyring for private endpoints"
}
resource "stackit_kms_key" "key001" {
project_id = var.project_id
keyring_id = stackit_kms_keyring.keyring001.keyring_id
display_name = "msh-key-sna01"
protection = "software"
algorithm = "aes_256_gcm"
purpose = "symmetric_encrypt_decrypt"
access_scope = "SNA"
}
# data "stackitprivatepreview_sqlserverflexalpha_instance" "test" {
# project_id = var.project_id
# instance_id = var.instance_id
# region = "eu01"
# }
# Exposes the composite `id` attribute (not the bare keyring_id).
output "key_ring_id" {
value = stackit_kms_keyring.keyring001.id
}
resource "stackit_kms_keyring" "keyring001yy" {
project_id = var.project_id
display_name = "msh-kr-sna01"
description = "This is a test keyring for private endpoints"
}
resource "stackit_kms_key" "key001yy" {
project_id = var.project_id
keyring_id = stackit_kms_keyring.keyring001yy.keyring_id
display_name = "msh-k-001"
protection = "software"
algorithm = "aes_256_gcm"
purpose = "symmetric_encrypt_decrypt"
access_scope = "SNA"
}
# data "stackitprivatepreview_sqlserverflexalpha_instance" "test" {
# project_id = var.project_id
# instance_id = var.instance_id
# region = "eu01"
# }
output "key_ring_idxx" {
value = stackit_kms_keyring.keyring001yy.id
}
output "key_id" {
value = stackit_kms_key.key001yy.id
}

View file

@ -1,25 +0,0 @@
terraform {
required_providers {
stackit = {
source = "registry.terraform.io/stackitcloud/stackit"
version = "~> 0.70"
}
# stackitprivatepreview = {
# source = "tfregistry.sysops.stackit.rocks/mhenselin/stackitprivatepreview"
# version = "= 0.0.2-alpha"
# }
}
}
provider "stackit" {
default_region = "eu01"
enable_beta_resources = true
service_account_key_path = "../service_account.json"
}
# provider "stackitprivatepreview" {
# default_region = "eu01"
# enable_beta_resources = true
# service_account_key_path = "../service_account.json"
# }

View file

@ -1,4 +0,0 @@
#
# output "postgres_flavor" {
# value = data.stackitprivatepreview_postgresflexalpha_flavor.pgsql_flavor.flavor_id
# }

View file

@ -1,45 +0,0 @@
data "stackitprivatepreview_postgresflexalpha_flavor" "pgsql_flavor" {
project_id = var.project_id
region = "eu01"
cpu = 2
ram = 4
node_type = "Single"
storage_class = "premium-perf2-stackit"
}
resource "stackitprivatepreview_postgresflexalpha_instance" "import_for_deletion" {
project_id = var.project_id
name = "mshpetest2"
backup_schedule = "0 0 * * *"
retention_days = 45
flavor_id = data.stackitprivatepreview_postgresflexalpha_flavor.pgsql_flavor.flavor_id
replicas = 1
storage = {
# class = "premium-perf2-stackit"
performance_class = "premium-perf2-stackit"
size = 10
}
encryption = {
# key_id = stackit_kms_key.key.key_id
# keyring_id = stackit_kms_keyring.keyring.keyring_id
kek_key_id = var.key_id
kek_key_ring_id = var.keyring_id
kek_key_version = var.key_version
service_account = var.sa_email
}
network = {
acl = ["0.0.0.0/0", "193.148.160.0/19", "170.85.2.177/32"]
access_scope = "PUBLIC"
}
version = 14
}
import {
to = stackitprivatepreview_postgresflexalpha_instance.import_for_deletion
identity = {
project_id = var.project_id
region = "eu01"
instance_id = "d52b5d4c-be3f-4c14-a107-330dab99fd2e"
}
}

View file

@ -1,25 +0,0 @@
terraform {
required_providers {
# stackit = {
# source = "registry.terraform.io/stackitcloud/stackit"
# version = "~> 0.70"
# }
stackitprivatepreview = {
source = "tfregistry.sysops.stackit.rocks/mhenselin/stackitprivatepreview"
version = "> 0.0"
}
}
}
# provider "stackit" {
# default_region = "eu01"
# enable_beta_resources = true
# service_account_key_path = "./service_account.json"
# }
provider "stackitprivatepreview" {
default_region = "eu01"
enable_beta_resources = true
service_account_key_path = "../service_account.json"
}

View file

@ -1,11 +0,0 @@
variable "project_id" {
default = "<PROJECT ID UUID>"
}
variable "sa_email" {
default = "<SERVICE ACCOUNT EMAIL>"
}
variable "db_username" {
default = "<DB USERNAME>"
}

View file

@ -1,17 +0,0 @@
data "stackitprivatepreview_postgresflexalpha_flavor" "pgsql_flavor" {
project_id = var.project_id
region = "eu01"
cpu = 2
ram = 4
node_type = "Single"
storage_class = "premium-perf2-stackit"
}
data "stackitprivatepreview_postgresflexalpha_flavor" "pgsql_flavor2"{
project_id = var.project_id
region = "eu01"
cpu = 2
ram = 4
node_type = "Single"
storage_class = "premium-perf2-stackit"
}

View file

@ -1,25 +0,0 @@
terraform {
required_providers {
# stackit = {
# source = "registry.terraform.io/stackitcloud/stackit"
# version = "~> 0.70"
# }
stackitprivatepreview = {
source = "tfregistry.sysops.stackit.rocks/mhenselin/stackitprivatepreview"
version = "> 0.0"
}
}
}
# provider "stackit" {
# default_region = "eu01"
# enable_beta_resources = true
# service_account_key_path = "./service_account.json"
# }
provider "stackitprivatepreview" {
default_region = "eu01"
enable_beta_resources = true
service_account_key_path = "/home/henselinm/Development/PTLS/terraform-provider-stackit-MSH/sample/pg_instance/service_account.json"
}

View file

@ -1,11 +0,0 @@
variable "project_id" {
default = "<PROJECT ID UUID>"
}
variable "sa_email" {
default = "<SERVICE ACCOUNT EMAIL>"
}
variable "db_username" {
default = "<DB USERNAME>"
}

View file

@ -1,13 +0,0 @@
#
# data "stackitprivatepreview_sqlserverflexalpha_flavor" "sqlserver_flavor" {
# project_id = var.project_id
# region = "eu01"
# cpu = 4
# ram = 16
# node_type = "Single"
# storage_class = "premium-perf2-stackit"
# }
#
# output "sqlserver_flavor" {
# value = data.stackitprivatepreview_sqlserverflexalpha_flavor.sqlserver_flavor.flavor_id
# }

View file

@ -1,9 +0,0 @@
data "stackitprivatepreview_postgresflexalpha_flavor" "pgsql_flavor" {
project_id = var.project_id
region = "eu01"
cpu = 2
ram = 4
node_type = "Single"
storage_class = "premium-perf2-stackit"
}

View file

@ -1,25 +0,0 @@
terraform {
required_providers {
# stackit = {
# source = "registry.terraform.io/stackitcloud/stackit"
# version = "~> 0.70"
# }
stackitprivatepreview = {
source = "tfregistry.sysops.stackit.rocks/mhenselin/stackitprivatepreview"
version = "> 0.0"
}
}
}
# provider "stackit" {
# default_region = "eu01"
# enable_beta_resources = true
# service_account_key_path = "../service_account.json"
# }
provider "stackitprivatepreview" {
default_region = "eu01"
enable_beta_resources = true
service_account_key_path = "../service_account.json"
}

View file

@ -1,116 +0,0 @@
data "stackitprivatepreview_sqlserverflexbeta_flavor" "sqlserver_flavor" {
project_id = var.project_id
region = "eu01"
cpu = 4
ram = 16
node_type = "Single"
storage_class = "premium-perf2-stackit"
}
data "stackitprivatepreview_sqlserverflexbeta_flavor" "sqlserver_flavor_2" {
project_id = var.project_id
region = "eu01"
cpu = 4
ram = 32
node_type = "Replica"
storage_class = "premium-perf2-stackit"
}
resource "stackitprivatepreview_sqlserverflexbeta_instance" "msh-beta-nosna-001" {
project_id = var.project_id
name = "msh-beta-nosna-001-renamed"
backup_schedule = "0 3 * * *"
retention_days = 31
flavor_id = data.stackitprivatepreview_sqlserverflexbeta_flavor.sqlserver_flavor.flavor_id
storage = {
class = "premium-perf2-stackit"
size = 50
}
version = 2022
network = {
acl = ["0.0.0.0/0", "193.148.160.0/19"]
access_scope = "PUBLIC"
}
}
resource "stackitprivatepreview_sqlserverflexbeta_instance" "msh-beta-sna-001" {
project_id = var.project_id
name = "msh-beta-sna-001"
backup_schedule = "0 3 * * *"
retention_days = 31
flavor_id = data.stackitprivatepreview_sqlserverflexbeta_flavor.sqlserver_flavor.flavor_id
storage = {
class = "premium-perf2-stackit"
size = 5
}
version = 2022
encryption = {
#key_id = stackit_kms_key.key.key_id
#keyring_id = stackit_kms_keyring.keyring.keyring_id
#key_version = 1
# key with scope public
kek_key_id = "fe039bcf-8d7b-431a-801d-9e81371a6b7b"
# key_id = var.key_id
kek_key_ring_id = var.keyring_id
kek_key_version = var.key_version
service_account = var.sa_email
}
network = {
acl = ["0.0.0.0/0", "193.148.160.0/19"]
access_scope = "SNA"
}
}
resource "stackitprivatepreview_sqlserverflexbeta_user" "exampleuseruno" {
project_id = var.project_id
instance_id = stackitprivatepreview_sqlserverflexbeta_instance.msh-beta-nosna-001.instance_id
username = "exampleuserdue"
roles = ["##STACKIT_ProcessManager##", "##STACKIT_LoginManager##", "##STACKIT_ServerManager##"]
}
resource "stackitprivatepreview_sqlserverflexbeta_user" "exampleuser" {
project_id = var.project_id
instance_id = stackitprivatepreview_sqlserverflexbeta_instance.msh-beta-nosna-001.instance_id
username = "exampleuser"
roles = ["##STACKIT_LoginManager##"]
}
resource "stackitprivatepreview_sqlserverflexbeta_database" "mshtest002" {
project_id = var.project_id
instance_id = stackitprivatepreview_sqlserverflexbeta_instance.msh-beta-nosna-001.instance_id
name = "mshtest002"
# owner = "dbuser"
owner = stackitprivatepreview_sqlserverflexbeta_user.exampleuseruno.username
}
# data "stackitprivatepreview_sqlserverflexbeta_database" "example" {
# project_id = var.project_id
# region = "eu01"
# instance_id = "b3b63d0c-35bf-4804-84ea-5abec2a8ae58"
# database_name = "mshtest001"
# }
# output "dbdetails" {
# value = data.stackitprivatepreview_sqlserverflexbeta_database.example
# }
#
# resource "stackitprivatepreview_sqlserverflexbeta_database" "mshtest" {
# project_id = var.project_id
# instance_id = "b3b63d0c-35bf-4804-84ea-5abec2a8ae58"
# name = "mshtest"
# owner = "dbuser"
# }
#
# import {
# to = stackitprivatepreview_sqlserverflexbeta_database.mshtest
# identity = {
# project_id = var.project_id
# region = "eu01"
# instance_id = "b3b63d0c-35bf-4804-84ea-5abec2a8ae58"
# database_name = "mshtest"
# }
# }

View file

@ -1,11 +0,0 @@
variable "project_id" {
default = "<PROJECT ID UUID>"
}
variable "sa_email" {
default = "<SERVICE ACCOUNT EMAIL>"
}
variable "db_username" {
default = "<DB USERNAME>"
}

View file

@ -209,8 +209,8 @@ func (r *databaseResource) Create(
) )
database, err := postgresflexalphaWait.GetDatabaseByIdWaitHandler(ctx, r.client.DefaultAPI, projectID, instanceID, region, databaseID). database, err := postgresflexalphaWait.GetDatabaseByIdWaitHandler(ctx, r.client.DefaultAPI, projectID, instanceID, region, databaseID).
SetTimeout(30 * time.Minute). SetTimeout(15 * time.Minute).
SetSleepBeforeWait(10 * time.Second). SetSleepBeforeWait(15 * time.Second).
WaitWithContext(ctx) WaitWithContext(ctx)
if err != nil { if err != nil {
core.LogAndAddError( core.LogAndAddError(
@ -279,8 +279,8 @@ func (r *databaseResource) Read(
) )
databaseResp, err := postgresflexalphaWait.GetDatabaseByIdWaitHandler(ctx, r.client.DefaultAPI, projectID, instanceID, region, databaseID). databaseResp, err := postgresflexalphaWait.GetDatabaseByIdWaitHandler(ctx, r.client.DefaultAPI, projectID, instanceID, region, databaseID).
SetTimeout(30 * time.Minute). SetTimeout(15 * time.Minute).
SetSleepBeforeWait(10 * time.Second). SetSleepBeforeWait(15 * time.Second).
WaitWithContext(ctx) WaitWithContext(ctx)
if err != nil { if err != nil {
core.LogAndAddError( core.LogAndAddError(
@ -386,8 +386,8 @@ func (r *databaseResource) Update(
ctx = core.LogResponse(ctx) ctx = core.LogResponse(ctx)
databaseResp, err := postgresflexalphaWait.GetDatabaseByIdWaitHandler(ctx, r.client.DefaultAPI, projectId, instanceId, region, databaseId). databaseResp, err := postgresflexalphaWait.GetDatabaseByIdWaitHandler(ctx, r.client.DefaultAPI, projectId, instanceId, region, databaseId).
SetTimeout(30 * time.Minute). SetTimeout(15 * time.Minute).
SetSleepBeforeWait(10 * time.Second). SetSleepBeforeWait(15 * time.Second).
WaitWithContext(ctx) WaitWithContext(ctx)
if err != nil { if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "error updating database", err.Error()) core.LogAndAddError(ctx, &resp.Diagnostics, "error updating database", err.Error())

View file

@ -208,7 +208,7 @@ func (r *instanceResource) Create(
) )
waitResp, err := wait.CreateInstanceWaitHandler(ctx, r.client.DefaultAPI, projectID, region, *instanceID). waitResp, err := wait.CreateInstanceWaitHandler(ctx, r.client.DefaultAPI, projectID, region, *instanceID).
SetTimeout(90 * time.Minute). SetTimeout(30 * time.Minute).
SetSleepBeforeWait(10 * time.Second). SetSleepBeforeWait(10 * time.Second).
WaitWithContext(ctx) WaitWithContext(ctx)
if err != nil { if err != nil {
@ -446,7 +446,7 @@ func (r *instanceResource) Update(
region, region,
instanceID, instanceID,
). ).
SetTimeout(90 * time.Minute). SetTimeout(30 * time.Minute).
SetSleepBeforeWait(10 * time.Second). SetSleepBeforeWait(10 * time.Second).
WaitWithContext(ctx) WaitWithContext(ctx)
if err != nil { if err != nil {

View file

@ -799,7 +799,7 @@ func testAccCheckPostgresFlexDestroy(s *terraform.State) error {
testutils.ProjectId, testutils.ProjectId,
testutils.Region, testutils.Region,
items[i].Id, items[i].Id,
30*time.Minute, 15*time.Minute,
10*time.Second, 10*time.Second,
) )
if err != nil { if err != nil {

View file

@ -241,7 +241,7 @@ func (r *userResource) Create(
).SetSleepBeforeWait( ).SetSleepBeforeWait(
10 * time.Second, 10 * time.Second,
).SetTimeout( ).SetTimeout(
30 * time.Minute, 15 * time.Minute,
).WaitWithContext(ctx) ).WaitWithContext(ctx)
if err != nil { if err != nil {
@ -322,7 +322,7 @@ func (r *userResource) Read(
).SetSleepBeforeWait( ).SetSleepBeforeWait(
10 * time.Second, 10 * time.Second,
).SetTimeout( ).SetTimeout(
30 * time.Minute, 15 * time.Minute,
).WaitWithContext(ctx) ).WaitWithContext(ctx)
if err != nil { if err != nil {
@ -445,7 +445,7 @@ func (r *userResource) Update(
).SetSleepBeforeWait( ).SetSleepBeforeWait(
10 * time.Second, 10 * time.Second,
).SetTimeout( ).SetTimeout(
30 * time.Minute, 15 * time.Minute,
).WaitWithContext(ctx) ).WaitWithContext(ctx)
if err != nil { if err != nil {

View file

@ -193,7 +193,6 @@ func (r *databaseResource) Create(ctx context.Context, req resource.CreateReques
data.Owner.ValueString(), data.Owner.ValueString(),
). ).
SetSleepBeforeWait(10 * time.Second). SetSleepBeforeWait(10 * time.Second).
SetTimeout(90 * time.Minute).
WaitWithContext(ctx) WaitWithContext(ctx)
if err != nil { if err != nil {
core.LogAndAddError( core.LogAndAddError(
@ -254,9 +253,9 @@ func (r *databaseResource) Create(ctx context.Context, req resource.CreateReques
region, region,
databaseName, databaseName,
).SetSleepBeforeWait( ).SetSleepBeforeWait(
10 * time.Second, 30 * time.Second,
).SetTimeout( ).SetTimeout(
90 * time.Minute, 15 * time.Minute,
).WaitWithContext(ctx) ).WaitWithContext(ctx)
if err != nil { if err != nil {
core.LogAndAddError( core.LogAndAddError(

View file

@ -354,8 +354,8 @@ func (r *instanceResource) Update(ctx context.Context, req resource.UpdateReques
waitResp, err := wait. waitResp, err := wait.
UpdateInstanceWaitHandler(ctx, r.client.DefaultAPI, projectID, instanceID, region). UpdateInstanceWaitHandler(ctx, r.client.DefaultAPI, projectID, instanceID, region).
SetSleepBeforeWait(10 * time.Second). SetSleepBeforeWait(15 * time.Second).
SetTimeout(90 * time.Minute). SetTimeout(45 * time.Minute).
WaitWithContext(ctx) WaitWithContext(ctx)
if err != nil { if err != nil {
core.LogAndAddError( core.LogAndAddError(
@ -416,10 +416,7 @@ func (r *instanceResource) Delete(ctx context.Context, req resource.DeleteReques
ctx = core.LogResponse(ctx) ctx = core.LogResponse(ctx)
delResp, err := wait.DeleteInstanceWaitHandler(ctx, r.client.DefaultAPI, projectID, instanceID, region). delResp, err := wait.DeleteInstanceWaitHandler(ctx, r.client.DefaultAPI, projectID, instanceID, region).WaitWithContext(ctx)
SetSleepBeforeWait(10 * time.Second).
SetTimeout(90 * time.Minute).
WaitWithContext(ctx)
if err != nil { if err != nil {
core.LogAndAddError( core.LogAndAddError(
ctx, ctx,

View file

@ -158,8 +158,7 @@ func TestAccInstance(t *testing.T) {
PreConfig: func() { PreConfig: func() {
t.Logf("testing: %s - %s", t.Name(), "create and verify") t.Logf("testing: %s - %s", t.Name(), "create and verify")
}, },
// empty refresh plan ExpectNonEmptyPlan: true,
ExpectNonEmptyPlan: false,
Config: testutils.StringFromTemplateMust( Config: testutils.StringFromTemplateMust(
"testdata/instance_template.gompl", "testdata/instance_template.gompl",
exData, exData,

View file

@ -308,7 +308,7 @@ func (r *userResource) Create(
region, region,
userId, userId,
).SetSleepBeforeWait( ).SetSleepBeforeWait(
10 * time.Second, 90 * time.Second,
).SetTimeout( ).SetTimeout(
90 * time.Minute, 90 * time.Minute,
).WaitWithContext(ctx) ).WaitWithContext(ctx)
@ -459,23 +459,23 @@ func (r *userResource) Delete(
ctx = core.InitProviderContext(ctx) ctx = core.InitProviderContext(ctx)
projectID := model.ProjectId.ValueString() projectId := model.ProjectId.ValueString()
instanceID := model.InstanceId.ValueString() instanceId := model.InstanceId.ValueString()
userID := model.UserId.ValueInt64() userId := model.UserId.ValueInt64()
region := model.Region.ValueString() region := model.Region.ValueString()
ctx = tflog.SetField(ctx, "project_id", projectID) ctx = tflog.SetField(ctx, "project_id", projectId)
ctx = tflog.SetField(ctx, "instance_id", instanceID) ctx = tflog.SetField(ctx, "instance_id", instanceId)
ctx = tflog.SetField(ctx, "user_id", userID) ctx = tflog.SetField(ctx, "user_id", userId)
ctx = tflog.SetField(ctx, "region", region) ctx = tflog.SetField(ctx, "region", region)
// Delete existing record set // Delete existing record set
// err := r.client.DeleteUserRequest(ctx, projectId, region, instanceId, userId).Execute() // err := r.client.DeleteUserRequest(ctx, projectId, region, instanceId, userId).Execute()
err := r.client.DefaultAPI.DeleteUserRequest(ctx, projectID, region, instanceID, userID).Execute() err := r.client.DefaultAPI.DeleteUserRequest(ctx, projectId, region, instanceId, userId).Execute()
if err != nil { if err != nil {
var oapiErr *oapierror.GenericOpenAPIError var oapiErr *oapierror.GenericOpenAPIError
ok := errors.As(err, &oapiErr) ok := errors.As(err, &oapiErr)
if !ok { if !ok {
core.LogAndAddError(ctx, &resp.Diagnostics, "User Delete Error", fmt.Sprintf("error is no oapi error: %v", err)) // TODO err handling
return return
} }
@ -487,14 +487,12 @@ func (r *userResource) Delete(
// tflog.Warn(ctx, "[delete user] Wait handler got error 500") // tflog.Warn(ctx, "[delete user] Wait handler got error 500")
// return false, nil, nil // return false, nil, nil
default: default:
core.LogAndAddError(ctx, &resp.Diagnostics, "User Delete Error", fmt.Sprintf("Unexpected API error: %v", err)) // TODO err handling
return return
} }
} }
// Delete existing record set // Delete existing record set
_, err = sqlserverflexbetaWait.DeleteUserWaitHandler(ctx, r.client.DefaultAPI, projectID, region, instanceID, userID). _, err = sqlserverflexbetaWait.DeleteUserWaitHandler(ctx, r.client.DefaultAPI, projectId, region, instanceId, userId).
SetTimeout(90 * time.Minute).
SetSleepBeforeWait(10 * time.Second).
WaitWithContext(ctx) WaitWithContext(ctx)
if err != nil { if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "User Delete Error", fmt.Sprintf("Calling API: %v", err)) core.LogAndAddError(ctx, &resp.Diagnostics, "User Delete Error", fmt.Sprintf("Calling API: %v", err))

View file

@ -281,8 +281,8 @@ func GetDatabaseByIdWaitHandler(
if databaseID > math.MaxInt32 { if databaseID > math.MaxInt32 {
return false, nil, fmt.Errorf("databaseID too large for int32") return false, nil, fmt.Errorf("databaseID too large for int32")
} }
dbID32 := int32(databaseID) //nolint:gosec // is checked above dbId32 := int32(databaseID) //nolint:gosec // is checked above
s, err := a.GetDatabaseRequest(ctx, projectID, region, instanceID, dbID32).Execute() s, err := a.GetDatabaseRequest(ctx, projectID, region, instanceID, dbId32).Execute()
if err != nil { if err != nil {
var oapiErr *oapierror.GenericOpenAPIError var oapiErr *oapierror.GenericOpenAPIError
ok := errors.As(err, &oapiErr) ok := errors.As(err, &oapiErr)
@ -290,7 +290,6 @@ func GetDatabaseByIdWaitHandler(
return false, nil, fmt.Errorf("could not convert error to oapierror.GenericOpenAPIError") return false, nil, fmt.Errorf("could not convert error to oapierror.GenericOpenAPIError")
} }
switch oapiErr.StatusCode { switch oapiErr.StatusCode {
// TODO: work-around
case http.StatusBadGateway, http.StatusGatewayTimeout, http.StatusServiceUnavailable: case http.StatusBadGateway, http.StatusGatewayTimeout, http.StatusServiceUnavailable:
tflog.Warn( tflog.Warn(
ctx, "api responded with 50[2,3,4] status", map[string]interface{}{ ctx, "api responded with 50[2,3,4] status", map[string]interface{}{

View file

@ -89,16 +89,9 @@ func CreateInstanceWaitHandler(
return false, nil, fmt.Errorf("could not convert error to oapierror.GenericOpenAPIError: %w", err) return false, nil, fmt.Errorf("could not convert error to oapierror.GenericOpenAPIError: %w", err)
} }
switch oapiErr.StatusCode { switch oapiErr.StatusCode {
case http.StatusOK:
return false, nil, nil
case http.StatusNotFound: case http.StatusNotFound:
return false, nil, nil return false, nil, nil
default: default:
// TODO: work-around
if strings.Contains(err.Error(), "is not a valid InstanceEdition") {
tflog.Info(ctx, "API WORKAROUND", map[string]interface{}{"err": err})
return false, nil, nil
}
return false, nil, fmt.Errorf("api error: %w", err) return false, nil, fmt.Errorf("api error: %w", err)
} }
} }
@ -264,6 +257,7 @@ func DeleteInstanceWaitHandler(
return true, nil, nil return true, nil, nil
}, },
) )
handler.SetTimeout(30 * time.Minute)
return handler return handler
} }
@ -405,5 +399,7 @@ func DeleteUserWaitHandler(
} }
}, },
) )
handler.SetTimeout(15 * time.Minute)
handler.SetSleepBeforeWait(15 * time.Second)
return handler return handler
} }