diff --git a/.gitignore b/.gitignore
index e376491c..8ac36960 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,9 +15,9 @@ bin/
**/terraform.tfstate**
.terraform.lock.hcl
.terraform.tfstate.lock.info
-main.tf
-example.tf
-index.tf
+**/config.tfrc
+**/variables.tf
+**/service_account.json
# Test binary, built with `go test -c`
*.test
diff --git a/docs/data-sources/postgresflexalpha_flavor.md b/docs/data-sources/postgresflexalpha_flavor.md
new file mode 100644
index 00000000..4d28ffc3
--- /dev/null
+++ b/docs/data-sources/postgresflexalpha_flavor.md
@@ -0,0 +1,43 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "stackitprivatepreview_postgresflexalpha_flavor Data Source - stackitprivatepreview"
+subcategory: ""
+description: |-
+
+---
+
+# stackitprivatepreview_postgresflexalpha_flavor (Data Source)
+
+
+
+
+
+
+## Schema
+
+### Required
+
+- `cpu` (Number) The cpu count of the instance.
+- `node_type` (String) Defines the node type. It can be either `Single` or `Replica`.
+- `project_id` (String) STACKIT project ID to which the instance flavor is associated.
+- `ram` (Number) The memory of the instance in Gibibyte.
+- `region` (String) The region in which the flavor is queried.
+- `storage_class` (String) The storage class of the instance.
+
+### Read-Only
+
+- `description` (String) The flavor description.
+- `flavor_id` (String) The flavor id of the instance flavor.
+- `id` (String) The terraform id of the instance flavor.
+- `max_gb` (Number) maximum storage which can be ordered for the flavor in Gigabyte.
+- `min_gb` (Number) minimum storage which is required to order in Gigabyte.
+- `storage_classes` (Attributes List) (see [below for nested schema](#nestedatt--storage_classes))
+
+
+### Nested Schema for `storage_classes`
+
+Read-Only:
+
+- `class` (String)
+- `max_io_per_sec` (Number)
+- `max_through_in_mb` (Number)
diff --git a/docs/data-sources/postgresflexalpha_instance.md b/docs/data-sources/postgresflexalpha_instance.md
index cb387c9d..c5c4785d 100644
--- a/docs/data-sources/postgresflexalpha_instance.md
+++ b/docs/data-sources/postgresflexalpha_instance.md
@@ -35,11 +35,12 @@ data "stackitprivatepreview_postgresflexalpha_instance" "example" {
- `backup_schedule` (String)
- `encryption` (Attributes) (see [below for nested schema](#nestedatt--encryption))
-- `flavor` (Attributes) (see [below for nested schema](#nestedatt--flavor))
+- `flavor_id` (String)
- `id` (String) Terraform's internal data source. ID. It is structured as "`project_id`,`region`,`instance_id`".
- `name` (String) Instance name.
- `network` (Attributes) (see [below for nested schema](#nestedatt--network))
- `replicas` (Number)
+- `retention_days` (Number)
- `storage` (Attributes) (see [below for nested schema](#nestedatt--storage))
- `version` (String)
@@ -54,18 +55,6 @@ Read-Only:
- `service_account` (String)
-
-### Nested Schema for `flavor`
-
-Read-Only:
-
-- `cpu` (Number)
-- `description` (String)
-- `id` (String)
-- `node_type` (String)
-- `ram` (Number)
-
-
### Nested Schema for `network`
diff --git a/docs/data-sources/sqlserverflexalpha_flavor.md b/docs/data-sources/sqlserverflexalpha_flavor.md
new file mode 100644
index 00000000..426a0605
--- /dev/null
+++ b/docs/data-sources/sqlserverflexalpha_flavor.md
@@ -0,0 +1,43 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "stackitprivatepreview_sqlserverflexalpha_flavor Data Source - stackitprivatepreview"
+subcategory: ""
+description: |-
+
+---
+
+# stackitprivatepreview_sqlserverflexalpha_flavor (Data Source)
+
+
+
+
+
+
+## Schema
+
+### Required
+
+- `cpu` (Number) The cpu count of the instance.
+- `node_type` (String) Defines the node type. It can be either `Single` or `Replica`.
+- `project_id` (String) STACKIT project ID to which the instance flavor is associated.
+- `ram` (Number) The memory of the instance in Gibibyte.
+- `region` (String) The region in which the flavor is queried.
+- `storage_class` (String) The storage class of the instance.
+
+### Read-Only
+
+- `description` (String) The flavor description.
+- `flavor_id` (String) The flavor id of the instance flavor.
+- `id` (String) The terraform id of the instance flavor.
+- `max_gb` (Number) maximum storage which can be ordered for the flavor in Gigabyte.
+- `min_gb` (Number) minimum storage which is required to order in Gigabyte.
+- `storage_classes` (Attributes List) (see [below for nested schema](#nestedatt--storage_classes))
+
+
+### Nested Schema for `storage_classes`
+
+Read-Only:
+
+- `class` (String)
+- `max_io_per_sec` (Number)
+- `max_through_in_mb` (Number)
diff --git a/docs/resources/postgresflex_database.md b/docs/resources/postgresflexalpha_database.md
similarity index 53%
rename from docs/resources/postgresflex_database.md
rename to docs/resources/postgresflexalpha_database.md
index d36f1712..8fdceeb5 100644
--- a/docs/resources/postgresflex_database.md
+++ b/docs/resources/postgresflexalpha_database.md
@@ -1,16 +1,31 @@
---
# generated by https://github.com/hashicorp/terraform-plugin-docs
-page_title: "stackitprivatepreview_postgresflex_database Resource - stackitprivatepreview"
+page_title: "stackitprivatepreview_postgresflexalpha_database Resource - stackitprivatepreview"
subcategory: ""
description: |-
Postgres Flex database resource schema. Must have a region specified in the provider configuration.
---
-# stackitprivatepreview_postgresflex_database (Resource)
+# stackitprivatepreview_postgresflexalpha_database (Resource)
Postgres Flex database resource schema. Must have a `region` specified in the provider configuration.
+## Example Usage
+```terraform
+resource "stackitprivatepreview_postgresflexalpha_database" "example" {
+ project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+ instance_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+ name = "mydb"
+ owner = "myusername"
+}
+
+# Only use the import statement if you want to import an existing Postgres Flex database
+import {
+ to = stackitprivatepreview_postgresflexalpha_database.import-example
+ id = "${var.project_id},${var.region},${var.postgres_instance_id},${var.postgres_database_id}"
+}
+```
## Schema
@@ -28,5 +43,5 @@ Postgres Flex database resource schema. Must have a `region` specified in the pr
### Read-Only
-- `database_id` (String) Database ID.
+- `database_id` (Number) Database ID.
- `id` (String) Terraform's internal resource ID. It is structured as "`project_id`,`region`,`instance_id`,`database_id`".
diff --git a/docs/resources/postgresflexalpha_instance.md b/docs/resources/postgresflexalpha_instance.md
index 969f288e..ec20a30e 100644
--- a/docs/resources/postgresflexalpha_instance.md
+++ b/docs/resources/postgresflexalpha_instance.md
@@ -44,7 +44,7 @@ import {
- `backup_schedule` (String)
- `encryption` (Attributes) The encryption block. (see [below for nested schema](#nestedatt--encryption))
-- `flavor` (Attributes) The block that defines the flavor data. (see [below for nested schema](#nestedatt--flavor))
+- `flavor_id` (String)
- `name` (String) Instance name.
- `network` (Attributes) The network block configuration. (see [below for nested schema](#nestedatt--network))
- `project_id` (String) STACKIT project ID to which the instance is associated.
@@ -73,24 +73,6 @@ Required:
- `service_account` (String) The service account ID of the service account.
-
-### Nested Schema for `flavor`
-
-Required:
-
-- `cpu` (Number) The CPU count of the flavor.
-- `ram` (Number) The RAM count of the flavor.
-
-Optional:
-
-- `id` (String) The ID of the flavor.
-- `node_type` (String) The node type of the flavor. (Single or Replicas)
-
-Read-Only:
-
-- `description` (String) The flavor detailed flavor name.
-
-
### Nested Schema for `network`
diff --git a/docs/resources/postgresflexalpha_user.md b/docs/resources/postgresflexalpha_user.md
index d0e33902..771671a8 100644
--- a/docs/resources/postgresflexalpha_user.md
+++ b/docs/resources/postgresflexalpha_user.md
@@ -44,10 +44,7 @@ import {
### Read-Only
- `connection_string` (String)
-- `host` (String)
- `id` (String) Terraform's internal resource ID. It is structured as "`project_id`,`region`,`instance_id`,`user_id`".
- `password` (String, Sensitive)
-- `port` (Number)
- `status` (String)
-- `uri` (String, Sensitive)
- `user_id` (Number) User ID.
diff --git a/docs/resources/sqlserverflexalpha_instance.md b/docs/resources/sqlserverflexalpha_instance.md
index a6ec97b0..28dcbeca 100644
--- a/docs/resources/sqlserverflexalpha_instance.md
+++ b/docs/resources/sqlserverflexalpha_instance.md
@@ -42,7 +42,7 @@ import {
### Required
- `encryption` (Attributes) The encryption block. (see [below for nested schema](#nestedatt--encryption))
-- `flavor` (Attributes) (see [below for nested schema](#nestedatt--flavor))
+- `flavor_id` (String)
- `name` (String) Instance name.
- `network` (Attributes) The network block. (see [below for nested schema](#nestedatt--network))
- `project_id` (String) STACKIT project ID to which the instance is associated.
@@ -75,30 +75,12 @@ Required:
- `service_account` (String)
-
-### Nested Schema for `flavor`
-
-Required:
-
-- `cpu` (Number)
-- `node_type` (String)
-- `ram` (Number)
-
-Optional:
-
-- `id` (String)
-
-Read-Only:
-
-- `description` (String)
-
-
### Nested Schema for `network`
Required:
-- `access_scope` (String) The access scope of the instance. (e.g. SNA)
+- `access_scope` (String) The access scope of the instance. (SNA | PUBLIC)
- `acl` (List of String) The Access Control List (ACL) for the SQLServer Flex instance.
Read-Only:
diff --git a/go.mod b/go.mod
index 3102263d..5e6b8d1a 100644
--- a/go.mod
+++ b/go.mod
@@ -12,22 +12,20 @@ require (
github.com/hashicorp/terraform-plugin-testing v1.14.0
github.com/stackitcloud/stackit-sdk-go/core v0.20.1
github.com/stackitcloud/stackit-sdk-go/services/sqlserverflex v1.4.0
- github.com/stretchr/testify v1.10.0
github.com/teambition/rrule-go v1.8.2
)
require (
- github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
+ github.com/hashicorp/go-retryablehttp v0.7.8 // indirect
github.com/kr/text v0.2.0 // indirect
- golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc // indirect
+ golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2 // indirect
)
require (
github.com/ProtonMail/go-crypto v1.3.0 // indirect
github.com/agext/levenshtein v1.2.3 // indirect
github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
- github.com/cloudflare/circl v1.6.1 // indirect
- github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
+ github.com/cloudflare/circl v1.6.2 // indirect
github.com/fatih/color v1.18.0 // indirect
github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
github.com/golang/protobuf v1.5.4 // indirect
@@ -57,23 +55,21 @@ require (
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/oklog/run v1.2.0 // indirect
- github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect
github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
github.com/zclconf/go-cty v1.17.0 // indirect
golang.org/x/crypto v0.46.0 // indirect
- golang.org/x/mod v0.31.0 // indirect
+ golang.org/x/mod v0.32.0 // indirect
golang.org/x/net v0.48.0 // indirect
golang.org/x/sync v0.19.0 // indirect
- golang.org/x/sys v0.39.0 // indirect
- golang.org/x/text v0.32.0 // indirect
+ golang.org/x/sys v0.40.0 // indirect
+ golang.org/x/text v0.33.0 // indirect
golang.org/x/tools v0.40.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20251213004720-97cd9d5aeac2 // indirect
- google.golang.org/grpc v1.77.0 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b // indirect
+ google.golang.org/grpc v1.78.0 // indirect
google.golang.org/protobuf v1.36.11 // indirect
- gopkg.in/yaml.v3 v3.0.1 // indirect
)
tool golang.org/x/tools/cmd/goimports
diff --git a/go.sum b/go.sum
index 50268863..2ea27291 100644
--- a/go.sum
+++ b/go.sum
@@ -11,8 +11,8 @@ github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew
github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4=
github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw=
github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c=
-github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
-github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
+github.com/cloudflare/circl v1.6.2 h1:hL7VBpHHKzrV5WTfHCaBsgx/HGbBYlgrwvNXEVDYYsQ=
+github.com/cloudflare/circl v1.6.2/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s=
github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
@@ -68,8 +68,8 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-plugin v1.7.0 h1:YghfQH/0QmPNc/AZMTFE3ac8fipZyZECHdDPshfk+mA=
github.com/hashicorp/go-plugin v1.7.0/go.mod h1:BExt6KEaIYx804z8k4gRzRLEvxKVb+kn0NMcihqOqb8=
-github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
-github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
+github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48=
+github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
@@ -188,8 +188,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI=
-golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg=
+golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
+golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
@@ -212,10 +212,10 @@ golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
-golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
-golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc h1:bH6xUXay0AIFMElXG2rQ4uiE+7ncwtiOdPfYK1NK2XA=
-golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ=
+golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
+golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2 h1:O1cMQHRfwNpDfDJerqRoE2oD+AFlyid87D40L/OkkJo=
+golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2/go.mod h1:b7fPSJ0pKZ3ccUh8gnTONJxhn3c/PS6tyzQvyqw4iA8=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
@@ -224,8 +224,8 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
-golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
-golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
+golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
+golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
@@ -238,10 +238,10 @@ gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20251213004720-97cd9d5aeac2 h1:2I6GHUeJ/4shcDpoUlLs/2WPnhg7yJwvXtqcMJt9liA=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20251213004720-97cd9d5aeac2/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
-google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM=
-google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b h1:Mv8VFug0MP9e5vUxfBcE3vUkV6CImK3cMNMIDFjmzxU=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
+google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
+google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
diff --git a/pkg/postgresflexalpha/wait/wait.go b/pkg/postgresflexalpha/wait/wait.go
index cbac012b..8a541707 100644
--- a/pkg/postgresflexalpha/wait/wait.go
+++ b/pkg/postgresflexalpha/wait/wait.go
@@ -94,6 +94,7 @@ func CreateInstanceWaitHandler(
}
}
+ tflog.Info(ctx, "Waiting for instance (calling list users")
// // User operations aren't available right after an instance is deemed successful
// // To check if they are, perform a users request
_, err = a.ListUsersRequestExecute(ctx, projectId, region, instanceId)
@@ -114,7 +115,7 @@ func CreateInstanceWaitHandler(
},
)
// Sleep before wait is set because sometimes API returns 404 right after creation request
- handler.SetTimeout(45 * time.Minute).SetSleepBeforeWait(15 * time.Second)
+ handler.SetTimeout(90 * time.Minute).SetSleepBeforeWait(30 * time.Second)
return handler
}
@@ -154,72 +155,72 @@ func PartialUpdateInstanceWaitHandler(
return handler
}
-// DeleteInstanceWaitHandler will wait for instance deletion
-func DeleteInstanceWaitHandler(
- ctx context.Context,
- a APIClientInstanceInterface,
- projectId, region, instanceId string,
-) *wait.AsyncActionHandler[struct{}] {
- handler := wait.New(
- func() (waitFinished bool, response *struct{}, err error) {
- s, err := a.GetInstanceRequestExecute(ctx, projectId, region, instanceId)
- if err != nil {
- return false, nil, err
- }
- if s == nil || s.Id == nil || *s.Id != instanceId || s.Status == nil {
- return false, nil, nil
- }
- // TODO - maybe we want to validate status if no 404 error (only unknown or terminating should be valid)
- // switch *s.Status {
- // default:
- // return true, nil, fmt.Errorf("instance with id %s has unexpected status %s", instanceId, *s.Status)
- // case InstanceStateSuccess:
- // return false, nil, nil
- // case InstanceStateTerminating:
- // return false, nil, nil
- // }
-
- // TODO - add tflog for ignored cases
- oapiErr, ok := err.(*oapierror.GenericOpenAPIError) //nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped
- if !ok {
- return false, nil, err
- }
- if oapiErr.StatusCode != 404 {
- return false, nil, err
- }
- return true, nil, nil
- },
- )
- handler.SetTimeout(5 * time.Minute)
- return handler
-}
-
-// TODO - remove
-// ForceDeleteInstanceWaitHandler will wait for instance deletion
-func ForceDeleteInstanceWaitHandler(
- ctx context.Context,
- a APIClientInstanceInterface,
- projectId, region, instanceId string,
-) *wait.AsyncActionHandler[struct{}] {
- handler := wait.New(
- func() (waitFinished bool, response *struct{}, err error) {
- _, err = a.GetInstanceRequestExecute(ctx, projectId, region, instanceId)
- if err == nil {
- return false, nil, nil
- }
- oapiErr, ok := err.(*oapierror.GenericOpenAPIError) //nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped
- if !ok {
- return false, nil, err
- }
- if oapiErr.StatusCode != 404 {
- return false, nil, err
- }
- return true, nil, nil
- },
- )
- handler.SetTimeout(15 * time.Minute)
- return handler
-}
+//// DeleteInstanceWaitHandler will wait for instance deletion
+//func DeleteInstanceWaitHandler(
+// ctx context.Context,
+// a APIClientInstanceInterface,
+// projectId, region, instanceId string,
+//) *wait.AsyncActionHandler[struct{}] {
+// handler := wait.New(
+// func() (waitFinished bool, response *struct{}, err error) {
+// s, err := a.GetInstanceRequestExecute(ctx, projectId, region, instanceId)
+// if err != nil {
+// return false, nil, err
+// }
+// if s == nil || s.Id == nil || *s.Id != instanceId || s.Status == nil {
+// return false, nil, nil
+// }
+// // TODO - maybe we want to validate status if no 404 error (only unknown or terminating should be valid)
+// // switch *s.Status {
+// // default:
+// // return true, nil, fmt.Errorf("instance with id %s has unexpected status %s", instanceId, *s.Status)
+// // case InstanceStateSuccess:
+// // return false, nil, nil
+// // case InstanceStateTerminating:
+// // return false, nil, nil
+// // }
+//
+// // TODO - add tflog for ignored cases
+// oapiErr, ok := err.(*oapierror.GenericOpenAPIError) //nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped
+// if !ok {
+// return false, nil, err
+// }
+// if oapiErr.StatusCode != 404 {
+// return false, nil, err
+// }
+// return true, nil, nil
+// },
+// )
+// handler.SetTimeout(5 * time.Minute)
+// return handler
+//}
+//
+//// TODO - remove
+//// ForceDeleteInstanceWaitHandler will wait for instance deletion
+//func ForceDeleteInstanceWaitHandler(
+// ctx context.Context,
+// a APIClientInstanceInterface,
+// projectId, region, instanceId string,
+//) *wait.AsyncActionHandler[struct{}] {
+// handler := wait.New(
+// func() (waitFinished bool, response *struct{}, err error) {
+// _, err = a.GetInstanceRequestExecute(ctx, projectId, region, instanceId)
+// if err == nil {
+// return false, nil, nil
+// }
+// oapiErr, ok := err.(*oapierror.GenericOpenAPIError) //nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped
+// if !ok {
+// return false, nil, err
+// }
+// if oapiErr.StatusCode != 404 {
+// return false, nil, err
+// }
+// return true, nil, nil
+// },
+// )
+// handler.SetTimeout(15 * time.Minute)
+// return handler
+//}
// DeleteUserWaitHandler will wait for delete
func DeleteUserWaitHandler(
diff --git a/sample/sample.tfrc.example b/sample/config.tfrc.example
similarity index 100%
rename from sample/sample.tfrc.example
rename to sample/config.tfrc.example
diff --git a/sample/main.tf b/sample/main.tf
deleted file mode 100644
index 8f791797..00000000
--- a/sample/main.tf
+++ /dev/null
@@ -1 +0,0 @@
-# see other files
diff --git a/sample/postgres/outputs.tf b/sample/postgres/outputs.tf
new file mode 100644
index 00000000..bbe5490c
--- /dev/null
+++ b/sample/postgres/outputs.tf
@@ -0,0 +1,3 @@
+output "postgres_flavor" {
+ value = data.stackitprivatepreview_postgresflexalpha_flavor.pgsql_flavor.flavor_id
+}
diff --git a/sample/postgres/postresql.tf b/sample/postgres/postresql.tf
new file mode 100644
index 00000000..57a13fae
--- /dev/null
+++ b/sample/postgres/postresql.tf
@@ -0,0 +1,79 @@
+data "stackitprivatepreview_postgresflexalpha_flavor" "pgsql_flavor" {
+ project_id = var.project_id
+ region = "eu01"
+ cpu = 2
+ ram = 4
+ node_type = "Single"
+ storage_class = "premium-perf2-stackit"
+}
+
+resource "stackitprivatepreview_postgresflexalpha_instance" "msh-sna-pe-example" {
+ project_id = var.project_id
+ name = "msh-sna-pe-example"
+ backup_schedule = "0 0 * * *"
+ retention_days = 33
+ flavor_id = data.stackitprivatepreview_postgresflexalpha_flavor.pgsql_flavor.flavor_id
+ replicas = 1
+ storage = {
+ # class = "premium-perf2-stackit"
+ class = data.stackitprivatepreview_postgresflexalpha_flavor.pgsql_flavor.storage_class
+ size = 10
+ }
+ encryption = {
+ # key_id = stackit_kms_key.key.key_id
+ # keyring_id = stackit_kms_keyring.keyring.keyring_id
+ key_id = var.key_id
+ keyring_id = var.keyring_id
+ key_version = var.key_version
+ service_account = var.sa_email
+ }
+ network = {
+ acl = ["0.0.0.0/0", "193.148.160.0/19", "170.85.2.177/32"]
+ access_scope = "SNA"
+ }
+ version = 14
+}
+
+resource "stackitprivatepreview_postgresflexalpha_user" "ptlsdbadminuser" {
+ project_id = var.project_id
+ instance_id = stackitprivatepreview_postgresflexalpha_instance.msh-sna-pe-example.instance_id
+ username = var.db_admin_username
+ roles = ["createdb", "login"]
+ # roles = ["createdb", "login", "createrole"]
+}
+
+resource "stackitprivatepreview_postgresflexalpha_user" "ptlsdbuser" {
+ project_id = var.project_id
+ instance_id = stackitprivatepreview_postgresflexalpha_instance.msh-sna-pe-example.instance_id
+ username = var.db_username
+ roles = ["login"]
+ # roles = ["createdb", "login", "createrole"]
+}
+
+resource "stackitprivatepreview_postgresflexalpha_database" "example" {
+ depends_on = [stackitprivatepreview_postgresflexalpha_user.ptlsdbadminuser]
+ project_id = var.project_id
+ instance_id = stackitprivatepreview_postgresflexalpha_instance.msh-sna-pe-example.instance_id
+ name = var.db_name
+ owner = var.db_admin_username
+}
+
+data "stackitprivatepreview_postgresflexalpha_instance" "datapsql" {
+ project_id = var.project_id
+ instance_id = var.instance_id
+ region = "eu01"
+}
+
+output "psql_instance_id" {
+ value = data.stackitprivatepreview_postgresflexalpha_instance.datapsql.instance_id
+}
+
+output "psql_user_password" {
+ value = stackitprivatepreview_postgresflexalpha_user.ptlsdbuser.password
+ sensitive = true
+}
+
+output "psql_user_conn" {
+ value = stackitprivatepreview_postgresflexalpha_user.ptlsdbuser.connection_string
+ sensitive = true
+}
diff --git a/sample/providers.tf b/sample/postgres/providers.tf
similarity index 91%
rename from sample/providers.tf
rename to sample/postgres/providers.tf
index 62502811..c5171712 100644
--- a/sample/providers.tf
+++ b/sample/postgres/providers.tf
@@ -20,5 +20,5 @@ terraform {
provider "stackitprivatepreview" {
default_region = "eu01"
enable_beta_resources = true
- service_account_key_path = "./service_account.json"
+ service_account_key_path = "../service_account.json"
}
diff --git a/sample/variables.tf.example b/sample/postgres/variables.tf.example
similarity index 100%
rename from sample/variables.tf.example
rename to sample/postgres/variables.tf.example
diff --git a/sample/postresql.tf b/sample/postresql.tf
deleted file mode 100644
index 7ed600bd..00000000
--- a/sample/postresql.tf
+++ /dev/null
@@ -1,38 +0,0 @@
-resource "stackitprivatepreview_postgresflexalpha_instance" "ptlsdbsrv" {
- project_id = var.project_id
- name = "pgsql-example-instance"
- backup_schedule = "0 0 * * *"
- retention_days = 33
- flavor = {
- cpu = 2
- ram = 4
- }
- replicas = 1
- storage = {
- class = "premium-perf2-stackit"
- size = 5
- }
- encryption = {
- # key_id = stackit_kms_key.key.key_id
- # keyring_id = stackit_kms_keyring.keyring.keyring_id
- key_id = var.key_id
- keyring_id = var.keyring_id
- key_version = var.key_version
- service_account = var.sa_email
- }
- network = {
- acl = ["0.0.0.0/0", "193.148.160.0/19"]
- access_scope = "SNA"
- }
- version = 14
-}
-
-# data "stackitprivatepreview_postgresflexalpha_instance" "datapsql" {
-# project_id = var.project_id
-# instance_id = "fdb6573e-2dea-4e1d-a638-9157cf90c3ba"
-# region = "eu01"
-# }
-#
-# output "sample_psqlinstance" {
-# value = data.stackitprivatepreview_postgresflexalpha_instance.datapsql
-# }
diff --git a/sample/sqlserver/flavor.tf b/sample/sqlserver/flavor.tf
new file mode 100644
index 00000000..667ead6f
--- /dev/null
+++ b/sample/sqlserver/flavor.tf
@@ -0,0 +1,12 @@
+data "stackitprivatepreview_sqlserverflexalpha_flavor" "sqlserver_flavor" {
+ project_id = var.project_id
+ region = "eu01"
+ cpu = 4
+ ram = 16
+ node_type = "Single"
+ storage_class = "premium-perf2-stackit"
+}
+
+output "sqlserver_flavor" {
+ value = data.stackitprivatepreview_sqlserverflexalpha_flavor.sqlserver_flavor.flavor_id
+}
diff --git a/sample/sqlserver/providers.tf b/sample/sqlserver/providers.tf
new file mode 100644
index 00000000..c5171712
--- /dev/null
+++ b/sample/sqlserver/providers.tf
@@ -0,0 +1,24 @@
+terraform {
+ required_providers {
+ # stackit = {
+ # source = "registry.terraform.io/stackitcloud/stackit"
+ # version = "~> 0.70"
+ # }
+ stackitprivatepreview = {
+ source = "registry.terraform.io/mhenselin/stackitprivatepreview"
+ version = "~> 0.1"
+ }
+ }
+}
+
+# provider "stackit" {
+# default_region = "eu01"
+# enable_beta_resources = true
+# service_account_key_path = "./service_account.json"
+# }
+
+provider "stackitprivatepreview" {
+ default_region = "eu01"
+ enable_beta_resources = true
+ service_account_key_path = "../service_account.json"
+}
diff --git a/sample/sqlserver.tf b/sample/sqlserver/sqlserver.tf
similarity index 50%
rename from sample/sqlserver.tf
rename to sample/sqlserver/sqlserver.tf
index 9d84a4f3..4206b0fc 100644
--- a/sample/sqlserver.tf
+++ b/sample/sqlserver/sqlserver.tf
@@ -18,42 +18,12 @@
# value = stackit_kms_key.key.key_id
# }
-# resource "stackitalpha_postgresflexalpha_instance" "ptlsdbsrv" {
-# project_id = var.project_id
-# name = "example-instance"
-# acl = ["0.0.0.0/0"]
-# backup_schedule = "0 0 * * *"
-# flavor = {
-# cpu = 2
-# ram = 4
-# }
-# replicas = 1
-# storage = {
-# class = "premium-perf2-stackit"
-# size = 5
-# }
-# version = 14
-# encryption = {
-# key_id = stackitalpha_kms_key.key.id
-# keyring_id = stackitalpha_kms_keyring.keyring.keyring_id
-# key_version = "1"
-# service_account = var.sa_email
-# }
-# network = {
-# access_scope = "SNA"
-# }
-# }
-
-resource "stackitprivatepreview_sqlserverflexalpha_instance" "ptlsdbsqlsrv" {
+resource "stackitprivatepreview_sqlserverflexalpha_instance" "sqlsrv" {
project_id = var.project_id
name = "msh-example-instance-002"
backup_schedule = "0 3 * * *"
retention_days = 31
- flavor = {
- cpu = 4
- ram = 16
- node_type = "Single"
- }
+ flavor_id = data.stackitprivatepreview_sqlserverflexalpha_flavor.sqlserver_flavor.flavor_id
storage = {
class = "premium-perf2-stackit"
size = 50
@@ -73,28 +43,27 @@ resource "stackitprivatepreview_sqlserverflexalpha_instance" "ptlsdbsqlsrv" {
}
}
-# data "stackitalpha_sqlserverflexalpha_instance" "test" {
+# data "stackitprivatepreview_sqlserverflexalpha_instance" "test" {
# project_id = var.project_id
# instance_id = var.instance_id
# region = "eu01"
# }
# output "test" {
-# value = data.stackitalpha_sqlserverflexalpha_instance.test
+# value = data.stackitprivatepreview_sqlserverflexalpha_instance.test
# }
-# data "stackitalpha_sqlserverflexalpha_user" "testuser" {
-# project_id = var.project_id
-# instance_id = var.instance_id
-# region = "eu01"
-# }
+resource "stackitprivatepreview_sqlserverflexalpha_user" "ptlsdbadminuser" {
+ project_id = var.project_id
+ instance_id = stackitprivatepreview_sqlserverflexalpha_instance.sqlsrv.instance_id
+ username = var.db_admin_username
+ roles = ["##STACKIT_LoginManager##", "##STACKIT_DatabaseManager##"]
+}
+
+resource "stackitprivatepreview_sqlserverflexalpha_user" "ptlsdbuser" {
+ project_id = var.project_id
+ instance_id = stackitprivatepreview_sqlserverflexalpha_instance.sqlsrv.instance_id
+ username = var.db_username
+ roles = ["##STACKIT_LoginManager##"]
+}
-# data "stackitprivatepreview_sqlserverflexalpha_instance" "existing" {
-# project_id = var.project_id
-# instance_id = "b31575e9-9dbd-4ff6-b341-82d89c34f14f"
-# region = "eu01"
-# }
-#
-# output "myinstance" {
-# value = data.stackitprivatepreview_sqlserverflexalpha_instance.existing
-# }
diff --git a/sample/sqlserver/variables.tf.example b/sample/sqlserver/variables.tf.example
new file mode 100644
index 00000000..51a70be4
--- /dev/null
+++ b/sample/sqlserver/variables.tf.example
@@ -0,0 +1,15 @@
+variable "project_id" {
+ default = ""
+}
+
+variable "sa_email" {
+ default = ""
+}
+
+variable "db_admin_username" {
+ default = ""
+}
+
+variable "db_username" {
+ default = ""
+}
diff --git a/sample/tf.sh b/sample/tf.sh
index ce213a21..acec7988 100755
--- a/sample/tf.sh
+++ b/sample/tf.sh
@@ -3,28 +3,44 @@
# ./tf.sh apply > >(tee -a stdout.log) 2> >(tee -a stderr.log >&2)
-# copy or rename sample.tfrc.example and adjust it
-TERRAFORM_CONFIG=$(pwd)/sample.tfrc
-export TERRAFORM_CONFIG
+usage() {
+ echo "$0 usage:" && grep "[[:space:]].)\ #" "$0" | sed 's/#//' | sed -r 's/([a-z])\)/-\1/';
+ exit 0;
+}
-parsed_options=$(
- getopt -n "$0" -o dil -- "$@"
-) || exit
-eval "set -- $parsed_options"
-while [ "$#" -gt 0 ]; do
- case $1 in
- (-d) TF_LOG=DEBUG
- export TF_LOG
- shift;;
- (-i) TF_LOG=INFO
- export TF_LOG
- shift;;
- (-t) TF_LOG=TRACE
- export TF_LOG
- shift;;
- (--) shift; break;;
- (*) echo "Unknown option ${1}" # should never be reached.
+[ $# -eq 0 ] && usage
+
+CONFIG_FOLDER=$(dirname "$0")
+BINARY=terraform
+
+while getopts ":b:hdit" arg; do
+ case $arg in
+ b) # Set binary (default is terraform).
+ BINARY=${OPTARG}
+ ;;
+ d) # Set log level to DEBUG.
+ TF_LOG=DEBUG
+ export TF_LOG
+ ;;
+ i) # Set log level to INFO.
+ TF_LOG=INFO
+ export TF_LOG
+ ;;
+ t) # Set log level to TRACE.
+ TF_LOG=TRACE
+ export TF_LOG
+ ;;
+ h | *) # Display help.
+ usage
+ ;;
 esac
 done
-terraform "$*"
+
+# getopts does not consume positional args; drop the parsed options now.
+shift $((OPTIND-1))
+
+TERRAFORM_CONFIG=${CONFIG_FOLDER}/config.tfrc
+export TERRAFORM_CONFIG
+
+${BINARY} "$@"
diff --git a/sample/tofu.sh b/sample/tofu.sh
deleted file mode 100755
index efcc076c..00000000
--- a/sample/tofu.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env bash
-
-
-# ./tofu.sh apply > >(tee -a stdout.log) 2> >(tee -a stderr.log >&2)
-
-# copy or rename sample.tfrc.example and adjust it
-TERRAFORM_CONFIG=$(pwd)/sample.tfrc
-export TERRAFORM_CONFIG
-
-parsed_options=$(
- getopt -n "$0" -o l -- "$@"
-) || exit
-eval "set -- $parsed_options"
-while [ "$#" -gt 0 ]; do
- case $1 in
- (-l) TF_LOG=TRACE
- export TF_LOG
- shift;;
- (--) shift; break;;
- (*) echo "Unknown option ${1}" # should never be reached.
- esac
-done
-
-tofu "$*"
diff --git a/sample/user.tf b/sample/user.tf
deleted file mode 100644
index e51ee01f..00000000
--- a/sample/user.tf
+++ /dev/null
@@ -1,20 +0,0 @@
-# data "stackitprivatepreview_postgresflexalpha_user" "example" {
-# project_id = stackitprivatepreview_postgresflexalpha_instance.ptlsdbsrv.project_id
-# instance_id = stackitprivatepreview_postgresflexalpha_instance.ptlsdbsrv.instance_id
-# user_id = 1
-# }
-#
-# resource "stackitprivatepreview_postgresflexalpha_user" "ptlsdbuser" {
-# project_id = stackitprivatepreview_postgresflexalpha_instance.ptlsdbsrv.project_id
-# instance_id = stackitprivatepreview_postgresflexalpha_instance.ptlsdbsrv.instance_id
-# username = var.db_username
-# roles = ["createdb", "login"]
-# # roles = ["createdb", "login", "createrole"]
-# }
-#
-# resource "stackitprivatepreview_sqlserverflexalpha_user" "ptlsdbuser" {
-# project_id = stackitprivatepreview_sqlserverflexalpha_instance.ptlsdbsqlsrv.project_id
-# instance_id = stackitprivatepreview_sqlserverflexalpha_instance.ptlsdbsqlsrv.instance_id
-# username = var.db_username
-# roles = ["login"]
-# }
diff --git a/scripts/check-docs.sh b/scripts/check-docs.sh
index 4577dce0..7d50bc26 100755
--- a/scripts/check-docs.sh
+++ b/scripts/check-docs.sh
@@ -9,7 +9,7 @@ ROOT_DIR=$(git rev-parse --show-toplevel)
before_hash=$(find docs -type f -exec sha256sum {} \; | sort | sha256sum | awk '{print $1}')
# re-generate the docs
-$ROOT_DIR/scripts/tfplugindocs.sh
+"${ROOT_DIR}/scripts/tfplugindocs.sh"
after_hash=$(find docs -type f -exec sha256sum {} \; | sort | sha256sum | awk '{print $1}')
diff --git a/stackit/internal/services/postgresflexalpha/database/resource.go b/stackit/internal/services/postgresflexalpha/database/resource.go
index c75b7702..b6e02f81 100644
--- a/stackit/internal/services/postgresflexalpha/database/resource.go
+++ b/stackit/internal/services/postgresflexalpha/database/resource.go
@@ -5,12 +5,15 @@ import (
"errors"
"fmt"
"net/http"
+ "regexp"
"strconv"
"strings"
+ "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
"github.com/hashicorp/terraform-plugin-framework/path"
"github.com/hashicorp/terraform-plugin-framework/resource"
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
"github.com/hashicorp/terraform-plugin-framework/schema/validator"
@@ -90,7 +93,7 @@ func (r *databaseResource) ModifyPlan(
// Metadata returns the resource type name.
func (r *databaseResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
- resp.TypeName = req.ProviderTypeName + "_postgresflex_database"
+ resp.TypeName = req.ProviderTypeName + "_postgresflexalpha_database"
}
// Configure adds the provider configured client to the resource.
@@ -136,15 +139,13 @@ func (r *databaseResource) Schema(_ context.Context, _ resource.SchemaRequest, r
stringplanmodifier.UseStateForUnknown(),
},
},
- "database_id": schema.StringAttribute{
+ "database_id": schema.Int64Attribute{
Description: descriptions["database_id"],
Computed: true,
- PlanModifiers: []planmodifier.String{
- stringplanmodifier.UseStateForUnknown(),
- },
- Validators: []validator.String{
- validate.NoSeparator(),
+ PlanModifiers: []planmodifier.Int64{
+ int64planmodifier.UseStateForUnknown(),
},
+ Validators: []validator.Int64{},
},
"instance_id": schema.StringAttribute{
Description: descriptions["instance_id"],
@@ -171,18 +172,20 @@ func (r *databaseResource) Schema(_ context.Context, _ resource.SchemaRequest, r
},
},
"name": schema.StringAttribute{
- Description: descriptions["name"],
- Required: true,
- PlanModifiers: []planmodifier.String{
- stringplanmodifier.RequiresReplace(),
+ Description: descriptions["name"],
+ Required: true,
+ PlanModifiers: []planmodifier.String{},
+ Validators: []validator.String{
+ stringvalidator.RegexMatches(
+ regexp.MustCompile("^[a-z][a-z0-9]*$"),
+ "must start with a lowercase letter, followed only by lowercase letters or numbers",
+ ),
+ },
},
"owner": schema.StringAttribute{
- Description: descriptions["owner"],
- Required: true,
- PlanModifiers: []planmodifier.String{
- stringplanmodifier.RequiresReplace(),
- },
+ Description: descriptions["owner"],
+ Required: true,
+ PlanModifiers: []planmodifier.String{},
},
"region": schema.StringAttribute{
Optional: true,
@@ -348,11 +351,86 @@ func (r *databaseResource) Read(
// Update updates the resource and sets the updated Terraform state on success.
func (r *databaseResource) Update(
ctx context.Context,
- _ resource.UpdateRequest,
+ req resource.UpdateRequest,
resp *resource.UpdateResponse,
) { // nolint:gocritic // function signature required by Terraform
- // Update shouldn't be called
- core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating database", "Database can't be updated")
+ var model Model
+ diags := req.Plan.Get(ctx, &model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ ctx = core.InitProviderContext(ctx)
+
+ projectId := model.ProjectId.ValueString()
+ instanceId := model.InstanceId.ValueString()
+ databaseId := model.DatabaseId.ValueInt64()
+ region := model.Region.ValueString()
+ ctx = tflog.SetField(ctx, "project_id", projectId)
+ ctx = tflog.SetField(ctx, "instance_id", instanceId)
+ ctx = tflog.SetField(ctx, "database_id", databaseId)
+ ctx = tflog.SetField(ctx, "region", region)
+
+ // Retrieve values from state
+ var stateModel Model
+ diags = req.State.Get(ctx, &stateModel)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ modified := false
+ var payload postgresflexalpha.UpdateDatabasePartiallyRequestPayload
+ if stateModel.Name != model.Name {
+ payload.Name = model.Name.ValueStringPointer()
+ modified = true
+ }
+
+ if stateModel.Owner != model.Owner {
+ payload.Owner = model.Owner.ValueStringPointer()
+ modified = true
+ }
+
+ if !modified {
+ tflog.Info(ctx, "no modification detected")
+ return
+ }
+
+ // Update existing database
+ res, err := r.client.UpdateDatabasePartiallyRequest(
+ ctx,
+ projectId,
+ region,
+ instanceId,
+ databaseId,
+ ).UpdateDatabasePartiallyRequestPayload(payload).Execute()
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "error updating database", err.Error())
+ return
+ }
+
+ ctx = core.LogResponse(ctx)
+
+ // Map response body to schema
+ err = mapFieldsUpdatePartially(res, &model, region)
+ if err != nil {
+ core.LogAndAddError(
+ ctx,
+ &resp.Diagnostics,
+ "Error updating database",
+ fmt.Sprintf("Processing API payload: %v", err),
+ )
+ return
+ }
+ // Set state to fully populated data
+ diags = resp.State.Set(ctx, model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ tflog.Info(ctx, "Postgres Flex database updated")
+
}
// Delete deletes the resource and removes the Terraform state on success.
@@ -361,7 +439,6 @@ func (r *databaseResource) Delete(
req resource.DeleteRequest,
resp *resource.DeleteResponse,
) { // nolint:gocritic // function signature required by Terraform
- // Retrieve values from plan
var model Model
diags := req.State.Get(ctx, &model)
resp.Diagnostics.Append(diags...)
@@ -424,11 +501,11 @@ func (r *databaseResource) ImportState(
tflog.Info(ctx, "Postgres Flex database state imported")
}
-func mapFields(databaseResp *postgresflexalpha.ListDatabase, model *Model, region string) error {
- if databaseResp == nil {
+func mapFields(resp *postgresflexalpha.ListDatabase, model *Model, region string) error {
+ if resp == nil {
return fmt.Errorf("response is nil")
}
- if databaseResp.Id == nil || *databaseResp.Id == 0 {
+ if resp.Id == nil || *resp.Id == 0 {
return fmt.Errorf("id not present")
}
if model == nil {
@@ -438,8 +515,8 @@ func mapFields(databaseResp *postgresflexalpha.ListDatabase, model *Model, regio
var databaseId int64
if model.DatabaseId.ValueInt64() != 0 {
databaseId = model.DatabaseId.ValueInt64()
- } else if databaseResp.Id != nil {
- databaseId = *databaseResp.Id
+ } else if resp.Id != nil {
+ databaseId = *resp.Id
} else {
return fmt.Errorf("database id not present")
}
@@ -447,33 +524,32 @@ func mapFields(databaseResp *postgresflexalpha.ListDatabase, model *Model, regio
model.ProjectId.ValueString(), region, model.InstanceId.ValueString(), strconv.FormatInt(databaseId, 10),
)
model.DatabaseId = types.Int64Value(databaseId)
- model.Name = types.StringPointerValue(databaseResp.Name)
+ model.Name = types.StringPointerValue(resp.Name)
model.Region = types.StringValue(region)
-
- ownerStr, err := mapOwner(databaseResp)
- if err != nil {
- return fmt.Errorf("error mapping owner: %w", err)
- }
-
- model.Owner = types.StringPointerValue(ownerStr)
+ model.Owner = types.StringPointerValue(cleanString(resp.Owner))
return nil
}
-func mapOwner(databaseResp *postgresflexalpha.ListDatabase) (*string, error) {
- if databaseResp == nil {
- return nil, fmt.Errorf("response is nil")
+func mapFieldsUpdate(res *postgresflexalpha.UpdateDatabaseResponse, model *Model, region string) error {
+ if res == nil {
+ return fmt.Errorf("response is nil")
}
+ return mapFields(res.Database, model, region)
+}
- if databaseResp.Owner == nil {
- return nil, nil
+func mapFieldsUpdatePartially(res *postgresflexalpha.UpdateDatabasePartiallyResponse, model *Model, region string) error {
+ if res == nil {
+ return fmt.Errorf("response is nil")
}
- ownerStr := *databaseResp.Owner
+ return mapFields(res.Database, model, region)
+}
- // If the field is returned between with quotes, we trim them to prevent an inconsistent result after apply
- ownerStr = strings.TrimPrefix(ownerStr, `"`)
- ownerStr = strings.TrimSuffix(ownerStr, `"`)
-
- return &ownerStr, nil
+func cleanString(s *string) *string {
+ if s == nil {
+ return nil
+ }
+ res := strings.Trim(*s, "\"")
+ return &res
}
func toCreatePayload(model *Model) (*postgresflexalpha.CreateDatabaseRequestPayload, error) {
@@ -496,6 +572,7 @@ func getDatabase(
projectId, region, instanceId string,
databaseId int64,
) (*postgresflexalpha.ListDatabase, error) {
+ // TODO - implement pagination handling
resp, err := client.ListDatabasesRequestExecute(ctx, projectId, region, instanceId)
if err != nil {
return nil, err
diff --git a/stackit/internal/services/postgresflexalpha/database/resource_test.go b/stackit/internal/services/postgresflexalpha/database/resource_test.go
index 3191ed62..c4150d47 100644
--- a/stackit/internal/services/postgresflexalpha/database/resource_test.go
+++ b/stackit/internal/services/postgresflexalpha/database/resource_test.go
@@ -1,6 +1,7 @@
package postgresflexalpha
import (
+ "reflect"
"testing"
"github.com/google/go-cmp/cmp"
@@ -182,3 +183,50 @@ func TestToCreatePayload(t *testing.T) {
)
}
}
+
+func Test_cleanString(t *testing.T) {
+ type args struct {
+ s *string
+ }
+ tests := []struct {
+ name string
+ args args
+ want *string
+ }{
+ {
+ name: "simple_value",
+ args: args{
+ s: utils.Ptr("mytest"),
+ },
+ want: utils.Ptr("mytest"),
+ },
+ {
+ name: "simple_value_with_quotes",
+ args: args{
+ s: utils.Ptr("\"mytest\""),
+ },
+ want: utils.Ptr("mytest"),
+ },
+ {
+ name: "simple_values_with_quotes",
+ args: args{
+ s: utils.Ptr("\"my test here\""),
+ },
+ want: utils.Ptr("my test here"),
+ },
+ {
+ name: "simple_values",
+ args: args{
+ s: utils.Ptr("my test here"),
+ },
+ want: utils.Ptr("my test here"),
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := cleanString(tt.args.s); !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("cleanString() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/stackit/internal/services/postgresflexalpha/flavor.go b/stackit/internal/services/postgresflexalpha/flavor.go
deleted file mode 100644
index 34361068..00000000
--- a/stackit/internal/services/postgresflexalpha/flavor.go
+++ /dev/null
@@ -1,502 +0,0 @@
-package postgresflex
-
-import (
- "context"
- "fmt"
- "strings"
-
- "github.com/hashicorp/terraform-plugin-framework/attr"
- "github.com/hashicorp/terraform-plugin-framework/diag"
- "github.com/hashicorp/terraform-plugin-framework/types"
- "github.com/hashicorp/terraform-plugin-framework/types/basetypes"
- "github.com/hashicorp/terraform-plugin-go/tftypes"
-)
-
-var _ basetypes.ObjectTypable = FlavorType{}
-
-type FlavorType struct {
- basetypes.ObjectType
-}
-
-func (t FlavorType) Equal(o attr.Type) bool {
- other, ok := o.(FlavorType)
-
- if !ok {
- return false
- }
-
- return t.ObjectType.Equal(other.ObjectType)
-}
-
-func (t FlavorType) String() string {
- return "FlavorType"
-}
-
-func (t FlavorType) ValueFromObject(_ context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) {
- var diags diag.Diagnostics
-
- attributes := in.Attributes()
-
- cpuAttribute, ok := attributes["cpu"]
-
- if !ok {
- diags.AddError(
- "Attribute Missing",
- `cpu is missing from object`)
-
- return nil, diags
- }
-
- cpuVal, ok := cpuAttribute.(basetypes.Int64Value)
-
- if !ok {
- diags.AddError(
- "Attribute Wrong Type",
- fmt.Sprintf(`cpu expected to be basetypes.Int64Value, was: %T`, cpuAttribute))
- }
-
- descriptionAttribute, ok := attributes["description"]
-
- if !ok {
- diags.AddError(
- "Attribute Missing",
- `description is missing from object`)
-
- return nil, diags
- }
-
- descriptionVal, ok := descriptionAttribute.(basetypes.StringValue)
-
- if !ok {
- diags.AddError(
- "Attribute Wrong Type",
- fmt.Sprintf(`description expected to be basetypes.StringValue, was: %T`, descriptionAttribute))
- }
-
- idAttribute, ok := attributes["id"]
-
- if !ok {
- diags.AddError(
- "Attribute Missing",
- `id is missing from object`)
-
- return nil, diags
- }
-
- idVal, ok := idAttribute.(basetypes.StringValue)
-
- if !ok {
- diags.AddError(
- "Attribute Wrong Type",
- fmt.Sprintf(`id expected to be basetypes.StringValue, was: %T`, idAttribute))
- }
-
- memoryAttribute, ok := attributes["memory"]
-
- if !ok {
- diags.AddError(
- "Attribute Missing",
- `memory is missing from object`)
-
- return nil, diags
- }
-
- ramVal, ok := memoryAttribute.(basetypes.Int64Value)
-
- if !ok {
- diags.AddError(
- "Attribute Wrong Type",
- fmt.Sprintf(`memory expected to be basetypes.Int64Value, was: %T`, memoryAttribute))
- }
-
- if diags.HasError() {
- return nil, diags
- }
-
- return FlavorValue{
- Cpu: cpuVal,
- Description: descriptionVal,
- Id: idVal,
- Ram: ramVal,
- state: attr.ValueStateKnown,
- }, diags
-}
-
-func NewFlavorValueNull() FlavorValue {
- return FlavorValue{
- state: attr.ValueStateNull,
- }
-}
-
-func NewFlavorValueUnknown() FlavorValue {
- return FlavorValue{
- state: attr.ValueStateUnknown,
- }
-}
-
-func NewFlavorValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (FlavorValue, diag.Diagnostics) {
- var diags diag.Diagnostics
-
- // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521
- ctx := context.Background()
-
- for name, attributeType := range attributeTypes {
- attribute, ok := attributes[name]
-
- if !ok {
- diags.AddError(
- "Missing FlavorValue Attribute Value",
- "While creating a FlavorValue value, a missing attribute value was detected. "+
- "A FlavorValue must contain values for all attributes, even if null or unknown. "+
- "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
- fmt.Sprintf("FlavorValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()),
- )
-
- continue
- }
-
- if !attributeType.Equal(attribute.Type(ctx)) {
- diags.AddError(
- "Invalid FlavorValue Attribute Type",
- "While creating a FlavorValue value, an invalid attribute value was detected. "+
- "A FlavorValue must use a matching attribute type for the value. "+
- "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
- fmt.Sprintf("FlavorValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+
- fmt.Sprintf("FlavorValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)),
- )
- }
- }
-
- for name := range attributes {
- _, ok := attributeTypes[name]
-
- if !ok {
- diags.AddError(
- "Extra FlavorValue Attribute Value",
- "While creating a FlavorValue value, an extra attribute value was detected. "+
- "A FlavorValue must not contain values beyond the expected attribute types. "+
- "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
- fmt.Sprintf("Extra FlavorValue Attribute Name: %s", name),
- )
- }
- }
-
- if diags.HasError() {
- return NewFlavorValueUnknown(), diags
- }
-
- cpuAttribute, ok := attributes["cpu"]
-
- if !ok {
- diags.AddError(
- "Attribute Missing",
- `cpu is missing from object`)
-
- return NewFlavorValueUnknown(), diags
- }
-
- cpuVal, ok := cpuAttribute.(basetypes.Int64Value)
-
- if !ok {
- diags.AddError(
- "Attribute Wrong Type",
- fmt.Sprintf(`cpu expected to be basetypes.Int64Value, was: %T`, cpuAttribute))
- }
-
- descriptionAttribute, ok := attributes["description"]
-
- if !ok {
- diags.AddError(
- "Attribute Missing",
- `description is missing from object`)
-
- return NewFlavorValueUnknown(), diags
- }
-
- descriptionVal, ok := descriptionAttribute.(basetypes.StringValue)
-
- if !ok {
- diags.AddError(
- "Attribute Wrong Type",
- fmt.Sprintf(`description expected to be basetypes.StringValue, was: %T`, descriptionAttribute))
- }
-
- idAttribute, ok := attributes["id"]
-
- if !ok {
- diags.AddError(
- "Attribute Missing",
- `id is missing from object`)
-
- return NewFlavorValueUnknown(), diags
- }
-
- idVal, ok := idAttribute.(basetypes.StringValue)
-
- if !ok {
- diags.AddError(
- "Attribute Wrong Type",
- fmt.Sprintf(`id expected to be basetypes.StringValue, was: %T`, idAttribute))
- }
-
- memoryAttribute, ok := attributes["memory"]
-
- if !ok {
- diags.AddError(
- "Attribute Missing",
- `memory is missing from object`)
-
- return NewFlavorValueUnknown(), diags
- }
-
- memoryVal, ok := memoryAttribute.(basetypes.Int64Value)
-
- if !ok {
- diags.AddError(
- "Attribute Wrong Type",
- fmt.Sprintf(`memory expected to be basetypes.Int64Value, was: %T`, memoryAttribute))
- }
-
- if diags.HasError() {
- return NewFlavorValueUnknown(), diags
- }
-
- return FlavorValue{
- Cpu: cpuVal,
- Description: descriptionVal,
- Id: idVal,
- Ram: memoryVal,
- state: attr.ValueStateKnown,
- }, diags
-}
-
-func NewFlavorValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) FlavorValue {
- object, diags := NewFlavorValue(attributeTypes, attributes)
-
- if diags.HasError() {
- // This could potentially be added to the diag package.
- diagsStrings := make([]string, 0, len(diags))
-
- for _, diagnostic := range diags {
- diagsStrings = append(diagsStrings, fmt.Sprintf(
- "%s | %s | %s",
- diagnostic.Severity(),
- diagnostic.Summary(),
- diagnostic.Detail()))
- }
-
- panic("NewFlavorValueMust received error(s): " + strings.Join(diagsStrings, "\n"))
- }
-
- return object
-}
-
-func (t FlavorType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) {
- if in.Type() == nil {
- return NewFlavorValueNull(), nil
- }
-
- if !in.Type().Equal(t.TerraformType(ctx)) {
- return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type())
- }
-
- if !in.IsKnown() {
- return NewFlavorValueUnknown(), nil
- }
-
- if in.IsNull() {
- return NewFlavorValueNull(), nil
- }
-
- attributes := map[string]attr.Value{}
-
- val := map[string]tftypes.Value{}
-
- err := in.As(&val)
-
- if err != nil {
- return nil, err
- }
-
- for k, v := range val {
- a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v)
-
- if err != nil {
- return nil, err
- }
-
- attributes[k] = a
- }
-
- return NewFlavorValueMust(FlavorValue{}.AttributeTypes(ctx), attributes), nil
-}
-
-func (t FlavorType) ValueType(_ context.Context) attr.Value {
- return FlavorValue{}
-}
-
-var _ basetypes.ObjectValuable = FlavorValue{}
-
-type FlavorValue struct {
- Cpu basetypes.Int64Value `tfsdk:"cpu"`
- Description basetypes.StringValue `tfsdk:"description"`
- Id basetypes.StringValue `tfsdk:"id"`
- Ram basetypes.Int64Value `tfsdk:"ram"`
- state attr.ValueState
-}
-
-func (v FlavorValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) {
- attrTypes := make(map[string]tftypes.Type, 4)
-
- var val tftypes.Value
- var err error
-
- attrTypes["cpu"] = basetypes.Int64Type{}.TerraformType(ctx)
- attrTypes["description"] = basetypes.StringType{}.TerraformType(ctx)
- attrTypes["id"] = basetypes.StringType{}.TerraformType(ctx)
- attrTypes["memory"] = basetypes.Int64Type{}.TerraformType(ctx)
-
- objectType := tftypes.Object{AttributeTypes: attrTypes}
-
- switch v.state {
- case attr.ValueStateKnown:
- vals := make(map[string]tftypes.Value, 4)
-
- val, err = v.Cpu.ToTerraformValue(ctx)
-
- if err != nil {
- return tftypes.NewValue(objectType, tftypes.UnknownValue), err
- }
-
- vals["cpu"] = val
-
- val, err = v.Description.ToTerraformValue(ctx)
-
- if err != nil {
- return tftypes.NewValue(objectType, tftypes.UnknownValue), err
- }
-
- vals["description"] = val
-
- val, err = v.Id.ToTerraformValue(ctx)
-
- if err != nil {
- return tftypes.NewValue(objectType, tftypes.UnknownValue), err
- }
-
- vals["id"] = val
-
- val, err = v.Ram.ToTerraformValue(ctx)
-
- if err != nil {
- return tftypes.NewValue(objectType, tftypes.UnknownValue), err
- }
-
- vals["memory"] = val
-
- if err := tftypes.ValidateValue(objectType, vals); err != nil {
- return tftypes.NewValue(objectType, tftypes.UnknownValue), err
- }
-
- return tftypes.NewValue(objectType, vals), nil
- case attr.ValueStateNull:
- return tftypes.NewValue(objectType, nil), nil
- case attr.ValueStateUnknown:
- return tftypes.NewValue(objectType, tftypes.UnknownValue), nil
- default:
- panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state))
- }
-}
-
-func (v FlavorValue) IsNull() bool {
- return v.state == attr.ValueStateNull
-}
-
-func (v FlavorValue) IsUnknown() bool {
- return v.state == attr.ValueStateUnknown
-}
-
-func (v FlavorValue) String() string {
- return "FlavorValue"
-}
-
-func (v FlavorValue) ToObjectValue(_ context.Context) (basetypes.ObjectValue, diag.Diagnostics) {
- var diags diag.Diagnostics
-
- attributeTypes := map[string]attr.Type{
- "cpu": basetypes.Int64Type{},
- "description": basetypes.StringType{},
- "id": basetypes.StringType{},
- "memory": basetypes.Int64Type{},
- }
-
- if v.IsNull() {
- return types.ObjectNull(attributeTypes), diags
- }
-
- if v.IsUnknown() {
- return types.ObjectUnknown(attributeTypes), diags
- }
-
- objVal, diags := types.ObjectValue(
- attributeTypes,
- map[string]attr.Value{
- "cpu": v.Cpu,
- "description": v.Description,
- "id": v.Id,
- "memory": v.Ram,
- })
-
- return objVal, diags
-}
-
-func (v FlavorValue) Equal(o attr.Value) bool {
- other, ok := o.(FlavorValue)
-
- if !ok {
- return false
- }
-
- if v.state != other.state {
- return false
- }
-
- if v.state != attr.ValueStateKnown {
- return true
- }
-
- if !v.Cpu.Equal(other.Cpu) {
- return false
- }
-
- if !v.Description.Equal(other.Description) {
- return false
- }
-
- if !v.Id.Equal(other.Id) {
- return false
- }
-
- if !v.Ram.Equal(other.Ram) {
- return false
- }
-
- return true
-}
-
-func (v FlavorValue) Type(ctx context.Context) attr.Type {
- return FlavorType{
- basetypes.ObjectType{
- AttrTypes: v.AttributeTypes(ctx),
- },
- }
-}
-
-func (v FlavorValue) AttributeTypes(_ context.Context) map[string]attr.Type {
- return map[string]attr.Type{
- "cpu": basetypes.Int64Type{},
- "description": basetypes.StringType{},
- "id": basetypes.StringType{},
- "memory": basetypes.Int64Type{},
- }
-}
diff --git a/stackit/internal/services/postgresflexalpha/flavor/datasource.go b/stackit/internal/services/postgresflexalpha/flavor/datasource.go
new file mode 100644
index 00000000..b7ed25ee
--- /dev/null
+++ b/stackit/internal/services/postgresflexalpha/flavor/datasource.go
@@ -0,0 +1,251 @@
+package postgresFlexAlphaFlavor
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-framework/types/basetypes"
+ "github.com/mhenselin/terraform-provider-stackitprivatepreview/pkg/postgresflexalpha"
+ "github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/conversion"
+ postgresflex "github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/services/postgresflexalpha"
+ postgresflexUtils "github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/services/postgresflexalpha/utils"
+ "github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/utils"
+
+ "github.com/hashicorp/terraform-plugin-framework/datasource"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/core"
+)
+
+// Ensure the implementation satisfies the expected interfaces.
+// Compile-time check: *flavorDataSource must implement datasource.DataSource.
+var (
+	_ datasource.DataSource = &flavorDataSource{}
+)
+
+// FlavorModel maps the flavor data source schema data to Go types.
+// ProjectId, Region, StorageClass, Cpu, Memory (tfsdk "ram") and NodeType are
+// the required filter inputs; the remaining fields are computed from the
+// single flavor matching that filter (see Read).
+type FlavorModel struct {
+	ProjectId      types.String `tfsdk:"project_id"`
+	Region         types.String `tfsdk:"region"`
+	StorageClass   types.String `tfsdk:"storage_class"`
+	Cpu            types.Int64  `tfsdk:"cpu"`
+	Description    types.String `tfsdk:"description"`
+	Id             types.String `tfsdk:"id"`
+	FlavorId       types.String `tfsdk:"flavor_id"`
+	MaxGb          types.Int64  `tfsdk:"max_gb"`
+	Memory         types.Int64  `tfsdk:"ram"` // NOTE(review): Go field "Memory" vs. attribute "ram" — consider aligning names
+	MinGb          types.Int64  `tfsdk:"min_gb"`
+	NodeType       types.String `tfsdk:"node_type"`
+	StorageClasses types.List   `tfsdk:"storage_classes"`
+}
+
+// NewFlavorDataSource is a helper function to simplify the provider implementation.
+func NewFlavorDataSource() datasource.DataSource {
+	var d flavorDataSource
+	return &d
+}
+
+// flavorDataSource is the data source implementation.
+type flavorDataSource struct {
+	client       *postgresflexalpha.APIClient // API client, set in Configure
+	providerData core.ProviderData            // provider configuration, set in Configure
+}
+
+// Metadata returns the data source type name, composed of the provider
+// type name and the "_postgresflexalpha_flavor" suffix.
+func (r *flavorDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
+	resp.TypeName = req.ProviderTypeName + "_postgresflexalpha_flavor"
+}
+
+// Configure adds the provider configured client to the data source.
+// It parses the provider data and builds the Postgres Flex API client;
+// on failure it records diagnostics and returns without setting the client.
+func (r *flavorDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
+	var ok bool
+	r.providerData, ok = conversion.ParseProviderData(ctx, req.ProviderData, &resp.Diagnostics)
+	if !ok {
+		return
+	}
+
+	apiClient := postgresflexUtils.ConfigureClient(ctx, &r.providerData, &resp.Diagnostics)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+	r.client = apiClient
+	// Fixed copy-paste: this is the flavor data source, not the instance one.
+	tflog.Info(ctx, "Postgres Flex flavor client configured")
+}
+
+// Schema defines the attributes of the flavor data source. The required
+// attributes form the filter; the computed ones describe the matched flavor.
+func (r *flavorDataSource) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
+	resp.Schema = schema.Schema{
+		Attributes: map[string]schema.Attribute{
+			"project_id": schema.StringAttribute{
+				Required:            true,
+				Description:         "The STACKIT project ID.",
+				MarkdownDescription: "The STACKIT project ID.",
+			},
+			"region": schema.StringAttribute{
+				Required:            true,
+				Description:         "The region which should be addressed",
+				MarkdownDescription: "The region which should be addressed",
+			},
+			"cpu": schema.Int64Attribute{
+				Required:            true,
+				Description:         "The cpu count of the instance.",
+				MarkdownDescription: "The cpu count of the instance.",
+			},
+			"ram": schema.Int64Attribute{
+				Required:            true,
+				Description:         "The memory of the instance in Gibibyte.",
+				MarkdownDescription: "The memory of the instance in Gibibyte.",
+			},
+			"storage_class": schema.StringAttribute{
+				Required:            true,
+				Description:         "The storage class of the instance.",
+				MarkdownDescription: "The storage class of the instance.",
+			},
+			"description": schema.StringAttribute{
+				Computed:            true,
+				Description:         "The flavor description.",
+				MarkdownDescription: "The flavor description.",
+			},
+			"id": schema.StringAttribute{
+				Computed:            true,
+				Description:         "The terraform id of the instance flavor.",
+				MarkdownDescription: "The terraform id of the instance flavor.",
+			},
+			"flavor_id": schema.StringAttribute{
+				Computed:            true,
+				Description:         "The flavor id of the instance flavor.",
+				MarkdownDescription: "The flavor id of the instance flavor.",
+			},
+			"max_gb": schema.Int64Attribute{
+				Computed:            true,
+				Description:         "maximum storage which can be ordered for the flavor in Gigabyte.",
+				MarkdownDescription: "maximum storage which can be ordered for the flavor in Gigabyte.",
+			},
+			"min_gb": schema.Int64Attribute{
+				Computed:            true,
+				Description:         "minimum storage which is required to order in Gigabyte.",
+				MarkdownDescription: "minimum storage which is required to order in Gigabyte.",
+			},
+			"node_type": schema.StringAttribute{
+				Required:            true,
+				Description:         "defines the nodeType it can be either single or replica",
+				MarkdownDescription: "defines the nodeType it can be either single or replica",
+			},
+			"storage_classes": schema.ListNestedAttribute{
+				Computed: true,
+				NestedObject: schema.NestedAttributeObject{
+					Attributes: map[string]schema.Attribute{
+						"class": schema.StringAttribute{
+							Computed: true,
+						},
+						"max_io_per_sec": schema.Int64Attribute{
+							Computed: true,
+						},
+						"max_through_in_mb": schema.Int64Attribute{
+							Computed: true,
+						},
+					},
+					CustomType: postgresflex.StorageClassesType{
+						ObjectType: types.ObjectType{
+							AttrTypes: postgresflex.StorageClassesValue{}.AttributeTypes(ctx),
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+// Read refreshes the Terraform state with the latest data.
+// It lists all flavors for the configured project and region and selects the
+// single flavor matching the requested cpu, ram, node_type and storage_class;
+// zero or multiple matches are reported as errors.
+func (r *flavorDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
+	var model FlavorModel
+	diags := req.Config.Get(ctx, &model)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	ctx = core.InitProviderContext(ctx)
+
+	projectId := model.ProjectId.ValueString()
+	region := r.providerData.GetRegionWithOverride(model.Region)
+	ctx = tflog.SetField(ctx, "project_id", projectId)
+	ctx = tflog.SetField(ctx, "region", region)
+
+	flavors, err := getAllFlavors(ctx, r.client, projectId, region)
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading flavors", fmt.Sprintf("getAllFlavors: %v", err))
+		return
+	}
+
+	var foundFlavors []postgresflexalpha.ListFlavors
+	for _, flavor := range flavors {
+		// Guard against incomplete API responses before dereferencing.
+		if flavor.Cpu == nil || flavor.Memory == nil || flavor.NodeType == nil || flavor.StorageClasses == nil {
+			continue
+		}
+		if model.Cpu.ValueInt64() != *flavor.Cpu {
+			continue
+		}
+		if model.Memory.ValueInt64() != *flavor.Memory {
+			continue
+		}
+		if model.NodeType.ValueString() != *flavor.NodeType {
+			continue
+		}
+		for _, sc := range *flavor.StorageClasses {
+			if sc.Class == nil || model.StorageClass.ValueString() != *sc.Class {
+				continue
+			}
+			// Append each flavor at most once, even if the API were to
+			// report the same storage class twice.
+			foundFlavors = append(foundFlavors, flavor)
+			break
+		}
+	}
+	if len(foundFlavors) == 0 {
+		resp.Diagnostics.AddError("get flavor", "could not find requested flavor")
+		return
+	}
+	if len(foundFlavors) > 1 {
+		resp.Diagnostics.AddError("get flavor", "found too many matching flavors")
+		return
+	}
+
+	f := foundFlavors[0]
+	if f.Id == nil {
+		resp.Diagnostics.AddError("get flavor", "flavor has no id")
+		return
+	}
+	// Pointer-aware conversions: nil API fields become null attributes
+	// instead of panicking on dereference.
+	model.Description = types.StringPointerValue(f.Description)
+	model.Id = utils.BuildInternalTerraformId(model.ProjectId.ValueString(), region, *f.Id)
+	model.FlavorId = types.StringValue(*f.Id)
+	model.MaxGb = types.Int64PointerValue(f.MaxGB)
+	model.MinGb = types.Int64PointerValue(f.MinGB)
+
+	if f.StorageClasses == nil {
+		model.StorageClasses = types.ListNull(postgresflex.StorageClassesType{
+			ObjectType: basetypes.ObjectType{
+				AttrTypes: postgresflex.StorageClassesValue{}.AttributeTypes(ctx),
+			},
+		})
+	} else {
+		var scList []attr.Value
+		for _, sc := range *f.StorageClasses {
+			scList = append(
+				scList,
+				postgresflex.NewStorageClassesValueMust(
+					postgresflex.StorageClassesValue{}.AttributeTypes(ctx),
+					map[string]attr.Value{
+						"class":             types.StringPointerValue(sc.Class),
+						"max_io_per_sec":    types.Int64PointerValue(sc.MaxIoPerSec),
+						"max_through_in_mb": types.Int64PointerValue(sc.MaxThroughInMb),
+					},
+				),
+			)
+		}
+		storageClassesList := types.ListValueMust(
+			postgresflex.StorageClassesType{
+				ObjectType: basetypes.ObjectType{
+					AttrTypes: postgresflex.StorageClassesValue{}.AttributeTypes(ctx),
+				},
+			},
+			scList,
+		)
+		model.StorageClasses = storageClassesList
+	}
+
+	// Set refreshed state
+	diags = resp.State.Set(ctx, model)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+	tflog.Info(ctx, "Postgres Flex flavors read")
+}
diff --git a/stackit/internal/services/postgresflexalpha/flavor/functions.go b/stackit/internal/services/postgresflexalpha/flavor/functions.go
new file mode 100644
index 00000000..656b2263
--- /dev/null
+++ b/stackit/internal/services/postgresflexalpha/flavor/functions.go
@@ -0,0 +1,201 @@
+package postgresFlexAlphaFlavor
+
+import (
+ "context"
+ "fmt"
+
+ postgresflex "github.com/mhenselin/terraform-provider-stackitprivatepreview/pkg/postgresflexalpha"
+)
+
+// flavorsClient abstracts the flavors listing API so getAllFlavors can be
+// exercised with a fake client; *postgresflexalpha.APIClient satisfies it
+// (the flavor data source passes its configured client here).
+type flavorsClient interface {
+	GetFlavorsRequestExecute(
+		ctx context.Context,
+		projectId, region string,
+		page, size *int64,
+		sort *postgresflex.FlavorSort,
+	) (*postgresflex.GetFlavorsResponse, error)
+}
+
+//func loadFlavorId(ctx context.Context, client flavorsClient, model *Model, flavor *flavorModel, storage *storageModel) error {
+// if model == nil {
+// return fmt.Errorf("nil model")
+// }
+// if flavor == nil {
+// return fmt.Errorf("nil flavor")
+// }
+// cpu := flavor.CPU.ValueInt64()
+// if cpu == 0 {
+// return fmt.Errorf("nil CPU")
+// }
+// ram := flavor.RAM.ValueInt64()
+// if ram == 0 {
+// return fmt.Errorf("nil RAM")
+// }
+//
+// nodeType := flavor.NodeType.ValueString()
+// if nodeType == "" {
+// if model.Replicas.IsNull() || model.Replicas.IsUnknown() {
+// return fmt.Errorf("nil NodeType")
+// }
+// switch model.Replicas.ValueInt64() {
+// case 1:
+// nodeType = "Single"
+// case 3:
+// nodeType = "Replica"
+// default:
+// return fmt.Errorf("unknown Replicas value: %d", model.Replicas.ValueInt64())
+// }
+// }
+//
+// storageClass := conversion.StringValueToPointer(storage.Class)
+// if storageClass == nil {
+// return fmt.Errorf("nil StorageClass")
+// }
+// storageSize := conversion.Int64ValueToPointer(storage.Size)
+// if storageSize == nil {
+// return fmt.Errorf("nil StorageSize")
+// }
+//
+// projectId := model.ProjectId.ValueString()
+// region := model.Region.ValueString()
+//
+// flavorList, err := getAllFlavors(ctx, client, projectId, region)
+// if err != nil {
+// return err
+// }
+//
+// avl := ""
+// foundFlavorCount := 0
+// var foundFlavors []string
+// for _, f := range flavorList {
+// if f.Id == nil || f.Cpu == nil || f.Memory == nil {
+// continue
+// }
+// if !strings.EqualFold(*f.NodeType, nodeType) {
+// continue
+// }
+// if *f.Cpu == cpu && *f.Memory == ram {
+// var useSc *postgresflex.FlavorStorageClassesStorageClass
+// for _, sc := range *f.StorageClasses {
+// if *sc.Class != *storageClass {
+// continue
+// }
+// if *storageSize < *f.MinGB || *storageSize > *f.MaxGB {
+// return fmt.Errorf("storage size %d out of bounds (min: %d - max: %d)", *storageSize, *f.MinGB, *f.MaxGB)
+// }
+// useSc = &sc
+// }
+// if useSc == nil {
+// return fmt.Errorf("no storage class found for %s", *storageClass)
+// }
+//
+// flavor.Id = types.StringValue(*f.Id)
+// flavor.Description = types.StringValue(*f.Description)
+// foundFlavors = append(foundFlavors, fmt.Sprintf("%s (%d/%d - %s)", *f.Id, *f.Cpu, *f.Memory, *f.NodeType))
+// foundFlavorCount++
+// }
+// for _, cls := range *f.StorageClasses {
+// avl = fmt.Sprintf("%s\n- %d CPU, %d GB RAM, storage %s (min: %d - max: %d)", avl, *f.Cpu, *f.Memory, *cls.Class, *f.MinGB, *f.MaxGB)
+// }
+// }
+// if foundFlavorCount > 1 {
+// return fmt.Errorf(
+// "number of flavors returned: %d\nmultiple flavors found: %d flavors\n %s",
+// len(flavorList),
+// foundFlavorCount,
+// strings.Join(foundFlavors, "\n "),
+// )
+// }
+// if flavor.Id.ValueString() == "" {
+// return fmt.Errorf("couldn't find flavor, available specs are:%s", avl)
+// }
+//
+// return nil
+//}
+
+// getAllFlavors pages through the flavors API and returns the accumulated
+// list of flavors for the given project and region. It stops when the
+// reported total row count is reached or the last page has been fetched.
+// All failure modes return an error; library code must not panic.
+func getAllFlavors(ctx context.Context, client flavorsClient, projectId, region string) ([]postgresflex.ListFlavors, error) {
+	if projectId == "" || region == "" {
+		return nil, fmt.Errorf("listing postgresflex flavors: projectId and region are required")
+	}
+	var flavorList []postgresflex.ListFlavors
+
+	page := int64(1)
+	size := int64(10)
+	sort := postgresflex.FLAVORSORT_INDEX_ASC
+	// Hard upper bound so a misbehaving API cannot loop forever.
+	const maxPages = 1000
+	for i := 0; i < maxPages; i++ {
+		res, err := client.GetFlavorsRequestExecute(ctx, projectId, region, &page, &size, &sort)
+		if err != nil {
+			return nil, fmt.Errorf("listing postgresflex flavors: %w", err)
+		}
+		if res.Flavors == nil {
+			return nil, fmt.Errorf("finding flavors for project %s", projectId)
+		}
+		pagination := res.GetPagination()
+		// Guard nil pagination fields before dereferencing.
+		if pagination.TotalRows == nil || pagination.TotalPages == nil {
+			return nil, fmt.Errorf("listing postgresflex flavors: response pagination is incomplete")
+		}
+		flavorList = append(flavorList, res.GetFlavors()...)
+
+		if *pagination.TotalRows < int64(len(flavorList)) {
+			return nil, fmt.Errorf("total rows is smaller than current accumulated list - that should not happen")
+		}
+		if *pagination.TotalRows == int64(len(flavorList)) {
+			return flavorList, nil
+		}
+		page++
+		if page > *pagination.TotalPages {
+			return flavorList, nil
+		}
+	}
+	return nil, fmt.Errorf("listing postgresflex flavors: pagination did not complete within %d pages", maxPages)
+}
+
+//func getFlavorModelById(ctx context.Context, client flavorsClient, model *Model, flavor *flavorModel) error {
+// if model == nil {
+// return fmt.Errorf("nil model")
+// }
+// if flavor == nil {
+// return fmt.Errorf("nil flavor")
+// }
+// id := conversion.StringValueToPointer(flavor.Id)
+// if id == nil {
+// return fmt.Errorf("nil flavor ID")
+// }
+//
+// flavor.Id = types.StringValue("")
+//
+// projectId := model.ProjectId.ValueString()
+// region := model.Region.ValueString()
+//
+// flavorList, err := getAllFlavors(ctx, client, projectId, region)
+// if err != nil {
+// return err
+// }
+//
+// avl := ""
+// for _, f := range flavorList {
+// if f.Id == nil || f.Cpu == nil || f.Memory == nil {
+// continue
+// }
+// if *f.Id == *id {
+// flavor.Id = types.StringValue(*f.Id)
+// flavor.Description = types.StringValue(*f.Description)
+// flavor.CPU = types.Int64Value(*f.Cpu)
+// flavor.RAM = types.Int64Value(*f.Memory)
+// flavor.NodeType = types.StringValue(*f.NodeType)
+// break
+// }
+// avl = fmt.Sprintf("%s\n- %d CPU, %d GB RAM", avl, *f.Cpu, *f.Memory)
+// }
+// if flavor.Id.ValueString() == "" {
+// return fmt.Errorf("couldn't find flavor, available specs are: %s", avl)
+// }
+//
+// return nil
+//}
diff --git a/stackit/internal/services/postgresflexalpha/flavor/list_datasource.go.bak b/stackit/internal/services/postgresflexalpha/flavor/list_datasource.go.bak
new file mode 100644
index 00000000..9c035504
--- /dev/null
+++ b/stackit/internal/services/postgresflexalpha/flavor/list_datasource.go.bak
@@ -0,0 +1,79 @@
+package postgresFlexAlphaFlavor
+
+import (
+ "context"
+
+ "github.com/mhenselin/terraform-provider-stackitprivatepreview/pkg/postgresflexalpha"
+ "github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/conversion"
+ "github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/services/postgresflexalpha"
+ postgresflexUtils "github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/services/postgresflexalpha/utils"
+
+ "github.com/hashicorp/terraform-plugin-framework/datasource"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/core"
+)
+
+// Ensure the implementation satisfies the expected interfaces.
+// Compile-time check: *flavorListDataSource must implement datasource.DataSource.
+var (
+	_ datasource.DataSource = &flavorListDataSource{}
+)
+
+// NewFlavorListDataSource is a helper function to simplify the provider implementation.
+func NewFlavorListDataSource() datasource.DataSource {
+	var d flavorListDataSource
+	return &d
+}
+
+// flavorListDataSource is the data source implementation.
+// (Comment previously named the wrong type, "flavorDataSource".)
+type flavorListDataSource struct {
+	client       *postgresflexalpha.APIClient // API client, set in Configure
+	providerData core.ProviderData            // provider configuration, set in Configure
+}
+
+// Metadata returns the data source type name, composed of the provider
+// type name and the "_postgresflexalpha_flavorlist" suffix.
+func (r *flavorListDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
+	resp.TypeName = req.ProviderTypeName + "_postgresflexalpha_flavorlist"
+}
+
+// Configure adds the provider configured client to the data source.
+// It parses the provider data and builds the Postgres Flex API client;
+// on failure it records diagnostics and returns without setting the client.
+func (r *flavorListDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
+	var ok bool
+	r.providerData, ok = conversion.ParseProviderData(ctx, req.ProviderData, &resp.Diagnostics)
+	if !ok {
+		return
+	}
+
+	apiClient := postgresflexUtils.ConfigureClient(ctx, &r.providerData, &resp.Diagnostics)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+	r.client = apiClient
+	tflog.Info(ctx, "Postgres Flex flavors client configured")
+}
+
+// Schema returns the generated schema for the flavor list data source.
+func (r *flavorListDataSource) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
+	resp.Schema = postgresflex.FlavorDataSourceSchema(ctx)
+}
+
+// Read refreshes the Terraform state.
+// NOTE(review): this implementation never calls the API — it reads the
+// config and writes it back to state unchanged, and r.client is unused
+// here. The file is a .bak backup; this note documents the incomplete state.
+func (r *flavorListDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
+	var model postgresflex.FlavorModel
+	diags := req.Config.Get(ctx, &model)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	ctx = core.InitProviderContext(ctx)
+
+	projectId := model.ProjectId.ValueString()
+	region := r.providerData.GetRegionWithOverride(model.Region)
+	ctx = tflog.SetField(ctx, "project_id", projectId)
+	ctx = tflog.SetField(ctx, "region", region)
+
+	// Set refreshed state
+	diags = resp.State.Set(ctx, model)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+	tflog.Info(ctx, "Postgres Flex flavors read")
+}
diff --git a/stackit/internal/services/postgresflexalpha/flavor_data_source_gen.go b/stackit/internal/services/postgresflexalpha/flavor_data_source_gen.go
new file mode 100644
index 00000000..9e0f567e
--- /dev/null
+++ b/stackit/internal/services/postgresflexalpha/flavor_data_source_gen.go
@@ -0,0 +1,1940 @@
+// Code generated by terraform-plugin-framework-generator DO NOT EDIT.
+
+package postgresflex
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/schema/validator"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-framework/types/basetypes"
+ "github.com/hashicorp/terraform-plugin-go/tftypes"
+
+ "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+)
+
+func FlavorDataSourceSchema(ctx context.Context) schema.Schema {
+ return schema.Schema{
+ Attributes: map[string]schema.Attribute{
+ "cpu": schema.Int64Attribute{
+ Optional: true,
+ Computed: true,
+ //Description: "The cpu count of the instance.",
+ //MarkdownDescription: "The cpu count of the instance.",
+ },
+ "ram": schema.Int64Attribute{
+ Optional: true,
+ Computed: true,
+ //Description: "The cpu count of the instance.",
+ //MarkdownDescription: "The cpu count of the instance.",
+ },
+ "node_type": schema.StringAttribute{
+ Optional: true,
+ Computed: true,
+ //Description: "The cpu count of the instance.",
+ //MarkdownDescription: "The cpu count of the instance.",
+ },
+ "storage_class": schema.StringAttribute{
+ Optional: true,
+ Computed: true,
+ //Description: "The cpu count of the instance.",
+ //MarkdownDescription: "The cpu count of the instance.",
+ },
+ "flavors": schema.ListNestedAttribute{
+ NestedObject: schema.NestedAttributeObject{
+ Attributes: map[string]schema.Attribute{
+ "cpu": schema.Int64Attribute{
+ Optional: true,
+ Computed: true,
+ Description: "The cpu count of the instance.",
+ MarkdownDescription: "The cpu count of the instance.",
+ },
+ "description": schema.StringAttribute{
+ Computed: true,
+ Description: "The flavor description.",
+ MarkdownDescription: "The flavor description.",
+ },
+ "id": schema.StringAttribute{
+ Computed: true,
+ Description: "The id of the instance flavor.",
+ MarkdownDescription: "The id of the instance flavor.",
+ },
+ "max_gb": schema.Int64Attribute{
+ Computed: true,
+ Description: "maximum storage which can be ordered for the flavor in Gigabyte.",
+ MarkdownDescription: "maximum storage which can be ordered for the flavor in Gigabyte.",
+ },
+ "memory": schema.Int64Attribute{
+ Optional: true,
+ Computed: true,
+ Description: "The memory of the instance in Gibibyte.",
+ MarkdownDescription: "The memory of the instance in Gibibyte.",
+ },
+ "min_gb": schema.Int64Attribute{
+ Computed: true,
+ Description: "minimum storage which is required to order in Gigabyte.",
+ MarkdownDescription: "minimum storage which is required to order in Gigabyte.",
+ },
+ "node_type": schema.StringAttribute{
+ Optional: true,
+ Computed: true,
+ Description: "defines the nodeType it can be either single or replica",
+ MarkdownDescription: "defines the nodeType it can be either single or replica",
+ },
+ "storage_classes": schema.ListNestedAttribute{
+ NestedObject: schema.NestedAttributeObject{
+ Attributes: map[string]schema.Attribute{
+ "class": schema.StringAttribute{
+ Computed: true,
+ },
+ "max_io_per_sec": schema.Int64Attribute{
+ Computed: true,
+ },
+ "max_through_in_mb": schema.Int64Attribute{
+ Computed: true,
+ },
+ },
+ CustomType: StorageClassesType{
+ ObjectType: types.ObjectType{
+ AttrTypes: StorageClassesValue{}.AttributeTypes(ctx),
+ },
+ },
+ },
+ Computed: true,
+ Description: "maximum storage which can be ordered for the flavor in Gigabyte.",
+ MarkdownDescription: "maximum storage which can be ordered for the flavor in Gigabyte.",
+ },
+ },
+ CustomType: FlavorsType{
+ ObjectType: types.ObjectType{
+ AttrTypes: FlavorsValue{}.AttributeTypes(ctx),
+ },
+ },
+ },
+ Computed: true,
+ Description: "List of flavors available for the project.",
+ MarkdownDescription: "List of flavors available for the project.",
+ },
+ "page": schema.Int64Attribute{
+ Optional: true,
+ Computed: true,
+ Description: "Number of the page of items list to be returned.",
+ MarkdownDescription: "Number of the page of items list to be returned.",
+ },
+ "pagination": schema.SingleNestedAttribute{
+ Attributes: map[string]schema.Attribute{
+ "page": schema.Int64Attribute{
+ Computed: true,
+ },
+ "size": schema.Int64Attribute{
+ Computed: true,
+ },
+ "sort": schema.StringAttribute{
+ Computed: true,
+ },
+ "total_pages": schema.Int64Attribute{
+ Computed: true,
+ },
+ "total_rows": schema.Int64Attribute{
+ Computed: true,
+ },
+ },
+ CustomType: PaginationType{
+ ObjectType: types.ObjectType{
+ AttrTypes: PaginationValue{}.AttributeTypes(ctx),
+ },
+ },
+ Computed: true,
+ },
+ "project_id": schema.StringAttribute{
+ Required: true,
+ Description: "The STACKIT project ID.",
+ MarkdownDescription: "The STACKIT project ID.",
+ },
+ "region": schema.StringAttribute{
+ Required: true,
+ Description: "The region which should be addressed",
+ MarkdownDescription: "The region which should be addressed",
+ Validators: []validator.String{
+ stringvalidator.OneOf(
+ "eu01",
+ ),
+ },
+ },
+ "size": schema.Int64Attribute{
+ Optional: true,
+ Computed: true,
+ Description: "Number of items to be returned on each page.",
+ MarkdownDescription: "Number of items to be returned on each page.",
+ },
+ "sort": schema.StringAttribute{
+ Optional: true,
+ Computed: true,
+ Description: "Sorting of the flavors to be returned on each page.",
+ MarkdownDescription: "Sorting of the flavors to be returned on each page.",
+ Validators: []validator.String{
+ stringvalidator.OneOf(
+ "index.desc",
+ "index.asc",
+ "cpu.desc",
+ "cpu.asc",
+ "flavor_description.asc",
+ "flavor_description.desc",
+ "id.desc",
+ "id.asc",
+ "size_max.desc",
+ "size_max.asc",
+ "ram.desc",
+ "ram.asc",
+ "size_min.desc",
+ "size_min.asc",
+ "storage_class.asc",
+ "storage_class.desc",
+ "node_type.asc",
+ "node_type.desc",
+ ),
+ },
+ },
+ },
+ }
+}
+
+type FlavorsModel struct {
+ Cpu types.Int64 `tfsdk:"cpu"`
+ Ram types.Int64 `tfsdk:"ram"`
+ NodeType types.String `tfsdk:"node_type"`
+ Flavors types.List `tfsdk:"flavors"`
+ Page types.Int64 `tfsdk:"page"`
+ Pagination PaginationValue `tfsdk:"pagination"`
+ ProjectId types.String `tfsdk:"project_id"`
+ Region types.String `tfsdk:"region"`
+ Size types.Int64 `tfsdk:"size"`
+ Sort types.String `tfsdk:"sort"`
+}
+
+var _ basetypes.ObjectTypable = FlavorsType{}
+
+type FlavorsType struct {
+ basetypes.ObjectType
+}
+
+func (t FlavorsType) Equal(o attr.Type) bool {
+ other, ok := o.(FlavorsType)
+
+ if !ok {
+ return false
+ }
+
+ return t.ObjectType.Equal(other.ObjectType)
+}
+
+func (t FlavorsType) String() string {
+ return "FlavorsType"
+}
+
+func (t FlavorsType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ attributes := in.Attributes()
+
+ cpuAttribute, ok := attributes["cpu"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `cpu is missing from object`)
+
+ return nil, diags
+ }
+
+ cpuVal, ok := cpuAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`cpu expected to be basetypes.Int64Value, was: %T`, cpuAttribute))
+ }
+
+ descriptionAttribute, ok := attributes["description"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `description is missing from object`)
+
+ return nil, diags
+ }
+
+ descriptionVal, ok := descriptionAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`description expected to be basetypes.StringValue, was: %T`, descriptionAttribute))
+ }
+
+ idAttribute, ok := attributes["id"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `id is missing from object`)
+
+ return nil, diags
+ }
+
+ idVal, ok := idAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`id expected to be basetypes.StringValue, was: %T`, idAttribute))
+ }
+
+ maxGbAttribute, ok := attributes["max_gb"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `max_gb is missing from object`)
+
+ return nil, diags
+ }
+
+ maxGbVal, ok := maxGbAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`max_gb expected to be basetypes.Int64Value, was: %T`, maxGbAttribute))
+ }
+
+ memoryAttribute, ok := attributes["memory"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `memory is missing from object`)
+
+ return nil, diags
+ }
+
+ memoryVal, ok := memoryAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`memory expected to be basetypes.Int64Value, was: %T`, memoryAttribute))
+ }
+
+ minGbAttribute, ok := attributes["min_gb"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `min_gb is missing from object`)
+
+ return nil, diags
+ }
+
+ minGbVal, ok := minGbAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`min_gb expected to be basetypes.Int64Value, was: %T`, minGbAttribute))
+ }
+
+ nodeTypeAttribute, ok := attributes["node_type"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `node_type is missing from object`)
+
+ return nil, diags
+ }
+
+ nodeTypeVal, ok := nodeTypeAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`node_type expected to be basetypes.StringValue, was: %T`, nodeTypeAttribute))
+ }
+
+ storageClassesAttribute, ok := attributes["storage_classes"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `storage_classes is missing from object`)
+
+ return nil, diags
+ }
+
+ storageClassesVal, ok := storageClassesAttribute.(basetypes.ListValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`storage_classes expected to be basetypes.ListValue, was: %T`, storageClassesAttribute))
+ }
+
+ if diags.HasError() {
+ return nil, diags
+ }
+
+ return FlavorsValue{
+ Cpu: cpuVal,
+ Description: descriptionVal,
+ Id: idVal,
+ MaxGb: maxGbVal,
+ Memory: memoryVal,
+ MinGb: minGbVal,
+ NodeType: nodeTypeVal,
+ StorageClasses: storageClassesVal,
+ state: attr.ValueStateKnown,
+ }, diags
+}
+
+func NewFlavorsValueNull() FlavorsValue {
+ return FlavorsValue{
+ state: attr.ValueStateNull,
+ }
+}
+
+func NewFlavorsValueUnknown() FlavorsValue {
+ return FlavorsValue{
+ state: attr.ValueStateUnknown,
+ }
+}
+
+func NewFlavorsValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (FlavorsValue, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521
+ ctx := context.Background()
+
+ for name, attributeType := range attributeTypes {
+ attribute, ok := attributes[name]
+
+ if !ok {
+ diags.AddError(
+ "Missing FlavorsValue Attribute Value",
+ "While creating a FlavorsValue value, a missing attribute value was detected. "+
+ "A FlavorsValue must contain values for all attributes, even if null or unknown. "+
+ "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+ fmt.Sprintf("FlavorsValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()),
+ )
+
+ continue
+ }
+
+ if !attributeType.Equal(attribute.Type(ctx)) {
+ diags.AddError(
+ "Invalid FlavorsValue Attribute Type",
+ "While creating a FlavorsValue value, an invalid attribute value was detected. "+
+ "A FlavorsValue must use a matching attribute type for the value. "+
+ "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+ fmt.Sprintf("FlavorsValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+
+ fmt.Sprintf("FlavorsValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)),
+ )
+ }
+ }
+
+ for name := range attributes {
+ _, ok := attributeTypes[name]
+
+ if !ok {
+ diags.AddError(
+ "Extra FlavorsValue Attribute Value",
+ "While creating a FlavorsValue value, an extra attribute value was detected. "+
+ "A FlavorsValue must not contain values beyond the expected attribute types. "+
+ "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+ fmt.Sprintf("Extra FlavorsValue Attribute Name: %s", name),
+ )
+ }
+ }
+
+ if diags.HasError() {
+ return NewFlavorsValueUnknown(), diags
+ }
+
+ cpuAttribute, ok := attributes["cpu"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `cpu is missing from object`)
+
+ return NewFlavorsValueUnknown(), diags
+ }
+
+ cpuVal, ok := cpuAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`cpu expected to be basetypes.Int64Value, was: %T`, cpuAttribute))
+ }
+
+ descriptionAttribute, ok := attributes["description"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `description is missing from object`)
+
+ return NewFlavorsValueUnknown(), diags
+ }
+
+ descriptionVal, ok := descriptionAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`description expected to be basetypes.StringValue, was: %T`, descriptionAttribute))
+ }
+
+ idAttribute, ok := attributes["id"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `id is missing from object`)
+
+ return NewFlavorsValueUnknown(), diags
+ }
+
+ idVal, ok := idAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`id expected to be basetypes.StringValue, was: %T`, idAttribute))
+ }
+
+ maxGbAttribute, ok := attributes["max_gb"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `max_gb is missing from object`)
+
+ return NewFlavorsValueUnknown(), diags
+ }
+
+ maxGbVal, ok := maxGbAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`max_gb expected to be basetypes.Int64Value, was: %T`, maxGbAttribute))
+ }
+
+ memoryAttribute, ok := attributes["memory"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `memory is missing from object`)
+
+ return NewFlavorsValueUnknown(), diags
+ }
+
+ memoryVal, ok := memoryAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`memory expected to be basetypes.Int64Value, was: %T`, memoryAttribute))
+ }
+
+ minGbAttribute, ok := attributes["min_gb"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `min_gb is missing from object`)
+
+ return NewFlavorsValueUnknown(), diags
+ }
+
+ minGbVal, ok := minGbAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`min_gb expected to be basetypes.Int64Value, was: %T`, minGbAttribute))
+ }
+
+ nodeTypeAttribute, ok := attributes["node_type"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `node_type is missing from object`)
+
+ return NewFlavorsValueUnknown(), diags
+ }
+
+ nodeTypeVal, ok := nodeTypeAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`node_type expected to be basetypes.StringValue, was: %T`, nodeTypeAttribute))
+ }
+
+ storageClassesAttribute, ok := attributes["storage_classes"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `storage_classes is missing from object`)
+
+ return NewFlavorsValueUnknown(), diags
+ }
+
+ storageClassesVal, ok := storageClassesAttribute.(basetypes.ListValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`storage_classes expected to be basetypes.ListValue, was: %T`, storageClassesAttribute))
+ }
+
+ if diags.HasError() {
+ return NewFlavorsValueUnknown(), diags
+ }
+
+ return FlavorsValue{
+ Cpu: cpuVal,
+ Description: descriptionVal,
+ Id: idVal,
+ MaxGb: maxGbVal,
+ Memory: memoryVal,
+ MinGb: minGbVal,
+ NodeType: nodeTypeVal,
+ StorageClasses: storageClassesVal,
+ state: attr.ValueStateKnown,
+ }, diags
+}
+
+func NewFlavorsValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) FlavorsValue {
+ object, diags := NewFlavorsValue(attributeTypes, attributes)
+
+ if diags.HasError() {
+		// TODO: promote this diagnostics-to-string formatting into a shared helper in the diag package.
+ diagsStrings := make([]string, 0, len(diags))
+
+ for _, diagnostic := range diags {
+ diagsStrings = append(diagsStrings, fmt.Sprintf(
+ "%s | %s | %s",
+ diagnostic.Severity(),
+ diagnostic.Summary(),
+ diagnostic.Detail()))
+ }
+
+ panic("NewFlavorsValueMust received error(s): " + strings.Join(diagsStrings, "\n"))
+ }
+
+ return object
+}
+
+func (t FlavorsType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) {
+ if in.Type() == nil {
+ return NewFlavorsValueNull(), nil
+ }
+
+ if !in.Type().Equal(t.TerraformType(ctx)) {
+ return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type())
+ }
+
+ if !in.IsKnown() {
+ return NewFlavorsValueUnknown(), nil
+ }
+
+ if in.IsNull() {
+ return NewFlavorsValueNull(), nil
+ }
+
+ attributes := map[string]attr.Value{}
+
+ val := map[string]tftypes.Value{}
+
+ err := in.As(&val)
+
+ if err != nil {
+ return nil, err
+ }
+
+ for k, v := range val {
+ a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v)
+
+ if err != nil {
+ return nil, err
+ }
+
+ attributes[k] = a
+ }
+
+ return NewFlavorsValueMust(FlavorsValue{}.AttributeTypes(ctx), attributes), nil
+}
+
+func (t FlavorsType) ValueType(ctx context.Context) attr.Value {
+ return FlavorsValue{}
+}
+
+var _ basetypes.ObjectValuable = FlavorsValue{}
+
+type FlavorsValue struct {
+ Cpu basetypes.Int64Value `tfsdk:"cpu"`
+ Description basetypes.StringValue `tfsdk:"description"`
+ Id basetypes.StringValue `tfsdk:"id"`
+ MaxGb basetypes.Int64Value `tfsdk:"max_gb"`
+ Memory basetypes.Int64Value `tfsdk:"memory"`
+ MinGb basetypes.Int64Value `tfsdk:"min_gb"`
+ NodeType basetypes.StringValue `tfsdk:"node_type"`
+ StorageClasses basetypes.ListValue `tfsdk:"storage_classes"`
+ state attr.ValueState
+}
+
+func (v FlavorsValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) {
+ attrTypes := make(map[string]tftypes.Type, 8)
+
+ var val tftypes.Value
+ var err error
+
+ attrTypes["cpu"] = basetypes.Int64Type{}.TerraformType(ctx)
+ attrTypes["description"] = basetypes.StringType{}.TerraformType(ctx)
+ attrTypes["id"] = basetypes.StringType{}.TerraformType(ctx)
+ attrTypes["max_gb"] = basetypes.Int64Type{}.TerraformType(ctx)
+ attrTypes["memory"] = basetypes.Int64Type{}.TerraformType(ctx)
+ attrTypes["min_gb"] = basetypes.Int64Type{}.TerraformType(ctx)
+ attrTypes["node_type"] = basetypes.StringType{}.TerraformType(ctx)
+ attrTypes["storage_classes"] = basetypes.ListType{
+ ElemType: StorageClassesValue{}.Type(ctx),
+ }.TerraformType(ctx)
+
+ objectType := tftypes.Object{AttributeTypes: attrTypes}
+
+ switch v.state {
+ case attr.ValueStateKnown:
+ vals := make(map[string]tftypes.Value, 8)
+
+ val, err = v.Cpu.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["cpu"] = val
+
+ val, err = v.Description.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["description"] = val
+
+ val, err = v.Id.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["id"] = val
+
+ val, err = v.MaxGb.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["max_gb"] = val
+
+ val, err = v.Memory.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["memory"] = val
+
+ val, err = v.MinGb.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["min_gb"] = val
+
+ val, err = v.NodeType.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["node_type"] = val
+
+ val, err = v.StorageClasses.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["storage_classes"] = val
+
+ if err := tftypes.ValidateValue(objectType, vals); err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ return tftypes.NewValue(objectType, vals), nil
+ case attr.ValueStateNull:
+ return tftypes.NewValue(objectType, nil), nil
+ case attr.ValueStateUnknown:
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), nil
+ default:
+ panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state))
+ }
+}
+
+func (v FlavorsValue) IsNull() bool {
+ return v.state == attr.ValueStateNull
+}
+
+func (v FlavorsValue) IsUnknown() bool {
+ return v.state == attr.ValueStateUnknown
+}
+
+func (v FlavorsValue) String() string {
+ return "FlavorsValue"
+}
+
+func (v FlavorsValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ storageClasses := types.ListValueMust(
+ StorageClassesType{
+ basetypes.ObjectType{
+ AttrTypes: StorageClassesValue{}.AttributeTypes(ctx),
+ },
+ },
+ v.StorageClasses.Elements(),
+ )
+
+ if v.StorageClasses.IsNull() {
+ storageClasses = types.ListNull(
+ StorageClassesType{
+ basetypes.ObjectType{
+ AttrTypes: StorageClassesValue{}.AttributeTypes(ctx),
+ },
+ },
+ )
+ }
+
+ if v.StorageClasses.IsUnknown() {
+ storageClasses = types.ListUnknown(
+ StorageClassesType{
+ basetypes.ObjectType{
+ AttrTypes: StorageClassesValue{}.AttributeTypes(ctx),
+ },
+ },
+ )
+ }
+
+ attributeTypes := map[string]attr.Type{
+ "cpu": basetypes.Int64Type{},
+ "description": basetypes.StringType{},
+ "id": basetypes.StringType{},
+ "max_gb": basetypes.Int64Type{},
+ "memory": basetypes.Int64Type{},
+ "min_gb": basetypes.Int64Type{},
+ "node_type": basetypes.StringType{},
+ "storage_classes": basetypes.ListType{
+ ElemType: StorageClassesValue{}.Type(ctx),
+ },
+ }
+
+ if v.IsNull() {
+ return types.ObjectNull(attributeTypes), diags
+ }
+
+ if v.IsUnknown() {
+ return types.ObjectUnknown(attributeTypes), diags
+ }
+
+ objVal, diags := types.ObjectValue(
+ attributeTypes,
+ map[string]attr.Value{
+ "cpu": v.Cpu,
+ "description": v.Description,
+ "id": v.Id,
+ "max_gb": v.MaxGb,
+ "memory": v.Memory,
+ "min_gb": v.MinGb,
+ "node_type": v.NodeType,
+ "storage_classes": storageClasses,
+ })
+
+ return objVal, diags
+}
+
+func (v FlavorsValue) Equal(o attr.Value) bool {
+ other, ok := o.(FlavorsValue)
+
+ if !ok {
+ return false
+ }
+
+ if v.state != other.state {
+ return false
+ }
+
+ if v.state != attr.ValueStateKnown {
+ return true
+ }
+
+ if !v.Cpu.Equal(other.Cpu) {
+ return false
+ }
+
+ if !v.Description.Equal(other.Description) {
+ return false
+ }
+
+ if !v.Id.Equal(other.Id) {
+ return false
+ }
+
+ if !v.MaxGb.Equal(other.MaxGb) {
+ return false
+ }
+
+ if !v.Memory.Equal(other.Memory) {
+ return false
+ }
+
+ if !v.MinGb.Equal(other.MinGb) {
+ return false
+ }
+
+ if !v.NodeType.Equal(other.NodeType) {
+ return false
+ }
+
+ if !v.StorageClasses.Equal(other.StorageClasses) {
+ return false
+ }
+
+ return true
+}
+
+func (v FlavorsValue) Type(ctx context.Context) attr.Type {
+ return FlavorsType{
+ basetypes.ObjectType{
+ AttrTypes: v.AttributeTypes(ctx),
+ },
+ }
+}
+
+func (v FlavorsValue) AttributeTypes(ctx context.Context) map[string]attr.Type {
+ return map[string]attr.Type{
+ "cpu": basetypes.Int64Type{},
+ "description": basetypes.StringType{},
+ "id": basetypes.StringType{},
+ "max_gb": basetypes.Int64Type{},
+ "memory": basetypes.Int64Type{},
+ "min_gb": basetypes.Int64Type{},
+ "node_type": basetypes.StringType{},
+ "storage_classes": basetypes.ListType{
+ ElemType: StorageClassesValue{}.Type(ctx),
+ },
+ }
+}
+
+var _ basetypes.ObjectTypable = StorageClassesType{}
+
+type StorageClassesType struct {
+ basetypes.ObjectType
+}
+
+func (t StorageClassesType) Equal(o attr.Type) bool {
+ other, ok := o.(StorageClassesType)
+
+ if !ok {
+ return false
+ }
+
+ return t.ObjectType.Equal(other.ObjectType)
+}
+
+func (t StorageClassesType) String() string {
+ return "StorageClassesType"
+}
+
+func (t StorageClassesType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ attributes := in.Attributes()
+
+ classAttribute, ok := attributes["class"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `class is missing from object`)
+
+ return nil, diags
+ }
+
+ classVal, ok := classAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`class expected to be basetypes.StringValue, was: %T`, classAttribute))
+ }
+
+ maxIoPerSecAttribute, ok := attributes["max_io_per_sec"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `max_io_per_sec is missing from object`)
+
+ return nil, diags
+ }
+
+ maxIoPerSecVal, ok := maxIoPerSecAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`max_io_per_sec expected to be basetypes.Int64Value, was: %T`, maxIoPerSecAttribute))
+ }
+
+ maxThroughInMbAttribute, ok := attributes["max_through_in_mb"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `max_through_in_mb is missing from object`)
+
+ return nil, diags
+ }
+
+ maxThroughInMbVal, ok := maxThroughInMbAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`max_through_in_mb expected to be basetypes.Int64Value, was: %T`, maxThroughInMbAttribute))
+ }
+
+ if diags.HasError() {
+ return nil, diags
+ }
+
+ return StorageClassesValue{
+ Class: classVal,
+ MaxIoPerSec: maxIoPerSecVal,
+ MaxThroughInMb: maxThroughInMbVal,
+ state: attr.ValueStateKnown,
+ }, diags
+}
+
+func NewStorageClassesValueNull() StorageClassesValue {
+ return StorageClassesValue{
+ state: attr.ValueStateNull,
+ }
+}
+
+func NewStorageClassesValueUnknown() StorageClassesValue {
+ return StorageClassesValue{
+ state: attr.ValueStateUnknown,
+ }
+}
+
+func NewStorageClassesValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (StorageClassesValue, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521
+ ctx := context.Background()
+
+ for name, attributeType := range attributeTypes {
+ attribute, ok := attributes[name]
+
+ if !ok {
+ diags.AddError(
+ "Missing StorageClassesValue Attribute Value",
+ "While creating a StorageClassesValue value, a missing attribute value was detected. "+
+ "A StorageClassesValue must contain values for all attributes, even if null or unknown. "+
+ "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+ fmt.Sprintf("StorageClassesValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()),
+ )
+
+ continue
+ }
+
+ if !attributeType.Equal(attribute.Type(ctx)) {
+ diags.AddError(
+ "Invalid StorageClassesValue Attribute Type",
+ "While creating a StorageClassesValue value, an invalid attribute value was detected. "+
+ "A StorageClassesValue must use a matching attribute type for the value. "+
+ "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+ fmt.Sprintf("StorageClassesValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+
+ fmt.Sprintf("StorageClassesValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)),
+ )
+ }
+ }
+
+ for name := range attributes {
+ _, ok := attributeTypes[name]
+
+ if !ok {
+ diags.AddError(
+ "Extra StorageClassesValue Attribute Value",
+ "While creating a StorageClassesValue value, an extra attribute value was detected. "+
+ "A StorageClassesValue must not contain values beyond the expected attribute types. "+
+ "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+ fmt.Sprintf("Extra StorageClassesValue Attribute Name: %s", name),
+ )
+ }
+ }
+
+ if diags.HasError() {
+ return NewStorageClassesValueUnknown(), diags
+ }
+
+ classAttribute, ok := attributes["class"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `class is missing from object`)
+
+ return NewStorageClassesValueUnknown(), diags
+ }
+
+ classVal, ok := classAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`class expected to be basetypes.StringValue, was: %T`, classAttribute))
+ }
+
+ maxIoPerSecAttribute, ok := attributes["max_io_per_sec"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `max_io_per_sec is missing from object`)
+
+ return NewStorageClassesValueUnknown(), diags
+ }
+
+ maxIoPerSecVal, ok := maxIoPerSecAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`max_io_per_sec expected to be basetypes.Int64Value, was: %T`, maxIoPerSecAttribute))
+ }
+
+ maxThroughInMbAttribute, ok := attributes["max_through_in_mb"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `max_through_in_mb is missing from object`)
+
+ return NewStorageClassesValueUnknown(), diags
+ }
+
+ maxThroughInMbVal, ok := maxThroughInMbAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`max_through_in_mb expected to be basetypes.Int64Value, was: %T`, maxThroughInMbAttribute))
+ }
+
+ if diags.HasError() {
+ return NewStorageClassesValueUnknown(), diags
+ }
+
+ return StorageClassesValue{
+ Class: classVal,
+ MaxIoPerSec: maxIoPerSecVal,
+ MaxThroughInMb: maxThroughInMbVal,
+ state: attr.ValueStateKnown,
+ }, diags
+}
+
+func NewStorageClassesValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) StorageClassesValue {
+ object, diags := NewStorageClassesValue(attributeTypes, attributes)
+
+ if diags.HasError() {
+		// TODO: promote this diagnostics-to-string formatting into a shared helper in the diag package.
+ diagsStrings := make([]string, 0, len(diags))
+
+ for _, diagnostic := range diags {
+ diagsStrings = append(diagsStrings, fmt.Sprintf(
+ "%s | %s | %s",
+ diagnostic.Severity(),
+ diagnostic.Summary(),
+ diagnostic.Detail()))
+ }
+
+ panic("NewStorageClassesValueMust received error(s): " + strings.Join(diagsStrings, "\n"))
+ }
+
+ return object
+}
+
+func (t StorageClassesType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) {
+ if in.Type() == nil {
+ return NewStorageClassesValueNull(), nil
+ }
+
+ if !in.Type().Equal(t.TerraformType(ctx)) {
+ return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type())
+ }
+
+ if !in.IsKnown() {
+ return NewStorageClassesValueUnknown(), nil
+ }
+
+ if in.IsNull() {
+ return NewStorageClassesValueNull(), nil
+ }
+
+ attributes := map[string]attr.Value{}
+
+ val := map[string]tftypes.Value{}
+
+ err := in.As(&val)
+
+ if err != nil {
+ return nil, err
+ }
+
+ for k, v := range val {
+ a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v)
+
+ if err != nil {
+ return nil, err
+ }
+
+ attributes[k] = a
+ }
+
+ return NewStorageClassesValueMust(StorageClassesValue{}.AttributeTypes(ctx), attributes), nil
+}
+
+func (t StorageClassesType) ValueType(ctx context.Context) attr.Value {
+ return StorageClassesValue{}
+}
+
+var _ basetypes.ObjectValuable = StorageClassesValue{}
+
+type StorageClassesValue struct {
+ Class basetypes.StringValue `tfsdk:"class"`
+ MaxIoPerSec basetypes.Int64Value `tfsdk:"max_io_per_sec"`
+ MaxThroughInMb basetypes.Int64Value `tfsdk:"max_through_in_mb"`
+ state attr.ValueState
+}
+
+func (v StorageClassesValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) {
+ attrTypes := make(map[string]tftypes.Type, 3)
+
+ var val tftypes.Value
+ var err error
+
+ attrTypes["class"] = basetypes.StringType{}.TerraformType(ctx)
+ attrTypes["max_io_per_sec"] = basetypes.Int64Type{}.TerraformType(ctx)
+ attrTypes["max_through_in_mb"] = basetypes.Int64Type{}.TerraformType(ctx)
+
+ objectType := tftypes.Object{AttributeTypes: attrTypes}
+
+ switch v.state {
+ case attr.ValueStateKnown:
+ vals := make(map[string]tftypes.Value, 3)
+
+ val, err = v.Class.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["class"] = val
+
+ val, err = v.MaxIoPerSec.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["max_io_per_sec"] = val
+
+ val, err = v.MaxThroughInMb.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["max_through_in_mb"] = val
+
+ if err := tftypes.ValidateValue(objectType, vals); err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ return tftypes.NewValue(objectType, vals), nil
+ case attr.ValueStateNull:
+ return tftypes.NewValue(objectType, nil), nil
+ case attr.ValueStateUnknown:
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), nil
+ default:
+ panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state))
+ }
+}
+
+func (v StorageClassesValue) IsNull() bool {
+ return v.state == attr.ValueStateNull
+}
+
+func (v StorageClassesValue) IsUnknown() bool {
+ return v.state == attr.ValueStateUnknown
+}
+
+func (v StorageClassesValue) String() string {
+ return "StorageClassesValue"
+}
+
+func (v StorageClassesValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ attributeTypes := map[string]attr.Type{
+ "class": basetypes.StringType{},
+ "max_io_per_sec": basetypes.Int64Type{},
+ "max_through_in_mb": basetypes.Int64Type{},
+ }
+
+ if v.IsNull() {
+ return types.ObjectNull(attributeTypes), diags
+ }
+
+ if v.IsUnknown() {
+ return types.ObjectUnknown(attributeTypes), diags
+ }
+
+ objVal, diags := types.ObjectValue(
+ attributeTypes,
+ map[string]attr.Value{
+ "class": v.Class,
+ "max_io_per_sec": v.MaxIoPerSec,
+ "max_through_in_mb": v.MaxThroughInMb,
+ })
+
+ return objVal, diags
+}
+
+func (v StorageClassesValue) Equal(o attr.Value) bool {
+ other, ok := o.(StorageClassesValue)
+
+ if !ok {
+ return false
+ }
+
+ if v.state != other.state {
+ return false
+ }
+
+ if v.state != attr.ValueStateKnown {
+ return true
+ }
+
+ if !v.Class.Equal(other.Class) {
+ return false
+ }
+
+ if !v.MaxIoPerSec.Equal(other.MaxIoPerSec) {
+ return false
+ }
+
+ if !v.MaxThroughInMb.Equal(other.MaxThroughInMb) {
+ return false
+ }
+
+ return true
+}
+
+func (v StorageClassesValue) Type(ctx context.Context) attr.Type {
+ return StorageClassesType{
+ basetypes.ObjectType{
+ AttrTypes: v.AttributeTypes(ctx),
+ },
+ }
+}
+
+func (v StorageClassesValue) AttributeTypes(ctx context.Context) map[string]attr.Type {
+ return map[string]attr.Type{
+ "class": basetypes.StringType{},
+ "max_io_per_sec": basetypes.Int64Type{},
+ "max_through_in_mb": basetypes.Int64Type{},
+ }
+}
+
+var _ basetypes.ObjectTypable = PaginationType{}
+
+type PaginationType struct {
+ basetypes.ObjectType
+}
+
+func (t PaginationType) Equal(o attr.Type) bool {
+ other, ok := o.(PaginationType)
+
+ if !ok {
+ return false
+ }
+
+ return t.ObjectType.Equal(other.ObjectType)
+}
+
+func (t PaginationType) String() string {
+ return "PaginationType"
+}
+
+func (t PaginationType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ attributes := in.Attributes()
+
+ pageAttribute, ok := attributes["page"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `page is missing from object`)
+
+ return nil, diags
+ }
+
+ pageVal, ok := pageAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`page expected to be basetypes.Int64Value, was: %T`, pageAttribute))
+ }
+
+ sizeAttribute, ok := attributes["size"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `size is missing from object`)
+
+ return nil, diags
+ }
+
+ sizeVal, ok := sizeAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`size expected to be basetypes.Int64Value, was: %T`, sizeAttribute))
+ }
+
+ sortAttribute, ok := attributes["sort"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `sort is missing from object`)
+
+ return nil, diags
+ }
+
+ sortVal, ok := sortAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`sort expected to be basetypes.StringValue, was: %T`, sortAttribute))
+ }
+
+ totalPagesAttribute, ok := attributes["total_pages"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `total_pages is missing from object`)
+
+ return nil, diags
+ }
+
+ totalPagesVal, ok := totalPagesAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`total_pages expected to be basetypes.Int64Value, was: %T`, totalPagesAttribute))
+ }
+
+ totalRowsAttribute, ok := attributes["total_rows"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `total_rows is missing from object`)
+
+ return nil, diags
+ }
+
+ totalRowsVal, ok := totalRowsAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`total_rows expected to be basetypes.Int64Value, was: %T`, totalRowsAttribute))
+ }
+
+ if diags.HasError() {
+ return nil, diags
+ }
+
+ return PaginationValue{
+ Page: pageVal,
+ Size: sizeVal,
+ Sort: sortVal,
+ TotalPages: totalPagesVal,
+ TotalRows: totalRowsVal,
+ state: attr.ValueStateKnown,
+ }, diags
+}
+
+func NewPaginationValueNull() PaginationValue {
+ return PaginationValue{
+ state: attr.ValueStateNull,
+ }
+}
+
+func NewPaginationValueUnknown() PaginationValue {
+ return PaginationValue{
+ state: attr.ValueStateUnknown,
+ }
+}
+
+func NewPaginationValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (PaginationValue, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521
+ ctx := context.Background()
+
+ for name, attributeType := range attributeTypes {
+ attribute, ok := attributes[name]
+
+ if !ok {
+ diags.AddError(
+ "Missing PaginationValue Attribute Value",
+ "While creating a PaginationValue value, a missing attribute value was detected. "+
+ "A PaginationValue must contain values for all attributes, even if null or unknown. "+
+ "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+ fmt.Sprintf("PaginationValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()),
+ )
+
+ continue
+ }
+
+ if !attributeType.Equal(attribute.Type(ctx)) {
+ diags.AddError(
+ "Invalid PaginationValue Attribute Type",
+ "While creating a PaginationValue value, an invalid attribute value was detected. "+
+ "A PaginationValue must use a matching attribute type for the value. "+
+ "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+ fmt.Sprintf("PaginationValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+
+ fmt.Sprintf("PaginationValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)),
+ )
+ }
+ }
+
+ for name := range attributes {
+ _, ok := attributeTypes[name]
+
+ if !ok {
+ diags.AddError(
+ "Extra PaginationValue Attribute Value",
+ "While creating a PaginationValue value, an extra attribute value was detected. "+
+ "A PaginationValue must not contain values beyond the expected attribute types. "+
+ "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+ fmt.Sprintf("Extra PaginationValue Attribute Name: %s", name),
+ )
+ }
+ }
+
+ if diags.HasError() {
+ return NewPaginationValueUnknown(), diags
+ }
+
+ pageAttribute, ok := attributes["page"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `page is missing from object`)
+
+ return NewPaginationValueUnknown(), diags
+ }
+
+ pageVal, ok := pageAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`page expected to be basetypes.Int64Value, was: %T`, pageAttribute))
+ }
+
+ sizeAttribute, ok := attributes["size"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `size is missing from object`)
+
+ return NewPaginationValueUnknown(), diags
+ }
+
+ sizeVal, ok := sizeAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`size expected to be basetypes.Int64Value, was: %T`, sizeAttribute))
+ }
+
+ sortAttribute, ok := attributes["sort"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `sort is missing from object`)
+
+ return NewPaginationValueUnknown(), diags
+ }
+
+ sortVal, ok := sortAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`sort expected to be basetypes.StringValue, was: %T`, sortAttribute))
+ }
+
+ totalPagesAttribute, ok := attributes["total_pages"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `total_pages is missing from object`)
+
+ return NewPaginationValueUnknown(), diags
+ }
+
+ totalPagesVal, ok := totalPagesAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`total_pages expected to be basetypes.Int64Value, was: %T`, totalPagesAttribute))
+ }
+
+ totalRowsAttribute, ok := attributes["total_rows"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `total_rows is missing from object`)
+
+ return NewPaginationValueUnknown(), diags
+ }
+
+ totalRowsVal, ok := totalRowsAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`total_rows expected to be basetypes.Int64Value, was: %T`, totalRowsAttribute))
+ }
+
+ if diags.HasError() {
+ return NewPaginationValueUnknown(), diags
+ }
+
+ return PaginationValue{
+ Page: pageVal,
+ Size: sizeVal,
+ Sort: sortVal,
+ TotalPages: totalPagesVal,
+ TotalRows: totalRowsVal,
+ state: attr.ValueStateKnown,
+ }, diags
+}
+
+func NewPaginationValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) PaginationValue {
+ object, diags := NewPaginationValue(attributeTypes, attributes)
+
+ if diags.HasError() {
+		// TODO: promote this diagnostics-to-string formatting into a shared helper in the diag package.
+ diagsStrings := make([]string, 0, len(diags))
+
+ for _, diagnostic := range diags {
+ diagsStrings = append(diagsStrings, fmt.Sprintf(
+ "%s | %s | %s",
+ diagnostic.Severity(),
+ diagnostic.Summary(),
+ diagnostic.Detail()))
+ }
+
+ panic("NewPaginationValueMust received error(s): " + strings.Join(diagsStrings, "\n"))
+ }
+
+ return object
+}
+
+func (t PaginationType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) {
+ if in.Type() == nil {
+ return NewPaginationValueNull(), nil
+ }
+
+ if !in.Type().Equal(t.TerraformType(ctx)) {
+ return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type())
+ }
+
+ if !in.IsKnown() {
+ return NewPaginationValueUnknown(), nil
+ }
+
+ if in.IsNull() {
+ return NewPaginationValueNull(), nil
+ }
+
+ attributes := map[string]attr.Value{}
+
+ val := map[string]tftypes.Value{}
+
+ err := in.As(&val)
+
+ if err != nil {
+ return nil, err
+ }
+
+ for k, v := range val {
+ a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v)
+
+ if err != nil {
+ return nil, err
+ }
+
+ attributes[k] = a
+ }
+
+ return NewPaginationValueMust(PaginationValue{}.AttributeTypes(ctx), attributes), nil
+}
+
+func (t PaginationType) ValueType(ctx context.Context) attr.Value {
+ return PaginationValue{}
+}
+
+var _ basetypes.ObjectValuable = PaginationValue{}
+
+type PaginationValue struct {
+ Page basetypes.Int64Value `tfsdk:"page"`
+ Size basetypes.Int64Value `tfsdk:"size"`
+ Sort basetypes.StringValue `tfsdk:"sort"`
+ TotalPages basetypes.Int64Value `tfsdk:"total_pages"`
+ TotalRows basetypes.Int64Value `tfsdk:"total_rows"`
+ state attr.ValueState
+}
+
+func (v PaginationValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) {
+ attrTypes := make(map[string]tftypes.Type, 5)
+
+ var val tftypes.Value
+ var err error
+
+ attrTypes["page"] = basetypes.Int64Type{}.TerraformType(ctx)
+ attrTypes["size"] = basetypes.Int64Type{}.TerraformType(ctx)
+ attrTypes["sort"] = basetypes.StringType{}.TerraformType(ctx)
+ attrTypes["total_pages"] = basetypes.Int64Type{}.TerraformType(ctx)
+ attrTypes["total_rows"] = basetypes.Int64Type{}.TerraformType(ctx)
+
+ objectType := tftypes.Object{AttributeTypes: attrTypes}
+
+ switch v.state {
+ case attr.ValueStateKnown:
+ vals := make(map[string]tftypes.Value, 5)
+
+ val, err = v.Page.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["page"] = val
+
+ val, err = v.Size.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["size"] = val
+
+ val, err = v.Sort.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["sort"] = val
+
+ val, err = v.TotalPages.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["total_pages"] = val
+
+ val, err = v.TotalRows.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["total_rows"] = val
+
+ if err := tftypes.ValidateValue(objectType, vals); err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ return tftypes.NewValue(objectType, vals), nil
+ case attr.ValueStateNull:
+ return tftypes.NewValue(objectType, nil), nil
+ case attr.ValueStateUnknown:
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), nil
+ default:
+ panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state))
+ }
+}
+
+func (v PaginationValue) IsNull() bool {
+ return v.state == attr.ValueStateNull
+}
+
+func (v PaginationValue) IsUnknown() bool {
+ return v.state == attr.ValueStateUnknown
+}
+
+func (v PaginationValue) String() string {
+ return "PaginationValue"
+}
+
+func (v PaginationValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ attributeTypes := map[string]attr.Type{
+ "page": basetypes.Int64Type{},
+ "size": basetypes.Int64Type{},
+ "sort": basetypes.StringType{},
+ "total_pages": basetypes.Int64Type{},
+ "total_rows": basetypes.Int64Type{},
+ }
+
+ if v.IsNull() {
+ return types.ObjectNull(attributeTypes), diags
+ }
+
+ if v.IsUnknown() {
+ return types.ObjectUnknown(attributeTypes), diags
+ }
+
+ objVal, diags := types.ObjectValue(
+ attributeTypes,
+ map[string]attr.Value{
+ "page": v.Page,
+ "size": v.Size,
+ "sort": v.Sort,
+ "total_pages": v.TotalPages,
+ "total_rows": v.TotalRows,
+ })
+
+ return objVal, diags
+}
+
+func (v PaginationValue) Equal(o attr.Value) bool {
+ other, ok := o.(PaginationValue)
+
+ if !ok {
+ return false
+ }
+
+ if v.state != other.state {
+ return false
+ }
+
+ if v.state != attr.ValueStateKnown {
+ return true
+ }
+
+ if !v.Page.Equal(other.Page) {
+ return false
+ }
+
+ if !v.Size.Equal(other.Size) {
+ return false
+ }
+
+ if !v.Sort.Equal(other.Sort) {
+ return false
+ }
+
+ if !v.TotalPages.Equal(other.TotalPages) {
+ return false
+ }
+
+ if !v.TotalRows.Equal(other.TotalRows) {
+ return false
+ }
+
+ return true
+}
+
+func (v PaginationValue) Type(ctx context.Context) attr.Type {
+ return PaginationType{
+ basetypes.ObjectType{
+ AttrTypes: v.AttributeTypes(ctx),
+ },
+ }
+}
+
+func (v PaginationValue) AttributeTypes(ctx context.Context) map[string]attr.Type {
+ return map[string]attr.Type{
+ "page": basetypes.Int64Type{},
+ "size": basetypes.Int64Type{},
+ "sort": basetypes.StringType{},
+ "total_pages": basetypes.Int64Type{},
+ "total_rows": basetypes.Int64Type{},
+ }
+}
diff --git a/stackit/internal/services/postgresflexalpha/flavor_test.go b/stackit/internal/services/postgresflexalpha/flavor_test.go
deleted file mode 100644
index ecdedb8b..00000000
--- a/stackit/internal/services/postgresflexalpha/flavor_test.go
+++ /dev/null
@@ -1,514 +0,0 @@
-package postgresflex
-
-import (
- "context"
- "reflect"
- "testing"
-
- "github.com/hashicorp/terraform-plugin-framework/attr"
- "github.com/hashicorp/terraform-plugin-framework/diag"
- "github.com/hashicorp/terraform-plugin-framework/types/basetypes"
- "github.com/hashicorp/terraform-plugin-go/tftypes"
-)
-
-func TestFlavorType_Equal(t1 *testing.T) {
- type fields struct {
- ObjectType basetypes.ObjectType
- }
- type args struct {
- o attr.Type
- }
- tests := []struct {
- name string
- fields fields
- args args
- want bool
- }{
- // TODO: Add test cases.
- }
- for _, tt := range tests {
- t1.Run(tt.name, func(t1 *testing.T) {
- t := FlavorType{
- ObjectType: tt.fields.ObjectType,
- }
- if got := t.Equal(tt.args.o); got != tt.want {
- t1.Errorf("Equal() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func TestFlavorType_String(t1 *testing.T) {
- type fields struct {
- ObjectType basetypes.ObjectType
- }
- tests := []struct {
- name string
- fields fields
- want string
- }{
- // TODO: Add test cases.
- }
- for _, tt := range tests {
- t1.Run(tt.name, func(t1 *testing.T) {
- t := FlavorType{
- ObjectType: tt.fields.ObjectType,
- }
- if got := t.String(); got != tt.want {
- t1.Errorf("String() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func TestFlavorType_ValueFromObject(t1 *testing.T) {
- type fields struct {
- ObjectType basetypes.ObjectType
- }
- type args struct {
- in0 context.Context
- in basetypes.ObjectValue
- }
- tests := []struct {
- name string
- fields fields
- args args
- want basetypes.ObjectValuable
- want1 diag.Diagnostics
- }{
- // TODO: Add test cases.
- }
- for _, tt := range tests {
- t1.Run(tt.name, func(t1 *testing.T) {
- t := FlavorType{
- ObjectType: tt.fields.ObjectType,
- }
- got, got1 := t.ValueFromObject(tt.args.in0, tt.args.in)
- if !reflect.DeepEqual(got, tt.want) {
- t1.Errorf("ValueFromObject() got = %v, want %v", got, tt.want)
- }
- if !reflect.DeepEqual(got1, tt.want1) {
- t1.Errorf("ValueFromObject() got1 = %v, want %v", got1, tt.want1)
- }
- })
- }
-}
-
-func TestFlavorType_ValueFromTerraform(t1 *testing.T) {
- type fields struct {
- ObjectType basetypes.ObjectType
- }
- type args struct {
- ctx context.Context
- in tftypes.Value
- }
- tests := []struct {
- name string
- fields fields
- args args
- want attr.Value
- wantErr bool
- }{
- // TODO: Add test cases.
- }
- for _, tt := range tests {
- t1.Run(tt.name, func(t1 *testing.T) {
- t := FlavorType{
- ObjectType: tt.fields.ObjectType,
- }
- got, err := t.ValueFromTerraform(tt.args.ctx, tt.args.in)
- if (err != nil) != tt.wantErr {
- t1.Errorf("ValueFromTerraform() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if !reflect.DeepEqual(got, tt.want) {
- t1.Errorf("ValueFromTerraform() got = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func TestFlavorType_ValueType(t1 *testing.T) {
- type fields struct {
- ObjectType basetypes.ObjectType
- }
- type args struct {
- in0 context.Context
- }
- tests := []struct {
- name string
- fields fields
- args args
- want attr.Value
- }{
- // TODO: Add test cases.
- }
- for _, tt := range tests {
- t1.Run(tt.name, func(t1 *testing.T) {
- t := FlavorType{
- ObjectType: tt.fields.ObjectType,
- }
- if got := t.ValueType(tt.args.in0); !reflect.DeepEqual(got, tt.want) {
- t1.Errorf("ValueType() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func TestFlavorValue_AttributeTypes(t *testing.T) {
- type fields struct {
- Cpu basetypes.Int64Value
- Description basetypes.StringValue
- Id basetypes.StringValue
- Ram basetypes.Int64Value
- state attr.ValueState
- }
- type args struct {
- in0 context.Context
- }
- tests := []struct {
- name string
- fields fields
- args args
- want map[string]attr.Type
- }{
- // TODO: Add test cases.
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- v := FlavorValue{
- Cpu: tt.fields.Cpu,
- Description: tt.fields.Description,
- Id: tt.fields.Id,
- Ram: tt.fields.Ram,
- state: tt.fields.state,
- }
- if got := v.AttributeTypes(tt.args.in0); !reflect.DeepEqual(got, tt.want) {
- t.Errorf("AttributeTypes() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func TestFlavorValue_Equal(t *testing.T) {
- type fields struct {
- Cpu basetypes.Int64Value
- Description basetypes.StringValue
- Id basetypes.StringValue
- Ram basetypes.Int64Value
- state attr.ValueState
- }
- type args struct {
- o attr.Value
- }
- tests := []struct {
- name string
- fields fields
- args args
- want bool
- }{
- // TODO: Add test cases.
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- v := FlavorValue{
- Cpu: tt.fields.Cpu,
- Description: tt.fields.Description,
- Id: tt.fields.Id,
- Ram: tt.fields.Ram,
- state: tt.fields.state,
- }
- if got := v.Equal(tt.args.o); got != tt.want {
- t.Errorf("Equal() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func TestFlavorValue_IsNull(t *testing.T) {
- type fields struct {
- Cpu basetypes.Int64Value
- Description basetypes.StringValue
- Id basetypes.StringValue
- Ram basetypes.Int64Value
- state attr.ValueState
- }
- tests := []struct {
- name string
- fields fields
- want bool
- }{
- // TODO: Add test cases.
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- v := FlavorValue{
- Cpu: tt.fields.Cpu,
- Description: tt.fields.Description,
- Id: tt.fields.Id,
- Ram: tt.fields.Ram,
- state: tt.fields.state,
- }
- if got := v.IsNull(); got != tt.want {
- t.Errorf("IsNull() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func TestFlavorValue_IsUnknown(t *testing.T) {
- type fields struct {
- Cpu basetypes.Int64Value
- Description basetypes.StringValue
- Id basetypes.StringValue
- Ram basetypes.Int64Value
- state attr.ValueState
- }
- tests := []struct {
- name string
- fields fields
- want bool
- }{
- // TODO: Add test cases.
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- v := FlavorValue{
- Cpu: tt.fields.Cpu,
- Description: tt.fields.Description,
- Id: tt.fields.Id,
- Ram: tt.fields.Ram,
- state: tt.fields.state,
- }
- if got := v.IsUnknown(); got != tt.want {
- t.Errorf("IsUnknown() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func TestFlavorValue_String(t *testing.T) {
- type fields struct {
- Cpu basetypes.Int64Value
- Description basetypes.StringValue
- Id basetypes.StringValue
- Ram basetypes.Int64Value
- state attr.ValueState
- }
- tests := []struct {
- name string
- fields fields
- want string
- }{
- // TODO: Add test cases.
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- v := FlavorValue{
- Cpu: tt.fields.Cpu,
- Description: tt.fields.Description,
- Id: tt.fields.Id,
- Ram: tt.fields.Ram,
- state: tt.fields.state,
- }
- if got := v.String(); got != tt.want {
- t.Errorf("String() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func TestFlavorValue_ToObjectValue(t *testing.T) {
- type fields struct {
- Cpu basetypes.Int64Value
- Description basetypes.StringValue
- Id basetypes.StringValue
- Ram basetypes.Int64Value
- state attr.ValueState
- }
- type args struct {
- in0 context.Context
- }
- tests := []struct {
- name string
- fields fields
- args args
- want basetypes.ObjectValue
- want1 diag.Diagnostics
- }{
- // TODO: Add test cases.
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- v := FlavorValue{
- Cpu: tt.fields.Cpu,
- Description: tt.fields.Description,
- Id: tt.fields.Id,
- Ram: tt.fields.Ram,
- state: tt.fields.state,
- }
- got, got1 := v.ToObjectValue(tt.args.in0)
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("ToObjectValue() got = %v, want %v", got, tt.want)
- }
- if !reflect.DeepEqual(got1, tt.want1) {
- t.Errorf("ToObjectValue() got1 = %v, want %v", got1, tt.want1)
- }
- })
- }
-}
-
-func TestFlavorValue_ToTerraformValue(t *testing.T) {
- type fields struct {
- Cpu basetypes.Int64Value
- Description basetypes.StringValue
- Id basetypes.StringValue
- Ram basetypes.Int64Value
- state attr.ValueState
- }
- type args struct {
- ctx context.Context
- }
- tests := []struct {
- name string
- fields fields
- args args
- want tftypes.Value
- wantErr bool
- }{
- // TODO: Add test cases.
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- v := FlavorValue{
- Cpu: tt.fields.Cpu,
- Description: tt.fields.Description,
- Id: tt.fields.Id,
- Ram: tt.fields.Ram,
- state: tt.fields.state,
- }
- got, err := v.ToTerraformValue(tt.args.ctx)
- if (err != nil) != tt.wantErr {
- t.Errorf("ToTerraformValue() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("ToTerraformValue() got = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func TestFlavorValue_Type(t *testing.T) {
- type fields struct {
- Cpu basetypes.Int64Value
- Description basetypes.StringValue
- Id basetypes.StringValue
- Ram basetypes.Int64Value
- state attr.ValueState
- }
- type args struct {
- ctx context.Context
- }
- tests := []struct {
- name string
- fields fields
- args args
- want attr.Type
- }{
- // TODO: Add test cases.
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- v := FlavorValue{
- Cpu: tt.fields.Cpu,
- Description: tt.fields.Description,
- Id: tt.fields.Id,
- Ram: tt.fields.Ram,
- state: tt.fields.state,
- }
- if got := v.Type(tt.args.ctx); !reflect.DeepEqual(got, tt.want) {
- t.Errorf("Type() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func TestNewFlavorValue(t *testing.T) {
- type args struct {
- attributeTypes map[string]attr.Type
- attributes map[string]attr.Value
- }
- tests := []struct {
- name string
- args args
- want FlavorValue
- want1 diag.Diagnostics
- }{
- // TODO: Add test cases.
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- got, got1 := NewFlavorValue(tt.args.attributeTypes, tt.args.attributes)
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("NewFlavorValue() got = %v, want %v", got, tt.want)
- }
- if !reflect.DeepEqual(got1, tt.want1) {
- t.Errorf("NewFlavorValue() got1 = %v, want %v", got1, tt.want1)
- }
- })
- }
-}
-
-func TestNewFlavorValueMust(t *testing.T) {
- type args struct {
- attributeTypes map[string]attr.Type
- attributes map[string]attr.Value
- }
- tests := []struct {
- name string
- args args
- want FlavorValue
- }{
- // TODO: Add test cases.
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if got := NewFlavorValueMust(tt.args.attributeTypes, tt.args.attributes); !reflect.DeepEqual(got, tt.want) {
- t.Errorf("NewFlavorValueMust() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func TestNewFlavorValueNull(t *testing.T) {
- tests := []struct {
- name string
- want FlavorValue
- }{
- // TODO: Add test cases.
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if got := NewFlavorValueNull(); !reflect.DeepEqual(got, tt.want) {
- t.Errorf("NewFlavorValueNull() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func TestNewFlavorValueUnknown(t *testing.T) {
- tests := []struct {
- name string
- want FlavorValue
- }{
- // TODO: Add test cases.
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if got := NewFlavorValueUnknown(); !reflect.DeepEqual(got, tt.want) {
- t.Errorf("NewFlavorValueUnknown() = %v, want %v", got, tt.want)
- }
- })
- }
-}
diff --git a/stackit/internal/services/postgresflexalpha/functions.go b/stackit/internal/services/postgresflexalpha/functions.go
deleted file mode 100644
index a8632760..00000000
--- a/stackit/internal/services/postgresflexalpha/functions.go
+++ /dev/null
@@ -1 +0,0 @@
-package postgresflex
diff --git a/stackit/internal/services/postgresflexalpha/instance/datasource.go b/stackit/internal/services/postgresflexalpha/instance/datasource.go
index 98aeb59f..c7b83646 100644
--- a/stackit/internal/services/postgresflexalpha/instance/datasource.go
+++ b/stackit/internal/services/postgresflexalpha/instance/datasource.go
@@ -100,25 +100,11 @@ func (r *instanceDataSource) Schema(_ context.Context, _ datasource.SchemaReques
"backup_schedule": schema.StringAttribute{
Computed: true,
},
- "flavor": schema.SingleNestedAttribute{
+ "retention_days": schema.Int64Attribute{
+ Computed: true,
+ },
+ "flavor_id": schema.StringAttribute{
Computed: true,
- Attributes: map[string]schema.Attribute{
- "id": schema.StringAttribute{
- Computed: true,
- },
- "description": schema.StringAttribute{
- Computed: true,
- },
- "cpu": schema.Int64Attribute{
- Computed: true,
- },
- "ram": schema.Int64Attribute{
- Computed: true,
- },
- "node_type": schema.StringAttribute{
- Computed: true,
- },
- },
},
"replicas": schema.Int64Attribute{
Computed: true,
@@ -226,31 +212,6 @@ func (r *instanceDataSource) Read(ctx context.Context, req datasource.ReadReques
ctx = core.LogResponse(ctx)
- var flavor = &flavorModel{}
- if instanceResp != nil && instanceResp.FlavorId != nil {
- flavor.Id = types.StringValue(*instanceResp.FlavorId)
- }
-
- if !model.Flavor.IsNull() && !model.Flavor.IsUnknown() {
- diags = model.Flavor.As(ctx, flavor, basetypes.ObjectAsOptions{})
- resp.Diagnostics.Append(diags...)
- if resp.Diagnostics.HasError() {
- return
- }
-
- err := getFlavorModelById(ctx, r.client, &model, flavor)
- if err != nil {
- resp.Diagnostics.AddError(err.Error(), err.Error())
- return
- }
-
- diags = model.Flavor.As(ctx, flavor, basetypes.ObjectAsOptions{})
- resp.Diagnostics.Append(diags...)
- if resp.Diagnostics.HasError() {
- return
- }
- }
-
var storage = &storageModel{}
if !model.Storage.IsNull() && !model.Storage.IsUnknown() {
diags = model.Storage.As(ctx, storage, basetypes.ObjectAsOptions{})
@@ -278,7 +239,7 @@ func (r *instanceDataSource) Read(ctx context.Context, req datasource.ReadReques
}
}
- err = mapFields(ctx, instanceResp, &model, flavor, storage, encryption, network, region)
+ err = mapFields(ctx, r.client, instanceResp, &model, storage, encryption, network, region)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Processing API payload: %v", err))
return
diff --git a/stackit/internal/services/postgresflexalpha/instance/functions.go b/stackit/internal/services/postgresflexalpha/instance/functions.go
index f415cc69..41a0b3e3 100644
--- a/stackit/internal/services/postgresflexalpha/instance/functions.go
+++ b/stackit/internal/services/postgresflexalpha/instance/functions.go
@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"math"
- "strings"
"github.com/hashicorp/terraform-plugin-framework/attr"
"github.com/hashicorp/terraform-plugin-framework/types"
@@ -20,9 +19,9 @@ type postgresflexClient interface {
func mapFields(
ctx context.Context,
+ client postgresflexClient,
resp *postgresflex.GetInstanceResponse,
model *Model,
- flavor *flavorModel,
storage *storageModel,
encryption *encryptionModel,
network *networkModel,
@@ -80,19 +79,11 @@ func mapFields(
return fmt.Errorf("creating network (acl list): %w", core.DiagsToError(diags))
}
- var routerAddress string
- if instance.Network.RouterAddress != nil {
- routerAddress = *instance.Network.RouterAddress
- diags.AddWarning("field missing while mapping fields", "router_address was empty in API response")
- }
- if instance.Network.InstanceAddress == nil {
- return fmt.Errorf("creating network: no instance address returned")
- }
networkValues = map[string]attr.Value{
"acl": aclList,
- "access_scope": types.StringValue(string(*instance.Network.AccessScope)),
- "instance_address": types.StringValue(*instance.Network.InstanceAddress),
- "router_address": types.StringValue(routerAddress),
+ "access_scope": types.StringPointerValue((*string)(instance.Network.AccessScope)),
+ "instance_address": types.StringPointerValue(instance.Network.InstanceAddress),
+ "router_address": types.StringPointerValue(instance.Network.RouterAddress),
}
}
networkObject, diags := types.ObjectValue(networkTypes, networkValues)
@@ -100,48 +91,6 @@ func mapFields(
return fmt.Errorf("creating network: %w", core.DiagsToError(diags))
}
- var flavorValues map[string]attr.Value
- if instance.FlavorId == nil || *instance.FlavorId == "" {
- return fmt.Errorf("instance has no flavor id")
- }
- if !flavor.Id.IsUnknown() && !flavor.Id.IsNull() {
- if *instance.FlavorId != flavor.Id.ValueString() {
- return fmt.Errorf("instance has different flavor id %s - %s", *instance.FlavorId, flavor.Id.ValueString())
- }
- }
- if model.Flavor.IsNull() || model.Flavor.IsUnknown() {
- var nodeType string
- if flavor.NodeType.IsUnknown() || flavor.NodeType.IsNull() {
- if instance.Replicas == nil {
- return fmt.Errorf("instance has no replicas setting")
- }
- switch *instance.Replicas {
- case 1:
- nodeType = "Single"
- case 3:
- nodeType = "Replicas"
- default:
- return fmt.Errorf("could not determine replicas settings")
- }
- } else {
- nodeType = flavor.NodeType.ValueString()
- }
- flavorValues = map[string]attr.Value{
- "id": flavor.Id,
- "description": flavor.Description,
- "cpu": flavor.CPU,
- "ram": flavor.RAM,
- "node_type": types.StringValue(nodeType),
- }
- } else {
- flavorValues = model.Flavor.Attributes()
- }
-
- flavorObject, diags := types.ObjectValue(flavorTypes, flavorValues)
- if diags.HasError() {
- return fmt.Errorf("creating flavor: %w", core.DiagsToError(diags))
- }
-
var storageValues map[string]attr.Value
if instance.Storage == nil {
storageValues = map[string]attr.Value{
@@ -167,10 +116,8 @@ func mapFields(
model.Id = utils.BuildInternalTerraformId(model.ProjectId.ValueString(), region, instanceId)
model.InstanceId = types.StringValue(instanceId)
model.Name = types.StringPointerValue(instance.Name)
- model.Network = networkObject
model.BackupSchedule = types.StringPointerValue(instance.BackupSchedule)
- model.Flavor = flavorObject
- // TODO - verify working
+ model.FlavorId = types.StringPointerValue(instance.FlavorId)
model.Replicas = types.Int64Value(int64(*instance.Replicas))
model.Storage = storageObject
model.Version = types.StringPointerValue(instance.Version)
@@ -182,7 +129,6 @@ func mapFields(
func toCreatePayload(
model *Model,
- flavor *flavorModel,
storage *storageModel,
enc *encryptionModel,
net *networkModel,
@@ -190,9 +136,6 @@ func toCreatePayload(
if model == nil {
return nil, fmt.Errorf("nil model")
}
- if flavor == nil {
- return nil, fmt.Errorf("nil flavor")
- }
if storage == nil {
return nil, fmt.Errorf("nil storage")
}
@@ -239,24 +182,11 @@ func toCreatePayload(
}
}
- if model.Replicas.IsNull() || model.Replicas.IsUnknown() {
- if !flavor.NodeType.IsNull() && !flavor.NodeType.IsUnknown() {
- switch strings.ToLower(flavor.NodeType.ValueString()) {
- case "single":
- replVal = int32(1)
- case "replica":
- replVal = int32(3)
- default:
- return nil, fmt.Errorf("flavor has invalid replica attribute")
- }
- }
- }
-
return &postgresflex.CreateInstanceRequestPayload{
Acl: &aclElements,
BackupSchedule: conversion.StringValueToPointer(model.BackupSchedule),
Encryption: encryptionPayload,
- FlavorId: conversion.StringValueToPointer(flavor.Id),
+ FlavorId: conversion.StringValueToPointer(model.FlavorId),
Name: conversion.StringValueToPointer(model.Name),
Network: networkPayload,
Replicas: postgresflex.CreateInstanceRequestPayloadGetReplicasAttributeType(&replVal),
@@ -266,13 +196,14 @@ func toCreatePayload(
}, nil
}
-func toUpdatePayload(model *Model, flavor *flavorModel, storage *storageModel, _ *networkModel) (*postgresflex.UpdateInstancePartiallyRequestPayload, error) {
+func toUpdatePayload(
+ model *Model,
+ storage *storageModel,
+ _ *networkModel,
+) (*postgresflex.UpdateInstancePartiallyRequestPayload, error) {
if model == nil {
return nil, fmt.Errorf("nil model")
}
- if flavor == nil {
- return nil, fmt.Errorf("nil flavor")
- }
if storage == nil {
return nil, fmt.Errorf("nil storage")
}
@@ -282,7 +213,7 @@ func toUpdatePayload(model *Model, flavor *flavorModel, storage *storageModel, _
// Items: &acl,
// },
BackupSchedule: conversion.StringValueToPointer(model.BackupSchedule),
- FlavorId: conversion.StringValueToPointer(flavor.Id),
+ FlavorId: conversion.StringValueToPointer(model.FlavorId),
Name: conversion.StringValueToPointer(model.Name),
// Replicas: conversion.Int64ValueToPointer(model.Replicas),
Storage: &postgresflex.StorageUpdate{
@@ -291,187 +222,3 @@ func toUpdatePayload(model *Model, flavor *flavorModel, storage *storageModel, _
Version: conversion.StringValueToPointer(model.Version),
}, nil
}
-
-func loadFlavorId(ctx context.Context, client postgresflexClient, model *Model, flavor *flavorModel, storage *storageModel) error {
- if model == nil {
- return fmt.Errorf("nil model")
- }
- if flavor == nil {
- return fmt.Errorf("nil flavor")
- }
- cpu := flavor.CPU.ValueInt64()
- if cpu == 0 {
- return fmt.Errorf("nil CPU")
- }
- ram := flavor.RAM.ValueInt64()
- if ram == 0 {
- return fmt.Errorf("nil RAM")
- }
-
- nodeType := flavor.NodeType.ValueString()
- if nodeType == "" {
- if model.Replicas.IsNull() || model.Replicas.IsUnknown() {
- return fmt.Errorf("nil NodeType")
- }
- switch model.Replicas.ValueInt64() {
- case 1:
- nodeType = "Single"
- case 3:
- nodeType = "Replica"
- default:
- return fmt.Errorf("unknown Replicas value: %d", model.Replicas.ValueInt64())
- }
- }
-
- storageClass := conversion.StringValueToPointer(storage.Class)
- if storageClass == nil {
- return fmt.Errorf("nil StorageClass")
- }
- storageSize := conversion.Int64ValueToPointer(storage.Size)
- if storageSize == nil {
- return fmt.Errorf("nil StorageSize")
- }
-
- projectId := model.ProjectId.ValueString()
- region := model.Region.ValueString()
-
- flavorList, err := getAllFlavors(ctx, client, projectId, region)
- if err != nil {
- return err
- }
-
- avl := ""
- foundFlavorCount := 0
- var foundFlavors []string
- for _, f := range flavorList {
- if f.Id == nil || f.Cpu == nil || f.Memory == nil {
- continue
- }
- if !strings.EqualFold(*f.NodeType, nodeType) {
- continue
- }
- if *f.Cpu == cpu && *f.Memory == ram {
- var useSc *postgresflex.FlavorStorageClassesStorageClass
- for _, sc := range *f.StorageClasses {
- if *sc.Class != *storageClass {
- continue
- }
- if *storageSize < *f.MinGB || *storageSize > *f.MaxGB {
- return fmt.Errorf("storage size %d out of bounds (min: %d - max: %d)", *storageSize, *f.MinGB, *f.MaxGB)
- }
- useSc = &sc
- }
- if useSc == nil {
- return fmt.Errorf("no storage class found for %s", *storageClass)
- }
-
- flavor.Id = types.StringValue(*f.Id)
- flavor.Description = types.StringValue(*f.Description)
- foundFlavors = append(foundFlavors, fmt.Sprintf("%s (%d/%d - %s)", *f.Id, *f.Cpu, *f.Memory, *f.NodeType))
- foundFlavorCount++
- }
- for _, cls := range *f.StorageClasses {
- avl = fmt.Sprintf("%s\n- %d CPU, %d GB RAM, storage %s (min: %d - max: %d)", avl, *f.Cpu, *f.Memory, *cls.Class, *f.MinGB, *f.MaxGB)
- }
- }
- if foundFlavorCount > 1 {
- return fmt.Errorf(
- "number of flavors returned: %d\nmultiple flavors found: %d flavors\n %s",
- len(flavorList),
- foundFlavorCount,
- strings.Join(foundFlavors, "\n "),
- )
- }
- if flavor.Id.ValueString() == "" {
- return fmt.Errorf("couldn't find flavor, available specs are:%s", avl)
- }
-
- return nil
-}
-
-func getAllFlavors(ctx context.Context, client postgresflexClient, projectId, region string) ([]postgresflex.ListFlavors, error) {
- if projectId == "" || region == "" {
- return nil, fmt.Errorf("listing postgresflex flavors: projectId and region are required")
- }
- var flavorList []postgresflex.ListFlavors
-
- page := int64(1)
- size := int64(10)
- sort := postgresflex.FLAVORSORT_INDEX_ASC
- counter := 0
- for {
- res, err := client.GetFlavorsRequestExecute(ctx, projectId, region, &page, &size, &sort)
- if err != nil {
- return nil, fmt.Errorf("listing postgresflex flavors: %w", err)
- }
- if res.Flavors == nil {
- return nil, fmt.Errorf("finding flavors for project %s", projectId)
- }
- pagination := res.GetPagination()
- flavors := res.GetFlavors()
- flavorList = append(flavorList, flavors...)
-
- if *pagination.TotalRows < int64(len(flavorList)) {
- return nil, fmt.Errorf("total rows is smaller than current accumulated list - that should not happen")
- }
- if *pagination.TotalRows == int64(len(flavorList)) {
- break
- }
- page++
-
- if page > *pagination.TotalPages {
- break
- }
-
- // implement a breakpoint
- counter++
- if counter > 1000 {
- panic("too many pagination results")
- }
- }
- return flavorList, nil
-}
-
-func getFlavorModelById(ctx context.Context, client postgresflexClient, model *Model, flavor *flavorModel) error {
- if model == nil {
- return fmt.Errorf("nil model")
- }
- if flavor == nil {
- return fmt.Errorf("nil flavor")
- }
- id := conversion.StringValueToPointer(flavor.Id)
- if id == nil {
- return fmt.Errorf("nil flavor ID")
- }
-
- flavor.Id = types.StringValue("")
-
- projectId := model.ProjectId.ValueString()
- region := model.Region.ValueString()
-
- flavorList, err := getAllFlavors(ctx, client, projectId, region)
- if err != nil {
- return err
- }
-
- avl := ""
- for _, f := range flavorList {
- if f.Id == nil || f.Cpu == nil || f.Memory == nil {
- continue
- }
- if *f.Id == *id {
- flavor.Id = types.StringValue(*f.Id)
- flavor.Description = types.StringValue(*f.Description)
- flavor.CPU = types.Int64Value(*f.Cpu)
- flavor.RAM = types.Int64Value(*f.Memory)
- flavor.NodeType = types.StringValue(*f.NodeType)
- break
- }
- avl = fmt.Sprintf("%s\n- %d CPU, %d GB RAM", avl, *f.Cpu, *f.Memory)
- }
- if flavor.Id.ValueString() == "" {
- return fmt.Errorf("couldn't find flavor, available specs are: %s", avl)
- }
-
- return nil
-}
diff --git a/stackit/internal/services/postgresflexalpha/instance/functions_test.go b/stackit/internal/services/postgresflexalpha/instance/functions_test.go
index 10bc53a6..2c1fbebf 100644
--- a/stackit/internal/services/postgresflexalpha/instance/functions_test.go
+++ b/stackit/internal/services/postgresflexalpha/instance/functions_test.go
@@ -3,11 +3,7 @@ package postgresflexalpha
import (
"context"
"fmt"
- "reflect"
- "testing"
- "github.com/google/go-cmp/cmp"
- "github.com/hashicorp/terraform-plugin-framework/types/basetypes"
postgresflex "github.com/mhenselin/terraform-provider-stackitprivatepreview/pkg/postgresflexalpha"
"github.com/stackitcloud/stackit-sdk-go/core/utils"
)
@@ -490,327 +486,303 @@ func (c postgresFlexClientMocked) GetFlavorsRequestExecute(
return &res, nil
}
-func Test_getAllFlavors(t *testing.T) {
- type args struct {
- projectId string
- region string
- }
- tests := []struct {
- name string
- args args
- firstItem int
- lastItem int
- want []postgresflex.ListFlavors
- wantErr bool
- }{
- {
- name: "find exactly one flavor",
- args: args{
- projectId: "project",
- region: "region",
- },
- firstItem: 0,
- lastItem: 0,
- want: []postgresflex.ListFlavors{
- testFlavorToResponseFlavor(responseList[0]),
- },
- wantErr: false,
- },
- {
- name: "get exactly 1 page flavors",
- args: args{
- projectId: "project",
- region: "region",
- },
- firstItem: 0,
- lastItem: 9,
- want: testFlavorListToResponseFlavorList(responseList[0:10]),
- wantErr: false,
- },
- {
- name: "get exactly 20 flavors",
- args: args{
- projectId: "project",
- region: "region",
- },
- firstItem: 0,
- lastItem: 20,
- // 0 indexed therefore we want :21
- want: testFlavorListToResponseFlavorList(responseList[0:21]),
- wantErr: false,
- },
- {
- name: "get all flavors",
- args: args{
- projectId: "project",
- region: "region",
- },
- firstItem: 0,
- lastItem: len(responseList),
- want: testFlavorListToResponseFlavorList(responseList),
- wantErr: false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- first := tt.firstItem
- if first > len(responseList)-1 {
- first = len(responseList) - 1
- }
- last := tt.lastItem
- if last > len(responseList)-1 {
- last = len(responseList) - 1
- }
- mockClient := postgresFlexClientMocked{
- returnError: tt.wantErr,
- firstItem: first,
- lastItem: last,
- }
- got, err := getAllFlavors(context.TODO(), mockClient, tt.args.projectId, tt.args.region)
- if (err != nil) != tt.wantErr {
- t.Errorf("getAllFlavors() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
+//func Test_getAllFlavors(t *testing.T) {
+// type args struct {
+// projectId string
+// region string
+// }
+// tests := []struct {
+// name string
+// args args
+// firstItem int
+// lastItem int
+// want []postgresflex.ListFlavors
+// wantErr bool
+// }{
+// {
+// name: "find exactly one flavor",
+// args: args{
+// projectId: "project",
+// region: "region",
+// },
+// firstItem: 0,
+// lastItem: 0,
+// want: []postgresflex.ListFlavors{
+// testFlavorToResponseFlavor(responseList[0]),
+// },
+// wantErr: false,
+// },
+// {
+// name: "get exactly 1 page flavors",
+// args: args{
+// projectId: "project",
+// region: "region",
+// },
+// firstItem: 0,
+// lastItem: 9,
+// want: testFlavorListToResponseFlavorList(responseList[0:10]),
+// wantErr: false,
+// },
+// {
+// name: "get exactly 20 flavors",
+// args: args{
+// projectId: "project",
+// region: "region",
+// },
+// firstItem: 0,
+// lastItem: 20,
+// // 0 indexed therefore we want :21
+// want: testFlavorListToResponseFlavorList(responseList[0:21]),
+// wantErr: false,
+// },
+// {
+// name: "get all flavors",
+// args: args{
+// projectId: "project",
+// region: "region",
+// },
+// firstItem: 0,
+// lastItem: len(responseList),
+// want: testFlavorListToResponseFlavorList(responseList),
+// wantErr: false,
+// },
+// }
+// for _, tt := range tests {
+// t.Run(tt.name, func(t *testing.T) {
+// first := tt.firstItem
+// if first > len(responseList)-1 {
+// first = len(responseList) - 1
+// }
+// last := tt.lastItem
+// if last > len(responseList)-1 {
+// last = len(responseList) - 1
+// }
+// mockClient := postgresFlexClientMocked{
+// returnError: tt.wantErr,
+// firstItem: first,
+// lastItem: last,
+// }
+// got, err := getAllFlavors(context.TODO(), mockClient, tt.args.projectId, tt.args.region)
+// if (err != nil) != tt.wantErr {
+// t.Errorf("getAllFlavors() error = %v, wantErr %v", err, tt.wantErr)
+// return
+// }
+//
+// if diff := cmp.Diff(tt.want, got); diff != "" {
+// t.Errorf("mismatch (-want +got):\n%s", diff)
+// }
+//
+// if !reflect.DeepEqual(got, tt.want) {
+// t.Errorf("getAllFlavors() got = %v, want %v", got, tt.want)
+// }
+// })
+// }
+//}
- if diff := cmp.Diff(tt.want, got); diff != "" {
- t.Errorf("mismatch (-want +got):\n%s", diff)
- }
-
- if !reflect.DeepEqual(got, tt.want) {
- t.Errorf("getAllFlavors() got = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func Test_loadFlavorId(t *testing.T) {
- type args struct {
- ctx context.Context
- model *Model
- flavor *flavorModel
- storage *storageModel
- }
- tests := []struct {
- name string
- args args
- firstItem int
- lastItem int
- want []postgresflex.ListFlavors
- wantErr bool
- }{
- {
- name: "find a single flavor",
- args: args{
- ctx: context.Background(),
- model: &Model{
- ProjectId: basetypes.NewStringValue("project"),
- Region: basetypes.NewStringValue("region"),
- },
- flavor: &flavorModel{
- CPU: basetypes.NewInt64Value(1),
- RAM: basetypes.NewInt64Value(1),
- NodeType: basetypes.NewStringValue("Single"),
- },
- storage: &storageModel{
- Class: basetypes.NewStringValue("sc1"),
- Size: basetypes.NewInt64Value(100),
- },
- },
- firstItem: 0,
- lastItem: 3,
- want: []postgresflex.ListFlavors{
- testFlavorToResponseFlavor(responseList[0]),
- },
- wantErr: false,
- },
- {
- name: "find a single flavor by replicas option",
- args: args{
- ctx: context.Background(),
- model: &Model{
- ProjectId: basetypes.NewStringValue("project"),
- Region: basetypes.NewStringValue("region"),
- Replicas: basetypes.NewInt64Value(1),
- },
- flavor: &flavorModel{
- CPU: basetypes.NewInt64Value(1),
- RAM: basetypes.NewInt64Value(1),
- },
- storage: &storageModel{
- Class: basetypes.NewStringValue("sc1"),
- Size: basetypes.NewInt64Value(100),
- },
- },
- firstItem: 0,
- lastItem: 3,
- want: []postgresflex.ListFlavors{
- testFlavorToResponseFlavor(responseList[0]),
- },
- wantErr: false,
- },
- {
- name: "fail finding find a single flavor by replicas option",
- args: args{
- ctx: context.Background(),
- model: &Model{
- ProjectId: basetypes.NewStringValue("project"),
- Region: basetypes.NewStringValue("region"),
- Replicas: basetypes.NewInt64Value(1),
- },
- flavor: &flavorModel{
- CPU: basetypes.NewInt64Value(1),
- RAM: basetypes.NewInt64Value(1),
- },
- storage: &storageModel{
- Class: basetypes.NewStringValue("sc1"),
- Size: basetypes.NewInt64Value(100),
- },
- },
- firstItem: 13,
- lastItem: 23,
- want: []postgresflex.ListFlavors{},
- wantErr: true,
- },
- {
- name: "find a replicas flavor lower case",
- args: args{
- ctx: context.Background(),
- model: &Model{
- ProjectId: basetypes.NewStringValue("project"),
- Region: basetypes.NewStringValue("region"),
- },
- flavor: &flavorModel{
- CPU: basetypes.NewInt64Value(1),
- RAM: basetypes.NewInt64Value(1),
- NodeType: basetypes.NewStringValue("replica"),
- },
- storage: &storageModel{
- Class: basetypes.NewStringValue("sc1"),
- Size: basetypes.NewInt64Value(100),
- },
- },
- firstItem: 0,
- lastItem: len(responseList) - 1,
- want: []postgresflex.ListFlavors{
- testFlavorToResponseFlavor(responseList[16]),
- },
- wantErr: false,
- },
- {
- name: "find a replicas flavor CamelCase",
- args: args{
- ctx: context.Background(),
- model: &Model{
- ProjectId: basetypes.NewStringValue("project"),
- Region: basetypes.NewStringValue("region"),
- },
- flavor: &flavorModel{
- CPU: basetypes.NewInt64Value(1),
- RAM: basetypes.NewInt64Value(1),
- NodeType: basetypes.NewStringValue("Replica"),
- },
- storage: &storageModel{
- Class: basetypes.NewStringValue("sc1"),
- Size: basetypes.NewInt64Value(100),
- },
- },
- firstItem: 0,
- lastItem: len(responseList) - 1,
- want: []postgresflex.ListFlavors{
- testFlavorToResponseFlavor(responseList[16]),
- },
- wantErr: false,
- },
- {
- name: "find a replicas flavor by replicas option",
- args: args{
- ctx: context.Background(),
- model: &Model{
- ProjectId: basetypes.NewStringValue("project"),
- Region: basetypes.NewStringValue("region"),
- Replicas: basetypes.NewInt64Value(3),
- },
- flavor: &flavorModel{
- CPU: basetypes.NewInt64Value(1),
- RAM: basetypes.NewInt64Value(1),
- },
- storage: &storageModel{
- Class: basetypes.NewStringValue("sc1"),
- Size: basetypes.NewInt64Value(100),
- },
- },
- firstItem: 0,
- lastItem: len(responseList) - 1,
- want: []postgresflex.ListFlavors{
- testFlavorToResponseFlavor(responseList[16]),
- },
- wantErr: false,
- },
- {
- name: "fail finding a replica flavor",
- args: args{
- ctx: context.Background(),
- model: &Model{
- ProjectId: basetypes.NewStringValue("project"),
- Region: basetypes.NewStringValue("region"),
- Replicas: basetypes.NewInt64Value(3),
- },
- flavor: &flavorModel{
- CPU: basetypes.NewInt64Value(1),
- RAM: basetypes.NewInt64Value(1),
- },
- storage: &storageModel{
- Class: basetypes.NewStringValue("sc1"),
- Size: basetypes.NewInt64Value(100),
- },
- },
- firstItem: 0,
- lastItem: 10,
- want: []postgresflex.ListFlavors{},
- wantErr: true,
- },
- {
- name: "no flavor found error",
- args: args{
- ctx: context.Background(),
- model: &Model{
- ProjectId: basetypes.NewStringValue("project"),
- Region: basetypes.NewStringValue("region"),
- },
- flavor: &flavorModel{
- CPU: basetypes.NewInt64Value(10),
- RAM: basetypes.NewInt64Value(1000),
- NodeType: basetypes.NewStringValue("Single"),
- },
- storage: &storageModel{
- Class: basetypes.NewStringValue("sc1"),
- Size: basetypes.NewInt64Value(100),
- },
- },
- firstItem: 0,
- lastItem: 3,
- want: []postgresflex.ListFlavors{},
- wantErr: true,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- first := tt.firstItem
- if first > len(responseList)-1 {
- first = len(responseList) - 1
- }
- last := tt.lastItem
- if last > len(responseList)-1 {
- last = len(responseList) - 1
- }
- mockClient := postgresFlexClientMocked{
- returnError: tt.wantErr,
- firstItem: first,
- lastItem: last,
- }
- if err := loadFlavorId(tt.args.ctx, mockClient, tt.args.model, tt.args.flavor, tt.args.storage); (err != nil) != tt.wantErr {
- t.Errorf("loadFlavorId() error = %v, wantErr %v", err, tt.wantErr)
- }
- })
- }
-}
+//func Test_loadFlavorId(t *testing.T) {
+// type args struct {
+// ctx context.Context
+// model *Model
+// storage *storageModel
+// }
+// tests := []struct {
+// name string
+// args args
+// firstItem int
+// lastItem int
+// want []postgresflex.ListFlavors
+// wantErr bool
+// }{
+// {
+// name: "find a single flavor",
+// args: args{
+// ctx: context.Background(),
+// model: &Model{
+// ProjectId: basetypes.NewStringValue("project"),
+// Region: basetypes.NewStringValue("region"),
+// },
+// storage: &storageModel{
+// Class: basetypes.NewStringValue("sc1"),
+// Size: basetypes.NewInt64Value(100),
+// },
+// },
+// firstItem: 0,
+// lastItem: 3,
+// want: []postgresflex.ListFlavors{
+// testFlavorToResponseFlavor(responseList[0]),
+// },
+// wantErr: false,
+// },
+// {
+// name: "find a single flavor by replicas option",
+// args: args{
+// ctx: context.Background(),
+// model: &Model{
+// ProjectId: basetypes.NewStringValue("project"),
+// Region: basetypes.NewStringValue("region"),
+// Replicas: basetypes.NewInt64Value(1),
+// },
+// storage: &storageModel{
+// Class: basetypes.NewStringValue("sc1"),
+// Size: basetypes.NewInt64Value(100),
+// },
+// },
+// firstItem: 0,
+// lastItem: 3,
+// want: []postgresflex.ListFlavors{
+// testFlavorToResponseFlavor(responseList[0]),
+// },
+// wantErr: false,
+// },
+// {
+// name: "fail finding a single flavor by replicas option",
+// args: args{
+// ctx: context.Background(),
+// model: &Model{
+// ProjectId: basetypes.NewStringValue("project"),
+// Region: basetypes.NewStringValue("region"),
+// Replicas: basetypes.NewInt64Value(1),
+// },
+// storage: &storageModel{
+// Class: basetypes.NewStringValue("sc1"),
+// Size: basetypes.NewInt64Value(100),
+// },
+// },
+// firstItem: 13,
+// lastItem: 23,
+// want: []postgresflex.ListFlavors{},
+// wantErr: true,
+// },
+// {
+// name: "find a replicas flavor lower case",
+// args: args{
+// ctx: context.Background(),
+// model: &Model{
+// ProjectId: basetypes.NewStringValue("project"),
+// Region: basetypes.NewStringValue("region"),
+// },
+// storage: &storageModel{
+// Class: basetypes.NewStringValue("sc1"),
+// Size: basetypes.NewInt64Value(100),
+// },
+// },
+// firstItem: 0,
+// lastItem: len(responseList) - 1,
+// want: []postgresflex.ListFlavors{
+// testFlavorToResponseFlavor(responseList[16]),
+// },
+// wantErr: false,
+// },
+// {
+// name: "find a replicas flavor CamelCase",
+// args: args{
+// ctx: context.Background(),
+// model: &Model{
+// ProjectId: basetypes.NewStringValue("project"),
+// Region: basetypes.NewStringValue("region"),
+// },
+// storage: &storageModel{
+// Class: basetypes.NewStringValue("sc1"),
+// Size: basetypes.NewInt64Value(100),
+// },
+// },
+// firstItem: 0,
+// lastItem: len(responseList) - 1,
+// want: []postgresflex.ListFlavors{
+// testFlavorToResponseFlavor(responseList[16]),
+// },
+// wantErr: false,
+// },
+// {
+// name: "find a replicas flavor by replicas option",
+// args: args{
+// ctx: context.Background(),
+// model: &Model{
+// ProjectId: basetypes.NewStringValue("project"),
+// Region: basetypes.NewStringValue("region"),
+// Replicas: basetypes.NewInt64Value(3),
+// },
+// flavor: &flavorModel{
+// CPU: basetypes.NewInt64Value(1),
+// RAM: basetypes.NewInt64Value(1),
+// },
+// storage: &storageModel{
+// Class: basetypes.NewStringValue("sc1"),
+// Size: basetypes.NewInt64Value(100),
+// },
+// },
+// firstItem: 0,
+// lastItem: len(responseList) - 1,
+// want: []postgresflex.ListFlavors{
+// testFlavorToResponseFlavor(responseList[16]),
+// },
+// wantErr: false,
+// },
+// {
+// name: "fail finding a replica flavor",
+// args: args{
+// ctx: context.Background(),
+// model: &Model{
+// ProjectId: basetypes.NewStringValue("project"),
+// Region: basetypes.NewStringValue("region"),
+// Replicas: basetypes.NewInt64Value(3),
+// },
+// flavor: &flavorModel{
+// CPU: basetypes.NewInt64Value(1),
+// RAM: basetypes.NewInt64Value(1),
+// },
+// storage: &storageModel{
+// Class: basetypes.NewStringValue("sc1"),
+// Size: basetypes.NewInt64Value(100),
+// },
+// },
+// firstItem: 0,
+// lastItem: 10,
+// want: []postgresflex.ListFlavors{},
+// wantErr: true,
+// },
+// {
+// name: "no flavor found error",
+// args: args{
+// ctx: context.Background(),
+// model: &Model{
+// ProjectId: basetypes.NewStringValue("project"),
+// Region: basetypes.NewStringValue("region"),
+// },
+// flavor: &flavorModel{
+// CPU: basetypes.NewInt64Value(10),
+// RAM: basetypes.NewInt64Value(1000),
+// NodeType: basetypes.NewStringValue("Single"),
+// },
+// storage: &storageModel{
+// Class: basetypes.NewStringValue("sc1"),
+// Size: basetypes.NewInt64Value(100),
+// },
+// },
+// firstItem: 0,
+// lastItem: 3,
+// want: []postgresflex.ListFlavors{},
+// wantErr: true,
+// },
+// }
+// for _, tt := range tests {
+// t.Run(tt.name, func(t *testing.T) {
+// first := tt.firstItem
+// if first > len(responseList)-1 {
+// first = len(responseList) - 1
+// }
+// last := tt.lastItem
+// if last > len(responseList)-1 {
+// last = len(responseList) - 1
+// }
+// mockClient := postgresFlexClientMocked{
+// returnError: tt.wantErr,
+// firstItem: first,
+// lastItem: last,
+// }
+// if err := loadFlavorId(tt.args.ctx, mockClient, tt.args.model, tt.args.flavor, tt.args.storage); (err != nil) != tt.wantErr {
+// t.Errorf("loadFlavorId() error = %v, wantErr %v", err, tt.wantErr)
+// }
+// })
+// }
+//}
diff --git a/stackit/internal/services/postgresflexalpha/instance/models.go b/stackit/internal/services/postgresflexalpha/instance/models.go
index 51d74521..9325e511 100644
--- a/stackit/internal/services/postgresflexalpha/instance/models.go
+++ b/stackit/internal/services/postgresflexalpha/instance/models.go
@@ -12,7 +12,7 @@ type Model struct {
ProjectId types.String `tfsdk:"project_id"`
Name types.String `tfsdk:"name"`
BackupSchedule types.String `tfsdk:"backup_schedule"`
- Flavor types.Object `tfsdk:"flavor"`
+ FlavorId types.String `tfsdk:"flavor_id"`
Replicas types.Int64 `tfsdk:"replicas"`
RetentionDays types.Int64 `tfsdk:"retention_days"`
Storage types.Object `tfsdk:"storage"`
@@ -22,9 +22,11 @@ type Model struct {
Network types.Object `tfsdk:"network"`
}
-type IdentityModel struct {
- ID types.String `tfsdk:"id"`
-}
+//type IdentityModel struct {
+// InstanceId types.String `tfsdk:"instance_id"`
+// Region types.String `tfsdk:"region"`
+// ProjectId types.String `tfsdk:"project_id"`
+//}
type encryptionModel struct {
KeyRingId types.String `tfsdk:"keyring_id"`
@@ -54,24 +56,6 @@ var networkTypes = map[string]attr.Type{
"router_address": basetypes.StringType{},
}
-// Struct corresponding to Model.Flavor
-type flavorModel struct {
- Id types.String `tfsdk:"id"`
- Description types.String `tfsdk:"description"`
- CPU types.Int64 `tfsdk:"cpu"`
- RAM types.Int64 `tfsdk:"ram"`
- NodeType types.String `tfsdk:"node_type"`
-}
-
-// Types corresponding to flavorModel
-var flavorTypes = map[string]attr.Type{
- "id": basetypes.StringType{},
- "description": basetypes.StringType{},
- "cpu": basetypes.Int64Type{},
- "ram": basetypes.Int64Type{},
- "node_type": basetypes.StringType{},
-}
-
// Struct corresponding to Model.Storage
type storageModel struct {
Class types.String `tfsdk:"class"`
diff --git a/stackit/internal/services/postgresflexalpha/instance/resource.go b/stackit/internal/services/postgresflexalpha/instance/resource.go
index 2787656a..950147e0 100644
--- a/stackit/internal/services/postgresflexalpha/instance/resource.go
+++ b/stackit/internal/services/postgresflexalpha/instance/resource.go
@@ -6,16 +6,13 @@ import (
"net/http"
"regexp"
"strings"
- "time"
- "github.com/hashicorp/terraform-plugin-framework/resource/identityschema"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier"
postgresflex "github.com/mhenselin/terraform-provider-stackitprivatepreview/pkg/postgresflexalpha"
"github.com/mhenselin/terraform-provider-stackitprivatepreview/pkg/postgresflexalpha/wait"
postgresflexUtils "github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/services/postgresflexalpha/utils"
"github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
- "github.com/hashicorp/terraform-plugin-framework/attr"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault"
"github.com/hashicorp/terraform-plugin-framework/schema/validator"
@@ -42,7 +39,7 @@ var (
_ resource.ResourceWithImportState = &instanceResource{}
_ resource.ResourceWithModifyPlan = &instanceResource{}
_ resource.ResourceWithValidateConfig = &instanceResource{}
- _ resource.ResourceWithIdentity = &instanceResource{}
+ //_ resource.ResourceWithIdentity = &instanceResource{}
)
// NewInstanceResource is a helper function to simplify the provider implementation.
@@ -128,35 +125,30 @@ func (r *instanceResource) Configure(ctx context.Context, req resource.Configure
// Schema defines the schema for the resource.
func (r *instanceResource) Schema(_ context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) {
descriptions := map[string]string{
- "main": "Postgres Flex instance resource schema. Must have a `region` specified in the provider configuration.",
- "id": "Terraform's internal resource ID. It is structured as \"`project_id`,`region`,`instance_id`\".",
- "instance_id": "ID of the PostgresFlex instance.",
- "project_id": "STACKIT project ID to which the instance is associated.",
- "name": "Instance name.",
- "backup_schedule": "The schedule for on what time and how often the database backup will be created. The schedule is written as a cron schedule.",
- "retention_days": "The days of the retention period.",
- "flavor": "The block that defines the flavor data.",
- "flavor_id": "The ID of the flavor.",
- "flavor_description": "The flavor detailed flavor name.",
- "flavor_cpu": "The CPU count of the flavor.",
- "flavor_ram": "The RAM count of the flavor.",
- "flavor_node_type": "The node type of the flavor. (Single or Replicas)",
- "replicas": "The number of replicas.",
- "storage": "The block of the storage configuration.",
- "storage_class": "The storage class used.",
- "storage_size": "The disk size of the storage.",
- "region": "The resource region. If not defined, the provider region is used.",
- "version": "The database version used.",
- "encryption": "The encryption block.",
- "keyring_id": "KeyRing ID of the encryption key.",
- "key_id": "Key ID of the encryption key.",
- "key_version": "Key version of the encryption key.",
- "service_account": "The service account ID of the service account.",
- "network": "The network block configuration.",
- "access_scope": "The access scope. (Either SNA or PUBLIC)",
- "acl": "The Access Control List (ACL) for the PostgresFlex instance.",
- "instance_address": "The returned instance address.",
- "router_address": "The returned router address.",
+ "main": "Postgres Flex instance resource schema. Must have a `region` specified in the provider configuration.",
+ "id": "Terraform's internal resource ID. It is structured as \"`project_id`,`region`,`instance_id`\".",
+ "instance_id": "ID of the PostgresFlex instance.",
+ "project_id": "STACKIT project ID to which the instance is associated.",
+ "name": "Instance name.",
+ "backup_schedule": "The schedule for on what time and how often the database backup will be created. The schedule is written as a cron schedule.",
+ "retention_days": "The days of the retention period.",
+ "flavor_id": "The ID of the flavor.",
+ "replicas": "The number of replicas.",
+ "storage": "The block of the storage configuration.",
+ "storage_class": "The storage class used.",
+ "storage_size": "The disk size of the storage.",
+ "region": "The resource region. If not defined, the provider region is used.",
+ "version": "The database version used.",
+ "encryption": "The encryption block.",
+ "keyring_id": "KeyRing ID of the encryption key.",
+ "key_id": "Key ID of the encryption key.",
+ "key_version": "Key version of the encryption key.",
+ "service_account": "The service account ID of the service account.",
+ "network": "The network block configuration.",
+ "access_scope": "The access scope. (Either SNA or PUBLIC)",
+ "acl": "The Access Control List (ACL) for the PostgresFlex instance.",
+ "instance_address": "The returned instance address.",
+ "router_address": "The returned router address.",
}
resp.Schema = schema.Schema{
@@ -210,45 +202,8 @@ func (r *instanceResource) Schema(_ context.Context, req resource.SchemaRequest,
Description: descriptions["retention_days"],
Required: true,
},
- "flavor": schema.SingleNestedAttribute{
- Required: true,
- Description: descriptions["flavor"],
- Attributes: map[string]schema.Attribute{
- "id": schema.StringAttribute{
- Description: descriptions["flavor_id"],
- Computed: true,
- Optional: true,
- PlanModifiers: []planmodifier.String{
- UseStateForUnknownIfFlavorUnchanged(req),
- stringplanmodifier.RequiresReplace(),
- },
- },
- "description": schema.StringAttribute{
- Computed: true,
- Description: descriptions["flavor_description"],
- PlanModifiers: []planmodifier.String{
- UseStateForUnknownIfFlavorUnchanged(req),
- },
- },
- "cpu": schema.Int64Attribute{
- Description: descriptions["flavor_cpu"],
- Required: true,
- },
- "ram": schema.Int64Attribute{
- Description: descriptions["flavor_ram"],
- Required: true,
- },
- "node_type": schema.StringAttribute{
- Description: descriptions["flavor_node_type"],
- Computed: true,
- Optional: true,
- PlanModifiers: []planmodifier.String{
- // TODO @mhenselin anschauen
- UseStateForUnknownIfFlavorUnchanged(req),
- stringplanmodifier.RequiresReplace(),
- },
- },
- },
+ "flavor_id": schema.StringAttribute{
+ Required: true,
},
"replicas": schema.Int64Attribute{
Required: true,
@@ -390,15 +345,21 @@ func (r *instanceResource) Schema(_ context.Context, req resource.SchemaRequest,
}
}
-func (r *instanceResource) IdentitySchema(_ context.Context, _ resource.IdentitySchemaRequest, resp *resource.IdentitySchemaResponse) {
- resp.IdentitySchema = identityschema.Schema{
- Attributes: map[string]identityschema.Attribute{
- "id": identityschema.StringAttribute{
- RequiredForImport: true, // must be set during import by the practitioner
- },
- },
- }
-}
+//func (r *instanceResource) IdentitySchema(_ context.Context, _ resource.IdentitySchemaRequest, resp *resource.IdentitySchemaResponse) {
+// resp.IdentitySchema = identityschema.Schema{
+// Attributes: map[string]identityschema.Attribute{
+// "project_id": identityschema.StringAttribute{
+// RequiredForImport: true, // must be set during import by the practitioner
+// },
+// "region": identityschema.StringAttribute{
+// RequiredForImport: true, // must be set during import by the practitioner
+// },
+// "instance_id": identityschema.StringAttribute{
+// RequiredForImport: true, // must be set during import by the practitioner
+// },
+// },
+// }
+//}
// Create creates the resource and sets the initial Terraform state.
func (r *instanceResource) Create(
@@ -406,7 +367,6 @@ func (r *instanceResource) Create(
req resource.CreateRequest,
resp *resource.CreateResponse,
) { // nolint:gocritic // function signature required by Terraform
- // Retrieve values from plan
var model Model
diags := req.Plan.Get(ctx, &model)
resp.Diagnostics.Append(diags...)
@@ -430,42 +390,6 @@ func (r *instanceResource) Create(
}
}
- var flavor = &flavorModel{}
- if !model.Flavor.IsNull() && !model.Flavor.IsUnknown() {
- diags = model.Flavor.As(ctx, flavor, basetypes.ObjectAsOptions{})
- resp.Diagnostics.Append(diags...)
- if resp.Diagnostics.HasError() {
- return
- }
- err := loadFlavorId(ctx, r.client, &model, flavor, storage)
- if err != nil {
- core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Loading flavor ID: %v", err))
- return
- }
- }
-
- if flavor.Id.IsNull() || flavor.Id.IsUnknown() {
- err := loadFlavorId(ctx, r.client, &model, flavor, storage)
- if err != nil {
- resp.Diagnostics.AddError(err.Error(), err.Error())
- return
- }
- flavorValues := map[string]attr.Value{
- "id": flavor.Id,
- "description": flavor.Description,
- "cpu": flavor.CPU,
- "ram": flavor.RAM,
- "node_type": flavor.NodeType,
- }
- var flavorObject basetypes.ObjectValue
- flavorObject, diags = types.ObjectValue(flavorTypes, flavorValues)
- resp.Diagnostics.Append(diags...)
- if diags.HasError() {
- return
- }
- model.Flavor = flavorObject
- }
-
var encryption = &encryptionModel{}
if !model.Encryption.IsNull() && !model.Encryption.IsUnknown() {
diags = model.Encryption.As(ctx, encryption, basetypes.ObjectAsOptions{})
@@ -494,7 +418,7 @@ func (r *instanceResource) Create(
}
// Generate API request body from model
- payload, err := toCreatePayload(&model, flavor, storage, encryption, network)
+ payload, err := toCreatePayload(&model, storage, encryption, network)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Creating API payload: %v", err))
return
@@ -516,11 +440,13 @@ func (r *instanceResource) Create(
return
}
- // Set data returned by API in identity
- identity := IdentityModel{
- ID: utils.BuildInternalTerraformId(projectId, region, instanceId),
- }
- resp.Diagnostics.Append(resp.Identity.Set(ctx, identity)...)
+ //// Set data returned by API in identity
+ //identity := IdentityModel{
+ // InstanceId: types.StringValue(instanceId),
+ // Region: types.StringValue(region),
+ // ProjectId: types.StringValue(projectId),
+ //}
+ //resp.Diagnostics.Append(resp.Identity.Set(ctx, identity)...)
waitResp, err := wait.CreateInstanceWaitHandler(ctx, r.client, projectId, region, instanceId).WaitWithContext(ctx)
if err != nil {
@@ -529,7 +455,7 @@ func (r *instanceResource) Create(
}
// Map response body to schema
- err = mapFields(ctx, waitResp, &model, flavor, storage, encryption, network, region)
+ err = mapFields(ctx, r.client, waitResp, &model, storage, encryption, network, region)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Processing API payload: %v", err))
return
@@ -552,12 +478,12 @@ func (r *instanceResource) Read(ctx context.Context, req resource.ReadRequest, r
return
}
- // Read identity data
- var identityData IdentityModel
- resp.Diagnostics.Append(req.Identity.Get(ctx, &identityData)...)
- if resp.Diagnostics.HasError() {
- return
- }
+ //// Read identity data
+ //var identityData IdentityModel
+ //resp.Diagnostics.Append(req.Identity.Get(ctx, &identityData)...)
+ //if resp.Diagnostics.HasError() {
+ // return
+ //}
ctx = core.InitProviderContext(ctx)
@@ -568,35 +494,27 @@ func (r *instanceResource) Read(ctx context.Context, req resource.ReadRequest, r
ctx = tflog.SetField(ctx, "instance_id", instanceId)
ctx = tflog.SetField(ctx, "region", region)
- var flavor = &flavorModel{}
- if !model.Flavor.IsNull() && !model.Flavor.IsUnknown() {
- diags = model.Flavor.As(ctx, flavor, basetypes.ObjectAsOptions{})
- resp.Diagnostics.Append(diags...)
- if resp.Diagnostics.HasError() {
- return
- }
- }
- var storage = &storageModel{}
+ var storage = storageModel{}
if !model.Storage.IsNull() && !model.Storage.IsUnknown() {
- diags = model.Storage.As(ctx, storage, basetypes.ObjectAsOptions{})
+ diags = model.Storage.As(ctx, &storage, basetypes.ObjectAsOptions{})
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
return
}
}
- var network = &networkModel{}
+ var network = networkModel{}
if !model.Network.IsNull() && !model.Network.IsUnknown() {
- diags = model.Network.As(ctx, network, basetypes.ObjectAsOptions{})
+ diags = model.Network.As(ctx, &network, basetypes.ObjectAsOptions{})
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
return
}
}
- var encryption = &encryptionModel{}
+ var encryption = encryptionModel{}
if !model.Encryption.IsNull() && !model.Encryption.IsUnknown() {
- diags = model.Encryption.As(ctx, encryption, basetypes.ObjectAsOptions{})
+ diags = model.Encryption.As(ctx, &encryption, basetypes.ObjectAsOptions{})
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
return
@@ -617,7 +535,7 @@ func (r *instanceResource) Read(ctx context.Context, req resource.ReadRequest, r
ctx = core.LogResponse(ctx)
// Map response body to schema
- err = mapFields(ctx, instanceResp, &model, flavor, storage, encryption, network, region)
+ err = mapFields(ctx, r.client, instanceResp, &model, &storage, &encryption, &network, region)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Processing API payload: %v", err))
return
@@ -629,18 +547,19 @@ func (r *instanceResource) Read(ctx context.Context, req resource.ReadRequest, r
return
}
- identityData.ID = model.Id
- resp.Diagnostics.Append(resp.Identity.Set(ctx, identityData)...)
- if resp.Diagnostics.HasError() {
- return
- }
+ //identityData.InstanceId = model.InstanceId
+ //identityData.Region = model.Region
+ //identityData.ProjectId = model.ProjectId
+ //resp.Diagnostics.Append(resp.Identity.Set(ctx, identityData)...)
+ //if resp.Diagnostics.HasError() {
+ // return
+ //}
tflog.Info(ctx, "Postgres Flex instance read")
}
// Update updates the resource and sets the updated Terraform state on success.
func (r *instanceResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform
- // Retrieve values from plan
var model Model
diags := req.Plan.Get(ctx, &model)
resp.Diagnostics.Append(diags...)
@@ -676,20 +595,6 @@ func (r *instanceResource) Update(ctx context.Context, req resource.UpdateReques
}
}
- var flavor = &flavorModel{}
- if !model.Flavor.IsNull() && !model.Flavor.IsUnknown() {
- diags = model.Flavor.As(ctx, flavor, basetypes.ObjectAsOptions{})
- resp.Diagnostics.Append(diags...)
- if resp.Diagnostics.HasError() {
- return
- }
- err := loadFlavorId(ctx, r.client, &model, flavor, storage)
- if err != nil {
- core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Loading flavor ID: %v", err))
- return
- }
- }
-
var network = &networkModel{}
if !model.Network.IsNull() && !model.Network.IsUnknown() {
diags = model.Network.As(ctx, network, basetypes.ObjectAsOptions{})
@@ -709,7 +614,7 @@ func (r *instanceResource) Update(ctx context.Context, req resource.UpdateReques
}
// Generate API request body from model
- payload, err := toUpdatePayload(&model, flavor, storage, network)
+ payload, err := toUpdatePayload(&model, storage, network)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Creating API payload: %v", err))
return
@@ -730,7 +635,7 @@ func (r *instanceResource) Update(ctx context.Context, req resource.UpdateReques
}
// Map response body to schema
- err = mapFields(ctx, waitResp, &model, flavor, storage, encryption, network, region)
+ err = mapFields(ctx, r.client, waitResp, &model, storage, encryption, network, region)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Processing API payload: %v", err))
return
@@ -745,7 +650,6 @@ func (r *instanceResource) Update(ctx context.Context, req resource.UpdateReques
// Delete deletes the resource and removes the Terraform state on success.
func (r *instanceResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform
- // Retrieve values from state
var model Model
diags := req.State.Get(ctx, &model)
resp.Diagnostics.Append(diags...)
@@ -771,11 +675,22 @@ func (r *instanceResource) Delete(ctx context.Context, req resource.DeleteReques
ctx = core.LogResponse(ctx)
- _, err = wait.DeleteInstanceWaitHandler(ctx, r.client, projectId, region, instanceId).SetTimeout(45 * time.Minute).WaitWithContext(ctx)
+ //_, err = wait.DeleteInstanceWaitHandler(ctx, r.client, projectId, region, instanceId).SetTimeout(45 * time.Minute).WaitWithContext(ctx)
+ //if err != nil {
+ // core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting instance", fmt.Sprintf("Instance deletion waiting: %v", err))
+ // return
+ //}
+
+ _, err = r.client.GetInstanceRequest(ctx, projectId, region, instanceId).Execute()
if err != nil {
- core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting instance", fmt.Sprintf("Instance deletion waiting: %v", err))
- return
+ oapiErr, ok := err.(*oapierror.GenericOpenAPIError) //nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped
+ if ok && oapiErr.StatusCode != http.StatusNotFound {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting instance", err.Error())
+ return
+ }
}
+
+ resp.State.RemoveResource(ctx)
tflog.Info(ctx, "Postgres Flex instance deleted")
}
diff --git a/stackit/internal/services/postgresflexalpha/instance/resource_test.go b/stackit/internal/services/postgresflexalpha/instance/resource_test.go
index 6cc0a8fa..b5ef0849 100644
--- a/stackit/internal/services/postgresflexalpha/instance/resource_test.go
+++ b/stackit/internal/services/postgresflexalpha/instance/resource_test.go
@@ -32,7 +32,6 @@ func TestMapFields(t *testing.T) {
description string
state Model
input *postgresflex.GetInstanceResponse
- flavor *flavorModel
storage *storageModel
encryption *encryptionModel
network *networkModel
@@ -51,9 +50,6 @@ func TestMapFields(t *testing.T) {
FlavorId: utils.Ptr("flavor_id"),
Replicas: postgresflex.GetInstanceResponseGetReplicasAttributeType(utils.Ptr(int32(1))),
},
- &flavorModel{
- NodeType: types.StringValue("Single"),
- },
&storageModel{},
&encryptionModel{},
&networkModel{
@@ -67,16 +63,10 @@ func TestMapFields(t *testing.T) {
InstanceId: types.StringValue("iid"),
ProjectId: types.StringValue("pid"),
Name: types.StringNull(),
+ FlavorId: types.StringValue("flavor_id"),
//ACL: types.ListNull(types.StringType),
BackupSchedule: types.StringNull(),
- Flavor: types.ObjectValueMust(flavorTypes, map[string]attr.Value{
- "id": types.StringNull(),
- "description": types.StringNull(),
- "cpu": types.Int64Null(),
- "ram": types.Int64Null(),
- "node_type": types.StringValue("Single"),
- }),
- Replicas: types.Int64Value(1),
+ Replicas: types.Int64Value(1),
Encryption: types.ObjectValueMust(encryptionTypes, map[string]attr.Value{
"keyring_id": types.StringNull(),
"key_id": types.StringNull(),
@@ -171,7 +161,6 @@ func TestMapFields(t *testing.T) {
ProjectId: types.StringValue("pid"),
},
nil,
- &flavorModel{},
&storageModel{},
&encryptionModel{},
&networkModel{},
@@ -186,7 +175,6 @@ func TestMapFields(t *testing.T) {
ProjectId: types.StringValue("pid"),
},
&postgresflex.GetInstanceResponse{},
- &flavorModel{},
&storageModel{},
&encryptionModel{},
&networkModel{},
@@ -197,7 +185,21 @@ func TestMapFields(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.description, func(t *testing.T) {
- err := mapFields(context.Background(), tt.input, &tt.state, tt.flavor, tt.storage, tt.encryption, tt.network, tt.region)
+ client := postgresFlexClientMocked{
+ returnError: false,
+ firstItem: 0,
+ lastItem: 0,
+ }
+ err := mapFields(
+ context.Background(),
+ client,
+ tt.input,
+ &tt.state,
+ tt.storage,
+ tt.encryption,
+ tt.network,
+ tt.region,
+ )
if !tt.isValid && err == nil {
t.Fatalf("Should have failed")
}
@@ -219,7 +221,6 @@ func TestToCreatePayload(t *testing.T) {
description string
input *Model
inputAcl []string
- inputFlavor *flavorModel
inputStorage *storageModel
inputEncryption *encryptionModel
inputNetwork *networkModel
@@ -232,34 +233,6 @@ func TestToCreatePayload(t *testing.T) {
Replicas: types.Int64Value(1),
},
[]string{},
- &flavorModel{},
- &storageModel{},
- &encryptionModel{},
- &networkModel{
- ACL: types.ListValueMust(types.StringType, []attr.Value{
- types.StringValue("0.0.0.0/0"),
- }),
- },
- &postgresflex.CreateInstanceRequestPayload{
- Acl: &[]string{"0.0.0.0/0"},
- Storage: postgresflex.CreateInstanceRequestPayloadGetStorageAttributeType(&postgresflex.Storage{}),
- Encryption: &postgresflex.InstanceEncryption{},
- Network: &postgresflex.InstanceNetwork{
- Acl: &[]string{"0.0.0.0/0"},
- },
- Replicas: postgresflex.CreateInstanceRequestPayloadGetReplicasAttributeType(utils.Ptr(int32(1))),
- },
- true,
- },
- {
- "use flavor node_type instead of replicas",
- &Model{},
- []string{
- "0.0.0.0/0",
- },
- &flavorModel{
- NodeType: types.StringValue("Single"),
- },
&storageModel{},
&encryptionModel{},
&networkModel{
@@ -282,7 +255,6 @@ func TestToCreatePayload(t *testing.T) {
"nil_model",
nil,
[]string{},
- &flavorModel{},
&storageModel{},
&encryptionModel{},
&networkModel{},
@@ -293,7 +265,6 @@ func TestToCreatePayload(t *testing.T) {
"nil_acl",
&Model{},
nil,
- &flavorModel{},
&storageModel{},
&encryptionModel{},
&networkModel{},
@@ -304,7 +275,6 @@ func TestToCreatePayload(t *testing.T) {
"nil_flavor",
&Model{},
[]string{},
- nil,
&storageModel{},
&encryptionModel{},
&networkModel{},
@@ -315,7 +285,6 @@ func TestToCreatePayload(t *testing.T) {
"nil_storage",
&Model{},
[]string{},
- &flavorModel{},
nil,
&encryptionModel{},
&networkModel{},
@@ -325,7 +294,7 @@ func TestToCreatePayload(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.description, func(t *testing.T) {
- output, err := toCreatePayload(tt.input, tt.inputFlavor, tt.inputStorage, tt.inputEncryption, tt.inputNetwork)
+ output, err := toCreatePayload(tt.input, tt.inputStorage, tt.inputEncryption, tt.inputNetwork)
if !tt.isValid && err == nil {
t.Fatalf("Should have failed")
}
diff --git a/stackit/internal/services/postgresflexalpha/instance/use_state_for_unknown_if_flavor_unchanged_modifier.go b/stackit/internal/services/postgresflexalpha/instance/use_state_for_unknown_if_flavor_unchanged_modifier.go
deleted file mode 100644
index 1e860eec..00000000
--- a/stackit/internal/services/postgresflexalpha/instance/use_state_for_unknown_if_flavor_unchanged_modifier.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package postgresflexalpha
-
-import (
- "context"
-
- "github.com/hashicorp/terraform-plugin-framework/resource"
- "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
- "github.com/hashicorp/terraform-plugin-framework/types/basetypes"
-)
-
-type useStateForUnknownIfFlavorUnchangedModifier struct {
- Req resource.SchemaRequest
-}
-
-// UseStateForUnknownIfFlavorUnchanged returns a plan modifier similar to UseStateForUnknown
-// if the RAM and CPU values are not changed in the plan. Otherwise, the plan modifier does nothing.
-func UseStateForUnknownIfFlavorUnchanged(req resource.SchemaRequest) planmodifier.String {
- return useStateForUnknownIfFlavorUnchangedModifier{
- Req: req,
- }
-}
-
-func (m useStateForUnknownIfFlavorUnchangedModifier) Description(context.Context) string {
- return "UseStateForUnknownIfFlavorUnchanged returns a plan modifier similar to UseStateForUnknown if the RAM and CPU values are not changed in the plan. Otherwise, the plan modifier does nothing."
-}
-
-func (m useStateForUnknownIfFlavorUnchangedModifier) MarkdownDescription(ctx context.Context) string {
- return m.Description(ctx)
-}
-
-func (m useStateForUnknownIfFlavorUnchangedModifier) PlanModifyString(ctx context.Context, req planmodifier.StringRequest, resp *planmodifier.StringResponse) { // nolint:gocritic // function signature required by Terraform
- // Do nothing if there is no state value.
- if req.StateValue.IsNull() {
- return
- }
-
- // Do nothing if there is a known planned value.
- if !req.PlanValue.IsUnknown() {
- return
- }
-
- // Do nothing if there is an unknown configuration value, otherwise interpolation gets messed up.
- if req.ConfigValue.IsUnknown() {
- return
- }
-
- // The above checks are taken from the UseStateForUnknown plan modifier implementation
- // (https://github.com/hashicorp/terraform-plugin-framework/blob/main/resource/schema/stringplanmodifier/use_state_for_unknown.go#L38)
-
- var stateModel Model
- diags := req.State.Get(ctx, &stateModel)
- resp.Diagnostics.Append(diags...)
- if resp.Diagnostics.HasError() {
- return
- }
-
- var stateFlavor = &flavorModel{}
- if !stateModel.Flavor.IsNull() && !stateModel.Flavor.IsUnknown() {
- diags = stateModel.Flavor.As(ctx, stateFlavor, basetypes.ObjectAsOptions{})
- resp.Diagnostics.Append(diags...)
- if resp.Diagnostics.HasError() {
- return
- }
- }
-
- var planModel Model
- diags = req.Plan.Get(ctx, &planModel)
- resp.Diagnostics.Append(diags...)
- if resp.Diagnostics.HasError() {
- return
- }
-
- var planFlavor = &flavorModel{}
- if !planModel.Flavor.IsNull() && !planModel.Flavor.IsUnknown() {
- diags = planModel.Flavor.As(ctx, planFlavor, basetypes.ObjectAsOptions{})
- resp.Diagnostics.Append(diags...)
- if resp.Diagnostics.HasError() {
- return
- }
- }
-
- if planFlavor.CPU == stateFlavor.CPU && planFlavor.RAM == stateFlavor.RAM {
- resp.PlanValue = req.StateValue
- }
-}
diff --git a/stackit/internal/services/postgresflexalpha/postgresflex_acc_test.go b/stackit/internal/services/postgresflexalpha/postgresflex_acc_test.go
index 5177bae1..16a7cb2b 100644
--- a/stackit/internal/services/postgresflexalpha/postgresflex_acc_test.go
+++ b/stackit/internal/services/postgresflexalpha/postgresflex_acc_test.go
@@ -18,7 +18,6 @@ import (
"github.com/stackitcloud/stackit-sdk-go/core/config"
postgresflex "github.com/mhenselin/terraform-provider-stackitprivatepreview/pkg/postgresflexalpha"
- "github.com/mhenselin/terraform-provider-stackitprivatepreview/pkg/postgresflexalpha/wait"
)
// Instance resource data
@@ -364,10 +363,10 @@ func testAccCheckPostgresFlexDestroy(s *terraform.State) error {
if err != nil {
return fmt.Errorf("deleting instance %s during CheckDestroy: %w", *items[i].Id, err)
}
- _, err = wait.DeleteInstanceWaitHandler(ctx, client, testutil.ProjectId, testutil.Region, *items[i].Id).WaitWithContext(ctx)
- if err != nil {
- return fmt.Errorf("deleting instance %s during CheckDestroy: waiting for deletion %w", *items[i].Id, err)
- }
+ //_, err = wait.DeleteInstanceWaitHandler(ctx, client, testutil.ProjectId, testutil.Region, *items[i].Id).WaitWithContext(ctx)
+ //if err != nil {
+ // return fmt.Errorf("deleting instance %s during CheckDestroy: waiting for deletion %w", *items[i].Id, err)
+ //}
}
}
return nil
diff --git a/stackit/internal/services/postgresflexalpha/user/resource.go b/stackit/internal/services/postgresflexalpha/user/resource.go
index ceebc0b2..6d3eece0 100644
--- a/stackit/internal/services/postgresflexalpha/user/resource.go
+++ b/stackit/internal/services/postgresflexalpha/user/resource.go
@@ -41,16 +41,16 @@ var (
)
type Model struct {
- Id types.String `tfsdk:"id"` // needed by TF
- UserId types.Int64 `tfsdk:"user_id"`
- InstanceId types.String `tfsdk:"instance_id"`
- ProjectId types.String `tfsdk:"project_id"`
- Username types.String `tfsdk:"username"`
- Roles types.Set `tfsdk:"roles"`
- Password types.String `tfsdk:"password"`
- Host types.String `tfsdk:"host"`
- Port types.Int64 `tfsdk:"port"`
- Uri types.String `tfsdk:"uri"`
+ Id types.String `tfsdk:"id"` // needed by TF
+ UserId types.Int64 `tfsdk:"user_id"`
+ InstanceId types.String `tfsdk:"instance_id"`
+ ProjectId types.String `tfsdk:"project_id"`
+ Username types.String `tfsdk:"username"`
+ Roles types.Set `tfsdk:"roles"`
+ Password types.String `tfsdk:"password"`
+ //Host types.String `tfsdk:"host"`
+ //Port types.Int64 `tfsdk:"port"`
+ //Uri types.String `tfsdk:"uri"`
Region types.String `tfsdk:"region"`
Status types.String `tfsdk:"status"`
ConnectionString types.String `tfsdk:"connection_string"`
@@ -124,6 +124,7 @@ func (r *userResource) Configure(ctx context.Context, req resource.ConfigureRequ
// Schema defines the schema for the resource.
func (r *userResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
+ // rolesOptions := []string{"login", "createdb", "createrole"}
rolesOptions := []string{"login", "createdb"}
descriptions := map[string]string{
@@ -181,9 +182,9 @@ func (r *userResource) Schema(_ context.Context, _ resource.SchemaRequest, resp
},
},
"username": schema.StringAttribute{
- Required: true,
+ Required: true,
PlanModifiers: []planmodifier.String{
- stringplanmodifier.RequiresReplace(),
+ // stringplanmodifier.RequiresReplace(),
},
},
"roles": schema.SetAttribute{
@@ -192,7 +193,7 @@ func (r *userResource) Schema(_ context.Context, _ resource.SchemaRequest, resp
Required: true,
Validators: []validator.Set{
setvalidator.ValueStringsAre(
- stringvalidator.OneOf("login", "createdb"),
+ stringvalidator.OneOf(rolesOptions...),
),
},
},
@@ -200,16 +201,16 @@ func (r *userResource) Schema(_ context.Context, _ resource.SchemaRequest, resp
Computed: true,
Sensitive: true,
},
- "host": schema.StringAttribute{
- Computed: true,
- },
- "port": schema.Int64Attribute{
- Computed: true,
- },
- "uri": schema.StringAttribute{
- Computed: true,
- Sensitive: true,
- },
+ //"host": schema.StringAttribute{
+ // Computed: true,
+ //},
+ //"port": schema.Int64Attribute{
+ // Computed: true,
+ //},
+ //"uri": schema.StringAttribute{
+ // Computed: true,
+ // Sensitive: true,
+ //},
"region": schema.StringAttribute{
Optional: true,
// must be computed to allow for storing the override value from the provider
@@ -375,7 +376,6 @@ func (r *userResource) Update(
req resource.UpdateRequest,
resp *resource.UpdateResponse,
) { // nolint:gocritic // function signature required by Terraform
- // Retrieve values from plan
var model Model
diags := req.Plan.Get(ctx, &model)
resp.Diagnostics.Append(diags...)
@@ -466,7 +466,6 @@ func (r *userResource) Delete(
req resource.DeleteRequest,
resp *resource.DeleteResponse,
) { // nolint:gocritic // function signature required by Terraform
- // Retrieve values from plan
var model Model
diags := req.State.Get(ctx, &model)
resp.Diagnostics.Append(diags...)
@@ -581,9 +580,10 @@ func mapFieldsCreate(
model.Roles = rolesSet
}
- model.Password = types.StringValue(*user.Password)
+ model.Password = types.StringPointerValue(user.Password)
model.Region = types.StringValue(region)
model.Status = types.StringPointerValue(user.Status)
+ //model.Host = types.StringPointerValue()
model.ConnectionString = types.StringPointerValue(user.ConnectionString)
return nil
@@ -625,8 +625,8 @@ func mapFields(userResp *postgresflex.GetUserResponse, model *Model, region stri
}
model.Roles = rolesSet
}
- model.Host = types.StringPointerValue(user.Host)
- model.Port = types.Int64PointerValue(user.Port)
+ //model.Host = types.StringPointerValue(user.Host)
+ //model.Port = types.Int64PointerValue(user.Port)
model.Region = types.StringValue(region)
model.Status = types.StringPointerValue(user.Status)
model.ConnectionString = types.StringPointerValue(user.ConnectionString)
@@ -667,6 +667,7 @@ func toUpdatePayload(model *Model, roles *[]string) (
}
return &postgresflex.UpdateUserRequestPayload{
+ Name: conversion.StringValueToPointer(model.Username),
Roles: toPayloadRoles(roles),
}, nil
}
diff --git a/stackit/internal/services/sqlserverflexalpha/flavor/datasource.go b/stackit/internal/services/sqlserverflexalpha/flavor/datasource.go
new file mode 100644
index 00000000..46ad0387
--- /dev/null
+++ b/stackit/internal/services/sqlserverflexalpha/flavor/datasource.go
@@ -0,0 +1,252 @@
+package sqlserverFlexAlphaFlavor
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-framework/types/basetypes"
+ "github.com/mhenselin/terraform-provider-stackitprivatepreview/pkg/sqlserverflexalpha"
+ "github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/conversion"
+ "github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/utils"
+
+ sqlserverflex "github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha"
+ sqlserverflexUtils "github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/utils"
+
+ "github.com/hashicorp/terraform-plugin-framework/datasource"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/core"
+)
+
+// Ensure the implementation satisfies the expected interfaces.
+var (
+ _ datasource.DataSource = &flavorDataSource{}
+)
+
+type FlavorModel struct {
+ ProjectId types.String `tfsdk:"project_id"`
+ Region types.String `tfsdk:"region"`
+ StorageClass types.String `tfsdk:"storage_class"`
+ Cpu types.Int64 `tfsdk:"cpu"`
+ Description types.String `tfsdk:"description"`
+ Id types.String `tfsdk:"id"`
+ FlavorId types.String `tfsdk:"flavor_id"`
+ MaxGb types.Int64 `tfsdk:"max_gb"`
+ Memory types.Int64 `tfsdk:"ram"`
+ MinGb types.Int64 `tfsdk:"min_gb"`
+ NodeType types.String `tfsdk:"node_type"`
+ StorageClasses types.List `tfsdk:"storage_classes"`
+}
+
+// NewFlavorDataSource is a helper function to simplify the provider implementation.
+func NewFlavorDataSource() datasource.DataSource {
+ return &flavorDataSource{}
+}
+
+// flavorDataSource is the data source implementation.
+type flavorDataSource struct {
+ client *sqlserverflexalpha.APIClient
+ providerData core.ProviderData
+}
+
+// Metadata returns the data source type name.
+func (r *flavorDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
+ resp.TypeName = req.ProviderTypeName + "_sqlserverflexalpha_flavor"
+}
+
+// Configure adds the provider configured client to the data source.
+func (r *flavorDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
+ var ok bool
+ r.providerData, ok = conversion.ParseProviderData(ctx, req.ProviderData, &resp.Diagnostics)
+ if !ok {
+ return
+ }
+
+ apiClient := sqlserverflexUtils.ConfigureClient(ctx, &r.providerData, &resp.Diagnostics)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ r.client = apiClient
+ tflog.Info(ctx, "SQLServer Flex flavor client configured")
+}
+
+func (r *flavorDataSource) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
+ resp.Schema = schema.Schema{
+ Attributes: map[string]schema.Attribute{
+ "project_id": schema.StringAttribute{
+ Required: true,
+ Description: "The STACKIT project ID.",
+ MarkdownDescription: "The STACKIT project ID.",
+ },
+ "region": schema.StringAttribute{
+ Required: true,
+ Description: "The region the instance flavor is offered in.",
+ MarkdownDescription: "The region the instance flavor is offered in.",
+ },
+ "cpu": schema.Int64Attribute{
+ Required: true,
+ Description: "The cpu count of the instance.",
+ MarkdownDescription: "The cpu count of the instance.",
+ },
+ "ram": schema.Int64Attribute{
+ Required: true,
+ Description: "The memory of the instance in Gibibyte.",
+ MarkdownDescription: "The memory of the instance in Gibibyte.",
+ },
+ "storage_class": schema.StringAttribute{
+ Required: true,
+ Description: "The storage class of the instance.",
+ MarkdownDescription: "The storage class of the instance.",
+ },
+ "description": schema.StringAttribute{
+ Computed: true,
+ Description: "The flavor description.",
+ MarkdownDescription: "The flavor description.",
+ },
+ "id": schema.StringAttribute{
+ Computed: true,
+ Description: "The terraform id of the instance flavor.",
+ MarkdownDescription: "The terraform id of the instance flavor.",
+ },
+ "flavor_id": schema.StringAttribute{
+ Computed: true,
+ Description: "The flavor id of the instance flavor.",
+ MarkdownDescription: "The flavor id of the instance flavor.",
+ },
+ "max_gb": schema.Int64Attribute{
+ Computed: true,
+ Description: "maximum storage which can be ordered for the flavor in Gigabyte.",
+ MarkdownDescription: "maximum storage which can be ordered for the flavor in Gigabyte.",
+ },
+ "min_gb": schema.Int64Attribute{
+ Computed: true,
+ Description: "minimum storage which is required to order in Gigabyte.",
+ MarkdownDescription: "minimum storage which is required to order in Gigabyte.",
+ },
+ "node_type": schema.StringAttribute{
+ Required: true,
+ Description: "defines the nodeType it can be either single or replica",
+ MarkdownDescription: "defines the nodeType it can be either single or replica",
+ },
+ "storage_classes": schema.ListNestedAttribute{
+ Computed: true,
+ NestedObject: schema.NestedAttributeObject{
+ Attributes: map[string]schema.Attribute{
+ "class": schema.StringAttribute{
+ Computed: true,
+ },
+ "max_io_per_sec": schema.Int64Attribute{
+ Computed: true,
+ },
+ "max_through_in_mb": schema.Int64Attribute{
+ Computed: true,
+ },
+ },
+ CustomType: sqlserverflex.StorageClassesType{
+ ObjectType: types.ObjectType{
+ AttrTypes: sqlserverflex.StorageClassesValue{}.AttributeTypes(ctx),
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func (r *flavorDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
+ var model FlavorModel
+ diags := req.Config.Get(ctx, &model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ ctx = core.InitProviderContext(ctx)
+
+ projectId := model.ProjectId.ValueString()
+ region := r.providerData.GetRegionWithOverride(model.Region)
+ ctx = tflog.SetField(ctx, "project_id", projectId)
+ ctx = tflog.SetField(ctx, "region", region)
+
+ flavors, err := getAllFlavors(ctx, r.client, projectId, region)
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading flavors", fmt.Sprintf("getAllFlavors: %v", err))
+ return
+ }
+
+ var foundFlavors []sqlserverflexalpha.ListFlavors
+ for _, flavor := range flavors {
+ if model.Cpu.ValueInt64() != *flavor.Cpu {
+ continue
+ }
+ if model.Memory.ValueInt64() != *flavor.Memory {
+ continue
+ }
+ if model.NodeType.ValueString() != *flavor.NodeType {
+ continue
+ }
+ for _, sc := range *flavor.StorageClasses {
+ if model.StorageClass.ValueString() != *sc.Class {
+ continue
+ }
+ foundFlavors = append(foundFlavors, flavor)
+ }
+ }
+ if len(foundFlavors) == 0 {
+ resp.Diagnostics.AddError("get flavor", "could not find requested flavor")
+ return
+ }
+ if len(foundFlavors) > 1 {
+ resp.Diagnostics.AddError("get flavor", "found too many matching flavors")
+ return
+ }
+
+ f := foundFlavors[0]
+ model.Description = types.StringValue(*f.Description)
+ model.Id = utils.BuildInternalTerraformId(model.ProjectId.ValueString(), region, *f.Id)
+ model.FlavorId = types.StringValue(*f.Id)
+ model.MaxGb = types.Int64Value(*f.MaxGB)
+ model.MinGb = types.Int64Value(*f.MinGB)
+
+ if f.StorageClasses == nil {
+ model.StorageClasses = types.ListNull(sqlserverflex.StorageClassesType{
+ ObjectType: basetypes.ObjectType{
+ AttrTypes: sqlserverflex.StorageClassesValue{}.AttributeTypes(ctx),
+ },
+ })
+ } else {
+ var scList []attr.Value
+ for _, sc := range *f.StorageClasses {
+ scList = append(
+ scList,
+ sqlserverflex.NewStorageClassesValueMust(
+ sqlserverflex.StorageClassesValue{}.AttributeTypes(ctx),
+ map[string]attr.Value{
+ "class": types.StringValue(*sc.Class),
+ "max_io_per_sec": types.Int64Value(*sc.MaxIoPerSec),
+ "max_through_in_mb": types.Int64Value(*sc.MaxThroughInMb),
+ },
+ ),
+ )
+ }
+ storageClassesList := types.ListValueMust(
+ sqlserverflex.StorageClassesType{
+ ObjectType: basetypes.ObjectType{
+ AttrTypes: sqlserverflex.StorageClassesValue{}.AttributeTypes(ctx),
+ },
+ },
+ scList,
+ )
+ model.StorageClasses = storageClassesList
+ }
+
+ // Set refreshed state
+ diags = resp.State.Set(ctx, model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ tflog.Info(ctx, "SQLServer Flex flavors read")
+}
diff --git a/stackit/internal/services/sqlserverflexalpha/flavor/functions.go b/stackit/internal/services/sqlserverflexalpha/flavor/functions.go
new file mode 100644
index 00000000..ca2196b2
--- /dev/null
+++ b/stackit/internal/services/sqlserverflexalpha/flavor/functions.go
@@ -0,0 +1,201 @@
+package sqlserverFlexAlphaFlavor
+
+import (
+ "context"
+ "fmt"
+
+ sqlserverflex "github.com/mhenselin/terraform-provider-stackitprivatepreview/pkg/sqlserverflexalpha"
+)
+
+type flavorsClient interface {
+ GetFlavorsRequestExecute(
+ ctx context.Context,
+ projectId, region string,
+ page, size *int64,
+ sort *sqlserverflex.FlavorSort,
+ ) (*sqlserverflex.GetFlavorsResponse, error)
+}
+
+//func loadFlavorId(ctx context.Context, client flavorsClient, model *Model, flavor *flavorModel, storage *storageModel) error {
+// if model == nil {
+// return fmt.Errorf("nil model")
+// }
+// if flavor == nil {
+// return fmt.Errorf("nil flavor")
+// }
+// cpu := flavor.CPU.ValueInt64()
+// if cpu == 0 {
+// return fmt.Errorf("nil CPU")
+// }
+// ram := flavor.RAM.ValueInt64()
+// if ram == 0 {
+// return fmt.Errorf("nil RAM")
+// }
+//
+// nodeType := flavor.NodeType.ValueString()
+// if nodeType == "" {
+// if model.Replicas.IsNull() || model.Replicas.IsUnknown() {
+// return fmt.Errorf("nil NodeType")
+// }
+// switch model.Replicas.ValueInt64() {
+// case 1:
+// nodeType = "Single"
+// case 3:
+// nodeType = "Replica"
+// default:
+// return fmt.Errorf("unknown Replicas value: %d", model.Replicas.ValueInt64())
+// }
+// }
+//
+// storageClass := conversion.StringValueToPointer(storage.Class)
+// if storageClass == nil {
+// return fmt.Errorf("nil StorageClass")
+// }
+// storageSize := conversion.Int64ValueToPointer(storage.Size)
+// if storageSize == nil {
+// return fmt.Errorf("nil StorageSize")
+// }
+//
+// projectId := model.ProjectId.ValueString()
+// region := model.Region.ValueString()
+//
+// flavorList, err := getAllFlavors(ctx, client, projectId, region)
+// if err != nil {
+// return err
+// }
+//
+// avl := ""
+// foundFlavorCount := 0
+// var foundFlavors []string
+// for _, f := range flavorList {
+// if f.Id == nil || f.Cpu == nil || f.Memory == nil {
+// continue
+// }
+// if !strings.EqualFold(*f.NodeType, nodeType) {
+// continue
+// }
+// if *f.Cpu == cpu && *f.Memory == ram {
+// var useSc *sqlserverflex.FlavorStorageClassesStorageClass
+// for _, sc := range *f.StorageClasses {
+// if *sc.Class != *storageClass {
+// continue
+// }
+// if *storageSize < *f.MinGB || *storageSize > *f.MaxGB {
+// return fmt.Errorf("storage size %d out of bounds (min: %d - max: %d)", *storageSize, *f.MinGB, *f.MaxGB)
+// }
+// useSc = &sc
+// }
+// if useSc == nil {
+// return fmt.Errorf("no storage class found for %s", *storageClass)
+// }
+//
+// flavor.Id = types.StringValue(*f.Id)
+// flavor.Description = types.StringValue(*f.Description)
+// foundFlavors = append(foundFlavors, fmt.Sprintf("%s (%d/%d - %s)", *f.Id, *f.Cpu, *f.Memory, *f.NodeType))
+// foundFlavorCount++
+// }
+// for _, cls := range *f.StorageClasses {
+// avl = fmt.Sprintf("%s\n- %d CPU, %d GB RAM, storage %s (min: %d - max: %d)", avl, *f.Cpu, *f.Memory, *cls.Class, *f.MinGB, *f.MaxGB)
+// }
+// }
+// if foundFlavorCount > 1 {
+// return fmt.Errorf(
+// "number of flavors returned: %d\nmultiple flavors found: %d flavors\n %s",
+// len(flavorList),
+// foundFlavorCount,
+// strings.Join(foundFlavors, "\n "),
+// )
+// }
+// if flavor.Id.ValueString() == "" {
+// return fmt.Errorf("couldn't find flavor, available specs are:%s", avl)
+// }
+//
+// return nil
+//}
+
+func getAllFlavors(ctx context.Context, client flavorsClient, projectId, region string) ([]sqlserverflex.ListFlavors, error) {
+ if projectId == "" || region == "" {
+ return nil, fmt.Errorf("listing sqlserverflex flavors: projectId and region are required")
+ }
+ var flavorList []sqlserverflex.ListFlavors
+
+ page := int64(1)
+ size := int64(10)
+ sort := sqlserverflex.FLAVORSORT_INDEX_ASC
+ counter := 0
+ for {
+ res, err := client.GetFlavorsRequestExecute(ctx, projectId, region, &page, &size, &sort)
+ if err != nil {
+ return nil, fmt.Errorf("listing sqlserverflex flavors: %w", err)
+ }
+ if res.Flavors == nil {
+ return nil, fmt.Errorf("finding flavors for project %s", projectId)
+ }
+ pagination := res.GetPagination()
+ flavors := res.GetFlavors()
+ flavorList = append(flavorList, flavors...)
+
+ if *pagination.TotalRows < int64(len(flavorList)) {
+ return nil, fmt.Errorf("total rows is smaller than current accumulated list - that should not happen")
+ }
+ if *pagination.TotalRows == int64(len(flavorList)) {
+ break
+ }
+ page++
+
+ if page > *pagination.TotalPages {
+ break
+ }
+
+ // implement a breakpoint
+ counter++
+ if counter > 1000 {
+ panic("too many pagination results")
+ }
+ }
+ return flavorList, nil
+}
+
+//func getFlavorModelById(ctx context.Context, client flavorsClient, model *Model, flavor *flavorModel) error {
+// if model == nil {
+// return fmt.Errorf("nil model")
+// }
+// if flavor == nil {
+// return fmt.Errorf("nil flavor")
+// }
+// id := conversion.StringValueToPointer(flavor.Id)
+// if id == nil {
+// return fmt.Errorf("nil flavor ID")
+// }
+//
+// flavor.Id = types.StringValue("")
+//
+// projectId := model.ProjectId.ValueString()
+// region := model.Region.ValueString()
+//
+// flavorList, err := getAllFlavors(ctx, client, projectId, region)
+// if err != nil {
+// return err
+// }
+//
+// avl := ""
+// for _, f := range flavorList {
+// if f.Id == nil || f.Cpu == nil || f.Memory == nil {
+// continue
+// }
+// if *f.Id == *id {
+// flavor.Id = types.StringValue(*f.Id)
+// flavor.Description = types.StringValue(*f.Description)
+// flavor.CPU = types.Int64Value(*f.Cpu)
+// flavor.RAM = types.Int64Value(*f.Memory)
+// flavor.NodeType = types.StringValue(*f.NodeType)
+// break
+// }
+// avl = fmt.Sprintf("%s\n- %d CPU, %d GB RAM", avl, *f.Cpu, *f.Memory)
+// }
+// if flavor.Id.ValueString() == "" {
+// return fmt.Errorf("couldn't find flavor, available specs are: %s", avl)
+// }
+//
+// return nil
+//}
diff --git a/stackit/internal/services/sqlserverflexalpha/flavor/list_datasource.go.bak b/stackit/internal/services/sqlserverflexalpha/flavor/list_datasource.go.bak
new file mode 100644
index 00000000..9c035504
--- /dev/null
+++ b/stackit/internal/services/sqlserverflexalpha/flavor/list_datasource.go.bak
@@ -0,0 +1,79 @@
+package postgresFlexAlphaFlavor
+
+import (
+ "context"
+
+ "github.com/mhenselin/terraform-provider-stackitprivatepreview/pkg/postgresflexalpha"
+ "github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/conversion"
+ "github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/services/postgresflexalpha"
+ postgresflexUtils "github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/services/postgresflexalpha/utils"
+
+ "github.com/hashicorp/terraform-plugin-framework/datasource"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/core"
+)
+
+// Ensure the implementation satisfies the expected interfaces.
+var (
+ _ datasource.DataSource = &flavorListDataSource{}
+)
+
+// NewFlavorListDataSource is a helper function to simplify the provider implementation.
+func NewFlavorListDataSource() datasource.DataSource {
+ return &flavorListDataSource{}
+}
+
+// flavorListDataSource is the data source implementation.
+type flavorListDataSource struct {
+ client *postgresflexalpha.APIClient
+ providerData core.ProviderData
+}
+
+// Metadata returns the data source type name.
+func (r *flavorListDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
+ resp.TypeName = req.ProviderTypeName + "_postgresflexalpha_flavorlist"
+}
+
+// Configure adds the provider configured client to the data source.
+func (r *flavorListDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
+ var ok bool
+ r.providerData, ok = conversion.ParseProviderData(ctx, req.ProviderData, &resp.Diagnostics)
+ if !ok {
+ return
+ }
+
+ apiClient := postgresflexUtils.ConfigureClient(ctx, &r.providerData, &resp.Diagnostics)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ r.client = apiClient
+ tflog.Info(ctx, "Postgres Flex flavors client configured")
+}
+
+func (r *flavorListDataSource) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
+ resp.Schema = postgresflex.FlavorDataSourceSchema(ctx)
+}
+
+func (r *flavorListDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
+ var model postgresflex.FlavorModel
+ diags := req.Config.Get(ctx, &model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ ctx = core.InitProviderContext(ctx)
+
+ projectId := model.ProjectId.ValueString()
+ region := r.providerData.GetRegionWithOverride(model.Region)
+ ctx = tflog.SetField(ctx, "project_id", projectId)
+ ctx = tflog.SetField(ctx, "region", region)
+
+ // Set refreshed state
+ diags = resp.State.Set(ctx, model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ tflog.Info(ctx, "Postgres Flex flavors read")
+}
diff --git a/stackit/internal/services/sqlserverflexalpha/flavor_data_source_gen.go b/stackit/internal/services/sqlserverflexalpha/flavor_data_source_gen.go
new file mode 100644
index 00000000..d9e87331
--- /dev/null
+++ b/stackit/internal/services/sqlserverflexalpha/flavor_data_source_gen.go
@@ -0,0 +1,1940 @@
+// Code generated by terraform-plugin-framework-generator DO NOT EDIT.
+
+package sqlserverflex
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/schema/validator"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-framework/types/basetypes"
+ "github.com/hashicorp/terraform-plugin-go/tftypes"
+
+ "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+)
+
+func FlavorDataSourceSchema(ctx context.Context) schema.Schema {
+ return schema.Schema{
+ Attributes: map[string]schema.Attribute{
+ "cpu": schema.Int64Attribute{
+ Optional: true,
+ Computed: true,
+ //Description: "The cpu count of the instance.",
+ //MarkdownDescription: "The cpu count of the instance.",
+ },
+ "ram": schema.Int64Attribute{
+ Optional: true,
+ Computed: true,
+				//Description: "The memory of the instance in Gibibyte.",
+				//MarkdownDescription: "The memory of the instance in Gibibyte.",
+ },
+ "node_type": schema.StringAttribute{
+ Optional: true,
+ Computed: true,
+				//Description: "defines the nodeType it can be either single or replica",
+				//MarkdownDescription: "defines the nodeType it can be either single or replica",
+ },
+ "storage_class": schema.StringAttribute{
+ Optional: true,
+ Computed: true,
+				//Description: "The storage class of the instance.",
+				//MarkdownDescription: "The storage class of the instance.",
+ },
+ "flavors": schema.ListNestedAttribute{
+ NestedObject: schema.NestedAttributeObject{
+ Attributes: map[string]schema.Attribute{
+ "cpu": schema.Int64Attribute{
+ Optional: true,
+ Computed: true,
+ Description: "The cpu count of the instance.",
+ MarkdownDescription: "The cpu count of the instance.",
+ },
+ "description": schema.StringAttribute{
+ Computed: true,
+ Description: "The flavor description.",
+ MarkdownDescription: "The flavor description.",
+ },
+ "id": schema.StringAttribute{
+ Computed: true,
+ Description: "The id of the instance flavor.",
+ MarkdownDescription: "The id of the instance flavor.",
+ },
+ "max_gb": schema.Int64Attribute{
+ Computed: true,
+ Description: "maximum storage which can be ordered for the flavor in Gigabyte.",
+ MarkdownDescription: "maximum storage which can be ordered for the flavor in Gigabyte.",
+ },
+ "memory": schema.Int64Attribute{
+ Optional: true,
+ Computed: true,
+ Description: "The memory of the instance in Gibibyte.",
+ MarkdownDescription: "The memory of the instance in Gibibyte.",
+ },
+ "min_gb": schema.Int64Attribute{
+ Computed: true,
+ Description: "minimum storage which is required to order in Gigabyte.",
+ MarkdownDescription: "minimum storage which is required to order in Gigabyte.",
+ },
+ "node_type": schema.StringAttribute{
+ Optional: true,
+ Computed: true,
+ Description: "defines the nodeType it can be either single or replica",
+ MarkdownDescription: "defines the nodeType it can be either single or replica",
+ },
+ "storage_classes": schema.ListNestedAttribute{
+ NestedObject: schema.NestedAttributeObject{
+ Attributes: map[string]schema.Attribute{
+ "class": schema.StringAttribute{
+ Computed: true,
+ },
+ "max_io_per_sec": schema.Int64Attribute{
+ Computed: true,
+ },
+ "max_through_in_mb": schema.Int64Attribute{
+ Computed: true,
+ },
+ },
+ CustomType: StorageClassesType{
+ ObjectType: types.ObjectType{
+ AttrTypes: StorageClassesValue{}.AttributeTypes(ctx),
+ },
+ },
+ },
+ Computed: true,
+							Description:         "List of storage classes available for the flavor.",
+							MarkdownDescription: "List of storage classes available for the flavor.",
+ },
+ },
+ CustomType: FlavorsType{
+ ObjectType: types.ObjectType{
+ AttrTypes: FlavorsValue{}.AttributeTypes(ctx),
+ },
+ },
+ },
+ Computed: true,
+ Description: "List of flavors available for the project.",
+ MarkdownDescription: "List of flavors available for the project.",
+ },
+ "page": schema.Int64Attribute{
+ Optional: true,
+ Computed: true,
+ Description: "Number of the page of items list to be returned.",
+ MarkdownDescription: "Number of the page of items list to be returned.",
+ },
+ "pagination": schema.SingleNestedAttribute{
+ Attributes: map[string]schema.Attribute{
+ "page": schema.Int64Attribute{
+ Computed: true,
+ },
+ "size": schema.Int64Attribute{
+ Computed: true,
+ },
+ "sort": schema.StringAttribute{
+ Computed: true,
+ },
+ "total_pages": schema.Int64Attribute{
+ Computed: true,
+ },
+ "total_rows": schema.Int64Attribute{
+ Computed: true,
+ },
+ },
+ CustomType: PaginationType{
+ ObjectType: types.ObjectType{
+ AttrTypes: PaginationValue{}.AttributeTypes(ctx),
+ },
+ },
+ Computed: true,
+ },
+ "project_id": schema.StringAttribute{
+ Required: true,
+ Description: "The STACKIT project ID.",
+ MarkdownDescription: "The STACKIT project ID.",
+ },
+ "region": schema.StringAttribute{
+ Required: true,
+ Description: "The region which should be addressed",
+ MarkdownDescription: "The region which should be addressed",
+ Validators: []validator.String{
+ stringvalidator.OneOf(
+ "eu01",
+ ),
+ },
+ },
+ "size": schema.Int64Attribute{
+ Optional: true,
+ Computed: true,
+ Description: "Number of items to be returned on each page.",
+ MarkdownDescription: "Number of items to be returned on each page.",
+ },
+ "sort": schema.StringAttribute{
+ Optional: true,
+ Computed: true,
+ Description: "Sorting of the flavors to be returned on each page.",
+ MarkdownDescription: "Sorting of the flavors to be returned on each page.",
+ Validators: []validator.String{
+ stringvalidator.OneOf(
+ "index.desc",
+ "index.asc",
+ "cpu.desc",
+ "cpu.asc",
+ "flavor_description.asc",
+ "flavor_description.desc",
+ "id.desc",
+ "id.asc",
+ "size_max.desc",
+ "size_max.asc",
+ "ram.desc",
+ "ram.asc",
+ "size_min.desc",
+ "size_min.asc",
+ "storage_class.asc",
+ "storage_class.desc",
+ "node_type.asc",
+ "node_type.desc",
+ ),
+ },
+ },
+ },
+ }
+}
+
+type FlavorsModel struct {
+ Cpu types.Int64 `tfsdk:"cpu"`
+ Ram types.Int64 `tfsdk:"ram"`
+ NodeType types.String `tfsdk:"node_type"`
+ Flavors types.List `tfsdk:"flavors"`
+ Page types.Int64 `tfsdk:"page"`
+ Pagination PaginationValue `tfsdk:"pagination"`
+ ProjectId types.String `tfsdk:"project_id"`
+ Region types.String `tfsdk:"region"`
+ Size types.Int64 `tfsdk:"size"`
+	Sort       types.String    `tfsdk:"sort"` // NOTE(review): schema defines a top-level "storage_class" attribute but this model has no matching field — tfsdk object↔struct conversion will fail; add StorageClass types.String `tfsdk:"storage_class"` (fix in the generator spec, file is generated)
+}
+
+var _ basetypes.ObjectTypable = FlavorsType{}
+
+type FlavorsType struct {
+ basetypes.ObjectType
+}
+
+func (t FlavorsType) Equal(o attr.Type) bool {
+ other, ok := o.(FlavorsType)
+
+ if !ok {
+ return false
+ }
+
+ return t.ObjectType.Equal(other.ObjectType)
+}
+
+func (t FlavorsType) String() string {
+ return "FlavorsType"
+}
+
+func (t FlavorsType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ attributes := in.Attributes()
+
+ cpuAttribute, ok := attributes["cpu"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `cpu is missing from object`)
+
+ return nil, diags
+ }
+
+ cpuVal, ok := cpuAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`cpu expected to be basetypes.Int64Value, was: %T`, cpuAttribute))
+ }
+
+ descriptionAttribute, ok := attributes["description"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `description is missing from object`)
+
+ return nil, diags
+ }
+
+ descriptionVal, ok := descriptionAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`description expected to be basetypes.StringValue, was: %T`, descriptionAttribute))
+ }
+
+ idAttribute, ok := attributes["id"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `id is missing from object`)
+
+ return nil, diags
+ }
+
+ idVal, ok := idAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`id expected to be basetypes.StringValue, was: %T`, idAttribute))
+ }
+
+ maxGbAttribute, ok := attributes["max_gb"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `max_gb is missing from object`)
+
+ return nil, diags
+ }
+
+ maxGbVal, ok := maxGbAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`max_gb expected to be basetypes.Int64Value, was: %T`, maxGbAttribute))
+ }
+
+ memoryAttribute, ok := attributes["memory"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `memory is missing from object`)
+
+ return nil, diags
+ }
+
+ memoryVal, ok := memoryAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`memory expected to be basetypes.Int64Value, was: %T`, memoryAttribute))
+ }
+
+ minGbAttribute, ok := attributes["min_gb"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `min_gb is missing from object`)
+
+ return nil, diags
+ }
+
+ minGbVal, ok := minGbAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`min_gb expected to be basetypes.Int64Value, was: %T`, minGbAttribute))
+ }
+
+ nodeTypeAttribute, ok := attributes["node_type"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `node_type is missing from object`)
+
+ return nil, diags
+ }
+
+ nodeTypeVal, ok := nodeTypeAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`node_type expected to be basetypes.StringValue, was: %T`, nodeTypeAttribute))
+ }
+
+ storageClassesAttribute, ok := attributes["storage_classes"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `storage_classes is missing from object`)
+
+ return nil, diags
+ }
+
+ storageClassesVal, ok := storageClassesAttribute.(basetypes.ListValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`storage_classes expected to be basetypes.ListValue, was: %T`, storageClassesAttribute))
+ }
+
+ if diags.HasError() {
+ return nil, diags
+ }
+
+ return FlavorsValue{
+ Cpu: cpuVal,
+ Description: descriptionVal,
+ Id: idVal,
+ MaxGb: maxGbVal,
+ Memory: memoryVal,
+ MinGb: minGbVal,
+ NodeType: nodeTypeVal,
+ StorageClasses: storageClassesVal,
+ state: attr.ValueStateKnown,
+ }, diags
+}
+
+func NewFlavorsValueNull() FlavorsValue {
+ return FlavorsValue{
+ state: attr.ValueStateNull,
+ }
+}
+
+func NewFlavorsValueUnknown() FlavorsValue {
+ return FlavorsValue{
+ state: attr.ValueStateUnknown,
+ }
+}
+
+func NewFlavorsValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (FlavorsValue, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521
+ ctx := context.Background()
+
+ for name, attributeType := range attributeTypes {
+ attribute, ok := attributes[name]
+
+ if !ok {
+ diags.AddError(
+ "Missing FlavorsValue Attribute Value",
+ "While creating a FlavorsValue value, a missing attribute value was detected. "+
+ "A FlavorsValue must contain values for all attributes, even if null or unknown. "+
+ "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+ fmt.Sprintf("FlavorsValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()),
+ )
+
+ continue
+ }
+
+ if !attributeType.Equal(attribute.Type(ctx)) {
+ diags.AddError(
+ "Invalid FlavorsValue Attribute Type",
+ "While creating a FlavorsValue value, an invalid attribute value was detected. "+
+ "A FlavorsValue must use a matching attribute type for the value. "+
+ "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+ fmt.Sprintf("FlavorsValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+
+ fmt.Sprintf("FlavorsValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)),
+ )
+ }
+ }
+
+ for name := range attributes {
+ _, ok := attributeTypes[name]
+
+ if !ok {
+ diags.AddError(
+ "Extra FlavorsValue Attribute Value",
+ "While creating a FlavorsValue value, an extra attribute value was detected. "+
+ "A FlavorsValue must not contain values beyond the expected attribute types. "+
+ "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+ fmt.Sprintf("Extra FlavorsValue Attribute Name: %s", name),
+ )
+ }
+ }
+
+ if diags.HasError() {
+ return NewFlavorsValueUnknown(), diags
+ }
+
+ cpuAttribute, ok := attributes["cpu"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `cpu is missing from object`)
+
+ return NewFlavorsValueUnknown(), diags
+ }
+
+ cpuVal, ok := cpuAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`cpu expected to be basetypes.Int64Value, was: %T`, cpuAttribute))
+ }
+
+ descriptionAttribute, ok := attributes["description"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `description is missing from object`)
+
+ return NewFlavorsValueUnknown(), diags
+ }
+
+ descriptionVal, ok := descriptionAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`description expected to be basetypes.StringValue, was: %T`, descriptionAttribute))
+ }
+
+ idAttribute, ok := attributes["id"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `id is missing from object`)
+
+ return NewFlavorsValueUnknown(), diags
+ }
+
+ idVal, ok := idAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`id expected to be basetypes.StringValue, was: %T`, idAttribute))
+ }
+
+ maxGbAttribute, ok := attributes["max_gb"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `max_gb is missing from object`)
+
+ return NewFlavorsValueUnknown(), diags
+ }
+
+ maxGbVal, ok := maxGbAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`max_gb expected to be basetypes.Int64Value, was: %T`, maxGbAttribute))
+ }
+
+ memoryAttribute, ok := attributes["memory"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `memory is missing from object`)
+
+ return NewFlavorsValueUnknown(), diags
+ }
+
+ memoryVal, ok := memoryAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`memory expected to be basetypes.Int64Value, was: %T`, memoryAttribute))
+ }
+
+ minGbAttribute, ok := attributes["min_gb"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `min_gb is missing from object`)
+
+ return NewFlavorsValueUnknown(), diags
+ }
+
+ minGbVal, ok := minGbAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`min_gb expected to be basetypes.Int64Value, was: %T`, minGbAttribute))
+ }
+
+ nodeTypeAttribute, ok := attributes["node_type"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `node_type is missing from object`)
+
+ return NewFlavorsValueUnknown(), diags
+ }
+
+ nodeTypeVal, ok := nodeTypeAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`node_type expected to be basetypes.StringValue, was: %T`, nodeTypeAttribute))
+ }
+
+ storageClassesAttribute, ok := attributes["storage_classes"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `storage_classes is missing from object`)
+
+ return NewFlavorsValueUnknown(), diags
+ }
+
+ storageClassesVal, ok := storageClassesAttribute.(basetypes.ListValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`storage_classes expected to be basetypes.ListValue, was: %T`, storageClassesAttribute))
+ }
+
+ if diags.HasError() {
+ return NewFlavorsValueUnknown(), diags
+ }
+
+ return FlavorsValue{
+ Cpu: cpuVal,
+ Description: descriptionVal,
+ Id: idVal,
+ MaxGb: maxGbVal,
+ Memory: memoryVal,
+ MinGb: minGbVal,
+ NodeType: nodeTypeVal,
+ StorageClasses: storageClassesVal,
+ state: attr.ValueStateKnown,
+ }, diags
+}
+
+func NewFlavorsValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) FlavorsValue {
+ object, diags := NewFlavorsValue(attributeTypes, attributes)
+
+ if diags.HasError() {
+ // This could potentially be added to the diag package.
+ diagsStrings := make([]string, 0, len(diags))
+
+ for _, diagnostic := range diags {
+ diagsStrings = append(diagsStrings, fmt.Sprintf(
+ "%s | %s | %s",
+ diagnostic.Severity(),
+ diagnostic.Summary(),
+ diagnostic.Detail()))
+ }
+
+ panic("NewFlavorsValueMust received error(s): " + strings.Join(diagsStrings, "\n"))
+ }
+
+ return object
+}
+
+func (t FlavorsType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) {
+ if in.Type() == nil {
+ return NewFlavorsValueNull(), nil
+ }
+
+ if !in.Type().Equal(t.TerraformType(ctx)) {
+ return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type())
+ }
+
+ if !in.IsKnown() {
+ return NewFlavorsValueUnknown(), nil
+ }
+
+ if in.IsNull() {
+ return NewFlavorsValueNull(), nil
+ }
+
+ attributes := map[string]attr.Value{}
+
+ val := map[string]tftypes.Value{}
+
+ err := in.As(&val)
+
+ if err != nil {
+ return nil, err
+ }
+
+ for k, v := range val {
+ a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v)
+
+ if err != nil {
+ return nil, err
+ }
+
+ attributes[k] = a
+ }
+
+ return NewFlavorsValueMust(FlavorsValue{}.AttributeTypes(ctx), attributes), nil
+}
+
+func (t FlavorsType) ValueType(ctx context.Context) attr.Value {
+ return FlavorsValue{}
+}
+
+var _ basetypes.ObjectValuable = FlavorsValue{}
+
+type FlavorsValue struct {
+ Cpu basetypes.Int64Value `tfsdk:"cpu"`
+ Description basetypes.StringValue `tfsdk:"description"`
+ Id basetypes.StringValue `tfsdk:"id"`
+ MaxGb basetypes.Int64Value `tfsdk:"max_gb"`
+ Memory basetypes.Int64Value `tfsdk:"memory"`
+ MinGb basetypes.Int64Value `tfsdk:"min_gb"`
+ NodeType basetypes.StringValue `tfsdk:"node_type"`
+ StorageClasses basetypes.ListValue `tfsdk:"storage_classes"`
+ state attr.ValueState
+}
+
+func (v FlavorsValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) {
+ attrTypes := make(map[string]tftypes.Type, 8)
+
+ var val tftypes.Value
+ var err error
+
+ attrTypes["cpu"] = basetypes.Int64Type{}.TerraformType(ctx)
+ attrTypes["description"] = basetypes.StringType{}.TerraformType(ctx)
+ attrTypes["id"] = basetypes.StringType{}.TerraformType(ctx)
+ attrTypes["max_gb"] = basetypes.Int64Type{}.TerraformType(ctx)
+ attrTypes["memory"] = basetypes.Int64Type{}.TerraformType(ctx)
+ attrTypes["min_gb"] = basetypes.Int64Type{}.TerraformType(ctx)
+ attrTypes["node_type"] = basetypes.StringType{}.TerraformType(ctx)
+ attrTypes["storage_classes"] = basetypes.ListType{
+ ElemType: StorageClassesValue{}.Type(ctx),
+ }.TerraformType(ctx)
+
+ objectType := tftypes.Object{AttributeTypes: attrTypes}
+
+ switch v.state {
+ case attr.ValueStateKnown:
+ vals := make(map[string]tftypes.Value, 8)
+
+ val, err = v.Cpu.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["cpu"] = val
+
+ val, err = v.Description.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["description"] = val
+
+ val, err = v.Id.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["id"] = val
+
+ val, err = v.MaxGb.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["max_gb"] = val
+
+ val, err = v.Memory.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["memory"] = val
+
+ val, err = v.MinGb.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["min_gb"] = val
+
+ val, err = v.NodeType.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["node_type"] = val
+
+ val, err = v.StorageClasses.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["storage_classes"] = val
+
+ if err := tftypes.ValidateValue(objectType, vals); err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ return tftypes.NewValue(objectType, vals), nil
+ case attr.ValueStateNull:
+ return tftypes.NewValue(objectType, nil), nil
+ case attr.ValueStateUnknown:
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), nil
+ default:
+ panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state))
+ }
+}
+
+func (v FlavorsValue) IsNull() bool {
+ return v.state == attr.ValueStateNull
+}
+
+func (v FlavorsValue) IsUnknown() bool {
+ return v.state == attr.ValueStateUnknown
+}
+
+func (v FlavorsValue) String() string {
+ return "FlavorsValue"
+}
+
+func (v FlavorsValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ storageClasses := types.ListValueMust(
+ StorageClassesType{
+ basetypes.ObjectType{
+ AttrTypes: StorageClassesValue{}.AttributeTypes(ctx),
+ },
+ },
+ v.StorageClasses.Elements(),
+ )
+
+ if v.StorageClasses.IsNull() {
+ storageClasses = types.ListNull(
+ StorageClassesType{
+ basetypes.ObjectType{
+ AttrTypes: StorageClassesValue{}.AttributeTypes(ctx),
+ },
+ },
+ )
+ }
+
+ if v.StorageClasses.IsUnknown() {
+ storageClasses = types.ListUnknown(
+ StorageClassesType{
+ basetypes.ObjectType{
+ AttrTypes: StorageClassesValue{}.AttributeTypes(ctx),
+ },
+ },
+ )
+ }
+
+ attributeTypes := map[string]attr.Type{
+ "cpu": basetypes.Int64Type{},
+ "description": basetypes.StringType{},
+ "id": basetypes.StringType{},
+ "max_gb": basetypes.Int64Type{},
+ "memory": basetypes.Int64Type{},
+ "min_gb": basetypes.Int64Type{},
+ "node_type": basetypes.StringType{},
+ "storage_classes": basetypes.ListType{
+ ElemType: StorageClassesValue{}.Type(ctx),
+ },
+ }
+
+ if v.IsNull() {
+ return types.ObjectNull(attributeTypes), diags
+ }
+
+ if v.IsUnknown() {
+ return types.ObjectUnknown(attributeTypes), diags
+ }
+
+ objVal, diags := types.ObjectValue(
+ attributeTypes,
+ map[string]attr.Value{
+ "cpu": v.Cpu,
+ "description": v.Description,
+ "id": v.Id,
+ "max_gb": v.MaxGb,
+ "memory": v.Memory,
+ "min_gb": v.MinGb,
+ "node_type": v.NodeType,
+ "storage_classes": storageClasses,
+ })
+
+ return objVal, diags
+}
+
+func (v FlavorsValue) Equal(o attr.Value) bool {
+ other, ok := o.(FlavorsValue)
+
+ if !ok {
+ return false
+ }
+
+ if v.state != other.state {
+ return false
+ }
+
+ if v.state != attr.ValueStateKnown {
+ return true
+ }
+
+ if !v.Cpu.Equal(other.Cpu) {
+ return false
+ }
+
+ if !v.Description.Equal(other.Description) {
+ return false
+ }
+
+ if !v.Id.Equal(other.Id) {
+ return false
+ }
+
+ if !v.MaxGb.Equal(other.MaxGb) {
+ return false
+ }
+
+ if !v.Memory.Equal(other.Memory) {
+ return false
+ }
+
+ if !v.MinGb.Equal(other.MinGb) {
+ return false
+ }
+
+ if !v.NodeType.Equal(other.NodeType) {
+ return false
+ }
+
+ if !v.StorageClasses.Equal(other.StorageClasses) {
+ return false
+ }
+
+ return true
+}
+
+func (v FlavorsValue) Type(ctx context.Context) attr.Type {
+ return FlavorsType{
+ basetypes.ObjectType{
+ AttrTypes: v.AttributeTypes(ctx),
+ },
+ }
+}
+
+func (v FlavorsValue) AttributeTypes(ctx context.Context) map[string]attr.Type {
+ return map[string]attr.Type{
+ "cpu": basetypes.Int64Type{},
+ "description": basetypes.StringType{},
+ "id": basetypes.StringType{},
+ "max_gb": basetypes.Int64Type{},
+ "memory": basetypes.Int64Type{},
+ "min_gb": basetypes.Int64Type{},
+ "node_type": basetypes.StringType{},
+ "storage_classes": basetypes.ListType{
+ ElemType: StorageClassesValue{}.Type(ctx),
+ },
+ }
+}
+
+var _ basetypes.ObjectTypable = StorageClassesType{}
+
+type StorageClassesType struct {
+ basetypes.ObjectType
+}
+
+func (t StorageClassesType) Equal(o attr.Type) bool {
+ other, ok := o.(StorageClassesType)
+
+ if !ok {
+ return false
+ }
+
+ return t.ObjectType.Equal(other.ObjectType)
+}
+
+func (t StorageClassesType) String() string {
+ return "StorageClassesType"
+}
+
+func (t StorageClassesType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ attributes := in.Attributes()
+
+ classAttribute, ok := attributes["class"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `class is missing from object`)
+
+ return nil, diags
+ }
+
+ classVal, ok := classAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`class expected to be basetypes.StringValue, was: %T`, classAttribute))
+ }
+
+ maxIoPerSecAttribute, ok := attributes["max_io_per_sec"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `max_io_per_sec is missing from object`)
+
+ return nil, diags
+ }
+
+ maxIoPerSecVal, ok := maxIoPerSecAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`max_io_per_sec expected to be basetypes.Int64Value, was: %T`, maxIoPerSecAttribute))
+ }
+
+ maxThroughInMbAttribute, ok := attributes["max_through_in_mb"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `max_through_in_mb is missing from object`)
+
+ return nil, diags
+ }
+
+ maxThroughInMbVal, ok := maxThroughInMbAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`max_through_in_mb expected to be basetypes.Int64Value, was: %T`, maxThroughInMbAttribute))
+ }
+
+ if diags.HasError() {
+ return nil, diags
+ }
+
+ return StorageClassesValue{
+ Class: classVal,
+ MaxIoPerSec: maxIoPerSecVal,
+ MaxThroughInMb: maxThroughInMbVal,
+ state: attr.ValueStateKnown,
+ }, diags
+}
+
+func NewStorageClassesValueNull() StorageClassesValue {
+ return StorageClassesValue{
+ state: attr.ValueStateNull,
+ }
+}
+
+func NewStorageClassesValueUnknown() StorageClassesValue {
+ return StorageClassesValue{
+ state: attr.ValueStateUnknown,
+ }
+}
+
+func NewStorageClassesValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (StorageClassesValue, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521
+ ctx := context.Background()
+
+ for name, attributeType := range attributeTypes {
+ attribute, ok := attributes[name]
+
+ if !ok {
+ diags.AddError(
+ "Missing StorageClassesValue Attribute Value",
+ "While creating a StorageClassesValue value, a missing attribute value was detected. "+
+ "A StorageClassesValue must contain values for all attributes, even if null or unknown. "+
+ "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+ fmt.Sprintf("StorageClassesValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()),
+ )
+
+ continue
+ }
+
+ if !attributeType.Equal(attribute.Type(ctx)) {
+ diags.AddError(
+ "Invalid StorageClassesValue Attribute Type",
+ "While creating a StorageClassesValue value, an invalid attribute value was detected. "+
+ "A StorageClassesValue must use a matching attribute type for the value. "+
+ "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+ fmt.Sprintf("StorageClassesValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+
+ fmt.Sprintf("StorageClassesValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)),
+ )
+ }
+ }
+
+ for name := range attributes {
+ _, ok := attributeTypes[name]
+
+ if !ok {
+ diags.AddError(
+ "Extra StorageClassesValue Attribute Value",
+ "While creating a StorageClassesValue value, an extra attribute value was detected. "+
+ "A StorageClassesValue must not contain values beyond the expected attribute types. "+
+ "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+ fmt.Sprintf("Extra StorageClassesValue Attribute Name: %s", name),
+ )
+ }
+ }
+
+ if diags.HasError() {
+ return NewStorageClassesValueUnknown(), diags
+ }
+
+ classAttribute, ok := attributes["class"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `class is missing from object`)
+
+ return NewStorageClassesValueUnknown(), diags
+ }
+
+ classVal, ok := classAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`class expected to be basetypes.StringValue, was: %T`, classAttribute))
+ }
+
+ maxIoPerSecAttribute, ok := attributes["max_io_per_sec"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `max_io_per_sec is missing from object`)
+
+ return NewStorageClassesValueUnknown(), diags
+ }
+
+ maxIoPerSecVal, ok := maxIoPerSecAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`max_io_per_sec expected to be basetypes.Int64Value, was: %T`, maxIoPerSecAttribute))
+ }
+
+ maxThroughInMbAttribute, ok := attributes["max_through_in_mb"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `max_through_in_mb is missing from object`)
+
+ return NewStorageClassesValueUnknown(), diags
+ }
+
+ maxThroughInMbVal, ok := maxThroughInMbAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`max_through_in_mb expected to be basetypes.Int64Value, was: %T`, maxThroughInMbAttribute))
+ }
+
+ if diags.HasError() {
+ return NewStorageClassesValueUnknown(), diags
+ }
+
+ return StorageClassesValue{
+ Class: classVal,
+ MaxIoPerSec: maxIoPerSecVal,
+ MaxThroughInMb: maxThroughInMbVal,
+ state: attr.ValueStateKnown,
+ }, diags
+}
+
+func NewStorageClassesValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) StorageClassesValue {
+ object, diags := NewStorageClassesValue(attributeTypes, attributes)
+
+ if diags.HasError() {
+ // This could potentially be added to the diag package.
+ diagsStrings := make([]string, 0, len(diags))
+
+ for _, diagnostic := range diags {
+ diagsStrings = append(diagsStrings, fmt.Sprintf(
+ "%s | %s | %s",
+ diagnostic.Severity(),
+ diagnostic.Summary(),
+ diagnostic.Detail()))
+ }
+
+ panic("NewStorageClassesValueMust received error(s): " + strings.Join(diagsStrings, "\n"))
+ }
+
+ return object
+}
+
+func (t StorageClassesType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) {
+ if in.Type() == nil {
+ return NewStorageClassesValueNull(), nil
+ }
+
+ if !in.Type().Equal(t.TerraformType(ctx)) {
+ return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type())
+ }
+
+ if !in.IsKnown() {
+ return NewStorageClassesValueUnknown(), nil
+ }
+
+ if in.IsNull() {
+ return NewStorageClassesValueNull(), nil
+ }
+
+ attributes := map[string]attr.Value{}
+
+ val := map[string]tftypes.Value{}
+
+ err := in.As(&val)
+
+ if err != nil {
+ return nil, err
+ }
+
+ for k, v := range val {
+ a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v)
+
+ if err != nil {
+ return nil, err
+ }
+
+ attributes[k] = a
+ }
+
+ return NewStorageClassesValueMust(StorageClassesValue{}.AttributeTypes(ctx), attributes), nil
+}
+
+func (t StorageClassesType) ValueType(ctx context.Context) attr.Value {
+ return StorageClassesValue{}
+}
+
+var _ basetypes.ObjectValuable = StorageClassesValue{}
+
+type StorageClassesValue struct {
+ Class basetypes.StringValue `tfsdk:"class"`
+ MaxIoPerSec basetypes.Int64Value `tfsdk:"max_io_per_sec"`
+ MaxThroughInMb basetypes.Int64Value `tfsdk:"max_through_in_mb"`
+ state attr.ValueState
+}
+
+// ToTerraformValue converts the StorageClassesValue back into a raw
+// wire-protocol tftypes.Value, honoring the known/null/unknown state.
+func (v StorageClassesValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) {
+	attrTypes := make(map[string]tftypes.Type, 3)
+
+	var val tftypes.Value
+	var err error
+
+	attrTypes["class"] = basetypes.StringType{}.TerraformType(ctx)
+	attrTypes["max_io_per_sec"] = basetypes.Int64Type{}.TerraformType(ctx)
+	attrTypes["max_through_in_mb"] = basetypes.Int64Type{}.TerraformType(ctx)
+
+	objectType := tftypes.Object{AttributeTypes: attrTypes}
+
+	switch v.state {
+	case attr.ValueStateKnown:
+		vals := make(map[string]tftypes.Value, 3)
+
+		val, err = v.Class.ToTerraformValue(ctx)
+
+		if err != nil {
+			return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+		}
+
+		vals["class"] = val
+
+		val, err = v.MaxIoPerSec.ToTerraformValue(ctx)
+
+		if err != nil {
+			return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+		}
+
+		vals["max_io_per_sec"] = val
+
+		val, err = v.MaxThroughInMb.ToTerraformValue(ctx)
+
+		if err != nil {
+			return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+		}
+
+		vals["max_through_in_mb"] = val
+
+		if err := tftypes.ValidateValue(objectType, vals); err != nil {
+			return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+		}
+
+		return tftypes.NewValue(objectType, vals), nil
+	case attr.ValueStateNull:
+		return tftypes.NewValue(objectType, nil), nil
+	case attr.ValueStateUnknown:
+		return tftypes.NewValue(objectType, tftypes.UnknownValue), nil
+	default:
+		// Unreachable for values built through the provided constructors.
+		panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state))
+	}
+}
+
+// IsNull reports whether the value is null.
+func (v StorageClassesValue) IsNull() bool {
+	return v.state == attr.ValueStateNull
+}
+
+// IsUnknown reports whether the value is unknown.
+func (v StorageClassesValue) IsUnknown() bool {
+	return v.state == attr.ValueStateUnknown
+}
+
+// String returns a human-readable type label (not the attribute contents).
+func (v StorageClassesValue) String() string {
+	return "StorageClassesValue"
+}
+
+// ToObjectValue converts the value into a generic basetypes.ObjectValue,
+// preserving null/unknown states.
+func (v StorageClassesValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) {
+	var diags diag.Diagnostics
+
+	attributeTypes := map[string]attr.Type{
+		"class": basetypes.StringType{},
+		"max_io_per_sec": basetypes.Int64Type{},
+		"max_through_in_mb": basetypes.Int64Type{},
+	}
+
+	if v.IsNull() {
+		return types.ObjectNull(attributeTypes), diags
+	}
+
+	if v.IsUnknown() {
+		return types.ObjectUnknown(attributeTypes), diags
+	}
+
+	objVal, diags := types.ObjectValue(
+		attributeTypes,
+		map[string]attr.Value{
+			"class": v.Class,
+			"max_io_per_sec": v.MaxIoPerSec,
+			"max_through_in_mb": v.MaxThroughInMb,
+		})
+
+	return objVal, diags
+}
+
+// Equal reports whether o is a StorageClassesValue with the same state and,
+// when both are known, the same attribute values.
+func (v StorageClassesValue) Equal(o attr.Value) bool {
+	other, ok := o.(StorageClassesValue)
+
+	if !ok {
+		return false
+	}
+
+	if v.state != other.state {
+		return false
+	}
+
+	// Two null (or two unknown) values are equal regardless of attributes.
+	if v.state != attr.ValueStateKnown {
+		return true
+	}
+
+	if !v.Class.Equal(other.Class) {
+		return false
+	}
+
+	if !v.MaxIoPerSec.Equal(other.MaxIoPerSec) {
+		return false
+	}
+
+	if !v.MaxThroughInMb.Equal(other.MaxThroughInMb) {
+		return false
+	}
+
+	return true
+}
+
+// Type returns the StorageClassesType describing this value.
+func (v StorageClassesValue) Type(ctx context.Context) attr.Type {
+	return StorageClassesType{
+		basetypes.ObjectType{
+			AttrTypes: v.AttributeTypes(ctx),
+		},
+	}
+}
+
+// AttributeTypes returns the framework type of each object attribute.
+func (v StorageClassesValue) AttributeTypes(ctx context.Context) map[string]attr.Type {
+	return map[string]attr.Type{
+		"class": basetypes.StringType{},
+		"max_io_per_sec": basetypes.Int64Type{},
+		"max_through_in_mb": basetypes.Int64Type{},
+	}
+}
+
+// Compile-time check that PaginationType satisfies basetypes.ObjectTypable.
+var _ basetypes.ObjectTypable = PaginationType{}
+
+// PaginationType is the custom object type for pagination metadata.
+type PaginationType struct {
+	basetypes.ObjectType
+}
+
+// Equal reports whether o is a PaginationType with an equal underlying object type.
+func (t PaginationType) Equal(o attr.Type) bool {
+	other, ok := o.(PaginationType)
+
+	if !ok {
+		return false
+	}
+
+	return t.ObjectType.Equal(other.ObjectType)
+}
+
+// String returns a human-readable type label.
+func (t PaginationType) String() string {
+	return "PaginationType"
+}
+
+// ValueFromObject converts a generic ObjectValue into a PaginationValue,
+// validating that every expected attribute is present and correctly typed.
+func (t PaginationType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) {
+	var diags diag.Diagnostics
+
+	attributes := in.Attributes()
+
+	pageAttribute, ok := attributes["page"]
+
+	if !ok {
+		diags.AddError(
+			"Attribute Missing",
+			`page is missing from object`)
+
+		return nil, diags
+	}
+
+	pageVal, ok := pageAttribute.(basetypes.Int64Value)
+
+	if !ok {
+		diags.AddError(
+			"Attribute Wrong Type",
+			fmt.Sprintf(`page expected to be basetypes.Int64Value, was: %T`, pageAttribute))
+	}
+
+	sizeAttribute, ok := attributes["size"]
+
+	if !ok {
+		diags.AddError(
+			"Attribute Missing",
+			`size is missing from object`)
+
+		return nil, diags
+	}
+
+	sizeVal, ok := sizeAttribute.(basetypes.Int64Value)
+
+	if !ok {
+		diags.AddError(
+			"Attribute Wrong Type",
+			fmt.Sprintf(`size expected to be basetypes.Int64Value, was: %T`, sizeAttribute))
+	}
+
+	sortAttribute, ok := attributes["sort"]
+
+	if !ok {
+		diags.AddError(
+			"Attribute Missing",
+			`sort is missing from object`)
+
+		return nil, diags
+	}
+
+	sortVal, ok := sortAttribute.(basetypes.StringValue)
+
+	if !ok {
+		diags.AddError(
+			"Attribute Wrong Type",
+			fmt.Sprintf(`sort expected to be basetypes.StringValue, was: %T`, sortAttribute))
+	}
+
+	totalPagesAttribute, ok := attributes["total_pages"]
+
+	if !ok {
+		diags.AddError(
+			"Attribute Missing",
+			`total_pages is missing from object`)
+
+		return nil, diags
+	}
+
+	totalPagesVal, ok := totalPagesAttribute.(basetypes.Int64Value)
+
+	if !ok {
+		diags.AddError(
+			"Attribute Wrong Type",
+			fmt.Sprintf(`total_pages expected to be basetypes.Int64Value, was: %T`, totalPagesAttribute))
+	}
+
+	totalRowsAttribute, ok := attributes["total_rows"]
+
+	if !ok {
+		diags.AddError(
+			"Attribute Missing",
+			`total_rows is missing from object`)
+
+		return nil, diags
+	}
+
+	totalRowsVal, ok := totalRowsAttribute.(basetypes.Int64Value)
+
+	if !ok {
+		diags.AddError(
+			"Attribute Wrong Type",
+			fmt.Sprintf(`total_rows expected to be basetypes.Int64Value, was: %T`, totalRowsAttribute))
+	}
+
+	// Type-mismatch diagnostics above do not return early; bail out here
+	// before building a value from partially-invalid attributes.
+	if diags.HasError() {
+		return nil, diags
+	}
+
+	return PaginationValue{
+		Page: pageVal,
+		Size: sizeVal,
+		Sort: sortVal,
+		TotalPages: totalPagesVal,
+		TotalRows: totalRowsVal,
+		state: attr.ValueStateKnown,
+	}, diags
+}
+
+// NewPaginationValueNull returns a null PaginationValue.
+func NewPaginationValueNull() PaginationValue {
+	return PaginationValue{
+		state: attr.ValueStateNull,
+	}
+}
+
+// NewPaginationValueUnknown returns an unknown PaginationValue.
+func NewPaginationValueUnknown() PaginationValue {
+	return PaginationValue{
+		state: attr.ValueStateUnknown,
+	}
+}
+
+// NewPaginationValue builds a known PaginationValue from the given attribute
+// types and values, returning an unknown value plus diagnostics when the
+// attributes are missing, extra, or mistyped.
+func NewPaginationValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (PaginationValue, diag.Diagnostics) {
+	var diags diag.Diagnostics
+
+	// Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521
+	ctx := context.Background()
+
+	// First pass: every declared attribute must be present with a matching type.
+	for name, attributeType := range attributeTypes {
+		attribute, ok := attributes[name]
+
+		if !ok {
+			diags.AddError(
+				"Missing PaginationValue Attribute Value",
+				"While creating a PaginationValue value, a missing attribute value was detected. "+
+					"A PaginationValue must contain values for all attributes, even if null or unknown. "+
+					"This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+					fmt.Sprintf("PaginationValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()),
+			)
+
+			continue
+		}
+
+		if !attributeType.Equal(attribute.Type(ctx)) {
+			diags.AddError(
+				"Invalid PaginationValue Attribute Type",
+				"While creating a PaginationValue value, an invalid attribute value was detected. "+
+					"A PaginationValue must use a matching attribute type for the value. "+
+					"This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+					fmt.Sprintf("PaginationValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+
+					fmt.Sprintf("PaginationValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)),
+			)
+		}
+	}
+
+	// Second pass: reject attribute values that are not declared at all.
+	for name := range attributes {
+		_, ok := attributeTypes[name]
+
+		if !ok {
+			diags.AddError(
+				"Extra PaginationValue Attribute Value",
+				"While creating a PaginationValue value, an extra attribute value was detected. "+
+					"A PaginationValue must not contain values beyond the expected attribute types. "+
+					"This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+					fmt.Sprintf("Extra PaginationValue Attribute Name: %s", name),
+			)
+		}
+	}
+
+	if diags.HasError() {
+		return NewPaginationValueUnknown(), diags
+	}
+
+	pageAttribute, ok := attributes["page"]
+
+	if !ok {
+		diags.AddError(
+			"Attribute Missing",
+			`page is missing from object`)
+
+		return NewPaginationValueUnknown(), diags
+	}
+
+	pageVal, ok := pageAttribute.(basetypes.Int64Value)
+
+	if !ok {
+		diags.AddError(
+			"Attribute Wrong Type",
+			fmt.Sprintf(`page expected to be basetypes.Int64Value, was: %T`, pageAttribute))
+	}
+
+	sizeAttribute, ok := attributes["size"]
+
+	if !ok {
+		diags.AddError(
+			"Attribute Missing",
+			`size is missing from object`)
+
+		return NewPaginationValueUnknown(), diags
+	}
+
+	sizeVal, ok := sizeAttribute.(basetypes.Int64Value)
+
+	if !ok {
+		diags.AddError(
+			"Attribute Wrong Type",
+			fmt.Sprintf(`size expected to be basetypes.Int64Value, was: %T`, sizeAttribute))
+	}
+
+	sortAttribute, ok := attributes["sort"]
+
+	if !ok {
+		diags.AddError(
+			"Attribute Missing",
+			`sort is missing from object`)
+
+		return NewPaginationValueUnknown(), diags
+	}
+
+	sortVal, ok := sortAttribute.(basetypes.StringValue)
+
+	if !ok {
+		diags.AddError(
+			"Attribute Wrong Type",
+			fmt.Sprintf(`sort expected to be basetypes.StringValue, was: %T`, sortAttribute))
+	}
+
+	totalPagesAttribute, ok := attributes["total_pages"]
+
+	if !ok {
+		diags.AddError(
+			"Attribute Missing",
+			`total_pages is missing from object`)
+
+		return NewPaginationValueUnknown(), diags
+	}
+
+	totalPagesVal, ok := totalPagesAttribute.(basetypes.Int64Value)
+
+	if !ok {
+		diags.AddError(
+			"Attribute Wrong Type",
+			fmt.Sprintf(`total_pages expected to be basetypes.Int64Value, was: %T`, totalPagesAttribute))
+	}
+
+	totalRowsAttribute, ok := attributes["total_rows"]
+
+	if !ok {
+		diags.AddError(
+			"Attribute Missing",
+			`total_rows is missing from object`)
+
+		return NewPaginationValueUnknown(), diags
+	}
+
+	totalRowsVal, ok := totalRowsAttribute.(basetypes.Int64Value)
+
+	if !ok {
+		diags.AddError(
+			"Attribute Wrong Type",
+			fmt.Sprintf(`total_rows expected to be basetypes.Int64Value, was: %T`, totalRowsAttribute))
+	}
+
+	if diags.HasError() {
+		return NewPaginationValueUnknown(), diags
+	}
+
+	return PaginationValue{
+		Page: pageVal,
+		Size: sizeVal,
+		Sort: sortVal,
+		TotalPages: totalPagesVal,
+		TotalRows: totalRowsVal,
+		state: attr.ValueStateKnown,
+	}, diags
+}
+
+// NewPaginationValueMust is like NewPaginationValue but panics on any
+// diagnostic error; intended for provider-internal construction only.
+func NewPaginationValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) PaginationValue {
+	object, diags := NewPaginationValue(attributeTypes, attributes)
+
+	if diags.HasError() {
+		// This could potentially be added to the diag package.
+		diagsStrings := make([]string, 0, len(diags))
+
+		for _, diagnostic := range diags {
+			diagsStrings = append(diagsStrings, fmt.Sprintf(
+				"%s | %s | %s",
+				diagnostic.Severity(),
+				diagnostic.Summary(),
+				diagnostic.Detail()))
+		}
+
+		panic("NewPaginationValueMust received error(s): " + strings.Join(diagsStrings, "\n"))
+	}
+
+	return object
+}
+
+// ValueFromTerraform converts a raw wire-protocol tftypes.Value into a
+// PaginationValue, mapping protocol null/unknown states onto the
+// corresponding framework value states.
+func (t PaginationType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) {
+	// A nil type means the value was never set; treat it as null.
+	if in.Type() == nil {
+		return NewPaginationValueNull(), nil
+	}
+
+	if !in.Type().Equal(t.TerraformType(ctx)) {
+		return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type())
+	}
+
+	if !in.IsKnown() {
+		return NewPaginationValueUnknown(), nil
+	}
+
+	if in.IsNull() {
+		return NewPaginationValueNull(), nil
+	}
+
+	attributes := map[string]attr.Value{}
+
+	val := map[string]tftypes.Value{}
+
+	err := in.As(&val)
+
+	if err != nil {
+		return nil, err
+	}
+
+	// Convert each raw attribute value using its declared framework type.
+	for k, v := range val {
+		a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v)
+
+		if err != nil {
+			return nil, err
+		}
+
+		attributes[k] = a
+	}
+
+	return NewPaginationValueMust(PaginationValue{}.AttributeTypes(ctx), attributes), nil
+}
+
+// ValueType returns the attr.Value implementation associated with this type.
+func (t PaginationType) ValueType(ctx context.Context) attr.Value {
+	return PaginationValue{}
+}
+
+// Compile-time check that PaginationValue satisfies basetypes.ObjectValuable.
+var _ basetypes.ObjectValuable = PaginationValue{}
+
+// PaginationValue is the typed object value carrying pagination metadata
+// (page number, page size, sort order, and totals).
+type PaginationValue struct {
+	Page basetypes.Int64Value `tfsdk:"page"`
+	Size basetypes.Int64Value `tfsdk:"size"`
+	Sort basetypes.StringValue `tfsdk:"sort"`
+	TotalPages basetypes.Int64Value `tfsdk:"total_pages"`
+	TotalRows basetypes.Int64Value `tfsdk:"total_rows"`
+	// state tracks whether the value as a whole is known, null, or unknown.
+	state attr.ValueState
+}
+
+// ToTerraformValue converts the PaginationValue back into a raw
+// wire-protocol tftypes.Value, honoring the known/null/unknown state.
+func (v PaginationValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) {
+	attrTypes := make(map[string]tftypes.Type, 5)
+
+	var val tftypes.Value
+	var err error
+
+	attrTypes["page"] = basetypes.Int64Type{}.TerraformType(ctx)
+	attrTypes["size"] = basetypes.Int64Type{}.TerraformType(ctx)
+	attrTypes["sort"] = basetypes.StringType{}.TerraformType(ctx)
+	attrTypes["total_pages"] = basetypes.Int64Type{}.TerraformType(ctx)
+	attrTypes["total_rows"] = basetypes.Int64Type{}.TerraformType(ctx)
+
+	objectType := tftypes.Object{AttributeTypes: attrTypes}
+
+	switch v.state {
+	case attr.ValueStateKnown:
+		vals := make(map[string]tftypes.Value, 5)
+
+		val, err = v.Page.ToTerraformValue(ctx)
+
+		if err != nil {
+			return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+		}
+
+		vals["page"] = val
+
+		val, err = v.Size.ToTerraformValue(ctx)
+
+		if err != nil {
+			return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+		}
+
+		vals["size"] = val
+
+		val, err = v.Sort.ToTerraformValue(ctx)
+
+		if err != nil {
+			return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+		}
+
+		vals["sort"] = val
+
+		val, err = v.TotalPages.ToTerraformValue(ctx)
+
+		if err != nil {
+			return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+		}
+
+		vals["total_pages"] = val
+
+		val, err = v.TotalRows.ToTerraformValue(ctx)
+
+		if err != nil {
+			return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+		}
+
+		vals["total_rows"] = val
+
+		if err := tftypes.ValidateValue(objectType, vals); err != nil {
+			return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+		}
+
+		return tftypes.NewValue(objectType, vals), nil
+	case attr.ValueStateNull:
+		return tftypes.NewValue(objectType, nil), nil
+	case attr.ValueStateUnknown:
+		return tftypes.NewValue(objectType, tftypes.UnknownValue), nil
+	default:
+		// Unreachable for values built through the provided constructors.
+		panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state))
+	}
+}
+
+// IsNull reports whether the value is null.
+func (v PaginationValue) IsNull() bool {
+	return v.state == attr.ValueStateNull
+}
+
+// IsUnknown reports whether the value is unknown.
+func (v PaginationValue) IsUnknown() bool {
+	return v.state == attr.ValueStateUnknown
+}
+
+// String returns a human-readable type label (not the attribute contents).
+func (v PaginationValue) String() string {
+	return "PaginationValue"
+}
+
+// ToObjectValue converts the value into a generic basetypes.ObjectValue,
+// preserving null/unknown states.
+func (v PaginationValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) {
+	var diags diag.Diagnostics
+
+	attributeTypes := map[string]attr.Type{
+		"page": basetypes.Int64Type{},
+		"size": basetypes.Int64Type{},
+		"sort": basetypes.StringType{},
+		"total_pages": basetypes.Int64Type{},
+		"total_rows": basetypes.Int64Type{},
+	}
+
+	if v.IsNull() {
+		return types.ObjectNull(attributeTypes), diags
+	}
+
+	if v.IsUnknown() {
+		return types.ObjectUnknown(attributeTypes), diags
+	}
+
+	objVal, diags := types.ObjectValue(
+		attributeTypes,
+		map[string]attr.Value{
+			"page": v.Page,
+			"size": v.Size,
+			"sort": v.Sort,
+			"total_pages": v.TotalPages,
+			"total_rows": v.TotalRows,
+		})
+
+	return objVal, diags
+}
+
+// Equal reports whether o is a PaginationValue with the same state and,
+// when both are known, the same attribute values.
+func (v PaginationValue) Equal(o attr.Value) bool {
+	other, ok := o.(PaginationValue)
+
+	if !ok {
+		return false
+	}
+
+	if v.state != other.state {
+		return false
+	}
+
+	// Two null (or two unknown) values are equal regardless of attributes.
+	if v.state != attr.ValueStateKnown {
+		return true
+	}
+
+	if !v.Page.Equal(other.Page) {
+		return false
+	}
+
+	if !v.Size.Equal(other.Size) {
+		return false
+	}
+
+	if !v.Sort.Equal(other.Sort) {
+		return false
+	}
+
+	if !v.TotalPages.Equal(other.TotalPages) {
+		return false
+	}
+
+	if !v.TotalRows.Equal(other.TotalRows) {
+		return false
+	}
+
+	return true
+}
+
+// Type returns the PaginationType describing this value.
+func (v PaginationValue) Type(ctx context.Context) attr.Type {
+	return PaginationType{
+		basetypes.ObjectType{
+			AttrTypes: v.AttributeTypes(ctx),
+		},
+	}
+}
+
+// AttributeTypes returns the framework type of each object attribute.
+func (v PaginationValue) AttributeTypes(ctx context.Context) map[string]attr.Type {
+	return map[string]attr.Type{
+		"page": basetypes.Int64Type{},
+		"size": basetypes.Int64Type{},
+		"sort": basetypes.StringType{},
+		"total_pages": basetypes.Int64Type{},
+		"total_rows": basetypes.Int64Type{},
+	}
+}
diff --git a/stackit/internal/services/sqlserverflexalpha/instance/datasource.go b/stackit/internal/services/sqlserverflexalpha/instance/datasource.go
index 67ddf8a6..a916cae2 100644
--- a/stackit/internal/services/sqlserverflexalpha/instance/datasource.go
+++ b/stackit/internal/services/sqlserverflexalpha/instance/datasource.go
@@ -253,22 +253,6 @@ func (r *instanceDataSource) Read(ctx context.Context, req datasource.ReadReques
ctx = core.LogResponse(ctx)
- var flavor = &flavorModel{}
- if model.Flavor.IsNull() || model.Flavor.IsUnknown() {
- flavor.Id = types.StringValue(*instanceResp.FlavorId)
- if flavor.Id.IsNull() || flavor.Id.IsUnknown() || flavor.Id.String() == "" {
- panic("WTF FlavorId can not be null or empty string")
- }
- err = getFlavorModelById(ctx, r.client, &model, flavor)
- if err != nil {
- resp.Diagnostics.AddError(err.Error(), err.Error())
- return
- }
- if flavor.CPU.IsNull() || flavor.CPU.IsUnknown() || flavor.CPU.String() == "" {
- panic("WTF FlavorId can not be null or empty string")
- }
- }
-
var storage = &storageModel{}
if !model.Storage.IsNull() && !model.Storage.IsUnknown() {
diags = model.Storage.As(ctx, storage, basetypes.ObjectAsOptions{})
@@ -296,7 +280,7 @@ func (r *instanceDataSource) Read(ctx context.Context, req datasource.ReadReques
}
}
- err = mapFields(ctx, instanceResp, &model, flavor, storage, encryption, network, region)
+ err = mapFields(ctx, instanceResp, &model, storage, encryption, network, region)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Processing API payload: %v", err))
return
diff --git a/stackit/internal/services/sqlserverflexalpha/instance/functions.go b/stackit/internal/services/sqlserverflexalpha/instance/functions.go
index c5195381..11522db9 100644
--- a/stackit/internal/services/sqlserverflexalpha/instance/functions.go
+++ b/stackit/internal/services/sqlserverflexalpha/instance/functions.go
@@ -4,11 +4,9 @@ import (
"context"
"fmt"
"math"
- "strings"
"github.com/hashicorp/terraform-plugin-framework/attr"
"github.com/hashicorp/terraform-plugin-framework/types"
- "github.com/hashicorp/terraform-plugin-framework/types/basetypes"
sqlserverflex "github.com/mhenselin/terraform-provider-stackitprivatepreview/pkg/sqlserverflexalpha"
"github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/conversion"
"github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/core"
@@ -19,7 +17,7 @@ type sqlserverflexClient interface {
GetFlavorsRequestExecute(ctx context.Context, projectId, region string, page, size *int64, sort *sqlserverflex.FlavorSort) (*sqlserverflex.GetFlavorsResponse, error)
}
-func mapFields(ctx context.Context, resp *sqlserverflex.GetInstanceResponse, model *Model, flavor *flavorModel, storage *storageModel, encryption *encryptionModel, network *networkModel, region string) error {
+func mapFields(ctx context.Context, resp *sqlserverflex.GetInstanceResponse, model *Model, storage *storageModel, encryption *encryptionModel, network *networkModel, region string) error {
if resp == nil {
return fmt.Errorf("response input is nil")
}
@@ -37,26 +35,6 @@ func mapFields(ctx context.Context, resp *sqlserverflex.GetInstanceResponse, mod
return fmt.Errorf("instance id not present")
}
- var flavorValues map[string]attr.Value
- if instance.FlavorId == nil {
- return fmt.Errorf("instance has no flavor id")
- }
- if *instance.FlavorId != flavor.Id.ValueString() {
- return fmt.Errorf("instance has different flavor id %s - %s", *instance.FlavorId, flavor.Id.ValueString())
- }
-
- flavorValues = map[string]attr.Value{
- "id": flavor.Id,
- "description": flavor.Description,
- "cpu": flavor.CPU,
- "ram": flavor.RAM,
- "node_type": flavor.NodeType,
- }
- flavorObject, diags := types.ObjectValue(flavorTypes, flavorValues)
- if diags.HasError() {
- return fmt.Errorf("creating flavor: %w", core.DiagsToError(diags))
- }
-
var storageValues map[string]attr.Value
if instance.Storage == nil {
storageValues = map[string]attr.Value{
@@ -163,7 +141,7 @@ func mapFields(ctx context.Context, resp *sqlserverflex.GetInstanceResponse, mod
model.Id = utils.BuildInternalTerraformId(model.ProjectId.ValueString(), region, instanceId)
model.InstanceId = types.StringValue(instanceId)
model.Name = types.StringPointerValue(instance.Name)
- model.Flavor = flavorObject
+ model.FlavorId = types.StringPointerValue(instance.FlavorId)
model.Replicas = types.Int64Value(int64(*instance.Replicas))
model.Storage = storageObject
model.Version = types.StringValue(string(*instance.Version))
@@ -182,10 +160,6 @@ func toCreatePayload(model *Model, storage *storageModel, encryption *encryption
return nil, fmt.Errorf("nil model")
}
- if model.Flavor.IsNull() || model.Flavor.IsUnknown() {
- return nil, fmt.Errorf("nil flavor")
- }
-
storagePayload := &sqlserverflex.CreateInstanceRequestPayloadGetStorageArgType{}
if storage != nil {
storagePayload.Class = conversion.StringValueToPointer(storage.Class)
@@ -200,16 +174,6 @@ func toCreatePayload(model *Model, storage *storageModel, encryption *encryption
encryptionPayload.ServiceAccount = conversion.StringValueToPointer(encryption.ServiceAccount)
}
- flavorId := ""
- if !model.Flavor.IsNull() && !model.Flavor.IsUnknown() {
- modelValues := model.Flavor.Attributes()
- if _, ok := modelValues["id"]; !ok {
- return nil, fmt.Errorf("flavor has not yet been created")
- }
- // TODO - how to get rid of that trim?
- flavorId = strings.Trim(modelValues["id"].String(), "\"")
- }
-
var aclElements []string
if network != nil && !network.ACL.IsNull() && !network.ACL.IsUnknown() {
aclElements = make([]string, 0, len(network.ACL.Elements()))
@@ -227,31 +191,20 @@ func toCreatePayload(model *Model, storage *storageModel, encryption *encryption
return &sqlserverflex.CreateInstanceRequestPayload{
BackupSchedule: conversion.StringValueToPointer(model.BackupSchedule),
- FlavorId: &flavorId,
+ Encryption: encryptionPayload,
+ FlavorId: conversion.StringValueToPointer(model.FlavorId),
Name: conversion.StringValueToPointer(model.Name),
+ Network: networkPayload,
+ RetentionDays: conversion.Int64ValueToPointer(model.RetentionDays),
Storage: storagePayload,
Version: sqlserverflex.CreateInstanceRequestPayloadGetVersionAttributeType(conversion.StringValueToPointer(model.Version)),
- Encryption: encryptionPayload,
- RetentionDays: conversion.Int64ValueToPointer(model.RetentionDays),
- Network: networkPayload,
}, nil
}
-func toUpdatePayload(model *Model, storage *storageModel, network *networkModel) (*sqlserverflex.UpdateInstancePartiallyRequestPayload, error) {
+func toUpdatePartiallyPayload(model *Model, storage *storageModel, network *networkModel) (*sqlserverflex.UpdateInstancePartiallyRequestPayload, error) {
if model == nil {
return nil, fmt.Errorf("nil model")
}
- if model.Flavor.IsNull() || model.Flavor.IsUnknown() {
- return nil, fmt.Errorf("nil flavor")
- }
- var flavorMdl flavorModel
- diag := model.Flavor.As(context.Background(), &flavorMdl, basetypes.ObjectAsOptions{
- UnhandledNullAsEmpty: true,
- UnhandledUnknownAsEmpty: false,
- })
- if diag.HasError() {
- return nil, fmt.Errorf("flavor conversion error: %v", diag.Errors())
- }
storagePayload := &sqlserverflex.UpdateInstanceRequestPayloadGetStorageArgType{}
if storage != nil {
@@ -277,12 +230,11 @@ func toUpdatePayload(model *Model, storage *storageModel, network *networkModel)
return nil, fmt.Errorf("replica count too big: %d", model.Replicas.ValueInt64())
}
replCount := int32(model.Replicas.ValueInt64()) // nolint:gosec // check is performed above
- flavorId := flavorMdl.Id.ValueString()
return &sqlserverflex.UpdateInstancePartiallyRequestPayload{
- Network: networkPayload,
BackupSchedule: conversion.StringValueToPointer(model.BackupSchedule),
- FlavorId: &flavorId,
+ FlavorId: conversion.StringValueToPointer(model.FlavorId),
Name: conversion.StringValueToPointer(model.Name),
+ Network: networkPayload,
Replicas: sqlserverflex.UpdateInstancePartiallyRequestPayloadGetReplicasAttributeType(&replCount),
RetentionDays: conversion.Int64ValueToPointer(model.RetentionDays),
Storage: storagePayload,
@@ -290,152 +242,15 @@ func toUpdatePayload(model *Model, storage *storageModel, network *networkModel)
}, nil
}
-func getAllFlavors(ctx context.Context, client sqlserverflexClient, projectId, region string) ([]sqlserverflex.ListFlavors, error) {
- if projectId == "" || region == "" {
- return nil, fmt.Errorf("listing sqlserverflex flavors: projectId and region are required")
- }
- var flavorList []sqlserverflex.ListFlavors
-
- page := int64(1)
- size := int64(10)
- for {
- sort := sqlserverflex.FLAVORSORT_INDEX_ASC
- res, err := client.GetFlavorsRequestExecute(ctx, projectId, region, &page, &size, &sort)
- if err != nil {
- return nil, fmt.Errorf("listing sqlserverflex flavors: %w", err)
- }
- if res.Flavors == nil {
- return nil, fmt.Errorf("finding flavors for project %s", projectId)
- }
- pagination := res.GetPagination()
- flavorList = append(flavorList, *res.Flavors...)
-
- if *pagination.TotalRows == int64(len(flavorList)) {
- break
- }
- page++
- }
- return flavorList, nil
-}
-
-func loadFlavorId(ctx context.Context, client sqlserverflexClient, model *Model, flavor *flavorModel, storage *storageModel) error {
- if model == nil {
- return fmt.Errorf("nil model")
- }
- if flavor == nil {
- return fmt.Errorf("nil flavor")
- }
- cpu := conversion.Int64ValueToPointer(flavor.CPU)
- if cpu == nil {
- return fmt.Errorf("nil CPU")
- }
- ram := conversion.Int64ValueToPointer(flavor.RAM)
- if ram == nil {
- return fmt.Errorf("nil RAM")
- }
- nodeType := conversion.StringValueToPointer(flavor.NodeType)
- if nodeType == nil {
- return fmt.Errorf("nil NodeType")
- }
- storageClass := conversion.StringValueToPointer(storage.Class)
- if storageClass == nil {
- return fmt.Errorf("nil StorageClass")
- }
- storageSize := conversion.Int64ValueToPointer(storage.Size)
- if storageSize == nil {
- return fmt.Errorf("nil StorageSize")
- }
-
- projectId := model.ProjectId.ValueString()
- region := model.Region.ValueString()
-
- flavorList, err := getAllFlavors(ctx, client, projectId, region)
- if err != nil {
- return err
- }
-
- avl := ""
- foundFlavorCount := 0
- for _, f := range flavorList {
- if f.Id == nil || f.Cpu == nil || f.Memory == nil {
- continue
- }
- if !strings.EqualFold(*f.NodeType, *nodeType) {
- continue
- }
- if *f.Cpu == *cpu && *f.Memory == *ram {
- var useSc *sqlserverflex.FlavorStorageClassesStorageClass
- for _, sc := range *f.StorageClasses {
- if *sc.Class != *storageClass {
- continue
- }
- if *storageSize < *f.MinGB || *storageSize > *f.MaxGB {
- return fmt.Errorf("storage size %d out of bounds (min: %d - max: %d)", *storageSize, *f.MinGB, *f.MaxGB)
- }
- useSc = &sc
- }
- if useSc == nil {
- return fmt.Errorf("no storage class found for %s", *storageClass)
- }
-
- flavor.Id = types.StringValue(*f.Id)
- flavor.Description = types.StringValue(*f.Description)
- foundFlavorCount++
- }
- for _, cls := range *f.StorageClasses {
- avl = fmt.Sprintf("%s\n- %d CPU, %d GB RAM, storage %s (min: %d - max: %d)", avl, *f.Cpu, *f.Memory, *cls.Class, *f.MinGB, *f.MaxGB)
- }
- }
- if foundFlavorCount > 1 {
- return fmt.Errorf("multiple flavors found: %d flavors", foundFlavorCount)
- }
- if flavor.Id.ValueString() == "" {
- return fmt.Errorf("couldn't find flavor, available specs are:%s", avl)
- }
-
- return nil
-}
-
-func getFlavorModelById(ctx context.Context, client sqlserverflexClient, model *Model, flavor *flavorModel) error {
- if model == nil {
- return fmt.Errorf("nil model")
- }
- if flavor == nil {
- return fmt.Errorf("nil flavor")
- }
- id := conversion.StringValueToPointer(flavor.Id)
- if id == nil {
- return fmt.Errorf("nil flavor ID")
- }
-
- flavor.Id = types.StringValue("")
-
- projectId := model.ProjectId.ValueString()
- region := model.Region.ValueString()
-
- flavorList, err := getAllFlavors(ctx, client, projectId, region)
- if err != nil {
- return err
- }
-
- avl := ""
- for _, f := range flavorList {
- if f.Id == nil || f.Cpu == nil || f.Memory == nil {
- continue
- }
- if *f.Id == *id {
- flavor.Id = types.StringValue(*f.Id)
- flavor.Description = types.StringValue(*f.Description)
- flavor.CPU = types.Int64Value(*f.Cpu)
- flavor.RAM = types.Int64Value(*f.Memory)
- flavor.NodeType = types.StringValue(*f.NodeType)
- break
- }
- avl = fmt.Sprintf("%s\n- %d CPU, %d GB RAM", avl, *f.Cpu, *f.Memory)
- }
- if flavor.Id.ValueString() == "" {
- return fmt.Errorf("couldn't find flavor, available specs are: %s", avl)
- }
-
- return nil
+func toUpdatePayload(model *Model, storage *storageModel, network *networkModel) (*sqlserverflex.UpdateInstanceRequestPayload, error) {
+ return &sqlserverflex.UpdateInstanceRequestPayload{
+ BackupSchedule: nil,
+ FlavorId: nil,
+ Name: nil,
+ Network: nil,
+ Replicas: nil,
+ RetentionDays: nil,
+ Storage: nil,
+ Version: nil,
+ }, nil
}
diff --git a/stackit/internal/services/sqlserverflexalpha/instance/resource.go b/stackit/internal/services/sqlserverflexalpha/instance/resource.go
index f7d4694e..3e364ed6 100644
--- a/stackit/internal/services/sqlserverflexalpha/instance/resource.go
+++ b/stackit/internal/services/sqlserverflexalpha/instance/resource.go
@@ -10,7 +10,6 @@ import (
"strings"
"time"
- "github.com/hashicorp/terraform-plugin-framework-validators/int64validator"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/boolplanmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier"
sqlserverflexUtils "github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/utils"
@@ -57,7 +56,7 @@ type Model struct {
ProjectId types.String `tfsdk:"project_id"`
Name types.String `tfsdk:"name"`
BackupSchedule types.String `tfsdk:"backup_schedule"`
- Flavor types.Object `tfsdk:"flavor"`
+ FlavorId types.String `tfsdk:"flavor_id"`
Encryption types.Object `tfsdk:"encryption"`
IsDeletable types.Bool `tfsdk:"is_deletable"`
Storage types.Object `tfsdk:"storage"`
@@ -98,24 +97,6 @@ var networkTypes = map[string]attr.Type{
"router_address": basetypes.StringType{},
}
-// Struct corresponding to Model.FlavorId
-type flavorModel struct {
- Id types.String `tfsdk:"id"`
- Description types.String `tfsdk:"description"`
- CPU types.Int64 `tfsdk:"cpu"`
- RAM types.Int64 `tfsdk:"ram"`
- NodeType types.String `tfsdk:"node_type"`
-}
-
-// Types corresponding to flavorModel
-var flavorTypes = map[string]attr.Type{
- "id": basetypes.StringType{},
- "description": basetypes.StringType{},
- "cpu": basetypes.Int64Type{},
- "ram": basetypes.Int64Type{},
- "node_type": basetypes.StringType{},
-}
-
// Struct corresponding to Model.Storage
type storageModel struct {
Class types.String `tfsdk:"class"`
@@ -198,11 +179,13 @@ func (r *instanceResource) Schema(_ context.Context, _ resource.SchemaRequest, r
"instance_id": "ID of the SQLServer Flex instance.",
"project_id": "STACKIT project ID to which the instance is associated.",
"name": "Instance name.",
- "access_scope": "The access scope of the instance. (e.g. SNA)",
+ "access_scope": "The access scope of the instance. (SNA | PUBLIC)",
+ "flavor_id": "The flavor ID of the instance.",
"acl": "The Access Control List (ACL) for the SQLServer Flex instance.",
"backup_schedule": `The backup schedule. Should follow the cron scheduling system format (e.g. "0 0 * * *")`,
"region": "The resource region. If not defined, the provider region is used.",
"encryption": "The encryption block.",
+ "replicas": "The number of replicas of the SQLServer Flex instance.",
"network": "The network block.",
"keyring_id": "STACKIT KMS - KeyRing ID of the encryption key to use.",
"key_id": "STACKIT KMS - Key ID of the encryption key to use.",
@@ -271,78 +254,12 @@ func (r *instanceResource) Schema(_ context.Context, _ resource.SchemaRequest, r
boolplanmodifier.UseStateForUnknown(),
},
},
- // TODO - make it either flavor_id or ram, cpu and node_type
- "flavor": schema.SingleNestedAttribute{
- PlanModifiers: []planmodifier.Object{
- objectplanmodifier.RequiresReplace(),
- objectplanmodifier.UseStateForUnknown(),
+ "flavor_id": schema.StringAttribute{
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.RequiresReplace(),
+ stringplanmodifier.UseStateForUnknown(),
},
Required: true,
- Attributes: map[string]schema.Attribute{
- "id": schema.StringAttribute{
- Optional: true,
- Computed: true,
- PlanModifiers: []planmodifier.String{
- stringplanmodifier.RequiresReplace(),
- stringplanmodifier.UseStateForUnknown(),
- },
- },
- "description": schema.StringAttribute{
- Computed: true,
- PlanModifiers: []planmodifier.String{
- stringplanmodifier.UseStateForUnknown(),
- },
- },
- "node_type": schema.StringAttribute{
- Required: true,
- PlanModifiers: []planmodifier.String{
- stringplanmodifier.RequiresReplace(),
- stringplanmodifier.UseStateForUnknown(),
- },
- Validators: []validator.String{
- stringvalidator.ConflictsWith([]path.Expression{
- path.MatchRelative().AtParent().AtName("id"),
- }...),
- stringvalidator.OneOfCaseInsensitive(validNodeTypes...),
- stringvalidator.AlsoRequires([]path.Expression{
- path.MatchRelative().AtParent().AtName("cpu"),
- path.MatchRelative().AtParent().AtName("ram"),
- }...),
- },
- },
- "cpu": schema.Int64Attribute{
- Required: true,
- PlanModifiers: []planmodifier.Int64{
- int64planmodifier.RequiresReplace(),
- int64planmodifier.UseStateForUnknown(),
- },
- Validators: []validator.Int64{
- int64validator.ConflictsWith([]path.Expression{
- path.MatchRelative().AtParent().AtName("id"),
- }...),
- int64validator.AlsoRequires([]path.Expression{
- path.MatchRelative().AtParent().AtName("node_type"),
- path.MatchRelative().AtParent().AtName("ram"),
- }...),
- },
- },
- "ram": schema.Int64Attribute{
- Required: true,
- PlanModifiers: []planmodifier.Int64{
- int64planmodifier.RequiresReplace(),
- int64planmodifier.UseStateForUnknown(),
- },
- Validators: []validator.Int64{
- int64validator.ConflictsWith([]path.Expression{
- path.MatchRelative().AtParent().AtName("id"),
- }...),
- int64validator.AlsoRequires([]path.Expression{
- path.MatchRelative().AtParent().AtName("node_type"),
- path.MatchRelative().AtParent().AtName("cpu"),
- }...),
- },
- },
- },
},
"replicas": schema.Int64Attribute{
Computed: true,
@@ -506,7 +423,6 @@ func (r *instanceResource) Schema(_ context.Context, _ resource.SchemaRequest, r
// Create creates the resource and sets the initial Terraform state.
func (r *instanceResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform
- // Retrieve values from plan
var model Model
diags := req.Plan.Get(ctx, &model)
resp.Diagnostics.Append(diags...)
@@ -548,37 +464,6 @@ func (r *instanceResource) Create(ctx context.Context, req resource.CreateReques
}
}
- flavor := &flavorModel{}
- if !model.Flavor.IsNull() && !model.Flavor.IsUnknown() {
- diags = model.Flavor.As(ctx, flavor, basetypes.ObjectAsOptions{})
- resp.Diagnostics.Append(diags...)
- if resp.Diagnostics.HasError() {
- return
- }
- }
-
- if flavor.Id.IsNull() || flavor.Id.IsUnknown() {
- err := loadFlavorId(ctx, r.client, &model, flavor, storage)
- if err != nil {
- resp.Diagnostics.AddError(err.Error(), err.Error())
- return
- }
- flavorValues := map[string]attr.Value{
- "id": flavor.Id,
- "description": flavor.Description,
- "cpu": flavor.CPU,
- "ram": flavor.RAM,
- "node_type": flavor.NodeType,
- }
- var flavorObject basetypes.ObjectValue
- flavorObject, diags = types.ObjectValue(flavorTypes, flavorValues)
- resp.Diagnostics.Append(diags...)
- if diags.HasError() {
- return
- }
- model.Flavor = flavorObject
- }
-
// Generate API request body from model
payload, err := toCreatePayload(&model, storage, encryption, network)
if err != nil {
@@ -596,6 +481,7 @@ func (r *instanceResource) Create(ctx context.Context, req resource.CreateReques
instanceId := *createResp.Id
utils.SetAndLogStateFields(ctx, &resp.Diagnostics, &resp.State, map[string]any{
+ "id": utils.BuildInternalTerraformId(projectId, region, instanceId),
"instance_id": instanceId,
})
if resp.Diagnostics.HasError() {
@@ -615,25 +501,8 @@ func (r *instanceResource) Create(ctx context.Context, req resource.CreateReques
return
}
- if *waitResp.FlavorId != flavor.Id.ValueString() {
- core.LogAndAddError(
- ctx,
- &resp.Diagnostics,
- "Error creating instance",
- fmt.Sprintf("Instance creation waiting: returned flavor id differs (expected: %s, current: %s)", flavor.Id.ValueString(), *waitResp.FlavorId),
- )
- return
- }
-
- if flavor.CPU.IsNull() || flavor.CPU.IsUnknown() {
- core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", "Instance creation waiting: flavor cpu is null or unknown")
- }
- if flavor.RAM.IsNull() || flavor.RAM.IsUnknown() {
- core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", "Instance creation waiting: flavor ram is null or unknown")
- }
-
// Map response body to schema
- err = mapFields(ctx, waitResp, &model, flavor, storage, encryption, network, region)
+ err = mapFields(ctx, waitResp, &model, storage, encryption, network, region)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Processing API payload: %v", err))
return
@@ -672,27 +541,6 @@ func (r *instanceResource) Read(ctx context.Context, req resource.ReadRequest, r
ctx = tflog.SetField(ctx, "instance_id", instanceId)
ctx = tflog.SetField(ctx, "region", region)
- var flavor = &flavorModel{}
- if !model.Flavor.IsNull() && !model.Flavor.IsUnknown() {
- diags = model.Flavor.As(ctx, flavor, basetypes.ObjectAsOptions{})
- resp.Diagnostics.Append(diags...)
- if resp.Diagnostics.HasError() {
- return
- }
-
- err := getFlavorModelById(ctx, r.client, &model, flavor)
- if err != nil {
- resp.Diagnostics.AddError(err.Error(), err.Error())
- return
- }
-
- diags = model.Flavor.As(ctx, flavor, basetypes.ObjectAsOptions{})
- resp.Diagnostics.Append(diags...)
- if resp.Diagnostics.HasError() {
- return
- }
- }
-
var storage = &storageModel{}
if !model.Storage.IsNull() && !model.Storage.IsUnknown() {
diags = model.Storage.As(ctx, storage, basetypes.ObjectAsOptions{})
@@ -734,7 +582,7 @@ func (r *instanceResource) Read(ctx context.Context, req resource.ReadRequest, r
ctx = core.LogResponse(ctx)
// Map response body to schema
- err = mapFields(ctx, instanceResp, &model, flavor, storage, encryption, network, region)
+ err = mapFields(ctx, instanceResp, &model, storage, encryption, network, region)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Processing API payload: %v", err))
return
@@ -795,37 +643,6 @@ func (r *instanceResource) Update(ctx context.Context, req resource.UpdateReques
}
}
- flavor := &flavorModel{}
- if !model.Flavor.IsNull() && !model.Flavor.IsUnknown() {
- diags = model.Flavor.As(ctx, flavor, basetypes.ObjectAsOptions{})
- resp.Diagnostics.Append(diags...)
- if resp.Diagnostics.HasError() {
- return
- }
- }
-
- if flavor.Id.IsNull() || flavor.Id.IsUnknown() {
- err := loadFlavorId(ctx, r.client, &model, flavor, storage)
- if err != nil {
- resp.Diagnostics.AddError(err.Error(), err.Error())
- return
- }
- flavorValues := map[string]attr.Value{
- "id": flavor.Id,
- "description": flavor.Description,
- "cpu": flavor.CPU,
- "ram": flavor.RAM,
- "node_type": flavor.NodeType,
- }
- var flavorObject basetypes.ObjectValue
- flavorObject, diags = types.ObjectValue(flavorTypes, flavorValues)
- resp.Diagnostics.Append(diags...)
- if diags.HasError() {
- return
- }
- model.Flavor = flavorObject
- }
-
// Generate API request body from model
payload, err := toUpdatePayload(&model, storage, network)
if err != nil {
@@ -833,7 +650,8 @@ func (r *instanceResource) Update(ctx context.Context, req resource.UpdateReques
return
}
// Update existing instance
- err = r.client.UpdateInstancePartiallyRequest(ctx, projectId, region, instanceId).UpdateInstancePartiallyRequestPayload(*payload).Execute()
+ err = r.client.UpdateInstanceRequest(ctx, projectId, region, instanceId).UpdateInstanceRequestPayload(*payload).Execute()
+ // err = r.client.UpdateInstancePartiallyRequest(ctx, projectId, region, instanceId).UpdateInstancePartiallyRequestPayload(*payload).Execute()
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", err.Error())
return
@@ -848,7 +666,7 @@ func (r *instanceResource) Update(ctx context.Context, req resource.UpdateReques
}
// Map response body to schema
- err = mapFields(ctx, waitResp, &model, flavor, storage, encryption, network, region)
+ err = mapFields(ctx, waitResp, &model, storage, encryption, network, region)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Processing API payload: %v", err))
return
diff --git a/stackit/internal/services/sqlserverflexalpha/instance/resource_test.go b/stackit/internal/services/sqlserverflexalpha/instance/resource_test.go
index 8ee86665..b6e727dd 100644
--- a/stackit/internal/services/sqlserverflexalpha/instance/resource_test.go
+++ b/stackit/internal/services/sqlserverflexalpha/instance/resource_test.go
@@ -34,7 +34,6 @@ func TestMapFields(t *testing.T) {
description string
state Model
input *sqlserverflex.GetInstanceResponse
- flavor *flavorModel
storage *storageModel
encryption *encryptionModel
network *networkModel
@@ -53,13 +52,6 @@ func TestMapFields(t *testing.T) {
Edition: types.StringValue("edition 1"),
Status: types.StringValue("status"),
IsDeletable: types.BoolValue(true),
- Flavor: types.ObjectValueMust(flavorTypes, map[string]attr.Value{
- "id": types.StringValue("flavor_id"),
- "description": types.StringNull(),
- "cpu": types.Int64Null(),
- "ram": types.Int64Null(),
- "node_type": types.StringNull(),
- }),
},
&sqlserverflex.GetInstanceResponse{
FlavorId: utils.Ptr("flavor_id"),
@@ -70,9 +62,6 @@ func TestMapFields(t *testing.T) {
Status: sqlserverflex.GetInstanceResponseGetStatusAttributeType(utils.Ptr("status")),
IsDeletable: utils.Ptr(true),
},
- &flavorModel{
- Id: types.StringValue("flavor_id"),
- },
&storageModel{},
&encryptionModel{},
&networkModel{
@@ -85,14 +74,7 @@ func TestMapFields(t *testing.T) {
ProjectId: types.StringValue("pid"),
Name: types.StringNull(),
BackupSchedule: types.StringNull(),
- Flavor: types.ObjectValueMust(flavorTypes, map[string]attr.Value{
- "id": types.StringValue("flavor_id"),
- "description": types.StringNull(),
- "cpu": types.Int64Null(),
- "ram": types.Int64Null(),
- "node_type": types.StringNull(),
- }),
- Replicas: types.Int64Value(1),
+ Replicas: types.Int64Value(1),
Storage: types.ObjectValueMust(storageTypes, map[string]attr.Value{
"class": types.StringNull(),
"size": types.Int64Null(),
@@ -151,13 +133,6 @@ func TestMapFields(t *testing.T) {
RouterAddress: nil,
},
},
- &flavorModel{
- Id: basetypes.NewStringValue("flavor_id"),
- Description: basetypes.NewStringValue("description"),
- CPU: basetypes.NewInt64Value(12),
- RAM: basetypes.NewInt64Value(34),
- NodeType: basetypes.NewStringValue("node_type"),
- },
&storageModel{},
&encryptionModel{},
&networkModel{
@@ -174,14 +149,7 @@ func TestMapFields(t *testing.T) {
ProjectId: types.StringValue("pid"),
Name: types.StringValue("name"),
BackupSchedule: types.StringValue("schedule"),
- Flavor: types.ObjectValueMust(flavorTypes, map[string]attr.Value{
- "id": types.StringValue("flavor_id"),
- "description": types.StringValue("description"),
- "cpu": types.Int64Value(12),
- "ram": types.Int64Value(34),
- "node_type": types.StringValue("node_type"),
- }),
- Replicas: types.Int64Value(56),
+ Replicas: types.Int64Value(56),
Storage: types.ObjectValueMust(storageTypes, map[string]attr.Value{
"class": types.StringValue("class"),
"size": types.Int64Value(78),
@@ -380,7 +348,7 @@ func TestMapFields(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.description, func(t *testing.T) {
- err := mapFields(context.Background(), tt.input, &tt.state, tt.flavor, tt.storage, tt.encryption, tt.network, tt.region)
+ err := mapFields(context.Background(), tt.input, &tt.state, tt.storage, tt.encryption, tt.network, tt.region)
if !tt.isValid && err == nil {
t.Fatalf("Should have failed")
}
diff --git a/stackit/internal/services/sqlserverflexalpha/sqlserverflex_acc_test.go b/stackit/internal/services/sqlserverflexalpha/sqlserverflex_acc_test.go
index 4d43a684..c98de26f 100644
--- a/stackit/internal/services/sqlserverflexalpha/sqlserverflex_acc_test.go
+++ b/stackit/internal/services/sqlserverflexalpha/sqlserverflex_acc_test.go
@@ -16,7 +16,7 @@ import (
"github.com/hashicorp/terraform-plugin-testing/terraform"
"github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/core"
"github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/testutil"
- core_config "github.com/stackitcloud/stackit-sdk-go/core/config"
+ coreconfig "github.com/stackitcloud/stackit-sdk-go/core/config"
"github.com/stackitcloud/stackit-sdk-go/core/utils"
"github.com/stackitcloud/stackit-sdk-go/services/sqlserverflex"
"github.com/stackitcloud/stackit-sdk-go/services/sqlserverflex/wait"
@@ -28,6 +28,7 @@ var (
//go:embed testdata/resource-min.tf
resourceMinConfig string
)
+
var testConfigVarsMin = config.Variables{
"project_id": config.StringVariable(testutil.ProjectId),
"name": config.StringVariable(fmt.Sprintf("tf-acc-%s", acctest.RandStringFromCharSet(7, acctest.CharSetAlphaNum))),
@@ -440,7 +441,7 @@ func testAccChecksqlserverflexDestroy(s *terraform.State) error {
client, err = sqlserverflex.NewAPIClient()
} else {
client, err = sqlserverflex.NewAPIClient(
- core_config.WithEndpoint(testutil.SQLServerFlexCustomEndpoint),
+ coreconfig.WithEndpoint(testutil.SQLServerFlexCustomEndpoint),
)
}
if err != nil {
diff --git a/stackit/provider.go b/stackit/provider.go
index e19acf91..ce2d3327 100644
--- a/stackit/provider.go
+++ b/stackit/provider.go
@@ -18,14 +18,15 @@ import (
"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
"github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/core"
"github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/features"
- sdkauth "github.com/stackitcloud/stackit-sdk-go/core/auth"
- "github.com/stackitcloud/stackit-sdk-go/core/config"
-
postgresFlexAlphaDatabase "github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/services/postgresflexalpha/database"
+ "github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/services/postgresflexalpha/flavor"
postgresFlexAlphaInstance "github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/services/postgresflexalpha/instance"
postgresFlexAlphaUser "github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/services/postgresflexalpha/user"
+ sqlserverFlexAlphaFlavor "github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/flavor"
sqlServerFlexAlphaInstance "github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/instance"
sqlserverFlexAlphaUser "github.com/mhenselin/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/user"
+ sdkauth "github.com/stackitcloud/stackit-sdk-go/core/auth"
+ "github.com/stackitcloud/stackit-sdk-go/core/config"
)
// Ensure the implementation satisfies the expected interfaces
@@ -488,9 +489,13 @@ func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest,
// DataSources defines the data sources implemented in the provider.
func (p *Provider) DataSources(_ context.Context) []func() datasource.DataSource {
return []func() datasource.DataSource{
+ postgresFlexAlphaFlavor.NewFlavorDataSource,
// postgresFlexAlphaFlavor.NewFlavorListDataSource,
postgresFlexAlphaDatabase.NewDatabaseDataSource,
postgresFlexAlphaInstance.NewInstanceDataSource,
postgresFlexAlphaUser.NewUserDataSource,
+
+ sqlserverFlexAlphaFlavor.NewFlavorDataSource,
sqlServerFlexAlphaInstance.NewInstanceDataSource,
sqlserverFlexAlphaUser.NewUserDataSource,
}