diff --git a/docs/data-sources/sqlserverflex_instance.md b/docs/data-sources/sqlserverflex_instance.md
new file mode 100644
index 00000000..118c5d79
--- /dev/null
+++ b/docs/data-sources/sqlserverflex_instance.md
@@ -0,0 +1,61 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "stackit_sqlserverflex_instance Data Source - stackit"
+subcategory: ""
+description: |-
+ SQL Server Flex instance data source schema. Must have a region specified in the provider configuration.
+---
+
+# stackit_sqlserverflex_instance (Data Source)
+
+SQL Server Flex instance data source schema. Must have a `region` specified in the provider configuration.
+
+
+
+
+## Schema
+
+### Required
+
+- `instance_id` (String) ID of the SQL Server Flex instance.
+- `project_id` (String) STACKIT project ID to which the instance is associated.
+
+### Read-Only
+
+- `acl` (List of String) The Access Control List (ACL) for the SQL Server Flex instance.
+- `backup_schedule` (String) The backup schedule. Should follow the cron scheduling system format (e.g. "0 0 * * *").
+- `flavor` (Attributes) (see [below for nested schema](#nestedatt--flavor))
+- `id` (String) Terraform's internal data source ID. It is structured as "`project_id`,`instance_id`".
+- `name` (String) Instance name.
+- `options` (Attributes) Custom parameters for the SQL Server Flex instance. (see [below for nested schema](#nestedatt--options))
+- `replicas` (Number)
+- `storage` (Attributes) (see [below for nested schema](#nestedatt--storage))
+- `version` (String)
+
+
+### Nested Schema for `flavor`
+
+Read-Only:
+
+- `cpu` (Number)
+- `description` (String)
+- `id` (String)
+- `ram` (Number)
+
+
+
+### Nested Schema for `options`
+
+Read-Only:
+
+- `edition` (String)
+- `retention_days` (Number)
+
+
+
+### Nested Schema for `storage`
+
+Read-Only:
+
+- `class` (String)
+- `size` (Number)
diff --git a/docs/index.md b/docs/index.md
index 88326178..84906b10 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -165,4 +165,5 @@ Note: AWS specific checks must be skipped as they do not work on STACKIT. For de
- `service_account_key_path` (String) Path for the service account key used for authentication. If set, the key flow will be used to authenticate all operations.
- `service_account_token` (String) Token used for authentication. If set, the token flow will be used to authenticate all operations.
- `ske_custom_endpoint` (String) Custom endpoint for the Kubernetes Engine (SKE) service
+- `sqlserverflex_custom_endpoint` (String) Custom endpoint for the SQL Server Flex service
- `token_custom_endpoint` (String) Custom endpoint for the token API, which is used to request access tokens when using the key flow
diff --git a/docs/resources/ske_cluster.md b/docs/resources/ske_cluster.md
index 9c049cfa..8e2b44e1 100644
--- a/docs/resources/ske_cluster.md
+++ b/docs/resources/ske_cluster.md
@@ -81,7 +81,7 @@ Optional:
- `max_surge` (Number) Maximum number of additional VMs that are created during an update.
- `max_unavailable` (Number) Maximum number of VMs that that can be unavailable during an update.
- `os_name` (String) The name of the OS image. Defaults to `flatcar`.
-- `os_version` (String, Deprecated) This field is deprecated, use `os_version_min` to configure the version and `os_version_used` to get the currently used version instead
+- `os_version` (String, Deprecated) This field is deprecated, use `os_version_min` to configure the version and `os_version_used` to get the currently used version instead.
- `os_version_min` (String) The minimum OS image version. This field will be used to set the minimum OS image version on creation/update of the cluster. If unset, the latest supported OS image version will be used. SKE automatically updates the cluster Kubernetes version if you have set `maintenance.enable_kubernetes_version_updates` to true or if there is a mandatory update, as described in [Updates for Kubernetes versions and Operating System versions in SKE](https://docs.stackit.cloud/stackit/en/version-updates-in-ske-10125631.html). To get the current OS image version being used for the node pool, use the read-only `os_version_used` field.
- `taints` (Attributes List) Specifies a taint list as defined below. (see [below for nested schema](#nestedatt--node_pools--taints))
- `volume_size` (Number) The volume size in GB. Defaults to `20`
diff --git a/docs/resources/sqlserverflex_instance.md b/docs/resources/sqlserverflex_instance.md
new file mode 100644
index 00000000..6e4d0984
--- /dev/null
+++ b/docs/resources/sqlserverflex_instance.md
@@ -0,0 +1,67 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "stackit_sqlserverflex_instance Resource - stackit"
+subcategory: ""
+description: |-
+ SQL Server Flex instance resource schema. Must have a region specified in the provider configuration.
+---
+
+# stackit_sqlserverflex_instance (Resource)
+
+SQL Server Flex instance resource schema. Must have a `region` specified in the provider configuration.
+
+
+
+
+## Schema
+
+### Required
+
+- `flavor` (Attributes) (see [below for nested schema](#nestedatt--flavor))
+- `name` (String) Instance name.
+- `project_id` (String) STACKIT project ID to which the instance is associated.
+
+### Optional
+
+- `acl` (List of String) The Access Control List (ACL) for the SQL Server Flex instance.
+- `backup_schedule` (String) The backup schedule. Should follow the cron scheduling system format (e.g. "0 0 * * *").
+- `options` (Attributes) (see [below for nested schema](#nestedatt--options))
+- `storage` (Attributes) (see [below for nested schema](#nestedatt--storage))
+- `version` (String)
+
+### Read-Only
+
+- `id` (String) Terraform's internal resource ID. It is structured as "`project_id`,`instance_id`".
+- `instance_id` (String) ID of the SQL Server Flex instance.
+- `replicas` (Number)
+
+
+### Nested Schema for `flavor`
+
+Required:
+
+- `cpu` (Number)
+- `ram` (Number)
+
+Read-Only:
+
+- `description` (String)
+- `id` (String)
+
+
+
+### Nested Schema for `options`
+
+Optional:
+
+- `edition` (String)
+- `retention_days` (Number)
+
+
+
+### Nested Schema for `storage`
+
+Optional:
+
+- `class` (String)
+- `size` (Number)
diff --git a/go.mod b/go.mod
index 4256b3a7..57749532 100644
--- a/go.mod
+++ b/go.mod
@@ -28,6 +28,7 @@ require (
github.com/stackitcloud/stackit-sdk-go/services/resourcemanager v0.8.0
github.com/stackitcloud/stackit-sdk-go/services/secretsmanager v0.8.0
github.com/stackitcloud/stackit-sdk-go/services/ske v0.16.0
+ github.com/stackitcloud/stackit-sdk-go/services/sqlserverflex v0.2.0
golang.org/x/mod v0.17.0
)
diff --git a/go.sum b/go.sum
index 9aa31ba1..9cd92905 100644
--- a/go.sum
+++ b/go.sum
@@ -178,6 +178,8 @@ github.com/stackitcloud/stackit-sdk-go/services/secretsmanager v0.8.0 h1:pJBG455
github.com/stackitcloud/stackit-sdk-go/services/secretsmanager v0.8.0/go.mod h1:LX0Mcyr7/QP77zf7e05fHCJO38RMuTxr7nEDUDZ3oPQ=
github.com/stackitcloud/stackit-sdk-go/services/ske v0.16.0 h1:trrJuRMzgXu6fiiMZiUx6+A1FNKEFhA1vGq5cr5Qn3U=
github.com/stackitcloud/stackit-sdk-go/services/ske v0.16.0/go.mod h1:0fFs4R7kg+gU7FNAIzzFvlCZJz6gyZ8CFhbK3eSrAwQ=
+github.com/stackitcloud/stackit-sdk-go/services/sqlserverflex v0.2.0 h1:aIXxXx6u4+6C02MPb+hdItigeKeen7m+hEEG+Ej9sNs=
+github.com/stackitcloud/stackit-sdk-go/services/sqlserverflex v0.2.0/go.mod h1:fQJOQMfasStZ8J9iGX0vTjyJoQtLqMXJ5Npb03QJk84=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
diff --git a/stackit/internal/core/core.go b/stackit/internal/core/core.go
index afc043c6..68240b74 100644
--- a/stackit/internal/core/core.go
+++ b/stackit/internal/core/core.go
@@ -32,6 +32,7 @@ type ProviderData struct {
RedisCustomEndpoint string
ResourceManagerCustomEndpoint string
SecretsManagerCustomEndpoint string
+ SQLServerFlexCustomEndpoint string
SKECustomEndpoint string
}
diff --git a/stackit/internal/services/mongodbflex/instance/resource.go b/stackit/internal/services/mongodbflex/instance/resource.go
index 4c4f3375..bf93c15e 100644
--- a/stackit/internal/services/mongodbflex/instance/resource.go
+++ b/stackit/internal/services/mongodbflex/instance/resource.go
@@ -643,7 +643,7 @@ func mapFields(ctx context.Context, resp *mongodbflex.GetInstanceResponse, model
return fmt.Errorf("creating options: %w", core.DiagsToError(diags))
}
- simplifiedModelBackupSchedule := simplifyBackupSchedule(model.BackupSchedule.ValueString())
+ simplifiedModelBackupSchedule := utils.SimplifyBackupSchedule(model.BackupSchedule.ValueString())
// If the value returned by the API is different from the one in the model after simplification,
// we update the model so that it causes an error in Terraform
if simplifiedModelBackupSchedule != types.StringPointerValue(instance.BackupSchedule).ValueString() {
@@ -785,7 +785,7 @@ func loadFlavorId(ctx context.Context, client mongoDBFlexClient, model *Model, f
flavor.Description = types.StringValue(*f.Description)
break
}
- avl = fmt.Sprintf("%s\n- %d CPU, %d GB RAM", avl, *f.Cpu, *f.Cpu)
+ avl = fmt.Sprintf("%s\n- %d CPU, %d GB RAM", avl, *f.Cpu, *f.Memory)
}
if flavor.Id.ValueString() == "" {
return fmt.Errorf("couldn't find flavor, available specs are:%s", avl)
@@ -793,17 +793,3 @@ func loadFlavorId(ctx context.Context, client mongoDBFlexClient, model *Model, f
return nil
}
-
-// Remove leading 0s from backup schedule numbers (e.g. "00 00 * * *" becomes "0 0 * * *")
-// Needed as the API does it internally and would otherwise cause inconsistent result in Terraform
-func simplifyBackupSchedule(schedule string) string {
- regex := regexp.MustCompile(`0+\d+`) // Matches series of one or more zeros followed by a series of one or more digits
- simplifiedSchedule := regex.ReplaceAllStringFunc(schedule, func(match string) string {
- simplified := strings.TrimLeft(match, "0")
- if simplified == "" {
- simplified = "0"
- }
- return simplified
- })
- return simplifiedSchedule
-}
diff --git a/stackit/internal/services/mongodbflex/instance/resource_test.go b/stackit/internal/services/mongodbflex/instance/resource_test.go
index 23c03e28..69710ac0 100644
--- a/stackit/internal/services/mongodbflex/instance/resource_test.go
+++ b/stackit/internal/services/mongodbflex/instance/resource_test.go
@@ -838,75 +838,3 @@ func TestLoadFlavorId(t *testing.T) {
})
}
}
-
-func TestSimplifyBackupSchedule(t *testing.T) {
- tests := []struct {
- description string
- input string
- expected string
- }{
- {
- "simple schedule",
- "0 0 * * *",
- "0 0 * * *",
- },
- {
- "schedule with leading zeros",
- "00 00 * * *",
- "0 0 * * *",
- },
- {
- "schedule with leading zeros 2",
- "00 001 * * *",
- "0 1 * * *",
- },
- {
- "schedule with leading zeros 3",
- "00 0010 * * *",
- "0 10 * * *",
- },
- {
- "simple schedule with slash",
- "0 0/6 * * *",
- "0 0/6 * * *",
- },
- {
- "schedule with leading zeros and slash",
- "00 00/6 * * *",
- "0 0/6 * * *",
- },
- {
- "schedule with leading zeros and slash 2",
- "00 001/06 * * *",
- "0 1/6 * * *",
- },
- {
- "simple schedule with comma",
- "0 10,15 * * *",
- "0 10,15 * * *",
- },
- {
- "schedule with leading zeros and comma",
- "0 010,0015 * * *",
- "0 10,15 * * *",
- },
- {
- "simple schedule with comma and slash",
- "0 0-11/10 * * *",
- "0 0-11/10 * * *",
- },
- {
- "schedule with leading zeros, comma, and slash",
- "00 000-011/010 * * *",
- "0 0-11/10 * * *",
- },
- }
- for _, tt := range tests {
- t.Run(tt.description, func(t *testing.T) {
- output := simplifyBackupSchedule(tt.input)
- if output != tt.expected {
- t.Fatalf("Data does not match: %s", output)
- }
- })
- }
-}
diff --git a/stackit/internal/services/postgresflex/instance/resource.go b/stackit/internal/services/postgresflex/instance/resource.go
index 276374fe..9474bdb7 100644
--- a/stackit/internal/services/postgresflex/instance/resource.go
+++ b/stackit/internal/services/postgresflex/instance/resource.go
@@ -694,7 +694,7 @@ func loadFlavorId(ctx context.Context, client postgresFlexClient, model *Model,
flavor.Description = types.StringValue(*f.Description)
break
}
- avl = fmt.Sprintf("%s\n- %d CPU, %d GB RAM", avl, *f.Cpu, *f.Cpu)
+ avl = fmt.Sprintf("%s\n- %d CPU, %d GB RAM", avl, *f.Cpu, *f.Memory)
}
if flavor.Id.ValueString() == "" {
return fmt.Errorf("couldn't find flavor, available specs are:%s", avl)
diff --git a/stackit/internal/services/sqlserverflex/instance/datasource.go b/stackit/internal/services/sqlserverflex/instance/datasource.go
new file mode 100644
index 00000000..dce75934
--- /dev/null
+++ b/stackit/internal/services/sqlserverflex/instance/datasource.go
@@ -0,0 +1,237 @@
+package sqlserverflex
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+
+ "github.com/hashicorp/terraform-plugin-framework/datasource"
+ "github.com/hashicorp/terraform-plugin-framework/schema/validator"
+ "github.com/hashicorp/terraform-plugin-framework/types/basetypes"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/validate"
+
+ "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/stackitcloud/stackit-sdk-go/core/config"
+ "github.com/stackitcloud/stackit-sdk-go/core/oapierror"
+ "github.com/stackitcloud/stackit-sdk-go/services/sqlserverflex"
+)
+
+// Ensure the implementation satisfies the expected interfaces.
+var (
+ _ datasource.DataSource = &instanceDataSource{}
+)
+
+// NewInstanceDataSource is a helper function to simplify the provider implementation.
+func NewInstanceDataSource() datasource.DataSource {
+ return &instanceDataSource{}
+}
+
+// instanceDataSource is the data source implementation.
+type instanceDataSource struct {
+ client *sqlserverflex.APIClient
+}
+
+// Metadata returns the data source type name.
+func (r *instanceDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
+ resp.TypeName = req.ProviderTypeName + "_sqlserverflex_instance"
+}
+
+// Configure adds the provider configured client to the data source.
+func (r *instanceDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
+ // Prevent panic if the provider has not been configured.
+ if req.ProviderData == nil {
+ return
+ }
+
+ providerData, ok := req.ProviderData.(core.ProviderData)
+ if !ok {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Expected configure type stackit.ProviderData, got %T", req.ProviderData))
+ return
+ }
+
+ var apiClient *sqlserverflex.APIClient
+ var err error
+ if providerData.SQLServerFlexCustomEndpoint != "" {
+ apiClient, err = sqlserverflex.NewAPIClient(
+ config.WithCustomAuth(providerData.RoundTripper),
+ config.WithEndpoint(providerData.SQLServerFlexCustomEndpoint),
+ )
+ } else {
+ apiClient, err = sqlserverflex.NewAPIClient(
+ config.WithCustomAuth(providerData.RoundTripper),
+ config.WithRegion(providerData.Region),
+ )
+ }
+
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Configuring client: %v. This is an error related to the provider configuration, not to the data source configuration", err))
+ return
+ }
+
+ r.client = apiClient
+ tflog.Info(ctx, "SQLServer Flex instance client configured")
+}
+
+// Schema defines the schema for the data source.
+func (r *instanceDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
+ descriptions := map[string]string{
+ "main": "SQLServer Flex instance data source schema. Must have a `region` specified in the provider configuration.",
+ "id": "Terraform's internal data source ID. It is structured as \"`project_id`,`instance_id`\".",
+ "instance_id": "ID of the SQLServer Flex instance.",
+ "project_id": "STACKIT project ID to which the instance is associated.",
+ "name": "Instance name.",
+ "acl": "The Access Control List (ACL) for the SQLServer Flex instance.",
+ "backup_schedule": `The backup schedule. Should follow the cron scheduling system format (e.g. "0 0 * * *").`,
+ "options": "Custom parameters for the SQLServer Flex instance.",
+ }
+
+ resp.Schema = schema.Schema{
+ Description: descriptions["main"],
+ Attributes: map[string]schema.Attribute{
+ "id": schema.StringAttribute{
+ Description: descriptions["id"],
+ Computed: true,
+ },
+ "instance_id": schema.StringAttribute{
+ Description: descriptions["instance_id"],
+ Required: true,
+ Validators: []validator.String{
+ validate.UUID(),
+ validate.NoSeparator(),
+ },
+ },
+ "project_id": schema.StringAttribute{
+ Description: descriptions["project_id"],
+ Required: true,
+ Validators: []validator.String{
+ validate.UUID(),
+ validate.NoSeparator(),
+ },
+ },
+ "name": schema.StringAttribute{
+ Description: descriptions["name"],
+ Computed: true,
+ },
+ "acl": schema.ListAttribute{
+ Description: descriptions["acl"],
+ ElementType: types.StringType,
+ Computed: true,
+ },
+ "backup_schedule": schema.StringAttribute{
+ Description: descriptions["backup_schedule"],
+ Computed: true,
+ },
+ "flavor": schema.SingleNestedAttribute{
+ Computed: true,
+ Attributes: map[string]schema.Attribute{
+ "id": schema.StringAttribute{
+ Computed: true,
+ },
+ "description": schema.StringAttribute{
+ Computed: true,
+ },
+ "cpu": schema.Int64Attribute{
+ Computed: true,
+ },
+ "ram": schema.Int64Attribute{
+ Computed: true,
+ },
+ },
+ },
+ "replicas": schema.Int64Attribute{
+ Computed: true,
+ },
+ "storage": schema.SingleNestedAttribute{
+ Computed: true,
+ Attributes: map[string]schema.Attribute{
+ "class": schema.StringAttribute{
+ Computed: true,
+ },
+ "size": schema.Int64Attribute{
+ Computed: true,
+ },
+ },
+ },
+ "version": schema.StringAttribute{
+ Computed: true,
+ },
+ "options": schema.SingleNestedAttribute{
+ Description: descriptions["options"],
+ Computed: true,
+ Attributes: map[string]schema.Attribute{
+ "edition": schema.StringAttribute{
+ Computed: true,
+ },
+ "retention_days": schema.Int64Attribute{
+ Computed: true,
+ },
+ },
+ },
+ },
+ }
+}
+
+// Read refreshes the Terraform state with the latest data.
+func (r *instanceDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform
+ var model Model
+ diags := req.Config.Get(ctx, &model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ projectId := model.ProjectId.ValueString()
+ instanceId := model.InstanceId.ValueString()
+ ctx = tflog.SetField(ctx, "project_id", projectId)
+ ctx = tflog.SetField(ctx, "instance_id", instanceId)
+ instanceResp, err := r.client.GetInstance(ctx, projectId, instanceId).Execute()
+ if err != nil {
+ oapiErr, ok := err.(*oapierror.GenericOpenAPIError) //nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped
+ if ok && oapiErr.StatusCode == http.StatusNotFound {
+ resp.State.RemoveResource(ctx)
+ }
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Calling API: %v", err))
+ return
+ }
+
+ var flavor = &flavorModel{}
+ if !(model.Flavor.IsNull() || model.Flavor.IsUnknown()) {
+ diags = model.Flavor.As(ctx, flavor, basetypes.ObjectAsOptions{})
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ }
+ var storage = &storageModel{}
+ if !(model.Storage.IsNull() || model.Storage.IsUnknown()) {
+ diags = model.Storage.As(ctx, storage, basetypes.ObjectAsOptions{})
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ }
+ var options = &optionsModel{}
+ if !(model.Options.IsNull() || model.Options.IsUnknown()) {
+ diags = model.Options.As(ctx, options, basetypes.ObjectAsOptions{})
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ }
+
+ err = mapFields(ctx, instanceResp, &model, flavor, storage, options)
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Processing API payload: %v", err))
+ return
+ }
+ // Set refreshed state
+ diags = resp.State.Set(ctx, model)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ tflog.Info(ctx, "SQLServer Flex instance read")
+}
diff --git a/stackit/internal/services/sqlserverflex/instance/resource.go b/stackit/internal/services/sqlserverflex/instance/resource.go
new file mode 100644
index 00000000..69129f1e
--- /dev/null
+++ b/stackit/internal/services/sqlserverflex/instance/resource.go
@@ -0,0 +1,841 @@
+package sqlserverflex
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/path"
+ "github.com/hashicorp/terraform-plugin-framework/schema/validator"
+ "github.com/hashicorp/terraform-plugin-framework/types/basetypes"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/conversion"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/utils"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/validate"
+
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/objectplanmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/stackitcloud/stackit-sdk-go/core/config"
+ "github.com/stackitcloud/stackit-sdk-go/core/oapierror"
+ coreUtils "github.com/stackitcloud/stackit-sdk-go/core/utils"
+ "github.com/stackitcloud/stackit-sdk-go/services/sqlserverflex"
+ "github.com/stackitcloud/stackit-sdk-go/services/sqlserverflex/wait"
+)
+
+// Ensure the implementation satisfies the expected interfaces.
+var (
+ _ resource.Resource = &instanceResource{}
+ _ resource.ResourceWithConfigure = &instanceResource{}
+ _ resource.ResourceWithImportState = &instanceResource{}
+)
+
+type Model struct {
+ Id types.String `tfsdk:"id"` // needed by TF
+ InstanceId types.String `tfsdk:"instance_id"`
+ ProjectId types.String `tfsdk:"project_id"`
+ Name types.String `tfsdk:"name"`
+ ACL types.List `tfsdk:"acl"`
+ BackupSchedule types.String `tfsdk:"backup_schedule"`
+ Flavor types.Object `tfsdk:"flavor"`
+ Storage types.Object `tfsdk:"storage"`
+ Version types.String `tfsdk:"version"`
+ Replicas types.Int64 `tfsdk:"replicas"`
+ Options types.Object `tfsdk:"options"`
+}
+
+// Struct corresponding to Model.Flavor
+type flavorModel struct {
+ Id types.String `tfsdk:"id"`
+ Description types.String `tfsdk:"description"`
+ CPU types.Int64 `tfsdk:"cpu"`
+ RAM types.Int64 `tfsdk:"ram"`
+}
+
+// Types corresponding to flavorModel
+var flavorTypes = map[string]attr.Type{
+ "id": basetypes.StringType{},
+ "description": basetypes.StringType{},
+ "cpu": basetypes.Int64Type{},
+ "ram": basetypes.Int64Type{},
+}
+
+// Struct corresponding to Model.Storage
+type storageModel struct {
+ Class types.String `tfsdk:"class"`
+ Size types.Int64 `tfsdk:"size"`
+}
+
+// Types corresponding to storageModel
+var storageTypes = map[string]attr.Type{
+ "class": basetypes.StringType{},
+ "size": basetypes.Int64Type{},
+}
+
+// Struct corresponding to Model.Options
+type optionsModel struct {
+ Edition types.String `tfsdk:"edition"`
+ RetentionDays types.Int64 `tfsdk:"retention_days"`
+}
+
+// Types corresponding to optionsModel
+var optionsTypes = map[string]attr.Type{
+ "edition": basetypes.StringType{},
+ "retention_days": basetypes.Int64Type{},
+}
+
+// NewInstanceResource is a helper function to simplify the provider implementation.
+func NewInstanceResource() resource.Resource {
+ return &instanceResource{}
+}
+
+// instanceResource is the resource implementation.
+type instanceResource struct {
+ client *sqlserverflex.APIClient
+}
+
+// Metadata returns the resource type name.
+func (r *instanceResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
+ resp.TypeName = req.ProviderTypeName + "_sqlserverflex_instance"
+}
+
+// Configure adds the provider configured client to the resource.
+func (r *instanceResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
+ // Prevent panic if the provider has not been configured.
+ if req.ProviderData == nil {
+ return
+ }
+
+ providerData, ok := req.ProviderData.(core.ProviderData)
+ if !ok {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Expected configure type stackit.ProviderData, got %T", req.ProviderData))
+ return
+ }
+
+ var apiClient *sqlserverflex.APIClient
+ var err error
+ if providerData.SQLServerFlexCustomEndpoint != "" {
+ apiClient, err = sqlserverflex.NewAPIClient(
+ config.WithCustomAuth(providerData.RoundTripper),
+ config.WithEndpoint(providerData.SQLServerFlexCustomEndpoint),
+ )
+ } else {
+ apiClient, err = sqlserverflex.NewAPIClient(
+ config.WithCustomAuth(providerData.RoundTripper),
+ config.WithRegion(providerData.Region),
+ )
+ }
+
+ if err != nil {
+ core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Configuring client: %v. This is an error related to the provider configuration, not to the resource configuration", err))
+ return
+ }
+
+ r.client = apiClient
+ tflog.Info(ctx, "SQLServer Flex instance client configured")
+}
+
+// Schema defines the schema for the resource.
+func (r *instanceResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
+ descriptions := map[string]string{
+ "main": "SQLServer Flex instance resource schema. Must have a `region` specified in the provider configuration.",
+ "id": "Terraform's internal resource ID. It is structured as \"`project_id`,`instance_id`\".",
+ "instance_id": "ID of the SQLServer Flex instance.",
+ "project_id": "STACKIT project ID to which the instance is associated.",
+ "name": "Instance name.",
+ "acl": "The Access Control List (ACL) for the SQLServer Flex instance.",
+ "backup_schedule": `The backup schedule. Should follow the cron scheduling system format (e.g. "0 0 * * *").`,
+ "options": "Custom parameters for the SQLServer Flex instance.",
+ }
+
+ resp.Schema = schema.Schema{
+ Description: descriptions["main"],
+ Attributes: map[string]schema.Attribute{
+ "id": schema.StringAttribute{
+ Description: descriptions["id"],
+ Computed: true,
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "instance_id": schema.StringAttribute{
+ Description: descriptions["instance_id"],
+ Computed: true,
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ Validators: []validator.String{
+ validate.UUID(),
+ validate.NoSeparator(),
+ },
+ },
+ "project_id": schema.StringAttribute{
+ Description: descriptions["project_id"],
+ Required: true,
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.RequiresReplace(),
+ },
+ Validators: []validator.String{
+ validate.UUID(),
+ validate.NoSeparator(),
+ },
+ },
+ "name": schema.StringAttribute{
+ Description: descriptions["name"],
+ Required: true,
+ Validators: []validator.String{
+ stringvalidator.LengthAtLeast(1),
+ stringvalidator.RegexMatches(
+ regexp.MustCompile("^[a-z]([-a-z0-9]*[a-z0-9])?$"),
+ "must start with a letter, must have lower case letters, numbers or hyphens, and no hyphen at the end",
+ ),
+ },
+ },
+ "acl": schema.ListAttribute{
+ Description: descriptions["acl"],
+ ElementType: types.StringType,
+ Optional: true,
+ Computed: true,
+ PlanModifiers: []planmodifier.List{
+ listplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "backup_schedule": schema.StringAttribute{
+ Description: descriptions["backup_schedule"],
+ Optional: true,
+ Computed: true,
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "flavor": schema.SingleNestedAttribute{
+ Required: true,
+ Attributes: map[string]schema.Attribute{
+ "id": schema.StringAttribute{
+ Computed: true,
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "description": schema.StringAttribute{
+ Computed: true,
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "cpu": schema.Int64Attribute{
+ Required: true,
+ },
+ "ram": schema.Int64Attribute{
+ Required: true,
+ },
+ },
+ },
+ "replicas": schema.Int64Attribute{
+ Computed: true,
+ PlanModifiers: []planmodifier.Int64{
+ int64planmodifier.UseStateForUnknown(),
+ },
+ },
+ "storage": schema.SingleNestedAttribute{
+ Optional: true,
+ Computed: true,
+ PlanModifiers: []planmodifier.Object{
+ objectplanmodifier.RequiresReplace(),
+ objectplanmodifier.UseStateForUnknown(),
+ },
+ Attributes: map[string]schema.Attribute{
+ "class": schema.StringAttribute{
+ Optional: true,
+ Computed: true,
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.RequiresReplace(),
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "size": schema.Int64Attribute{
+ Optional: true,
+ Computed: true,
+ PlanModifiers: []planmodifier.Int64{
+ int64planmodifier.RequiresReplace(),
+ int64planmodifier.UseStateForUnknown(),
+ },
+ },
+ },
+ },
+ "version": schema.StringAttribute{
+ Optional: true,
+ Computed: true,
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "options": schema.SingleNestedAttribute{
+ Optional: true,
+ Computed: true,
+ PlanModifiers: []planmodifier.Object{
+ objectplanmodifier.RequiresReplace(),
+ objectplanmodifier.UseStateForUnknown(),
+ },
+ Attributes: map[string]schema.Attribute{
+ "edition": schema.StringAttribute{
+ Optional: true,
+ Computed: true,
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.RequiresReplace(),
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "retention_days": schema.Int64Attribute{
+ Optional: true,
+ Computed: true,
+ PlanModifiers: []planmodifier.Int64{
+ int64planmodifier.RequiresReplace(),
+ int64planmodifier.UseStateForUnknown(),
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+// Create creates the resource and sets the initial Terraform state.
+// It resolves the flavor ID from the planned CPU/RAM, calls the create API,
+// waits until the instance is ready, and maps the final state back into the model.
+func (r *instanceResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform
+	// Retrieve values from plan
+	var model Model
+	diags := req.Plan.Get(ctx, &model)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+	projectId := model.ProjectId.ValueString()
+	ctx = tflog.SetField(ctx, "project_id", projectId)
+
+	// Unpack the ACL list only when it is known; a null/unknown list stays nil.
+	var acl []string
+	if !(model.ACL.IsNull() || model.ACL.IsUnknown()) {
+		diags = model.ACL.ElementsAs(ctx, &acl, false)
+		resp.Diagnostics.Append(diags...)
+		if resp.Diagnostics.HasError() {
+			return
+		}
+	}
+	// Unpack the nested flavor object and resolve its flavor ID from CPU/RAM
+	// via the flavors API (the user configures cpu/ram, not the ID).
+	var flavor = &flavorModel{}
+	if !(model.Flavor.IsNull() || model.Flavor.IsUnknown()) {
+		diags = model.Flavor.As(ctx, flavor, basetypes.ObjectAsOptions{})
+		resp.Diagnostics.Append(diags...)
+		if resp.Diagnostics.HasError() {
+			return
+		}
+		err := loadFlavorId(ctx, r.client, &model, flavor)
+		if err != nil {
+			core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Loading flavor ID: %v", err))
+			return
+		}
+	}
+	var storage = &storageModel{}
+	if !(model.Storage.IsNull() || model.Storage.IsUnknown()) {
+		diags = model.Storage.As(ctx, storage, basetypes.ObjectAsOptions{})
+		resp.Diagnostics.Append(diags...)
+		if resp.Diagnostics.HasError() {
+			return
+		}
+	}
+
+	var options = &optionsModel{}
+	if !(model.Options.IsNull() || model.Options.IsUnknown()) {
+		diags = model.Options.As(ctx, options, basetypes.ObjectAsOptions{})
+		resp.Diagnostics.Append(diags...)
+		if resp.Diagnostics.HasError() {
+			return
+		}
+	}
+
+	// Generate API request body from model
+	payload, err := toCreatePayload(&model, acl, flavor, storage, options)
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Creating API payload: %v", err))
+		return
+	}
+	// Create new instance
+	createResp, err := r.client.CreateInstance(ctx, projectId).CreateInstancePayload(*payload).Execute()
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Calling API: %v", err))
+		return
+	}
+	instanceId := *createResp.Id
+	ctx = tflog.SetField(ctx, "instance_id", instanceId)
+	// The creation waiter sometimes returns an error from the API: "instance with id xxx has unexpected status Failure"
+	// which can be avoided by sleeping before wait
+	waitResp, err := wait.CreateInstanceWaitHandler(ctx, r.client, projectId, instanceId).SetSleepBeforeWait(30 * time.Second).WaitWithContext(ctx)
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Instance creation waiting: %v", err))
+		return
+	}
+
+	// Map response body to schema
+	err = mapFields(ctx, waitResp, &model, flavor, storage, options)
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Processing API payload: %v", err))
+		return
+	}
+	// Set state to fully populated data
+	diags = resp.State.Set(ctx, model)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+	tflog.Info(ctx, "SQLServer Flex instance created")
+}
+
+// Read refreshes the Terraform state with the latest data.
+// A 404 from the API removes the resource from state so Terraform plans a re-create.
+func (r *instanceResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform
+	var model Model
+	diags := req.State.Get(ctx, &model)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+	projectId := model.ProjectId.ValueString()
+	instanceId := model.InstanceId.ValueString()
+	ctx = tflog.SetField(ctx, "project_id", projectId)
+	ctx = tflog.SetField(ctx, "instance_id", instanceId)
+
+	// Unpack the nested objects from prior state; mapFields falls back to these
+	// values for fields the API response does not include.
+	var flavor = &flavorModel{}
+	if !(model.Flavor.IsNull() || model.Flavor.IsUnknown()) {
+		diags = model.Flavor.As(ctx, flavor, basetypes.ObjectAsOptions{})
+		resp.Diagnostics.Append(diags...)
+		if resp.Diagnostics.HasError() {
+			return
+		}
+	}
+	var storage = &storageModel{}
+	if !(model.Storage.IsNull() || model.Storage.IsUnknown()) {
+		diags = model.Storage.As(ctx, storage, basetypes.ObjectAsOptions{})
+		resp.Diagnostics.Append(diags...)
+		if resp.Diagnostics.HasError() {
+			return
+		}
+	}
+
+	var options = &optionsModel{}
+	if !(model.Options.IsNull() || model.Options.IsUnknown()) {
+		diags = model.Options.As(ctx, options, basetypes.ObjectAsOptions{})
+		resp.Diagnostics.Append(diags...)
+		if resp.Diagnostics.HasError() {
+			return
+		}
+	}
+
+	instanceResp, err := r.client.GetInstance(ctx, projectId, instanceId).Execute()
+	if err != nil {
+		oapiErr, ok := err.(*oapierror.GenericOpenAPIError) //nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped
+		if ok && oapiErr.StatusCode == http.StatusNotFound {
+			// Instance is gone on the server side: drop it from state.
+			resp.State.RemoveResource(ctx)
+			return
+		}
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", err.Error())
+		return
+	}
+
+	// Map response body to schema
+	err = mapFields(ctx, instanceResp, &model, flavor, storage, options)
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Processing API payload: %v", err))
+		return
+	}
+	// Set refreshed state
+	diags = resp.State.Set(ctx, model)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+	tflog.Info(ctx, "SQLServer Flex instance read")
+}
+
+// Update updates the resource and sets the updated Terraform state on success.
+// Only ACL, backup schedule, flavor, name and version are sent in the partial
+// update; storage and options are marked RequiresReplace in the schema.
+func (r *instanceResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform
+	// Retrieve values from plan
+	var model Model
+	diags := req.Plan.Get(ctx, &model)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+	projectId := model.ProjectId.ValueString()
+	instanceId := model.InstanceId.ValueString()
+	ctx = tflog.SetField(ctx, "project_id", projectId)
+	ctx = tflog.SetField(ctx, "instance_id", instanceId)
+
+	var acl []string
+	if !(model.ACL.IsNull() || model.ACL.IsUnknown()) {
+		diags = model.ACL.ElementsAs(ctx, &acl, false)
+		resp.Diagnostics.Append(diags...)
+		if resp.Diagnostics.HasError() {
+			return
+		}
+	}
+	// Resolve the flavor ID from the planned CPU/RAM combination.
+	var flavor = &flavorModel{}
+	if !(model.Flavor.IsNull() || model.Flavor.IsUnknown()) {
+		diags = model.Flavor.As(ctx, flavor, basetypes.ObjectAsOptions{})
+		resp.Diagnostics.Append(diags...)
+		if resp.Diagnostics.HasError() {
+			return
+		}
+		err := loadFlavorId(ctx, r.client, &model, flavor)
+		if err != nil {
+			core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Loading flavor ID: %v", err))
+			return
+		}
+	}
+	// Storage and options are unpacked only so mapFields can fall back to them
+	// when the API response omits those fields; they are not part of the payload.
+	var storage = &storageModel{}
+	if !(model.Storage.IsNull() || model.Storage.IsUnknown()) {
+		diags = model.Storage.As(ctx, storage, basetypes.ObjectAsOptions{})
+		resp.Diagnostics.Append(diags...)
+		if resp.Diagnostics.HasError() {
+			return
+		}
+	}
+
+	var options = &optionsModel{}
+	if !(model.Options.IsNull() || model.Options.IsUnknown()) {
+		diags = model.Options.As(ctx, options, basetypes.ObjectAsOptions{})
+		resp.Diagnostics.Append(diags...)
+		if resp.Diagnostics.HasError() {
+			return
+		}
+	}
+
+	// Generate API request body from model
+	payload, err := toUpdatePayload(&model, acl, flavor)
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Creating API payload: %v", err))
+		return
+	}
+	// Update existing instance
+	_, err = r.client.PartialUpdateInstance(ctx, projectId, instanceId).PartialUpdateInstancePayload(*payload).Execute()
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", err.Error())
+		return
+	}
+	waitResp, err := wait.UpdateInstanceWaitHandler(ctx, r.client, projectId, instanceId).WaitWithContext(ctx)
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Instance update waiting: %v", err))
+		return
+	}
+
+	// Map response body to schema
+	err = mapFields(ctx, waitResp, &model, flavor, storage, options)
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Processing API payload: %v", err))
+		return
+	}
+	diags = resp.State.Set(ctx, model)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+	tflog.Info(ctx, "SQLServer Flex instance updated")
+}
+
+// Delete deletes the resource and removes the Terraform state on success.
+// It issues the delete call and then blocks until the API reports the
+// instance as gone, so dependent resources are not torn down too early.
+func (r *instanceResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform
+	// Retrieve values from state
+	var model Model
+	diags := req.State.Get(ctx, &model)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+	projectId := model.ProjectId.ValueString()
+	instanceId := model.InstanceId.ValueString()
+	ctx = tflog.SetField(ctx, "project_id", projectId)
+	ctx = tflog.SetField(ctx, "instance_id", instanceId)
+
+	// Delete existing instance
+	err := r.client.DeleteInstance(ctx, projectId, instanceId).Execute()
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting instance", fmt.Sprintf("Calling API: %v", err))
+		return
+	}
+	_, err = wait.DeleteInstanceWaitHandler(ctx, r.client, projectId, instanceId).WaitWithContext(ctx)
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting instance", fmt.Sprintf("Instance deletion waiting: %v", err))
+		return
+	}
+	tflog.Info(ctx, "SQLServer Flex instance deleted")
+}
+
+// ImportState imports a resource into the Terraform state on success.
+// The expected format of the resource import identifier is: project_id,instance_id
+func (r *instanceResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
+	parts := strings.Split(req.ID, core.Separator)
+	if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
+		core.LogAndAddError(ctx, &resp.Diagnostics,
+			"Error importing instance",
+			fmt.Sprintf("Expected import identifier with format: [project_id],[instance_id] Got: %q", req.ID),
+		)
+		return
+	}
+
+	// Seed the minimal state; the follow-up Read fills in the remaining fields.
+	projectId, instanceId := parts[0], parts[1]
+	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), projectId)...)
+	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), instanceId)...)
+	tflog.Info(ctx, "SQLServer Flex instance state imported")
+}
+
+// mapFields maps the API response onto the Terraform model. The flavor,
+// storage and options arguments carry the values already present in the
+// plan/state; they are used as fallbacks when the response omits a field.
+// Returns an error if the response, its item, or the model is nil.
+func mapFields(ctx context.Context, resp *sqlserverflex.GetInstanceResponse, model *Model, flavor *flavorModel, storage *storageModel, options *optionsModel) error {
+	if resp == nil {
+		return fmt.Errorf("response input is nil")
+	}
+	if resp.Item == nil {
+		return fmt.Errorf("no instance provided")
+	}
+	if model == nil {
+		return fmt.Errorf("model input is nil")
+	}
+	instance := resp.Item
+
+	// Prefer the instance ID already in state; fall back to the response.
+	var instanceId string
+	if model.InstanceId.ValueString() != "" {
+		instanceId = model.InstanceId.ValueString()
+	} else if instance.Id != nil {
+		instanceId = *instance.Id
+	} else {
+		return fmt.Errorf("instance id not present")
+	}
+
+	var aclList basetypes.ListValue
+	var diags diag.Diagnostics
+	if instance.Acl == nil || instance.Acl.Items == nil {
+		aclList = types.ListNull(types.StringType)
+	} else {
+		respACL := *instance.Acl.Items
+		modelACL, err := utils.ListValuetoStringSlice(model.ACL)
+		if err != nil {
+			return err
+		}
+
+		// Keep the user's ordering when the sets match, to avoid spurious diffs.
+		reconciledACL := utils.ReconcileStringSlices(modelACL, respACL)
+
+		aclList, diags = types.ListValueFrom(ctx, types.StringType, reconciledACL)
+		if diags.HasError() {
+			return fmt.Errorf("mapping ACL: %w", core.DiagsToError(diags))
+		}
+	}
+
+	var flavorValues map[string]attr.Value
+	if instance.Flavor == nil {
+		flavorValues = map[string]attr.Value{
+			"id":          flavor.Id,
+			"description": flavor.Description,
+			"cpu":         flavor.CPU,
+			"ram":         flavor.RAM,
+		}
+	} else {
+		// Use the pointer-safe constructors: the API may omit individual
+		// flavor fields, and dereferencing them directly would panic.
+		flavorValues = map[string]attr.Value{
+			"id":          types.StringPointerValue(instance.Flavor.Id),
+			"description": types.StringPointerValue(instance.Flavor.Description),
+			"cpu":         types.Int64PointerValue(instance.Flavor.Cpu),
+			"ram":         types.Int64PointerValue(instance.Flavor.Memory),
+		}
+	}
+	flavorObject, diags := types.ObjectValue(flavorTypes, flavorValues)
+	if diags.HasError() {
+		return fmt.Errorf("creating flavor: %w", core.DiagsToError(diags))
+	}
+
+	var storageValues map[string]attr.Value
+	if instance.Storage == nil {
+		storageValues = map[string]attr.Value{
+			"class": storage.Class,
+			"size":  storage.Size,
+		}
+	} else {
+		// Pointer-safe for the same reason as the flavor fields above.
+		storageValues = map[string]attr.Value{
+			"class": types.StringPointerValue(instance.Storage.Class),
+			"size":  types.Int64PointerValue(instance.Storage.Size),
+		}
+	}
+	storageObject, diags := types.ObjectValue(storageTypes, storageValues)
+	if diags.HasError() {
+		return fmt.Errorf("creating storage: %w", core.DiagsToError(diags))
+	}
+
+	var optionsValues map[string]attr.Value
+	if instance.Options == nil {
+		optionsValues = map[string]attr.Value{
+			"edition":        options.Edition,
+			"retention_days": options.RetentionDays,
+		}
+	} else {
+		// The API returns options as a string map; parse the known keys and
+		// keep the model's values for keys the response does not contain.
+		retentionDays := options.RetentionDays
+		retentionDaysString, ok := (*instance.Options)["retentionDays"]
+		if ok {
+			retentionDaysValue, err := strconv.ParseInt(retentionDaysString, 10, 64)
+			if err != nil {
+				return fmt.Errorf("parse retentionDays to int64: %w", err)
+			}
+			retentionDays = types.Int64Value(retentionDaysValue)
+		}
+
+		edition := options.Edition
+		editionValue, ok := (*instance.Options)["edition"]
+		if ok {
+			edition = types.StringValue(editionValue)
+		}
+
+		optionsValues = map[string]attr.Value{
+			"edition":        edition,
+			"retention_days": retentionDays,
+		}
+	}
+	optionsObject, diags := types.ObjectValue(optionsTypes, optionsValues)
+	if diags.HasError() {
+		return fmt.Errorf("creating options: %w", core.DiagsToError(diags))
+	}
+
+	simplifiedModelBackupSchedule := utils.SimplifyBackupSchedule(model.BackupSchedule.ValueString())
+	// If the value returned by the API is different from the one in the model after simplification,
+	// we update the model so that it causes an error in Terraform
+	if simplifiedModelBackupSchedule != types.StringPointerValue(instance.BackupSchedule).ValueString() {
+		model.BackupSchedule = types.StringPointerValue(instance.BackupSchedule)
+	}
+
+	idParts := []string{
+		model.ProjectId.ValueString(),
+		instanceId,
+	}
+	model.Id = types.StringValue(
+		strings.Join(idParts, core.Separator),
+	)
+	model.InstanceId = types.StringValue(instanceId)
+	model.Name = types.StringPointerValue(instance.Name)
+	model.ACL = aclList
+	model.Flavor = flavorObject
+	model.Replicas = types.Int64PointerValue(instance.Replicas)
+	model.Storage = storageObject
+	model.Version = types.StringPointerValue(instance.Version)
+	model.Options = optionsObject
+	return nil
+}
+
+func toCreatePayload(model *Model, acl []string, flavor *flavorModel, storage *storageModel, options *optionsModel) (*sqlserverflex.CreateInstancePayload, error) {
+ if model == nil {
+ return nil, fmt.Errorf("nil model")
+ }
+ aclPayload := &sqlserverflex.CreateInstancePayloadAcl{}
+ if acl != nil {
+ aclPayload.Items = &acl
+ }
+ if flavor == nil {
+ return nil, fmt.Errorf("nil flavor")
+ }
+ storagePayload := &sqlserverflex.CreateInstancePayloadStorage{}
+ if storage != nil {
+ storagePayload.Class = conversion.StringValueToPointer(storage.Class)
+ storagePayload.Size = conversion.Int64ValueToPointer(storage.Size)
+ }
+ optionsPayload := &sqlserverflex.CreateInstancePayloadOptions{}
+ if options != nil {
+ optionsPayload.Edition = conversion.StringValueToPointer(options.Edition)
+ retentionDaysInt := conversion.Int64ValueToPointer(options.RetentionDays)
+ var retentionDays *string
+ if retentionDaysInt != nil {
+ retentionDays = coreUtils.Ptr(strconv.FormatInt(*retentionDaysInt, 10))
+ }
+ optionsPayload.RetentionDays = retentionDays
+ }
+
+ return &sqlserverflex.CreateInstancePayload{
+ Acl: aclPayload,
+ BackupSchedule: conversion.StringValueToPointer(model.BackupSchedule),
+ FlavorId: conversion.StringValueToPointer(flavor.Id),
+ Name: conversion.StringValueToPointer(model.Name),
+ Storage: storagePayload,
+ Version: conversion.StringValueToPointer(model.Version),
+ Options: optionsPayload,
+ }, nil
+}
+
+// toUpdatePayload builds the partial-update request body. Storage and
+// options are deliberately absent: changes to them force a replacement.
+func toUpdatePayload(model *Model, acl []string, flavor *flavorModel) (*sqlserverflex.PartialUpdateInstancePayload, error) {
+	if model == nil {
+		return nil, fmt.Errorf("nil model")
+	}
+	if flavor == nil {
+		return nil, fmt.Errorf("nil flavor")
+	}
+
+	aclPayload := &sqlserverflex.CreateInstancePayloadAcl{}
+	if acl != nil {
+		aclPayload.Items = &acl
+	}
+
+	return &sqlserverflex.PartialUpdateInstancePayload{
+		Acl:            aclPayload,
+		BackupSchedule: conversion.StringValueToPointer(model.BackupSchedule),
+		FlavorId:       conversion.StringValueToPointer(flavor.Id),
+		Name:           conversion.StringValueToPointer(model.Name),
+		Version:        conversion.StringValueToPointer(model.Version),
+	}, nil
+}
+
+// sqlserverflexClient is the minimal API surface required by loadFlavorId,
+// declared as an interface so unit tests can substitute a mock client.
+type sqlserverflexClient interface {
+	ListFlavorsExecute(ctx context.Context, projectId string) (*sqlserverflex.ListFlavorsResponse, error)
+}
+
+// loadFlavorId resolves flavor.Id (and Description) by matching the
+// configured CPU/RAM pair against the flavors available in the project.
+// On no match it returns an error listing the available specs.
+func loadFlavorId(ctx context.Context, client sqlserverflexClient, model *Model, flavor *flavorModel) error {
+	if model == nil {
+		return fmt.Errorf("nil model")
+	}
+	if flavor == nil {
+		return fmt.Errorf("nil flavor")
+	}
+	cpu := conversion.Int64ValueToPointer(flavor.CPU)
+	if cpu == nil {
+		return fmt.Errorf("nil CPU")
+	}
+	ram := conversion.Int64ValueToPointer(flavor.RAM)
+	if ram == nil {
+		return fmt.Errorf("nil RAM")
+	}
+
+	projectId := model.ProjectId.ValueString()
+	res, err := client.ListFlavorsExecute(ctx, projectId)
+	if err != nil {
+		return fmt.Errorf("listing sqlserverflex flavors: %w", err)
+	}
+
+	// avl accumulates the specs of non-matching flavors for the error message.
+	avl := ""
+	if res.Flavors == nil {
+		return fmt.Errorf("finding flavors for project %s", projectId)
+	}
+	for _, f := range *res.Flavors {
+		if f.Id == nil || f.Cpu == nil || f.Memory == nil {
+			continue
+		}
+		if *f.Cpu == *cpu && *f.Memory == *ram {
+			flavor.Id = types.StringValue(*f.Id)
+			// Description may be absent in the response; the pointer-safe
+			// constructor avoids a nil dereference panic.
+			flavor.Description = types.StringPointerValue(f.Description)
+			break
+		}
+		avl = fmt.Sprintf("%s\n- %d CPU, %d GB RAM", avl, *f.Cpu, *f.Memory)
+	}
+	if flavor.Id.ValueString() == "" {
+		return fmt.Errorf("couldn't find flavor, available specs are:%s", avl)
+	}
+
+	return nil
+}
diff --git a/stackit/internal/services/sqlserverflex/instance/resource_test.go b/stackit/internal/services/sqlserverflex/instance/resource_test.go
new file mode 100644
index 00000000..1d15aa63
--- /dev/null
+++ b/stackit/internal/services/sqlserverflex/instance/resource_test.go
@@ -0,0 +1,809 @@
+package sqlserverflex
+
+import (
+ "context"
+ "fmt"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/stackitcloud/stackit-sdk-go/core/utils"
+ "github.com/stackitcloud/stackit-sdk-go/services/sqlserverflex"
+)
+
+// sqlserverflexClientMocked implements sqlserverflexClient for unit tests:
+// it either fails unconditionally or returns a canned flavors response.
+type sqlserverflexClientMocked struct {
+	returnError     bool
+	listFlavorsResp *sqlserverflex.ListFlavorsResponse
+}
+
+// ListFlavorsExecute returns the mocked response, or an error when
+// returnError is set.
+func (c *sqlserverflexClientMocked) ListFlavorsExecute(_ context.Context, _ string) (*sqlserverflex.ListFlavorsResponse, error) {
+	if c.returnError {
+		return nil, fmt.Errorf("get flavors failed")
+	}
+
+	return c.listFlavorsResp, nil
+}
+
+// TestMapFields is a table-driven test for mapFields, covering: a fully
+// empty instance (fallback to the passed-in nested models), a fully
+// populated response, responses with missing flavor/storage, ACL order
+// reconciliation, and nil/invalid responses.
+func TestMapFields(t *testing.T) {
+	tests := []struct {
+		description string
+		state       Model
+		input       *sqlserverflex.GetInstanceResponse
+		flavor      *flavorModel
+		storage     *storageModel
+		options     *optionsModel
+		expected    Model
+		isValid     bool
+	}{
+		{
+			"default_values",
+			Model{
+				InstanceId: types.StringValue("iid"),
+				ProjectId:  types.StringValue("pid"),
+			},
+			&sqlserverflex.GetInstanceResponse{
+				Item: &sqlserverflex.Instance{},
+			},
+			&flavorModel{},
+			&storageModel{},
+			&optionsModel{},
+			Model{
+				Id:             types.StringValue("pid,iid"),
+				InstanceId:     types.StringValue("iid"),
+				ProjectId:      types.StringValue("pid"),
+				Name:           types.StringNull(),
+				ACL:            types.ListNull(types.StringType),
+				BackupSchedule: types.StringNull(),
+				Flavor: types.ObjectValueMust(flavorTypes, map[string]attr.Value{
+					"id":          types.StringNull(),
+					"description": types.StringNull(),
+					"cpu":         types.Int64Null(),
+					"ram":         types.Int64Null(),
+				}),
+				Replicas: types.Int64Null(),
+				Storage: types.ObjectValueMust(storageTypes, map[string]attr.Value{
+					"class": types.StringNull(),
+					"size":  types.Int64Null(),
+				}),
+				Options: types.ObjectValueMust(optionsTypes, map[string]attr.Value{
+					"edition":        types.StringNull(),
+					"retention_days": types.Int64Null(),
+				}),
+				Version: types.StringNull(),
+			},
+			true,
+		},
+		{
+			"simple_values",
+			Model{
+				InstanceId: types.StringValue("iid"),
+				ProjectId:  types.StringValue("pid"),
+			},
+			&sqlserverflex.GetInstanceResponse{
+				Item: &sqlserverflex.Instance{
+					Acl: &sqlserverflex.ACL{
+						Items: &[]string{
+							"ip1",
+							"ip2",
+							"",
+						},
+					},
+					BackupSchedule: utils.Ptr("schedule"),
+					Flavor: &sqlserverflex.Flavor{
+						Cpu:         utils.Ptr(int64(12)),
+						Description: utils.Ptr("description"),
+						Id:          utils.Ptr("flavor_id"),
+						Memory:      utils.Ptr(int64(34)),
+					},
+					Id:       utils.Ptr("iid"),
+					Name:     utils.Ptr("name"),
+					Replicas: utils.Ptr(int64(56)),
+					Status:   utils.Ptr("status"),
+					Storage: &sqlserverflex.Storage{
+						Class: utils.Ptr("class"),
+						Size:  utils.Ptr(int64(78)),
+					},
+					Options: &map[string]string{
+						"edition":       "edition",
+						"retentionDays": "1",
+					},
+					Version: utils.Ptr("version"),
+				},
+			},
+			&flavorModel{},
+			&storageModel{},
+			&optionsModel{},
+			Model{
+				Id:         types.StringValue("pid,iid"),
+				InstanceId: types.StringValue("iid"),
+				ProjectId:  types.StringValue("pid"),
+				Name:       types.StringValue("name"),
+				ACL: types.ListValueMust(types.StringType, []attr.Value{
+					types.StringValue("ip1"),
+					types.StringValue("ip2"),
+					types.StringValue(""),
+				}),
+				BackupSchedule: types.StringValue("schedule"),
+				Flavor: types.ObjectValueMust(flavorTypes, map[string]attr.Value{
+					"id":          types.StringValue("flavor_id"),
+					"description": types.StringValue("description"),
+					"cpu":         types.Int64Value(12),
+					"ram":         types.Int64Value(34),
+				}),
+				Replicas: types.Int64Value(56),
+				Storage: types.ObjectValueMust(storageTypes, map[string]attr.Value{
+					"class": types.StringValue("class"),
+					"size":  types.Int64Value(78),
+				}),
+				Options: types.ObjectValueMust(optionsTypes, map[string]attr.Value{
+					"edition":        types.StringValue("edition"),
+					"retention_days": types.Int64Value(1),
+				}),
+				Version: types.StringValue("version"),
+			},
+			true,
+		},
+		{
+			"simple_values_no_flavor_and_storage",
+			Model{
+				InstanceId: types.StringValue("iid"),
+				ProjectId:  types.StringValue("pid"),
+			},
+			&sqlserverflex.GetInstanceResponse{
+				Item: &sqlserverflex.Instance{
+					Acl: &sqlserverflex.ACL{
+						Items: &[]string{
+							"ip1",
+							"ip2",
+							"",
+						},
+					},
+					BackupSchedule: utils.Ptr("schedule"),
+					Flavor:         nil,
+					Id:             utils.Ptr("iid"),
+					Name:           utils.Ptr("name"),
+					Replicas:       utils.Ptr(int64(56)),
+					Status:         utils.Ptr("status"),
+					Storage:        nil,
+					Options: &map[string]string{
+						"edition":       "edition",
+						"retentionDays": "1",
+					},
+					Version: utils.Ptr("version"),
+				},
+			},
+			&flavorModel{
+				CPU: types.Int64Value(12),
+				RAM: types.Int64Value(34),
+			},
+			&storageModel{
+				Class: types.StringValue("class"),
+				Size:  types.Int64Value(78),
+			},
+			&optionsModel{
+				Edition:       types.StringValue("edition"),
+				RetentionDays: types.Int64Value(1),
+			},
+			Model{
+				Id:         types.StringValue("pid,iid"),
+				InstanceId: types.StringValue("iid"),
+				ProjectId:  types.StringValue("pid"),
+				Name:       types.StringValue("name"),
+				ACL: types.ListValueMust(types.StringType, []attr.Value{
+					types.StringValue("ip1"),
+					types.StringValue("ip2"),
+					types.StringValue(""),
+				}),
+				BackupSchedule: types.StringValue("schedule"),
+				Flavor: types.ObjectValueMust(flavorTypes, map[string]attr.Value{
+					"id":          types.StringNull(),
+					"description": types.StringNull(),
+					"cpu":         types.Int64Value(12),
+					"ram":         types.Int64Value(34),
+				}),
+				Replicas: types.Int64Value(56),
+				Storage: types.ObjectValueMust(storageTypes, map[string]attr.Value{
+					"class": types.StringValue("class"),
+					"size":  types.Int64Value(78),
+				}),
+				Options: types.ObjectValueMust(optionsTypes, map[string]attr.Value{
+					"edition":        types.StringValue("edition"),
+					"retention_days": types.Int64Value(1),
+				}),
+				Version: types.StringValue("version"),
+			},
+			true,
+		},
+		{
+			"acls_unordered",
+			Model{
+				InstanceId: types.StringValue("iid"),
+				ProjectId:  types.StringValue("pid"),
+				ACL: types.ListValueMust(types.StringType, []attr.Value{
+					types.StringValue("ip2"),
+					types.StringValue(""),
+					types.StringValue("ip1"),
+				}),
+			},
+			&sqlserverflex.GetInstanceResponse{
+				Item: &sqlserverflex.Instance{
+					Acl: &sqlserverflex.ACL{
+						Items: &[]string{
+							"",
+							"ip1",
+							"ip2",
+						},
+					},
+					BackupSchedule: utils.Ptr("schedule"),
+					Flavor:         nil,
+					Id:             utils.Ptr("iid"),
+					Name:           utils.Ptr("name"),
+					Replicas:       utils.Ptr(int64(56)),
+					Status:         utils.Ptr("status"),
+					Storage:        nil,
+					Options: &map[string]string{
+						"edition":       "edition",
+						"retentionDays": "1",
+					},
+					Version: utils.Ptr("version"),
+				},
+			},
+			&flavorModel{
+				CPU: types.Int64Value(12),
+				RAM: types.Int64Value(34),
+			},
+			&storageModel{
+				Class: types.StringValue("class"),
+				Size:  types.Int64Value(78),
+			},
+			&optionsModel{},
+			Model{
+				Id:         types.StringValue("pid,iid"),
+				InstanceId: types.StringValue("iid"),
+				ProjectId:  types.StringValue("pid"),
+				Name:       types.StringValue("name"),
+				ACL: types.ListValueMust(types.StringType, []attr.Value{
+					types.StringValue("ip2"),
+					types.StringValue(""),
+					types.StringValue("ip1"),
+				}),
+				BackupSchedule: types.StringValue("schedule"),
+				Flavor: types.ObjectValueMust(flavorTypes, map[string]attr.Value{
+					"id":          types.StringNull(),
+					"description": types.StringNull(),
+					"cpu":         types.Int64Value(12),
+					"ram":         types.Int64Value(34),
+				}),
+				Replicas: types.Int64Value(56),
+				Storage: types.ObjectValueMust(storageTypes, map[string]attr.Value{
+					"class": types.StringValue("class"),
+					"size":  types.Int64Value(78),
+				}),
+				Options: types.ObjectValueMust(optionsTypes, map[string]attr.Value{
+					"edition":        types.StringValue("edition"),
+					"retention_days": types.Int64Value(1),
+				}),
+				Version: types.StringValue("version"),
+			},
+			true,
+		},
+		{
+			"nil_response",
+			Model{
+				InstanceId: types.StringValue("iid"),
+				ProjectId:  types.StringValue("pid"),
+			},
+			nil,
+			&flavorModel{},
+			&storageModel{},
+			&optionsModel{},
+			Model{},
+			false,
+		},
+		{
+			"no_resource_id",
+			Model{
+				InstanceId: types.StringValue("iid"),
+				ProjectId:  types.StringValue("pid"),
+			},
+			&sqlserverflex.GetInstanceResponse{},
+			&flavorModel{},
+			&storageModel{},
+			&optionsModel{},
+			Model{},
+			false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.description, func(t *testing.T) {
+			err := mapFields(context.Background(), tt.input, &tt.state, tt.flavor, tt.storage, tt.options)
+			if !tt.isValid && err == nil {
+				t.Fatalf("Should have failed")
+			}
+			if tt.isValid && err != nil {
+				t.Fatalf("Should not have failed: %v", err)
+			}
+			if tt.isValid {
+				diff := cmp.Diff(tt.state, tt.expected)
+				if diff != "" {
+					t.Fatalf("Data does not match: %s", diff)
+				}
+			}
+		})
+	}
+}
+
+// TestToCreatePayload is a table-driven test for toCreatePayload, covering
+// empty/populated/null models and nil variants of each nested input
+// (only a nil model or nil flavor is expected to error).
+func TestToCreatePayload(t *testing.T) {
+	tests := []struct {
+		description  string
+		input        *Model
+		inputAcl     []string
+		inputFlavor  *flavorModel
+		inputStorage *storageModel
+		inputOptions *optionsModel
+		expected     *sqlserverflex.CreateInstancePayload
+		isValid      bool
+	}{
+		{
+			"default_values",
+			&Model{},
+			[]string{},
+			&flavorModel{},
+			&storageModel{},
+			&optionsModel{},
+			&sqlserverflex.CreateInstancePayload{
+				Acl: &sqlserverflex.CreateInstancePayloadAcl{
+					Items: &[]string{},
+				},
+				Storage: &sqlserverflex.CreateInstancePayloadStorage{},
+				Options: &sqlserverflex.CreateInstancePayloadOptions{},
+			},
+			true,
+		},
+		{
+			"simple_values",
+			&Model{
+				BackupSchedule: types.StringValue("schedule"),
+				Name:           types.StringValue("name"),
+				Replicas:       types.Int64Value(12),
+				Version:        types.StringValue("version"),
+			},
+			[]string{
+				"ip_1",
+				"ip_2",
+			},
+			&flavorModel{
+				Id: types.StringValue("flavor_id"),
+			},
+			&storageModel{
+				Class: types.StringValue("class"),
+				Size:  types.Int64Value(34),
+			},
+			&optionsModel{
+				Edition:       types.StringValue("edition"),
+				RetentionDays: types.Int64Value(1),
+			},
+			&sqlserverflex.CreateInstancePayload{
+				Acl: &sqlserverflex.CreateInstancePayloadAcl{
+					Items: &[]string{
+						"ip_1",
+						"ip_2",
+					},
+				},
+				BackupSchedule: utils.Ptr("schedule"),
+				FlavorId:       utils.Ptr("flavor_id"),
+				Name:           utils.Ptr("name"),
+				Storage: &sqlserverflex.CreateInstancePayloadStorage{
+					Class: utils.Ptr("class"),
+					Size:  utils.Ptr(int64(34)),
+				},
+				Options: &sqlserverflex.CreateInstancePayloadOptions{
+					Edition:       utils.Ptr("edition"),
+					RetentionDays: utils.Ptr("1"),
+				},
+				Version: utils.Ptr("version"),
+			},
+			true,
+		},
+		{
+			"null_fields_and_int_conversions",
+			&Model{
+				BackupSchedule: types.StringNull(),
+				Name:           types.StringNull(),
+				Replicas:       types.Int64Value(2123456789),
+				Version:        types.StringNull(),
+			},
+			[]string{
+				"",
+			},
+			&flavorModel{
+				Id: types.StringNull(),
+			},
+			&storageModel{
+				Class: types.StringNull(),
+				Size:  types.Int64Null(),
+			},
+			&optionsModel{
+				Edition:       types.StringNull(),
+				RetentionDays: types.Int64Null(),
+			},
+			&sqlserverflex.CreateInstancePayload{
+				Acl: &sqlserverflex.CreateInstancePayloadAcl{
+					Items: &[]string{
+						"",
+					},
+				},
+				BackupSchedule: nil,
+				FlavorId:       nil,
+				Name:           nil,
+				Storage: &sqlserverflex.CreateInstancePayloadStorage{
+					Class: nil,
+					Size:  nil,
+				},
+				Options: &sqlserverflex.CreateInstancePayloadOptions{},
+				Version: nil,
+			},
+			true,
+		},
+		{
+			"nil_model",
+			nil,
+			[]string{},
+			&flavorModel{},
+			&storageModel{},
+			&optionsModel{},
+			nil,
+			false,
+		},
+		{
+			"nil_acl",
+			&Model{},
+			nil,
+			&flavorModel{},
+			&storageModel{},
+			&optionsModel{},
+			&sqlserverflex.CreateInstancePayload{
+				Acl:     &sqlserverflex.CreateInstancePayloadAcl{},
+				Storage: &sqlserverflex.CreateInstancePayloadStorage{},
+				Options: &sqlserverflex.CreateInstancePayloadOptions{},
+			},
+			true,
+		},
+		{
+			"nil_flavor",
+			&Model{},
+			[]string{},
+			nil,
+			&storageModel{},
+			&optionsModel{},
+			nil,
+			false,
+		},
+		{
+			"nil_storage",
+			&Model{},
+			[]string{},
+			&flavorModel{},
+			nil,
+			&optionsModel{},
+			&sqlserverflex.CreateInstancePayload{
+				Acl: &sqlserverflex.CreateInstancePayloadAcl{
+					Items: &[]string{},
+				},
+				Storage: &sqlserverflex.CreateInstancePayloadStorage{},
+				Options: &sqlserverflex.CreateInstancePayloadOptions{},
+			},
+			true,
+		},
+		{
+			"nil_options",
+			&Model{},
+			[]string{},
+			&flavorModel{},
+			&storageModel{},
+			nil,
+			&sqlserverflex.CreateInstancePayload{
+				Acl: &sqlserverflex.CreateInstancePayloadAcl{
+					Items: &[]string{},
+				},
+				Storage: &sqlserverflex.CreateInstancePayloadStorage{},
+				Options: &sqlserverflex.CreateInstancePayloadOptions{},
+			},
+			true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.description, func(t *testing.T) {
+			output, err := toCreatePayload(tt.input, tt.inputAcl, tt.inputFlavor, tt.inputStorage, tt.inputOptions)
+			if !tt.isValid && err == nil {
+				t.Fatalf("Should have failed")
+			}
+			if tt.isValid && err != nil {
+				t.Fatalf("Should not have failed: %v", err)
+			}
+			if tt.isValid {
+				diff := cmp.Diff(output, tt.expected)
+				if diff != "" {
+					t.Fatalf("Data does not match: %s", diff)
+				}
+			}
+		})
+	}
+}
+
+// TestToUpdatePayload is a table-driven test for toUpdatePayload,
+// mirroring the create-payload cases minus storage/options (which are
+// not part of the partial update).
+func TestToUpdatePayload(t *testing.T) {
+	tests := []struct {
+		description string
+		input       *Model
+		inputAcl    []string
+		inputFlavor *flavorModel
+		expected    *sqlserverflex.PartialUpdateInstancePayload
+		isValid     bool
+	}{
+		{
+			"default_values",
+			&Model{},
+			[]string{},
+			&flavorModel{},
+			&sqlserverflex.PartialUpdateInstancePayload{
+				Acl: &sqlserverflex.CreateInstancePayloadAcl{
+					Items: &[]string{},
+				},
+			},
+			true,
+		},
+		{
+			"simple_values",
+			&Model{
+				BackupSchedule: types.StringValue("schedule"),
+				Name:           types.StringValue("name"),
+				Replicas:       types.Int64Value(12),
+				Version:        types.StringValue("version"),
+			},
+			[]string{
+				"ip_1",
+				"ip_2",
+			},
+			&flavorModel{
+				Id: types.StringValue("flavor_id"),
+			},
+			&sqlserverflex.PartialUpdateInstancePayload{
+				Acl: &sqlserverflex.CreateInstancePayloadAcl{
+					Items: &[]string{
+						"ip_1",
+						"ip_2",
+					},
+				},
+				BackupSchedule: utils.Ptr("schedule"),
+				FlavorId:       utils.Ptr("flavor_id"),
+				Name:           utils.Ptr("name"),
+				Version:        utils.Ptr("version"),
+			},
+			true,
+		},
+		{
+			"null_fields_and_int_conversions",
+			&Model{
+				BackupSchedule: types.StringNull(),
+				Name:           types.StringNull(),
+				Replicas:       types.Int64Value(2123456789),
+				Version:        types.StringNull(),
+			},
+			[]string{
+				"",
+			},
+			&flavorModel{
+				Id: types.StringNull(),
+			},
+			&sqlserverflex.PartialUpdateInstancePayload{
+				Acl: &sqlserverflex.CreateInstancePayloadAcl{
+					Items: &[]string{
+						"",
+					},
+				},
+				BackupSchedule: nil,
+				FlavorId:       nil,
+				Name:           nil,
+				Version:        nil,
+			},
+			true,
+		},
+		{
+			"nil_model",
+			nil,
+			[]string{},
+			&flavorModel{},
+			nil,
+			false,
+		},
+		{
+			"nil_acl",
+			&Model{},
+			nil,
+			&flavorModel{},
+			&sqlserverflex.PartialUpdateInstancePayload{
+				Acl: &sqlserverflex.CreateInstancePayloadAcl{},
+			},
+			true,
+		},
+		{
+			"nil_flavor",
+			&Model{},
+			[]string{},
+			nil,
+			nil,
+			false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.description, func(t *testing.T) {
+			output, err := toUpdatePayload(tt.input, tt.inputAcl, tt.inputFlavor)
+			if !tt.isValid && err == nil {
+				t.Fatalf("Should have failed")
+			}
+			if tt.isValid && err != nil {
+				t.Fatalf("Should not have failed: %v", err)
+			}
+			if tt.isValid {
+				diff := cmp.Diff(output, tt.expected)
+				if diff != "" {
+					t.Fatalf("Data does not match: %s", diff)
+				}
+			}
+		})
+	}
+}
+
+// TestLoadFlavorId is a table-driven test for loadFlavorId using a mocked
+// flavors client: matching flavor found (single and multiple candidates),
+// no match, nil flavors list, and an API error.
+func TestLoadFlavorId(t *testing.T) {
+	tests := []struct {
+		description     string
+		inputFlavor     *flavorModel
+		mockedResp      *sqlserverflex.ListFlavorsResponse
+		expected        *flavorModel
+		getFlavorsFails bool
+		isValid         bool
+	}{
+		{
+			"ok_flavor",
+			&flavorModel{
+				CPU: types.Int64Value(2),
+				RAM: types.Int64Value(8),
+			},
+			&sqlserverflex.ListFlavorsResponse{
+				Flavors: &[]sqlserverflex.InstanceFlavorEntry{
+					{
+						Id:          utils.Ptr("fid-1"),
+						Cpu:         utils.Ptr(int64(2)),
+						Description: utils.Ptr("description"),
+						Memory:      utils.Ptr(int64(8)),
+					},
+				},
+			},
+			&flavorModel{
+				Id:          types.StringValue("fid-1"),
+				Description: types.StringValue("description"),
+				CPU:         types.Int64Value(2),
+				RAM:         types.Int64Value(8),
+			},
+			false,
+			true,
+		},
+		{
+			"ok_flavor_2",
+			&flavorModel{
+				CPU: types.Int64Value(2),
+				RAM: types.Int64Value(8),
+			},
+			&sqlserverflex.ListFlavorsResponse{
+				Flavors: &[]sqlserverflex.InstanceFlavorEntry{
+					{
+						Id:          utils.Ptr("fid-1"),
+						Cpu:         utils.Ptr(int64(2)),
+						Description: utils.Ptr("description"),
+						Memory:      utils.Ptr(int64(8)),
+					},
+					{
+						Id:          utils.Ptr("fid-2"),
+						Cpu:         utils.Ptr(int64(1)),
+						Description: utils.Ptr("description"),
+						Memory:      utils.Ptr(int64(4)),
+					},
+				},
+			},
+			&flavorModel{
+				Id:          types.StringValue("fid-1"),
+				Description: types.StringValue("description"),
+				CPU:         types.Int64Value(2),
+				RAM:         types.Int64Value(8),
+			},
+			false,
+			true,
+		},
+		{
+			"no_matching_flavor",
+			&flavorModel{
+				CPU: types.Int64Value(2),
+				RAM: types.Int64Value(8),
+			},
+			&sqlserverflex.ListFlavorsResponse{
+				Flavors: &[]sqlserverflex.InstanceFlavorEntry{
+					{
+						Id:          utils.Ptr("fid-1"),
+						Cpu:         utils.Ptr(int64(1)),
+						Description: utils.Ptr("description"),
+						Memory:      utils.Ptr(int64(8)),
+					},
+					{
+						Id:          utils.Ptr("fid-2"),
+						Cpu:         utils.Ptr(int64(1)),
+						Description: utils.Ptr("description"),
+						Memory:      utils.Ptr(int64(4)),
+					},
+				},
+			},
+			&flavorModel{
+				CPU: types.Int64Value(2),
+				RAM: types.Int64Value(8),
+			},
+			false,
+			false,
+		},
+		{
+			"nil_response",
+			&flavorModel{
+				CPU: types.Int64Value(2),
+				RAM: types.Int64Value(8),
+			},
+			&sqlserverflex.ListFlavorsResponse{},
+			&flavorModel{
+				CPU: types.Int64Value(2),
+				RAM: types.Int64Value(8),
+			},
+			false,
+			false,
+		},
+		{
+			"error_response",
+			&flavorModel{
+				CPU: types.Int64Value(2),
+				RAM: types.Int64Value(8),
+			},
+			&sqlserverflex.ListFlavorsResponse{},
+			&flavorModel{
+				CPU: types.Int64Value(2),
+				RAM: types.Int64Value(8),
+			},
+			true,
+			false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.description, func(t *testing.T) {
+			client := &sqlserverflexClientMocked{
+				returnError:     tt.getFlavorsFails,
+				listFlavorsResp: tt.mockedResp,
+			}
+			model := &Model{
+				ProjectId: types.StringValue("pid"),
+			}
+			flavorModel := &flavorModel{
+				CPU: tt.inputFlavor.CPU,
+				RAM: tt.inputFlavor.RAM,
+			}
+			err := loadFlavorId(context.Background(), client, model, flavorModel)
+			if !tt.isValid && err == nil {
+				t.Fatalf("Should have failed")
+			}
+			if tt.isValid && err != nil {
+				t.Fatalf("Should not have failed: %v", err)
+			}
+			if tt.isValid {
+				diff := cmp.Diff(flavorModel, tt.expected)
+				if diff != "" {
+					t.Fatalf("Data does not match: %s", diff)
+				}
+			}
+		})
+	}
+}
diff --git a/stackit/internal/services/sqlserverflex/sqlserverflex_acc_test.go b/stackit/internal/services/sqlserverflex/sqlserverflex_acc_test.go
new file mode 100644
index 00000000..f6ce798b
--- /dev/null
+++ b/stackit/internal/services/sqlserverflex/sqlserverflex_acc_test.go
@@ -0,0 +1,246 @@
+package sqlserverflex_test
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "testing"
+
+ "github.com/hashicorp/terraform-plugin-testing/helper/acctest"
+ "github.com/hashicorp/terraform-plugin-testing/helper/resource"
+ "github.com/hashicorp/terraform-plugin-testing/terraform"
+ "github.com/stackitcloud/stackit-sdk-go/core/utils"
+
+ "github.com/stackitcloud/stackit-sdk-go/core/config"
+ "github.com/stackitcloud/stackit-sdk-go/services/sqlserverflex"
+ "github.com/stackitcloud/stackit-sdk-go/services/sqlserverflex/wait"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core"
+ "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/testutil"
+)
+
+// Instance resource data
+var instanceResource = map[string]string{
+ "project_id": testutil.ProjectId,
+ "name": fmt.Sprintf("tf-acc-%s", acctest.RandStringFromCharSet(7, acctest.CharSetAlphaNum)), // random suffix avoids name collisions between test runs
+ "acl": "192.168.0.0/16",
+ "flavor_cpu": "4",
+ "flavor_ram": "16",
+ "flavor_description": "SQLServer-Flex-4.16-Standard-EU01",
+ "storage_class": "premium-perf2-stackit",
+ "storage_size": "40",
+ "version": "2022",
+ "replicas": "1",
+ "options_edition": "developer",
+ "options_retention_days": "64",
+ "flavor_id": "4.16-Single", // flavor ID the API is expected to resolve for the cpu/ram combination above
+ "backup_schedule": "00 6 * * *",
+ "backup_schedule_updated": "00 12 * * *", // used by the update step of the acceptance test
+}
+
+func configResources(backupSchedule string) string { // renders the provider config plus one instance resource with the given backup schedule
+ return fmt.Sprintf(`
+ %s
+
+ resource "stackit_sqlserverflex_instance" "instance" {
+ project_id = "%s"
+ name = "%s"
+ acl = ["%s"]
+ flavor = {
+ cpu = %s
+ ram = %s
+ }
+ storage = {
+ class = "%s"
+ size = %s
+ }
+ version = "%s"
+ options = {
+ edition = "%s"
+ retention_days = %s
+ }
+ backup_schedule = "%s"
+ }
+ `,
+ testutil.SQLServerFlexProviderConfig(),
+ instanceResource["project_id"],
+ instanceResource["name"],
+ instanceResource["acl"],
+ instanceResource["flavor_cpu"],
+ instanceResource["flavor_ram"],
+ instanceResource["storage_class"],
+ instanceResource["storage_size"],
+ instanceResource["version"],
+ instanceResource["options_edition"],
+ instanceResource["options_retention_days"],
+ backupSchedule, // only the backup schedule varies between the create and update steps
+ )
+}
+
+// TestAccSQLServerFlexResource exercises create, data source read, import, and update of the instance resource.
+func TestAccSQLServerFlexResource(t *testing.T) {
+ resource.Test(t, resource.TestCase{
+ ProtoV6ProviderFactories: testutil.TestAccProtoV6ProviderFactories,
+ CheckDestroy: testAccChecksqlserverflexDestroy,
+ Steps: []resource.TestStep{
+ // Creation
+ {
+ Config: configResources(instanceResource["backup_schedule"]),
+ Check: resource.ComposeAggregateTestCheckFunc(
+ // Instance
+ resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "project_id", instanceResource["project_id"]),
+ resource.TestCheckResourceAttrSet("stackit_sqlserverflex_instance.instance", "instance_id"),
+ resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "name", instanceResource["name"]),
+ resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "acl.#", "1"),
+ resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "acl.0", instanceResource["acl"]),
+ resource.TestCheckResourceAttrSet("stackit_sqlserverflex_instance.instance", "flavor.id"),
+ resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "flavor.description", instanceResource["flavor_description"]),
+ resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "replicas", instanceResource["replicas"]),
+ resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "flavor.cpu", instanceResource["flavor_cpu"]),
+ resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "flavor.ram", instanceResource["flavor_ram"]),
+ resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "storage.class", instanceResource["storage_class"]),
+ resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "storage.size", instanceResource["storage_size"]),
+ resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "version", instanceResource["version"]),
+ resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "options.edition", instanceResource["options_edition"]),
+ resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "options.retention_days", instanceResource["options_retention_days"]),
+ resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "backup_schedule", instanceResource["backup_schedule"]),
+ ),
+ },
+ // data source
+ {
+ Config: fmt.Sprintf(`
+ %s
+
+ data "stackit_sqlserverflex_instance" "instance" {
+ project_id = stackit_sqlserverflex_instance.instance.project_id
+ instance_id = stackit_sqlserverflex_instance.instance.instance_id
+ }
+ `,
+ configResources(instanceResource["backup_schedule"]),
+ ),
+ Check: resource.ComposeAggregateTestCheckFunc(
+ // Instance data
+ resource.TestCheckResourceAttr("data.stackit_sqlserverflex_instance.instance", "project_id", instanceResource["project_id"]),
+ resource.TestCheckResourceAttr("data.stackit_sqlserverflex_instance.instance", "name", instanceResource["name"]),
+ resource.TestCheckResourceAttrPair(
+ "data.stackit_sqlserverflex_instance.instance", "project_id",
+ "stackit_sqlserverflex_instance.instance", "project_id",
+ ),
+ resource.TestCheckResourceAttrPair(
+ "data.stackit_sqlserverflex_instance.instance", "instance_id",
+ "stackit_sqlserverflex_instance.instance", "instance_id",
+ ),
+ resource.TestCheckResourceAttr("data.stackit_sqlserverflex_instance.instance", "acl.#", "1"),
+ resource.TestCheckResourceAttr("data.stackit_sqlserverflex_instance.instance", "acl.0", instanceResource["acl"]),
+ resource.TestCheckResourceAttr("data.stackit_sqlserverflex_instance.instance", "flavor.id", instanceResource["flavor_id"]),
+ resource.TestCheckResourceAttr("data.stackit_sqlserverflex_instance.instance", "flavor.description", instanceResource["flavor_description"]),
+ resource.TestCheckResourceAttr("data.stackit_sqlserverflex_instance.instance", "flavor.cpu", instanceResource["flavor_cpu"]),
+ resource.TestCheckResourceAttr("data.stackit_sqlserverflex_instance.instance", "flavor.ram", instanceResource["flavor_ram"]),
+ resource.TestCheckResourceAttr("data.stackit_sqlserverflex_instance.instance", "replicas", instanceResource["replicas"]),
+ resource.TestCheckResourceAttr("data.stackit_sqlserverflex_instance.instance", "options.edition", instanceResource["options_edition"]),
+ resource.TestCheckResourceAttr("data.stackit_sqlserverflex_instance.instance", "options.retention_days", instanceResource["options_retention_days"]),
+ resource.TestCheckResourceAttr("data.stackit_sqlserverflex_instance.instance", "backup_schedule", instanceResource["backup_schedule"]),
+ ),
+ },
+ // Import
+ {
+ ResourceName: "stackit_sqlserverflex_instance.instance",
+ ImportStateIdFunc: func(s *terraform.State) (string, error) {
+ r, ok := s.RootModule().Resources["stackit_sqlserverflex_instance.instance"]
+ if !ok {
+ return "", fmt.Errorf("couldn't find resource stackit_sqlserverflex_instance.instance")
+ }
+ instanceId, ok := r.Primary.Attributes["instance_id"]
+ if !ok {
+ return "", fmt.Errorf("couldn't find attribute instance_id")
+ }
+
+ return fmt.Sprintf("%s,%s", testutil.ProjectId, instanceId), nil
+ },
+ ImportState: true,
+ ImportStateVerify: true,
+ ImportStateVerifyIgnore: []string{"backup_schedule"},
+ ImportStateCheck: func(s []*terraform.InstanceState) error {
+ if len(s) != 1 {
+ return fmt.Errorf("expected 1 state, got %d", len(s))
+ }
+ if s[0].Attributes["backup_schedule"] != instanceResource["backup_schedule"] {
+ return fmt.Errorf("expected backup_schedule %s, got %s", instanceResource["backup_schedule"], s[0].Attributes["backup_schedule"])
+ }
+ return nil
+ },
+ },
+ // Update
+ {
+ Config: configResources(instanceResource["backup_schedule_updated"]),
+ Check: resource.ComposeAggregateTestCheckFunc(
+ // Instance data
+ resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "project_id", instanceResource["project_id"]),
+ resource.TestCheckResourceAttrSet("stackit_sqlserverflex_instance.instance", "instance_id"),
+ resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "name", instanceResource["name"]),
+ resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "acl.#", "1"),
+ resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "acl.0", instanceResource["acl"]),
+ resource.TestCheckResourceAttrSet("stackit_sqlserverflex_instance.instance", "flavor.id"),
+ resource.TestCheckResourceAttrSet("stackit_sqlserverflex_instance.instance", "flavor.description"),
+ resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "flavor.cpu", instanceResource["flavor_cpu"]),
+ resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "flavor.ram", instanceResource["flavor_ram"]),
+ resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "replicas", instanceResource["replicas"]),
+ resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "storage.class", instanceResource["storage_class"]),
+ resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "storage.size", instanceResource["storage_size"]),
+ resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "version", instanceResource["version"]),
+ resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "options.edition", instanceResource["options_edition"]),
+ resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "options.retention_days", instanceResource["options_retention_days"]),
+ resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "backup_schedule", instanceResource["backup_schedule_updated"]),
+ ),
+ },
+ // Deletion is done by the framework implicitly
+ },
+ })
+}
+
+func testAccChecksqlserverflexDestroy(s *terraform.State) error { // ensures no test instances survive; deletes any leftovers itself
+ ctx := context.Background()
+ var client *sqlserverflex.APIClient
+ var err error
+ if testutil.SQLServerFlexCustomEndpoint == "" {
+ client, err = sqlserverflex.NewAPIClient()
+ } else {
+ client, err = sqlserverflex.NewAPIClient(
+ config.WithEndpoint(testutil.SQLServerFlexCustomEndpoint),
+ )
+ }
+ if err != nil {
+ return fmt.Errorf("creating client: %w", err)
+ }
+
+ instancesToDestroy := []string{}
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "stackit_sqlserverflex_instance" {
+ continue
+ }
+ // instance terraform ID: = "[project_id],[instance_id]"
+ instanceId := strings.Split(rs.Primary.ID, core.Separator)[1] // NOTE(review): assumes a well-formed ID; index [1] panics otherwise — confirm IDs are always set here
+ instancesToDestroy = append(instancesToDestroy, instanceId)
+ }
+
+ instancesResp, err := client.ListInstances(ctx, testutil.ProjectId).Execute()
+ if err != nil {
+ return fmt.Errorf("getting instancesResp: %w", err)
+ }
+
+ items := *instancesResp.Items // NOTE(review): assumes Items is non-nil on a successful response — verify against the SDK
+ for i := range items {
+ if items[i].Id == nil {
+ continue
+ }
+ if utils.Contains(instancesToDestroy, *items[i].Id) { // only delete instances that belong to this test run
+ err := client.DeleteInstanceExecute(ctx, testutil.ProjectId, *items[i].Id)
+ if err != nil {
+ return fmt.Errorf("destroying instance %s during CheckDestroy: %w", *items[i].Id, err)
+ }
+ _, err = wait.DeleteInstanceWaitHandler(ctx, client, testutil.ProjectId, *items[i].Id).WaitWithContext(ctx)
+ if err != nil {
+ return fmt.Errorf("destroying instance %s during CheckDestroy: waiting for deletion %w", *items[i].Id, err)
+ }
+ }
+ }
+ return nil
+}
diff --git a/stackit/internal/testutil/testutil.go b/stackit/internal/testutil/testutil.go
index 7add8b97..d7d93380 100644
--- a/stackit/internal/testutil/testutil.go
+++ b/stackit/internal/testutil/testutil.go
@@ -52,6 +52,7 @@ var (
RedisCustomEndpoint = os.Getenv("TF_ACC_REDIS_CUSTOM_ENDPOINT")
ResourceManagerCustomEndpoint = os.Getenv("TF_ACC_RESOURCEMANAGER_CUSTOM_ENDPOINT")
SecretsManagerCustomEndpoint = os.Getenv("TF_ACC_SECRETSMANAGER_CUSTOM_ENDPOINT")
+ SQLServerFlexCustomEndpoint = os.Getenv("TF_ACC_SQLSERVERFLEX_CUSTOM_ENDPOINT")
SKECustomEndpoint = os.Getenv("TF_ACC_SKE_CUSTOM_ENDPOINT")
// OpenStack user domain name
@@ -294,6 +295,21 @@ func SecretsManagerProviderConfig() string {
)
}
+func SQLServerFlexProviderConfig() string { // returns the provider block for SQL Server Flex acceptance tests
+ if SQLServerFlexCustomEndpoint == "" { // fixed: previously checked MongoDBFlexCustomEndpoint (copy-paste bug)
+ return `
+ provider "stackit" {
+ region = "eu01"
+ }`
+ }
+ return fmt.Sprintf(`
+ provider "stackit" {
+ sqlserverflex_custom_endpoint = "%s"
+ }`,
+ SQLServerFlexCustomEndpoint,
+ )
+}
+
func SKEProviderConfig() string {
if SKECustomEndpoint == "" {
return `
diff --git a/stackit/internal/utils/utils.go b/stackit/internal/utils/utils.go
index bf200258..38664b67 100644
--- a/stackit/internal/utils/utils.go
+++ b/stackit/internal/utils/utils.go
@@ -2,6 +2,8 @@ package utils
import (
"fmt"
+ "regexp"
+ "strings"
"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
@@ -60,3 +62,17 @@ func ListValuetoStringSlice(list basetypes.ListValue) ([]string, error) {
return result, nil
}
+
+// SimplifyBackupSchedule removes leading 0s from backup schedule numbers (e.g. "00 00 * * *" becomes "0 0 * * *").
+// Needed as the API does it internally and would otherwise cause an inconsistent result in Terraform.
+func SimplifyBackupSchedule(schedule string) string {
+ regex := regexp.MustCompile(`\d+`) // Matches each full run of digits; avoids mangling interior zeros (the old `0+\d+` turned "100" into "10")
+ simplifiedSchedule := regex.ReplaceAllStringFunc(schedule, func(match string) string {
+ simplified := strings.TrimLeft(match, "0")
+ if simplified == "" { // the number was all zeros, e.g. "00" -> "0"
+ simplified = "0"
+ }
+ return simplified
+ })
+ return simplifiedSchedule
+}
diff --git a/stackit/internal/utils/utils_test.go b/stackit/internal/utils/utils_test.go
index 206647b3..a62e9651 100644
--- a/stackit/internal/utils/utils_test.go
+++ b/stackit/internal/utils/utils_test.go
@@ -120,3 +120,75 @@ func TestListValuetoStrSlice(t *testing.T) {
})
}
}
+
+func TestSimplifyBackupSchedule(t *testing.T) { // table-driven: input cron string vs. expected simplified form
+ tests := []struct {
+ description string
+ input string
+ expected string
+ }{
+ {
+ "simple schedule",
+ "0 0 * * *",
+ "0 0 * * *",
+ },
+ {
+ "schedule with leading zeros",
+ "00 00 * * *",
+ "0 0 * * *",
+ },
+ {
+ "schedule with leading zeros 2",
+ "00 001 * * *",
+ "0 1 * * *",
+ },
+ {
+ "schedule with leading zeros 3",
+ "00 0010 * * *",
+ "0 10 * * *",
+ },
+ {
+ "simple schedule with slash",
+ "0 0/6 * * *",
+ "0 0/6 * * *",
+ },
+ {
+ "schedule with leading zeros and slash",
+ "00 00/6 * * *",
+ "0 0/6 * * *",
+ },
+ {
+ "schedule with leading zeros and slash 2",
+ "00 001/06 * * *",
+ "0 1/6 * * *",
+ },
+ {
+ "simple schedule with comma",
+ "0 10,15 * * *",
+ "0 10,15 * * *",
+ },
+ {
+ "schedule with leading zeros and comma",
+ "0 010,0015 * * *",
+ "0 10,15 * * *",
+ },
+ {
+ "simple schedule with comma and slash",
+ "0 0-11/10 * * *",
+ "0 0-11/10 * * *",
+ },
+ {
+ "schedule with leading zeros, comma, and slash",
+ "00 000-011/010 * * *",
+ "0 0-11/10 * * *",
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.description, func(t *testing.T) {
+ output := SimplifyBackupSchedule(tt.input)
+ if output != tt.expected { // exact string comparison; any difference fails the subtest
+ t.Fatalf("Data does not match: %s", output)
+ }
+ })
+ }
+}
diff --git a/stackit/provider.go b/stackit/provider.go
index b684b4f5..b3513c19 100644
--- a/stackit/provider.go
+++ b/stackit/provider.go
@@ -43,6 +43,7 @@ import (
skeCluster "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/ske/cluster"
skeKubeconfig "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/ske/kubeconfig"
skeProject "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/ske/project"
+ sqlServerFlexInstance "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/sqlserverflex/instance"
sdkauth "github.com/stackitcloud/stackit-sdk-go/core/auth"
"github.com/stackitcloud/stackit-sdk-go/core/config"
@@ -83,6 +84,7 @@ type providerModel struct {
PrivateKeyPath types.String `tfsdk:"private_key_path"`
Token types.String `tfsdk:"service_account_token"`
Region types.String `tfsdk:"region"`
+ ArgusCustomEndpoint types.String `tfsdk:"argus_custom_endpoint"`
DNSCustomEndpoint types.String `tfsdk:"dns_custom_endpoint"`
IaaSCustomEndpoint types.String `tfsdk:"iaas_custom_endpoint"`
PostgreSQLCustomEndpoint types.String `tfsdk:"postgresql_custom_endpoint"`
@@ -96,7 +98,7 @@ type providerModel struct {
OpenSearchCustomEndpoint types.String `tfsdk:"opensearch_custom_endpoint"`
RedisCustomEndpoint types.String `tfsdk:"redis_custom_endpoint"`
SecretsManagerCustomEndpoint types.String `tfsdk:"secretsmanager_custom_endpoint"`
- ArgusCustomEndpoint types.String `tfsdk:"argus_custom_endpoint"`
+ SQLServerFlexCustomEndpoint types.String `tfsdk:"sqlserverflex_custom_endpoint"`
SKECustomEndpoint types.String `tfsdk:"ske_custom_endpoint"`
ResourceManagerCustomEndpoint types.String `tfsdk:"resourcemanager_custom_endpoint"`
TokenCustomEndpoint types.String `tfsdk:"token_custom_endpoint"`
@@ -127,9 +129,10 @@ func (p *Provider) Schema(_ context.Context, _ provider.SchemaRequest, resp *pro
"postgresql_custom_endpoint": "Custom endpoint for the PostgreSQL service",
"postgresflex_custom_endpoint": "Custom endpoint for the PostgresFlex service",
"redis_custom_endpoint": "Custom endpoint for the Redis service",
- "ske_custom_endpoint": "Custom endpoint for the Kubernetes Engine (SKE) service",
"resourcemanager_custom_endpoint": "Custom endpoint for the Resource Manager service",
"secretsmanager_custom_endpoint": "Custom endpoint for the Secrets Manager service",
+ "sqlserverflex_custom_endpoint": "Custom endpoint for the SQL Server Flex service",
+ "ske_custom_endpoint": "Custom endpoint for the Kubernetes Engine (SKE) service",
"token_custom_endpoint": "Custom endpoint for the token API, which is used to request access tokens when using the key flow",
"jwks_custom_endpoint": "Custom endpoint for the jwks API, which is used to get the json web key sets (jwks) to validate tokens when using the key flow",
}
@@ -168,6 +171,10 @@ func (p *Provider) Schema(_ context.Context, _ provider.SchemaRequest, resp *pro
Optional: true,
Description: descriptions["region"],
},
+ "argus_custom_endpoint": schema.StringAttribute{
+ Optional: true,
+ Description: descriptions["argus_custom_endpoint"],
+ },
"dns_custom_endpoint": schema.StringAttribute{
Optional: true,
Description: descriptions["dns_custom_endpoint"],
@@ -216,22 +223,22 @@ func (p *Provider) Schema(_ context.Context, _ provider.SchemaRequest, resp *pro
Optional: true,
Description: descriptions["redis_custom_endpoint"],
},
+ "resourcemanager_custom_endpoint": schema.StringAttribute{
+ Optional: true,
+ Description: descriptions["resourcemanager_custom_endpoint"],
+ },
"secretsmanager_custom_endpoint": schema.StringAttribute{
Optional: true,
Description: descriptions["secretsmanager_custom_endpoint"],
},
- "argus_custom_endpoint": schema.StringAttribute{
+ "sqlserverflex_custom_endpoint": schema.StringAttribute{
Optional: true,
- Description: descriptions["argus_custom_endpoint"],
+ Description: descriptions["sqlserverflex_custom_endpoint"],
},
"ske_custom_endpoint": schema.StringAttribute{
Optional: true,
Description: descriptions["ske_custom_endpoint"],
},
- "resourcemanager_custom_endpoint": schema.StringAttribute{
- Optional: true,
- Description: descriptions["resourcemanager_custom_endpoint"],
- },
"token_custom_endpoint": schema.StringAttribute{
Optional: true,
Description: descriptions["token_custom_endpoint"],
@@ -283,6 +290,9 @@ func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest,
if !(providerConfig.Region.IsUnknown() || providerConfig.Region.IsNull()) {
providerData.Region = providerConfig.Region.ValueString()
}
+ if !(providerConfig.ArgusCustomEndpoint.IsUnknown() || providerConfig.ArgusCustomEndpoint.IsNull()) {
+ providerData.ArgusCustomEndpoint = providerConfig.ArgusCustomEndpoint.ValueString()
+ }
if !(providerConfig.DNSCustomEndpoint.IsUnknown() || providerConfig.DNSCustomEndpoint.IsNull()) {
providerData.DnsCustomEndpoint = providerConfig.DNSCustomEndpoint.ValueString()
}
@@ -319,18 +329,18 @@ func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest,
if !(providerConfig.RedisCustomEndpoint.IsUnknown() || providerConfig.RedisCustomEndpoint.IsNull()) {
providerData.RedisCustomEndpoint = providerConfig.RedisCustomEndpoint.ValueString()
}
- if !(providerConfig.ArgusCustomEndpoint.IsUnknown() || providerConfig.ArgusCustomEndpoint.IsNull()) {
- providerData.ArgusCustomEndpoint = providerConfig.ArgusCustomEndpoint.ValueString()
- }
- if !(providerConfig.SKECustomEndpoint.IsUnknown() || providerConfig.SKECustomEndpoint.IsNull()) {
- providerData.SKECustomEndpoint = providerConfig.SKECustomEndpoint.ValueString()
- }
if !(providerConfig.ResourceManagerCustomEndpoint.IsUnknown() || providerConfig.ResourceManagerCustomEndpoint.IsNull()) {
providerData.ResourceManagerCustomEndpoint = providerConfig.ResourceManagerCustomEndpoint.ValueString()
}
if !(providerConfig.SecretsManagerCustomEndpoint.IsUnknown() || providerConfig.SecretsManagerCustomEndpoint.IsNull()) {
providerData.SecretsManagerCustomEndpoint = providerConfig.SecretsManagerCustomEndpoint.ValueString()
}
+ if !(providerConfig.SQLServerFlexCustomEndpoint.IsUnknown() || providerConfig.SQLServerFlexCustomEndpoint.IsNull()) {
+ providerData.SQLServerFlexCustomEndpoint = providerConfig.SQLServerFlexCustomEndpoint.ValueString()
+ }
+ if !(providerConfig.SKECustomEndpoint.IsUnknown() || providerConfig.SKECustomEndpoint.IsNull()) {
+ providerData.SKECustomEndpoint = providerConfig.SKECustomEndpoint.ValueString()
+ }
if !(providerConfig.TokenCustomEndpoint.IsUnknown() || providerConfig.TokenCustomEndpoint.IsNull()) {
sdkConfig.TokenCustomUrl = providerConfig.TokenCustomEndpoint.ValueString()
}
@@ -378,6 +388,7 @@ func (p *Provider) DataSources(_ context.Context) []func() datasource.DataSource
resourceManagerProject.NewProjectDataSource,
secretsManagerInstance.NewInstanceDataSource,
secretsManagerUser.NewUserDataSource,
+ sqlServerFlexInstance.NewInstanceDataSource,
skeProject.NewProjectDataSource,
skeCluster.NewClusterDataSource,
}
@@ -417,6 +428,7 @@ func (p *Provider) Resources(_ context.Context) []func() resource.Resource {
resourceManagerProject.NewProjectResource,
secretsManagerInstance.NewInstanceResource,
secretsManagerUser.NewUserResource,
+ sqlServerFlexInstance.NewInstanceResource,
skeProject.NewProjectResource,
skeCluster.NewClusterResource,
skeKubeconfig.NewKubeconfigResource,