From a203e6ff935bd89135b1a27154c3f54f8c7f4c9f Mon Sep 17 00:00:00 2001 From: "Marcel S. Henselin" Date: Fri, 6 Mar 2026 17:16:13 +0100 Subject: [PATCH] chore: work save --- go.mod | 1 + go.sum | 2 + .../sqlserverflexalpha/database/datasource.go | 174 ++ .../sqlserverflexalpha/database/mapper.go | 104 + .../database/mapper_test.go | 232 ++ .../database/planModifiers.yaml | 51 + .../sqlserverflexalpha/database/resource.go | 539 +++++ .../sqlserverflexalpha/flavor/datasource.go | 355 +++ .../datasources_gen/flavor_data_source_gen.go | 1909 +++++++++++++++++ .../sqlserverflexalpha/flavor/functions.go | 65 + .../flavor/functions_test.go | 135 ++ .../sqlserverflexalpha/flavors/datasource.go | 156 ++ .../sqlserverflexalpha/instance/datasource.go | 146 ++ .../sqlserverflexalpha/instance/functions.go | 283 +++ .../instance/planModifiers.yaml | 124 ++ .../sqlserverflexalpha/instance/resource.go | 554 +++++ .../sqlserverflex_acc_test.go | 433 ++++ .../testdata/instance_template.gompl | 60 + .../sqlserverflexalpha/user/datasource.go | 139 ++ .../sqlserverflexalpha/user/mapper.go | 193 ++ .../sqlserverflexalpha/user/mapper_test.go | 533 +++++ .../user/planModifiers.yaml | 49 + .../sqlserverflexalpha/user/resource.go | 553 +++++ .../services/sqlserverflexalpha/utils/util.go | 48 + .../sqlserverflexalpha/utils/util_test.go | 98 + .../version_data_source_gen.go | 569 +++++ .../sqlserverflexbeta/database/datasource.go | 174 ++ .../sqlserverflexbeta/database/mapper.go | 104 + .../sqlserverflexbeta/database/mapper_test.go | 232 ++ .../database/planModifiers.yaml | 56 + .../sqlserverflexbeta/database/resource.go | 558 +++++ .../sqlserverflexbeta/flavor/datasource.go | 355 +++ .../datasources_gen/flavor_data_source_gen.go | 1909 +++++++++++++++++ .../sqlserverflexbeta/flavor/functions.go | 65 + .../flavor/functions_test.go | 135 ++ .../sqlserverflexbeta/flavors/datasource.go | 156 ++ .../sqlserverflexbeta/instance/datasource.go | 146 ++ .../sqlserverflexbeta/instance/functions.go | 270 
+++ .../instance/functions_test.go | 283 +++ .../instance/planModifiers.yaml | 124 ++ .../sqlserverflexbeta/instance/resource.go | 546 +++++ .../sqlserverflex_acc_test.go | 448 ++++ .../testdata/instance_template.gompl | 60 + .../sqlserverflexbeta/user/datasource.go | 139 ++ .../services/sqlserverflexbeta/user/mapper.go | 192 ++ .../sqlserverflexbeta/user/mapper_test.go | 527 +++++ .../sqlserverflexbeta/user/planModifiers.yaml | 53 + .../sqlserverflexbeta/user/resource.go | 578 +++++ .../services/sqlserverflexbeta/utils/util.go | 48 + .../sqlserverflexbeta/utils/util_test.go | 98 + stackit/provider.go | 23 +- 51 files changed, 14771 insertions(+), 13 deletions(-) create mode 100644 stackit/internal/services/sqlserverflexalpha/database/datasource.go create mode 100644 stackit/internal/services/sqlserverflexalpha/database/mapper.go create mode 100644 stackit/internal/services/sqlserverflexalpha/database/mapper_test.go create mode 100644 stackit/internal/services/sqlserverflexalpha/database/planModifiers.yaml create mode 100644 stackit/internal/services/sqlserverflexalpha/database/resource.go create mode 100644 stackit/internal/services/sqlserverflexalpha/flavor/datasource.go create mode 100644 stackit/internal/services/sqlserverflexalpha/flavor/datasources_gen/flavor_data_source_gen.go create mode 100644 stackit/internal/services/sqlserverflexalpha/flavor/functions.go create mode 100644 stackit/internal/services/sqlserverflexalpha/flavor/functions_test.go create mode 100644 stackit/internal/services/sqlserverflexalpha/flavors/datasource.go create mode 100644 stackit/internal/services/sqlserverflexalpha/instance/datasource.go create mode 100644 stackit/internal/services/sqlserverflexalpha/instance/functions.go create mode 100644 stackit/internal/services/sqlserverflexalpha/instance/planModifiers.yaml create mode 100644 stackit/internal/services/sqlserverflexalpha/instance/resource.go create mode 100644 
stackit/internal/services/sqlserverflexalpha/sqlserverflex_acc_test.go create mode 100644 stackit/internal/services/sqlserverflexalpha/testdata/instance_template.gompl create mode 100644 stackit/internal/services/sqlserverflexalpha/user/datasource.go create mode 100644 stackit/internal/services/sqlserverflexalpha/user/mapper.go create mode 100644 stackit/internal/services/sqlserverflexalpha/user/mapper_test.go create mode 100644 stackit/internal/services/sqlserverflexalpha/user/planModifiers.yaml create mode 100644 stackit/internal/services/sqlserverflexalpha/user/resource.go create mode 100644 stackit/internal/services/sqlserverflexalpha/utils/util.go create mode 100644 stackit/internal/services/sqlserverflexalpha/utils/util_test.go create mode 100644 stackit/internal/services/sqlserverflexalpha/versions/datasources_gen/version_data_source_gen.go create mode 100644 stackit/internal/services/sqlserverflexbeta/database/datasource.go create mode 100644 stackit/internal/services/sqlserverflexbeta/database/mapper.go create mode 100644 stackit/internal/services/sqlserverflexbeta/database/mapper_test.go create mode 100644 stackit/internal/services/sqlserverflexbeta/database/planModifiers.yaml create mode 100644 stackit/internal/services/sqlserverflexbeta/database/resource.go create mode 100644 stackit/internal/services/sqlserverflexbeta/flavor/datasource.go create mode 100644 stackit/internal/services/sqlserverflexbeta/flavor/datasources_gen/flavor_data_source_gen.go create mode 100644 stackit/internal/services/sqlserverflexbeta/flavor/functions.go create mode 100644 stackit/internal/services/sqlserverflexbeta/flavor/functions_test.go create mode 100644 stackit/internal/services/sqlserverflexbeta/flavors/datasource.go create mode 100644 stackit/internal/services/sqlserverflexbeta/instance/datasource.go create mode 100644 stackit/internal/services/sqlserverflexbeta/instance/functions.go create mode 100644 
stackit/internal/services/sqlserverflexbeta/instance/functions_test.go create mode 100644 stackit/internal/services/sqlserverflexbeta/instance/planModifiers.yaml create mode 100644 stackit/internal/services/sqlserverflexbeta/instance/resource.go create mode 100644 stackit/internal/services/sqlserverflexbeta/sqlserverflex_acc_test.go create mode 100644 stackit/internal/services/sqlserverflexbeta/testdata/instance_template.gompl create mode 100644 stackit/internal/services/sqlserverflexbeta/user/datasource.go create mode 100644 stackit/internal/services/sqlserverflexbeta/user/mapper.go create mode 100644 stackit/internal/services/sqlserverflexbeta/user/mapper_test.go create mode 100644 stackit/internal/services/sqlserverflexbeta/user/planModifiers.yaml create mode 100644 stackit/internal/services/sqlserverflexbeta/user/resource.go create mode 100644 stackit/internal/services/sqlserverflexbeta/utils/util.go create mode 100644 stackit/internal/services/sqlserverflexbeta/utils/util_test.go diff --git a/go.mod b/go.mod index 292cf33e..0d5e6de4 100644 --- a/go.mod +++ b/go.mod @@ -65,6 +65,7 @@ require ( github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/oklog/run v1.2.0 // indirect github.com/spf13/pflag v1.0.10 // indirect + github.com/stackitcloud/stackit-sdk-go/services/sqlserverflex v1.5.0 // indirect github.com/stretchr/testify v1.11.1 // indirect github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect diff --git a/go.sum b/go.sum index fc0e4252..96faf20d 100644 --- a/go.sum +++ b/go.sum @@ -184,6 +184,8 @@ github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stackitcloud/stackit-sdk-go/core v0.22.0 h1:6rViz7GnNwXSh51Lur5xuDzO8EWSZfN9J0HvEkBKq6c= github.com/stackitcloud/stackit-sdk-go/core v0.22.0/go.mod h1:osMglDby4csGZ5sIfhNyYq1bS1TxIdPY88+skE/kkmI= 
+github.com/stackitcloud/stackit-sdk-go/services/sqlserverflex v1.5.0 h1:JeSnhioDCfV5K4V4mOjKtKgkgNtrkrU9bkt7JBs57lA= +github.com/stackitcloud/stackit-sdk-go/services/sqlserverflex v1.5.0/go.mod h1:3NQNKhHYIjIHTmf6RAcYLdnq17a8AZKkqFCu9Q/Y/3Y= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= diff --git a/stackit/internal/services/sqlserverflexalpha/database/datasource.go b/stackit/internal/services/sqlserverflexalpha/database/datasource.go new file mode 100644 index 00000000..5155b41c --- /dev/null +++ b/stackit/internal/services/sqlserverflexalpha/database/datasource.go @@ -0,0 +1,174 @@ +package sqlserverflexalpha + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/stackit-sdk-go/core/config" + + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/conversion" + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core" + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/utils" + + sqlserverflexalphaPkg "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexalpha" + sqlserverflexalphaGen "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/database/datasources_gen" +) + +var _ datasource.DataSource = (*databaseDataSource)(nil) + 
+const errorPrefix = "[sqlserverflexalpha - Database]" + +func NewDatabaseDataSource() datasource.DataSource { + return &databaseDataSource{} +} + +type dataSourceModel struct { + sqlserverflexalphaGen.DatabaseModel + TerraformId types.String `tfsdk:"id"` +} + +type databaseDataSource struct { + client *sqlserverflexalphaPkg.APIClient + providerData core.ProviderData +} + +func (d *databaseDataSource) Metadata( + _ context.Context, + req datasource.MetadataRequest, + resp *datasource.MetadataResponse, +) { + resp.TypeName = req.ProviderTypeName + "_sqlserverflexalpha_database" +} + +func (d *databaseDataSource) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = sqlserverflexalphaGen.DatabaseDataSourceSchema(ctx) + resp.Schema.Attributes["id"] = schema.StringAttribute{ + Computed: true, + Description: "The terraform internal identifier.", + MarkdownDescription: "The terraform internal identifier.", + } +} + +// Configure adds the provider configured client to the data source. +func (d *databaseDataSource) Configure( + ctx context.Context, + req datasource.ConfigureRequest, + resp *datasource.ConfigureResponse, +) { + var ok bool + d.providerData, ok = conversion.ParseProviderData(ctx, req.ProviderData, &resp.Diagnostics) + if !ok { + return + } + + apiClientConfigOptions := []config.ConfigurationOption{ + config.WithCustomAuth(d.providerData.RoundTripper), + utils.UserAgentConfigOption(d.providerData.Version), + } + if d.providerData.SQLServerFlexCustomEndpoint != "" { + apiClientConfigOptions = append( + apiClientConfigOptions, + config.WithEndpoint(d.providerData.SQLServerFlexCustomEndpoint), + ) + } else { + apiClientConfigOptions = append( + apiClientConfigOptions, + config.WithRegion(d.providerData.GetRegion()), + ) + } + apiClient, err := sqlserverflexalphaPkg.NewAPIClient(apiClientConfigOptions...) 
+ if err != nil { + resp.Diagnostics.AddError( + "Error configuring API client", + fmt.Sprintf( + "Configuring client: %v. This is an error related to the provider configuration, not to the resource configuration", + err, + ), + ) + return + } + d.client = apiClient + tflog.Info(ctx, fmt.Sprintf("%s client configured", errorPrefix)) +} + +func (d *databaseDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data dataSourceModel + // Read Terraform configuration data into the model + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + + if resp.Diagnostics.HasError() { + return + } + + ctx = core.InitProviderContext(ctx) + + // Extract identifiers from the plan + projectId := data.ProjectId.ValueString() + region := d.providerData.GetRegionWithOverride(data.Region) + instanceId := data.InstanceId.ValueString() + + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "region", region) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + databaseName := data.DatabaseName.ValueString() + + databaseResp, err := d.client.GetDatabaseRequest(ctx, projectId, region, instanceId, databaseName).Execute() + if err != nil { + handleReadError(ctx, &resp.Diagnostics, err, projectId, instanceId) + resp.State.RemoveResource(ctx) + return + } + + ctx = core.LogResponse(ctx) + // Map response body to schema and populate Computed attribute values + err = mapFields(databaseResp, &data, region) + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + "Error reading database", + fmt.Sprintf("Processing API payload: %v", err), + ) + return + } + + // Save data into Terraform state + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) + + tflog.Info(ctx, "SQL Server Flex beta database read") +} + +// handleReadError centralizes API error handling for the Read operation. 
+func handleReadError(ctx context.Context, diags *diag.Diagnostics, err error, projectId, instanceId string) { + utils.LogError( + ctx, + diags, + err, + "Reading database", + fmt.Sprintf( + "Could not retrieve database for instance %q in project %q.", + instanceId, + projectId, + ), + map[int]string{ + http.StatusBadRequest: fmt.Sprintf( + "Invalid request parameters for project %q and instance %q.", + projectId, + instanceId, + ), + http.StatusNotFound: fmt.Sprintf( + "Database, instance %q, or project %q not found.", + instanceId, + projectId, + ), + http.StatusForbidden: fmt.Sprintf("Forbidden access to project %q.", projectId), + }, + ) +} diff --git a/stackit/internal/services/sqlserverflexalpha/database/mapper.go b/stackit/internal/services/sqlserverflexalpha/database/mapper.go new file mode 100644 index 00000000..55d0e5ae --- /dev/null +++ b/stackit/internal/services/sqlserverflexalpha/database/mapper.go @@ -0,0 +1,104 @@ +package sqlserverflexalpha + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-framework/types" + + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexalpha" + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/utils" +) + +// mapFields maps fields from a ListDatabase API response to a resourceModel for the data source. 
+func mapFields(source *sqlserverflexalpha.GetDatabaseResponse, model *dataSourceModel, region string) error { + if source == nil { + return fmt.Errorf("response is nil") + } + if source.Id == nil || *source.Id == 0 { + return fmt.Errorf("id not present") + } + if model == nil { + return fmt.Errorf("model given is nil") + } + + var databaseId int64 + if model.Id.ValueInt64() != 0 { + databaseId = model.Id.ValueInt64() + } else if source.Id != nil { + databaseId = *source.Id + } else { + return fmt.Errorf("database id not present") + } + + model.Id = types.Int64Value(databaseId) + model.DatabaseName = types.StringValue(source.GetName()) + model.Name = types.StringValue(source.GetName()) + model.Owner = types.StringValue(strings.Trim(source.GetOwner(), "\"")) + model.Region = types.StringValue(region) + model.ProjectId = types.StringValue(model.ProjectId.ValueString()) + model.InstanceId = types.StringValue(model.InstanceId.ValueString()) + model.CompatibilityLevel = types.Int64Value(source.GetCompatibilityLevel()) + model.CollationName = types.StringValue(source.GetCollationName()) + + model.TerraformId = utils.BuildInternalTerraformId( + model.ProjectId.ValueString(), + region, + model.InstanceId.ValueString(), + model.DatabaseName.ValueString(), + ) + + return nil +} + +// mapResourceFields maps fields from a ListDatabase API response to a resourceModel for the resource. 
+func mapResourceFields(source *sqlserverflexalpha.GetDatabaseResponse, model *resourceModel, region string) error { + if source == nil { + return fmt.Errorf("response is nil") + } + if source.Id == nil || *source.Id == 0 { + return fmt.Errorf("id not present") + } + if model == nil { + return fmt.Errorf("model input is nil") + } + + var databaseId int64 + if model.Id.ValueInt64() != 0 { + databaseId = model.Id.ValueInt64() + } else if source.Id != nil { + databaseId = *source.Id + } else { + return fmt.Errorf("database id not present") + } + + model.Id = types.Int64Value(databaseId) + model.DatabaseName = types.StringValue(source.GetName()) + model.Name = types.StringValue(source.GetName()) + model.Owner = types.StringValue(strings.Trim(source.GetOwner(), "\"")) + model.Region = types.StringValue(region) + model.ProjectId = types.StringValue(model.ProjectId.ValueString()) + model.InstanceId = types.StringValue(model.InstanceId.ValueString()) + + model.Compatibility = types.Int64Value(source.GetCompatibilityLevel()) + model.CompatibilityLevel = types.Int64Value(source.GetCompatibilityLevel()) + + model.Collation = types.StringValue(source.GetCollationName()) // it does not come back from api + model.CollationName = types.StringValue(source.GetCollationName()) + + return nil +} + +// toCreatePayload converts the resource model to an API create payload. 
+func toCreatePayload(model *resourceModel) (*sqlserverflexalpha.CreateDatabaseRequestPayload, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + + return &sqlserverflexalpha.CreateDatabaseRequestPayload{ + Name: model.Name.ValueStringPointer(), + Owner: model.Owner.ValueStringPointer(), + Collation: model.Collation.ValueStringPointer(), + Compatibility: model.Compatibility.ValueInt64Pointer(), + }, nil +} diff --git a/stackit/internal/services/sqlserverflexalpha/database/mapper_test.go b/stackit/internal/services/sqlserverflexalpha/database/mapper_test.go new file mode 100644 index 00000000..b0daa742 --- /dev/null +++ b/stackit/internal/services/sqlserverflexalpha/database/mapper_test.go @@ -0,0 +1,232 @@ +package sqlserverflexalpha + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexalpha" + datasource "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/database/datasources_gen" +) + +func TestMapFields(t *testing.T) { + type given struct { + source *sqlserverflexalpha.GetDatabaseResponse + model *dataSourceModel + region string + } + type expected struct { + model *dataSourceModel + err bool + } + + testcases := []struct { + name string + given given + expected expected + }{ + { + name: "should map fields correctly", + given: given{ + source: &sqlserverflexalpha.GetDatabaseResponse{ + Id: utils.Ptr(int64(1)), + Name: utils.Ptr("my-db"), + CollationName: utils.Ptr("collation"), + CompatibilityLevel: utils.Ptr(int64(150)), + Owner: utils.Ptr("my-owner"), + }, + model: &dataSourceModel{ + DatabaseModel: datasource.DatabaseModel{ + ProjectId: types.StringValue("my-project"), + InstanceId: types.StringValue("my-instance"), + }, + 
}, + region: "eu01", + }, + expected: expected{ + model: &dataSourceModel{ + DatabaseModel: datasource.DatabaseModel{ + Id: types.Int64Value(1), + Name: types.StringValue("my-db"), + DatabaseName: types.StringValue("my-db"), + Owner: types.StringValue("my-owner"), + Region: types.StringValue("eu01"), + InstanceId: types.StringValue("my-instance"), + ProjectId: types.StringValue("my-project"), + CompatibilityLevel: types.Int64Value(150), + CollationName: types.StringValue("collation"), + }, + TerraformId: types.StringValue("my-project,eu01,my-instance,my-db"), + }, + }, + }, + { + name: "should fail on nil source", + given: given{ + source: nil, + model: &dataSourceModel{}, + }, + expected: expected{err: true}, + }, + { + name: "should fail on nil source ID", + given: given{ + source: &sqlserverflexalpha.GetDatabaseResponse{Id: nil}, + model: &dataSourceModel{}, + }, + expected: expected{err: true}, + }, + { + name: "should fail on nil model", + given: given{ + source: &sqlserverflexalpha.GetDatabaseResponse{Id: utils.Ptr(int64(1))}, + model: nil, + }, + expected: expected{err: true}, + }, + } + + for _, tc := range testcases { + t.Run( + tc.name, func(t *testing.T) { + err := mapFields(tc.given.source, tc.given.model, tc.given.region) + if (err != nil) != tc.expected.err { + t.Fatalf("expected error: %v, got: %v", tc.expected.err, err) + } + if err == nil { + if diff := cmp.Diff(tc.expected.model, tc.given.model); diff != "" { + t.Errorf("model mismatch (-want +got):\n%s", diff) + } + } + }, + ) + } +} + +func TestMapResourceFields(t *testing.T) { + type given struct { + source *sqlserverflexalpha.GetDatabaseResponse + model *resourceModel + region string + } + type expected struct { + model *resourceModel + err bool + } + + testcases := []struct { + name string + given given + expected expected + }{ + { + name: "should map fields correctly", + given: given{ + source: &sqlserverflexalpha.GetDatabaseResponse{ + Id: utils.Ptr(int64(1)), + Name: utils.Ptr("my-db"), + 
Owner: utils.Ptr("my-owner"), + }, + model: &resourceModel{ + ProjectId: types.StringValue("my-project"), + InstanceId: types.StringValue("my-instance"), + }, + region: "eu01", + }, + expected: expected{ + model: &resourceModel{ + Id: types.Int64Value(1), + Name: types.StringValue("my-db"), + Compatibility: types.Int64Value(0), + CompatibilityLevel: types.Int64Value(0), + Collation: types.StringValue(""), + CollationName: types.StringValue(""), + DatabaseName: types.StringValue("my-db"), + InstanceId: types.StringValue("my-instance"), + ProjectId: types.StringValue("my-project"), + Region: types.StringValue("eu01"), + Owner: types.StringValue("my-owner"), + }, + }, + }, + { + name: "should fail on nil source", + given: given{ + source: nil, + model: &resourceModel{}, + }, + expected: expected{err: true}, + }, + } + + for _, tc := range testcases { + t.Run( + tc.name, func(t *testing.T) { + err := mapResourceFields(tc.given.source, tc.given.model, tc.given.region) + if (err != nil) != tc.expected.err { + t.Fatalf("expected error: %v, got: %v", tc.expected.err, err) + } + if err == nil { + if diff := cmp.Diff(tc.expected.model, tc.given.model); diff != "" { + t.Errorf("model mismatch (-want +got):\n%s", diff) + } + } + }, + ) + } +} + +func TestToCreatePayload(t *testing.T) { + type given struct { + model *resourceModel + } + type expected struct { + payload *sqlserverflexalpha.CreateDatabaseRequestPayload + err bool + } + + testcases := []struct { + name string + given given + expected expected + }{ + { + name: "should convert model to payload", + given: given{ + model: &resourceModel{ + Name: types.StringValue("my-db"), + Owner: types.StringValue("my-owner"), + }, + }, + expected: expected{ + payload: &sqlserverflexalpha.CreateDatabaseRequestPayload{ + Name: utils.Ptr("my-db"), + Owner: utils.Ptr("my-owner"), + }, + }, + }, + { + name: "should fail on nil model", + given: given{model: nil}, + expected: expected{err: true}, + }, + } + + for _, tc := range testcases 
{ + t.Run( + tc.name, func(t *testing.T) { + actual, err := toCreatePayload(tc.given.model) + if (err != nil) != tc.expected.err { + t.Fatalf("expected error: %v, got: %v", tc.expected.err, err) + } + if err == nil { + if diff := cmp.Diff(tc.expected.payload, actual); diff != "" { + t.Errorf("payload mismatch (-want +got):\n%s", diff) + } + } + }, + ) + } +} diff --git a/stackit/internal/services/sqlserverflexalpha/database/planModifiers.yaml b/stackit/internal/services/sqlserverflexalpha/database/planModifiers.yaml new file mode 100644 index 00000000..1d010ed7 --- /dev/null +++ b/stackit/internal/services/sqlserverflexalpha/database/planModifiers.yaml @@ -0,0 +1,51 @@ +fields: + - name: 'id' + modifiers: + - 'UseStateForUnknown' + + - name: 'instance_id' + validators: + - validate.NoSeparator + - validate.UUID + modifiers: + - 'RequiresReplace' + + - name: 'project_id' + validators: + - validate.NoSeparator + - validate.UUID + modifiers: + - 'RequiresReplace' + + - name: 'region' + modifiers: + - 'RequiresReplace' + + - name: 'name' + modifiers: + - 'RequiresReplace' + + - name: 'collation' + modifiers: + - 'RequiresReplace' + + - name: 'owner' + modifiers: + - 'UseStateForUnknown' + + - name: 'database_name' + modifiers: + - 'UseStateForUnknown' + + - name: 'collation_name' + modifiers: + - 'UseStateForUnknown' + + - name: 'compatibility' + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' + + - name: 'compatibility_level' + modifiers: + - 'UseStateForUnknown' diff --git a/stackit/internal/services/sqlserverflexalpha/database/resource.go b/stackit/internal/services/sqlserverflexalpha/database/resource.go new file mode 100644 index 00000000..5b46c52c --- /dev/null +++ b/stackit/internal/services/sqlserverflexalpha/database/resource.go @@ -0,0 +1,539 @@ +package sqlserverflexalpha + +import ( + "context" + _ "embed" + "errors" + "fmt" + "net/http" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-framework/path" + 
"github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/identityschema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/core/oapierror" + + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexalpha" + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/conversion" + wait "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/wait/sqlserverflexalpha" + + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core" + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/utils" + + sqlserverflexalphaResGen "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/database/resources_gen" +) + +var ( + _ resource.Resource = &databaseResource{} + _ resource.ResourceWithConfigure = &databaseResource{} + _ resource.ResourceWithImportState = &databaseResource{} + _ resource.ResourceWithModifyPlan = &databaseResource{} + _ resource.ResourceWithIdentity = &databaseResource{} + + // Define errors + errDatabaseNotFound = errors.New("database not found") +) + +func NewDatabaseResource() resource.Resource { + return &databaseResource{} +} + +// resourceModel describes the resource data model. 
+type resourceModel = sqlserverflexalphaResGen.DatabaseModel + +type databaseResource struct { + client *sqlserverflexalpha.APIClient + providerData core.ProviderData +} + +type DatabaseResourceIdentityModel struct { + ProjectID types.String `tfsdk:"project_id"` + Region types.String `tfsdk:"region"` + InstanceID types.String `tfsdk:"instance_id"` + DatabaseName types.String `tfsdk:"database_name"` +} + +func (r *databaseResource) Metadata( + _ context.Context, + req resource.MetadataRequest, + resp *resource.MetadataResponse, +) { + resp.TypeName = req.ProviderTypeName + "_sqlserverflexalpha_database" +} + +//go:embed planModifiers.yaml +var modifiersFileByte []byte + +func (r *databaseResource) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + s := sqlserverflexalphaResGen.DatabaseResourceSchema(ctx) + + fields, err := utils.ReadModifiersConfig(modifiersFileByte) + if err != nil { + resp.Diagnostics.AddError("error during read modifiers config file", err.Error()) + return + } + + err = utils.AddPlanModifiersToResourceSchema(fields, &s) + if err != nil { + resp.Diagnostics.AddError("error adding plan modifiers", err.Error()) + return + } + resp.Schema = s +} + +func (r *databaseResource) IdentitySchema( + _ context.Context, + _ resource.IdentitySchemaRequest, + resp *resource.IdentitySchemaResponse, +) { + resp.IdentitySchema = identityschema.Schema{ + Attributes: map[string]identityschema.Attribute{ + "project_id": identityschema.StringAttribute{ + RequiredForImport: true, // must be set during import by the practitioner + }, + "region": identityschema.StringAttribute{ + RequiredForImport: true, // can be defaulted by the provider configuration + }, + "instance_id": identityschema.StringAttribute{ + RequiredForImport: true, // can be defaulted by the provider configuration + }, + "database_name": identityschema.StringAttribute{ + RequiredForImport: true, // can be defaulted by the provider configuration + }, + }, + } +} + 
+// Configure adds the provider configured client to the resource. +func (r *databaseResource) Configure( + ctx context.Context, + req resource.ConfigureRequest, + resp *resource.ConfigureResponse, +) { + var ok bool + r.providerData, ok = conversion.ParseProviderData(ctx, req.ProviderData, &resp.Diagnostics) + if !ok { + return + } + + apiClientConfigOptions := []config.ConfigurationOption{ + config.WithCustomAuth(r.providerData.RoundTripper), + utils.UserAgentConfigOption(r.providerData.Version), + } + if r.providerData.SQLServerFlexCustomEndpoint != "" { + apiClientConfigOptions = append( + apiClientConfigOptions, + config.WithEndpoint(r.providerData.SQLServerFlexCustomEndpoint), + ) + } else { + apiClientConfigOptions = append(apiClientConfigOptions, config.WithRegion(r.providerData.GetRegion())) + } + apiClient, err := sqlserverflexalpha.NewAPIClient(apiClientConfigOptions...) + if err != nil { + resp.Diagnostics.AddError( + "Error configuring API client", + fmt.Sprintf( + "Configuring client: %v. This is an error related to the provider configuration, not to the resource configuration", + err, + ), + ) + return + } + r.client = apiClient + tflog.Info(ctx, "sqlserverflexalpha.Database client configured") +} + +func (r *databaseResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var data resourceModel + createErr := "DB create error" + + // Read Terraform plan data into the model + resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) 
+ + if resp.Diagnostics.HasError() { + return + } + + ctx = core.InitProviderContext(ctx) + + projectId := data.ProjectId.ValueString() + region := data.Region.ValueString() + instanceId := data.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "region", region) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + databaseName := data.Name.ValueString() + ctx = tflog.SetField(ctx, "database_name", databaseName) + + payLoad := sqlserverflexalpha.CreateDatabaseRequestPayload{} + if !data.Collation.IsNull() && !data.Collation.IsUnknown() { + payLoad.Collation = data.Collation.ValueStringPointer() + } + + if !data.Compatibility.IsNull() && !data.Compatibility.IsUnknown() { + payLoad.Compatibility = data.Compatibility.ValueInt64Pointer() + } + + payLoad.Name = data.Name.ValueStringPointer() + payLoad.Owner = data.Owner.ValueStringPointer() + + createResp, err := r.client.CreateDatabaseRequest(ctx, projectId, region, instanceId). + CreateDatabaseRequestPayload(payLoad). + Execute() + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + createErr, + fmt.Sprintf("Calling API: %v", err), + ) + return + } + + if createResp == nil || createResp.Id == nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + "Error creating database", + "API didn't return database Id. A database might have been created", + ) + return + } + + databaseId := *createResp.Id + + ctx = tflog.SetField(ctx, "database_id", databaseId) + + ctx = core.LogResponse(ctx) + + // Set data returned by API in identity + identity := DatabaseResourceIdentityModel{ + ProjectID: types.StringValue(projectId), + Region: types.StringValue(region), + InstanceID: types.StringValue(instanceId), + DatabaseName: types.StringValue(databaseName), + } + resp.Diagnostics.Append(resp.Identity.Set(ctx, identity)...) + if resp.Diagnostics.HasError() { + return + } + + // TODO: is this necessary to wait for the database-> API say 200 ? 
+ waitResp, err := wait.CreateDatabaseWaitHandler( + ctx, + r.client, + projectId, + instanceId, + region, + databaseName, + ).SetSleepBeforeWait( + 30 * time.Second, + ).SetTimeout( + 15 * time.Minute, + ).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + createErr, + fmt.Sprintf("Database creation waiting: %v", err), + ) + return + } + + if waitResp.Id == nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + createErr, + "Database creation waiting: returned id is nil", + ) + return + } + + if *waitResp.Id != databaseId { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + createErr, + "Database creation waiting: returned id is different", + ) + return + } + + if *waitResp.Owner != data.Owner.ValueString() { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + createErr, + "Database creation waiting: returned owner is different", + ) + return + } + + if *waitResp.Name != data.Name.ValueString() { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + createErr, + "Database creation waiting: returned name is different", + ) + return + } + + database, err := r.client.GetDatabaseRequest(ctx, projectId, region, instanceId, databaseName).Execute() + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + "Error creating database", + fmt.Sprintf("Getting database details after creation: %v", err), + ) + return + } + + // Map response body to schema + err = mapResourceFields(database, &data, region) + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + "Error creating database", + fmt.Sprintf("Processing API payload: %v", err), + ) + return + } + + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, data)...) 
+ if resp.Diagnostics.HasError() { + return + } + + // Save data into Terraform state + + tflog.Info(ctx, "sqlserverflexalpha.Database created") +} + +func (r *databaseResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + var model resourceModel + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + ctx = core.InitProviderContext(ctx) + + projectId := model.ProjectId.ValueString() + region := model.Region.ValueString() + instanceId := model.InstanceId.ValueString() + databaseName := model.DatabaseName.ValueString() + + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + ctx = tflog.SetField(ctx, "region", region) + ctx = tflog.SetField(ctx, "database_name", databaseName) + + databaseResp, err := r.client.GetDatabaseRequest(ctx, projectId, region, instanceId, databaseName).Execute() + if err != nil { + oapiErr, ok := err.(*oapierror.GenericOpenAPIError) //nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped + if (ok && oapiErr.StatusCode == http.StatusNotFound) || errors.Is(err, errDatabaseNotFound) { + resp.State.RemoveResource(ctx) + return + } + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading database", fmt.Sprintf("Calling API: %v", err)) + return + } + + ctx = core.LogResponse(ctx) + + // Map response body to schema + err = mapResourceFields(databaseResp, &model, region) + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + "Error reading database", + fmt.Sprintf("Processing API payload: %v", err), + ) + return + } + + // Save identity into Terraform state + identity := DatabaseResourceIdentityModel{ + ProjectID: types.StringValue(projectId), + Region: types.StringValue(region), + InstanceID: types.StringValue(instanceId), + DatabaseName: types.StringValue(databaseName), + } + 
resp.Diagnostics.Append(resp.Identity.Set(ctx, identity)...) + if resp.Diagnostics.HasError() { + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &model)...) + if resp.Diagnostics.HasError() { + return + } + + tflog.Info(ctx, "sqlserverflexalpha.Database read") +} + +func (r *databaseResource) Update(ctx context.Context, _ resource.UpdateRequest, resp *resource.UpdateResponse) { + // TODO: Check update api endpoint - not available at the moment, so return an error for now + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating database", "Database can't be updated") +} + +func (r *databaseResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // nolint:gocritic // function signature required by Terraform + var model resourceModel + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Read identity data + var identityData DatabaseResourceIdentityModel + resp.Diagnostics.Append(req.Identity.Get(ctx, &identityData)...) 
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	ctx = core.InitProviderContext(ctx)
+
+	projectId := model.ProjectId.ValueString()
+	region := model.Region.ValueString()
+	instanceId := model.InstanceId.ValueString()
+	databaseName := model.DatabaseName.ValueString()
+
+	ctx = tflog.SetField(ctx, "project_id", projectId)
+	ctx = tflog.SetField(ctx, "instance_id", instanceId)
+	ctx = tflog.SetField(ctx, "region", region)
+	ctx = tflog.SetField(ctx, "database_name", databaseName)
+
+	// Delete the existing database
+	err := r.client.DeleteDatabaseRequestExecute(ctx, projectId, region, instanceId, databaseName)
+	if err != nil {
+		core.LogAndAddError(
+			ctx,
+			&resp.Diagnostics,
+			"Error deleting database",
+			fmt.Sprintf(
+				"Calling API: %v\nname: %s, region: %s, instanceId: %s", err, databaseName, region, instanceId,
+			),
+		)
+		return
+	}
+
+	ctx = core.LogResponse(ctx)
+	resp.State.RemoveResource(ctx)
+
+	tflog.Info(ctx, "sqlserverflexalpha.Database deleted")
+}
+
+// ModifyPlan implements resource.ResourceWithModifyPlan.
+// Use the modifier to set the effective region in the current plan.
+func (r *databaseResource) ModifyPlan(
+	ctx context.Context,
+	req resource.ModifyPlanRequest,
+	resp *resource.ModifyPlanResponse,
+) { // nolint:gocritic // function signature required by Terraform
+	// skip initial empty configuration to avoid follow-up errors
+	if req.Config.Raw.IsNull() {
+		return
+	}
+
+	var configModel resourceModel
+	resp.Diagnostics.Append(req.Config.Get(ctx, &configModel)...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	var planModel resourceModel
+	resp.Diagnostics.Append(req.Plan.Get(ctx, &planModel)...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	utils.AdaptRegion(ctx, configModel.Region, &planModel.Region, r.providerData.GetRegion(), resp)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	resp.Diagnostics.Append(resp.Plan.Set(ctx, planModel)...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+}
+
+// ImportState imports a resource into the Terraform state on success.
+// The expected format of the resource import identifier is: project_id,region,instance_id,database_name
+func (r *databaseResource) ImportState(
+	ctx context.Context,
+	req resource.ImportStateRequest,
+	resp *resource.ImportStateResponse,
+) {
+	ctx = core.InitProviderContext(ctx)
+
+	if req.ID != "" {
+		idParts := strings.Split(req.ID, core.Separator)
+
+		if len(idParts) != 4 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" || idParts[3] == "" {
+			core.LogAndAddError(
+				ctx, &resp.Diagnostics,
+				"Error importing database",
+				fmt.Sprintf(
+					"Expected import identifier with format [project_id],[region],[instance_id],[database_name] Got: %q",
+					req.ID,
+				),
+			)
+			return
+		}
+
+		resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...)
+		resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("region"), idParts[1])...)
+		resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), idParts[2])...)
+		resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("database_name"), idParts[3])...)
+
+		var identityData DatabaseResourceIdentityModel
+		identityData.ProjectID = types.StringValue(idParts[0])
+		identityData.Region = types.StringValue(idParts[1])
+		identityData.InstanceID = types.StringValue(idParts[2])
+		identityData.DatabaseName = types.StringValue(idParts[3])
+
+		resp.Diagnostics.Append(resp.Identity.Set(ctx, &identityData)...)
+		if resp.Diagnostics.HasError() {
+			return
+		}
+
+		tflog.Info(ctx, "sqlserverflexalpha database state imported")
+		return
+	}
+
+	// If no ID is provided, attempt to read identity attributes from the import configuration
+	var identityData DatabaseResourceIdentityModel
+	resp.Diagnostics.Append(req.Identity.Get(ctx, &identityData)...)
+ if resp.Diagnostics.HasError() { + return + } + + projectId := identityData.ProjectID.ValueString() + region := identityData.Region.ValueString() + instanceId := identityData.InstanceID.ValueString() + databaseName := identityData.DatabaseName.ValueString() + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), projectId)...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("region"), region)...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), instanceId)...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("database_name"), databaseName)...) + + tflog.Info(ctx, "sqlserverflexalpha database state imported") +} diff --git a/stackit/internal/services/sqlserverflexalpha/flavor/datasource.go b/stackit/internal/services/sqlserverflexalpha/flavor/datasource.go new file mode 100644 index 00000000..d56aafa5 --- /dev/null +++ b/stackit/internal/services/sqlserverflexalpha/flavor/datasource.go @@ -0,0 +1,355 @@ +package sqlserverflexalphaFlavor + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/stackit-sdk-go/core/config" + + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/conversion" + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core" + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/utils" + + sqlserverflexalphaPkg 
"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexalpha" + sqlserverflexalphaGen "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/flavor/datasources_gen" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &flavorDataSource{} + _ datasource.DataSourceWithConfigure = &flavorDataSource{} +) + +type FlavorModel struct { + ProjectId types.String `tfsdk:"project_id"` + Region types.String `tfsdk:"region"` + StorageClass types.String `tfsdk:"storage_class"` + Cpu types.Int64 `tfsdk:"cpu"` + Description types.String `tfsdk:"description"` + Id types.String `tfsdk:"id"` + FlavorId types.String `tfsdk:"flavor_id"` + MaxGb types.Int64 `tfsdk:"max_gb"` + Memory types.Int64 `tfsdk:"ram"` + MinGb types.Int64 `tfsdk:"min_gb"` + NodeType types.String `tfsdk:"node_type"` + StorageClasses types.List `tfsdk:"storage_classes"` +} + +// NewFlavorDataSource is a helper function to simplify the provider implementation. +func NewFlavorDataSource() datasource.DataSource { + return &flavorDataSource{} +} + +// flavorDataSource is the data source implementation. +type flavorDataSource struct { + client *sqlserverflexalphaPkg.APIClient + providerData core.ProviderData +} + +// Metadata returns the data source type name. +func (r *flavorDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_sqlserverflexalpha_flavor" +} + +// Configure adds the provider configured client to the data source. 
+func (r *flavorDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + var ok bool + r.providerData, ok = conversion.ParseProviderData(ctx, req.ProviderData, &resp.Diagnostics) + if !ok { + return + } + + apiClientConfigOptions := []config.ConfigurationOption{ + config.WithCustomAuth(r.providerData.RoundTripper), + utils.UserAgentConfigOption(r.providerData.Version), + } + if r.providerData.SQLServerFlexCustomEndpoint != "" { + apiClientConfigOptions = append( + apiClientConfigOptions, + config.WithEndpoint(r.providerData.SQLServerFlexCustomEndpoint), + ) + } else { + apiClientConfigOptions = append( + apiClientConfigOptions, + config.WithRegion(r.providerData.GetRegion()), + ) + } + apiClient, err := sqlserverflexalphaPkg.NewAPIClient(apiClientConfigOptions...) + if err != nil { + resp.Diagnostics.AddError( + "Error configuring API client", + fmt.Sprintf( + "Configuring client: %v. This is an error related to the provider configuration, not to the resource configuration", + err, + ), + ) + return + } + r.client = apiClient + tflog.Info(ctx, "SQL Server Flex instance client configured") +} + +func (r *flavorDataSource) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "project_id": schema.StringAttribute{ + Required: true, + Description: "The project ID of the flavor.", + MarkdownDescription: "The project ID of the flavor.", + }, + "region": schema.StringAttribute{ + Required: true, + Description: "The region of the flavor.", + MarkdownDescription: "The region of the flavor.", + }, + "cpu": schema.Int64Attribute{ + Required: true, + Description: "The cpu count of the instance.", + MarkdownDescription: "The cpu count of the instance.", + }, + "ram": schema.Int64Attribute{ + Required: true, + Description: "The memory of the instance in Gibibyte.", + MarkdownDescription: "The memory of the 
instance in Gibibyte.",
+			},
+			"storage_class": schema.StringAttribute{
+				Required:            true,
+				Description:         "The storage class of the instance.",
+				MarkdownDescription: "The storage class of the instance.",
+			},
+			"node_type": schema.StringAttribute{
+				Required:            true,
+				Description:         "defines the nodeType it can be either single or HA",
+				MarkdownDescription: "defines the nodeType it can be either single or HA",
+			},
+			"flavor_id": schema.StringAttribute{
+				Computed:            true,
+				Description:         "The id of the instance flavor.",
+				MarkdownDescription: "The id of the instance flavor.",
+			},
+			"description": schema.StringAttribute{
+				Computed:            true,
+				Description:         "The flavor description.",
+				MarkdownDescription: "The flavor description.",
+			},
+			"id": schema.StringAttribute{
+				Computed:            true,
+				Description:         "The id of the instance flavor.",
+				MarkdownDescription: "The id of the instance flavor.",
+			},
+			"max_gb": schema.Int64Attribute{
+				Computed:            true,
+				Description:         "maximum storage which can be ordered for the flavor in Gigabyte.",
+				MarkdownDescription: "maximum storage which can be ordered for the flavor in Gigabyte.",
+			},
+			"min_gb": schema.Int64Attribute{
+				Computed:            true,
+				Description:         "minimum storage which is required to order in Gigabyte.",
+				MarkdownDescription: "minimum storage which is required to order in Gigabyte.",
+			},
+			"storage_classes": schema.ListNestedAttribute{
+				NestedObject: schema.NestedAttributeObject{
+					Attributes: map[string]schema.Attribute{
+						"class": schema.StringAttribute{
+							Computed: true,
+						},
+						"max_io_per_sec": schema.Int64Attribute{
+							Computed: true,
+						},
+						"max_through_in_mb": schema.Int64Attribute{
+							Computed: true,
+						},
+					},
+					CustomType: sqlserverflexalphaGen.StorageClassesType{
+						ObjectType: types.ObjectType{
+							AttrTypes: sqlserverflexalphaGen.StorageClassesValue{}.AttributeTypes(ctx),
+						},
+					},
+				},
+				Computed:            true,
+				Description:         "maximum storage which can be ordered for the flavor in Gigabyte.",
+				MarkdownDescription: 
"maximum storage which can be ordered for the flavor in Gigabyte.", + }, + }, + //Attributes: map[string]schema.Attribute{ + // "project_id": schema.StringAttribute{ + // Required: true, + // Description: "The cpu count of the instance.", + // MarkdownDescription: "The cpu count of the instance.", + // }, + // "region": schema.StringAttribute{ + // Required: true, + // Description: "The flavor description.", + // MarkdownDescription: "The flavor description.", + // }, + // "cpu": schema.Int64Attribute{ + // Required: true, + // Description: "The cpu count of the instance.", + // MarkdownDescription: "The cpu count of the instance.", + // }, + // "ram": schema.Int64Attribute{ + // Required: true, + // Description: "The memory of the instance in Gibibyte.", + // MarkdownDescription: "The memory of the instance in Gibibyte.", + // }, + // "storage_class": schema.StringAttribute{ + // Required: true, + // Description: "The memory of the instance in Gibibyte.", + // MarkdownDescription: "The memory of the instance in Gibibyte.", + // }, + // "description": schema.StringAttribute{ + // Computed: true, + // Description: "The flavor description.", + // MarkdownDescription: "The flavor description.", + // }, + // "id": schema.StringAttribute{ + // Computed: true, + // Description: "The terraform id of the instance flavor.", + // MarkdownDescription: "The terraform id of the instance flavor.", + // }, + // "flavor_id": schema.StringAttribute{ + // Computed: true, + // Description: "The flavor id of the instance flavor.", + // MarkdownDescription: "The flavor id of the instance flavor.", + // }, + // "max_gb": schema.Int64Attribute{ + // Computed: true, + // Description: "maximum storage which can be ordered for the flavor in Gigabyte.", + // MarkdownDescription: "maximum storage which can be ordered for the flavor in Gigabyte.", + // }, + // "min_gb": schema.Int64Attribute{ + // Computed: true, + // Description: "minimum storage which is required to order in Gigabyte.", + // 
MarkdownDescription: "minimum storage which is required to order in Gigabyte.", + // }, + // "node_type": schema.StringAttribute{ + // Required: true, + // Description: "defines the nodeType it can be either single or replica", + // MarkdownDescription: "defines the nodeType it can be either single or replica", + // }, + // "storage_classes": schema.ListNestedAttribute{ + // Computed: true, + // NestedObject: schema.NestedAttributeObject{ + // Attributes: map[string]schema.Attribute{ + // "class": schema.StringAttribute{ + // Computed: true, + // }, + // "max_io_per_sec": schema.Int64Attribute{ + // Computed: true, + // }, + // "max_through_in_mb": schema.Int64Attribute{ + // Computed: true, + // }, + // }, + // CustomType: sqlserverflexalphaGen.StorageClassesType{ + // ObjectType: types.ObjectType{ + // AttrTypes: sqlserverflexalphaGen.StorageClassesValue{}.AttributeTypes(ctx), + // }, + // }, + // }, + // }, + // }, + } +} + +func (r *flavorDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var model FlavorModel + diags := req.Config.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + ctx = core.InitProviderContext(ctx) + + projectId := model.ProjectId.ValueString() + region := r.providerData.GetRegionWithOverride(model.Region) + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "region", region) + + flavors, err := getAllFlavors(ctx, r.client, projectId, region) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading flavors", fmt.Sprintf("getAllFlavors: %v", err)) + return + } + + var foundFlavors []sqlserverflexalphaPkg.ListFlavors + for _, flavor := range flavors { + if model.Cpu.ValueInt64() != *flavor.Cpu { + continue + } + if model.Memory.ValueInt64() != *flavor.Memory { + continue + } + if model.NodeType.ValueString() != *flavor.NodeType { + continue + } + for _, sc := range *flavor.StorageClasses { + if model.StorageClass.ValueString() != *sc.Class { + continue + } + foundFlavors = append(foundFlavors, flavor) + } + } + if len(foundFlavors) == 0 { + resp.Diagnostics.AddError("get flavor", "could not find requested flavor") + return + } + if len(foundFlavors) > 1 { + resp.Diagnostics.AddError("get flavor", "found too many matching flavors") + return + } + + f := foundFlavors[0] + model.Description = types.StringValue(*f.Description) + model.Id = utils.BuildInternalTerraformId(model.ProjectId.ValueString(), region, *f.Id) + model.FlavorId = types.StringValue(*f.Id) + model.MaxGb = types.Int64Value(*f.MaxGB) + model.MinGb = types.Int64Value(*f.MinGB) + + if f.StorageClasses == nil { + model.StorageClasses = types.ListNull(sqlserverflexalphaGen.StorageClassesType{ + ObjectType: basetypes.ObjectType{ + AttrTypes: sqlserverflexalphaGen.StorageClassesValue{}.AttributeTypes(ctx), + }, + }) + } else { + var scList []attr.Value + for _, sc := range *f.StorageClasses { + scList = append( + scList, + sqlserverflexalphaGen.NewStorageClassesValueMust( + sqlserverflexalphaGen.StorageClassesValue{}.AttributeTypes(ctx), + map[string]attr.Value{ + 
"class": types.StringValue(*sc.Class), + "max_io_per_sec": types.Int64Value(*sc.MaxIoPerSec), + "max_through_in_mb": types.Int64Value(*sc.MaxThroughInMb), + }, + ), + ) + } + storageClassesList := types.ListValueMust( + sqlserverflexalphaGen.StorageClassesType{ + ObjectType: basetypes.ObjectType{ + AttrTypes: sqlserverflexalphaGen.StorageClassesValue{}.AttributeTypes(ctx), + }, + }, + scList, + ) + model.StorageClasses = storageClassesList + } + + // Set refreshed state + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + tflog.Info(ctx, "SQL Server Flex flavors read") +} diff --git a/stackit/internal/services/sqlserverflexalpha/flavor/datasources_gen/flavor_data_source_gen.go b/stackit/internal/services/sqlserverflexalpha/flavor/datasources_gen/flavor_data_source_gen.go new file mode 100644 index 00000000..d8654cf4 --- /dev/null +++ b/stackit/internal/services/sqlserverflexalpha/flavor/datasources_gen/flavor_data_source_gen.go @@ -0,0 +1,1909 @@ +// Code generated by terraform-plugin-framework-generator DO NOT EDIT. 
+ +package sqlserverflexalpha + +import ( + "context" + "fmt" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" + "strings" + + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func FlavorDataSourceSchema(ctx context.Context) schema.Schema { + return schema.Schema{ + Attributes: map[string]schema.Attribute{ + "flavors": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "cpu": schema.Int64Attribute{ + Computed: true, + Description: "The cpu count of the instance.", + MarkdownDescription: "The cpu count of the instance.", + }, + "description": schema.StringAttribute{ + Computed: true, + Description: "The flavor description.", + MarkdownDescription: "The flavor description.", + }, + "id": schema.StringAttribute{ + Computed: true, + Description: "The id of the instance flavor.", + MarkdownDescription: "The id of the instance flavor.", + }, + "max_gb": schema.Int64Attribute{ + Computed: true, + Description: "maximum storage which can be ordered for the flavor in Gigabyte.", + MarkdownDescription: "maximum storage which can be ordered for the flavor in Gigabyte.", + }, + "memory": schema.Int64Attribute{ + Computed: true, + Description: "The memory of the instance in Gibibyte.", + MarkdownDescription: "The memory of the instance in Gibibyte.", + }, + "min_gb": schema.Int64Attribute{ + Computed: true, + Description: "minimum storage which is required to order in Gigabyte.", + MarkdownDescription: "minimum storage which is required to order in Gigabyte.", + }, + "node_type": schema.StringAttribute{ 
+ Computed: true, + Description: "defines the nodeType it can be either single or HA", + MarkdownDescription: "defines the nodeType it can be either single or HA", + }, + "storage_classes": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "class": schema.StringAttribute{ + Computed: true, + }, + "max_io_per_sec": schema.Int64Attribute{ + Computed: true, + }, + "max_through_in_mb": schema.Int64Attribute{ + Computed: true, + }, + }, + CustomType: StorageClassesType{ + ObjectType: types.ObjectType{ + AttrTypes: StorageClassesValue{}.AttributeTypes(ctx), + }, + }, + }, + Computed: true, + Description: "maximum storage which can be ordered for the flavor in Gigabyte.", + MarkdownDescription: "maximum storage which can be ordered for the flavor in Gigabyte.", + }, + }, + CustomType: FlavorsType{ + ObjectType: types.ObjectType{ + AttrTypes: FlavorsValue{}.AttributeTypes(ctx), + }, + }, + }, + Computed: true, + Description: "List of flavors available for the project.", + MarkdownDescription: "List of flavors available for the project.", + }, + "page": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Number of the page of items list to be returned.", + MarkdownDescription: "Number of the page of items list to be returned.", + }, + "pagination": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "page": schema.Int64Attribute{ + Computed: true, + }, + "size": schema.Int64Attribute{ + Computed: true, + }, + "sort": schema.StringAttribute{ + Computed: true, + }, + "total_pages": schema.Int64Attribute{ + Computed: true, + }, + "total_rows": schema.Int64Attribute{ + Computed: true, + }, + }, + CustomType: PaginationType{ + ObjectType: types.ObjectType{ + AttrTypes: PaginationValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + }, + "project_id": schema.StringAttribute{ + Required: true, + Description: "The STACKIT project ID.", + MarkdownDescription: "The 
STACKIT project ID.", + }, + "region": schema.StringAttribute{ + Required: true, + Description: "The region which should be addressed", + MarkdownDescription: "The region which should be addressed", + Validators: []validator.String{ + stringvalidator.OneOf( + "eu01", + ), + }, + }, + "size": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Number of items to be returned on each page.", + MarkdownDescription: "Number of items to be returned on each page.", + }, + "sort": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Sorting of the flavors to be returned on each page.", + MarkdownDescription: "Sorting of the flavors to be returned on each page.", + Validators: []validator.String{ + stringvalidator.OneOf( + "index.desc", + "index.asc", + "cpu.desc", + "cpu.asc", + "flavor_description.asc", + "flavor_description.desc", + "id.desc", + "id.asc", + "size_max.desc", + "size_max.asc", + "ram.desc", + "ram.asc", + "size_min.desc", + "size_min.asc", + "storage_class.asc", + "storage_class.desc", + "node_type.asc", + "node_type.desc", + ), + }, + }, + }, + } +} + +type FlavorModel struct { + Flavors types.List `tfsdk:"flavors"` + Page types.Int64 `tfsdk:"page"` + Pagination PaginationValue `tfsdk:"pagination"` + ProjectId types.String `tfsdk:"project_id"` + Region types.String `tfsdk:"region"` + Size types.Int64 `tfsdk:"size"` + Sort types.String `tfsdk:"sort"` +} + +var _ basetypes.ObjectTypable = FlavorsType{} + +type FlavorsType struct { + basetypes.ObjectType +} + +func (t FlavorsType) Equal(o attr.Type) bool { + other, ok := o.(FlavorsType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t FlavorsType) String() string { + return "FlavorsType" +} + +func (t FlavorsType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + cpuAttribute, ok := 
attributes["cpu"] + + if !ok { + diags.AddError( + "Attribute Missing", + `cpu is missing from object`) + + return nil, diags + } + + cpuVal, ok := cpuAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`cpu expected to be basetypes.Int64Value, was: %T`, cpuAttribute)) + } + + descriptionAttribute, ok := attributes["description"] + + if !ok { + diags.AddError( + "Attribute Missing", + `description is missing from object`) + + return nil, diags + } + + descriptionVal, ok := descriptionAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`description expected to be basetypes.StringValue, was: %T`, descriptionAttribute)) + } + + idAttribute, ok := attributes["id"] + + if !ok { + diags.AddError( + "Attribute Missing", + `id is missing from object`) + + return nil, diags + } + + idVal, ok := idAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`id expected to be basetypes.StringValue, was: %T`, idAttribute)) + } + + maxGbAttribute, ok := attributes["max_gb"] + + if !ok { + diags.AddError( + "Attribute Missing", + `max_gb is missing from object`) + + return nil, diags + } + + maxGbVal, ok := maxGbAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`max_gb expected to be basetypes.Int64Value, was: %T`, maxGbAttribute)) + } + + memoryAttribute, ok := attributes["memory"] + + if !ok { + diags.AddError( + "Attribute Missing", + `memory is missing from object`) + + return nil, diags + } + + memoryVal, ok := memoryAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`memory expected to be basetypes.Int64Value, was: %T`, memoryAttribute)) + } + + minGbAttribute, ok := attributes["min_gb"] + + if !ok { + diags.AddError( + "Attribute Missing", + `min_gb is missing from object`) + + return nil, diags + } + + minGbVal, ok := 
minGbAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`min_gb expected to be basetypes.Int64Value, was: %T`, minGbAttribute)) + } + + nodeTypeAttribute, ok := attributes["node_type"] + + if !ok { + diags.AddError( + "Attribute Missing", + `node_type is missing from object`) + + return nil, diags + } + + nodeTypeVal, ok := nodeTypeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`node_type expected to be basetypes.StringValue, was: %T`, nodeTypeAttribute)) + } + + storageClassesAttribute, ok := attributes["storage_classes"] + + if !ok { + diags.AddError( + "Attribute Missing", + `storage_classes is missing from object`) + + return nil, diags + } + + storageClassesVal, ok := storageClassesAttribute.(basetypes.ListValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`storage_classes expected to be basetypes.ListValue, was: %T`, storageClassesAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return FlavorsValue{ + Cpu: cpuVal, + Description: descriptionVal, + Id: idVal, + MaxGb: maxGbVal, + Memory: memoryVal, + MinGb: minGbVal, + NodeType: nodeTypeVal, + StorageClasses: storageClassesVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewFlavorsValueNull() FlavorsValue { + return FlavorsValue{ + state: attr.ValueStateNull, + } +} + +func NewFlavorsValueUnknown() FlavorsValue { + return FlavorsValue{ + state: attr.ValueStateUnknown, + } +} + +func NewFlavorsValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (FlavorsValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing FlavorsValue Attribute Value", + "While creating a FlavorsValue value, a 
missing attribute value was detected. "+ + "A FlavorsValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("FlavorsValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid FlavorsValue Attribute Type", + "While creating a FlavorsValue value, an invalid attribute value was detected. "+ + "A FlavorsValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("FlavorsValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("FlavorsValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra FlavorsValue Attribute Value", + "While creating a FlavorsValue value, an extra attribute value was detected. "+ + "A FlavorsValue must not contain values beyond the expected attribute types. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra FlavorsValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewFlavorsValueUnknown(), diags + } + + cpuAttribute, ok := attributes["cpu"] + + if !ok { + diags.AddError( + "Attribute Missing", + `cpu is missing from object`) + + return NewFlavorsValueUnknown(), diags + } + + cpuVal, ok := cpuAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`cpu expected to be basetypes.Int64Value, was: %T`, cpuAttribute)) + } + + descriptionAttribute, ok := attributes["description"] + + if !ok { + diags.AddError( + "Attribute Missing", + `description is missing from object`) + + return NewFlavorsValueUnknown(), diags + } + + descriptionVal, ok := descriptionAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`description expected to be basetypes.StringValue, was: %T`, descriptionAttribute)) + } + + idAttribute, ok := attributes["id"] + + if !ok { + diags.AddError( + "Attribute Missing", + `id is missing from object`) + + return NewFlavorsValueUnknown(), diags + } + + idVal, ok := idAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`id expected to be basetypes.StringValue, was: %T`, idAttribute)) + } + + maxGbAttribute, ok := attributes["max_gb"] + + if !ok { + diags.AddError( + "Attribute Missing", + `max_gb is missing from object`) + + return NewFlavorsValueUnknown(), diags + } + + maxGbVal, ok := maxGbAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`max_gb expected to be basetypes.Int64Value, was: %T`, maxGbAttribute)) + } + + memoryAttribute, ok := attributes["memory"] + + if !ok { + diags.AddError( + "Attribute Missing", + `memory is missing from object`) + + return NewFlavorsValueUnknown(), diags + } + + 
memoryVal, ok := memoryAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`memory expected to be basetypes.Int64Value, was: %T`, memoryAttribute)) + } + + minGbAttribute, ok := attributes["min_gb"] + + if !ok { + diags.AddError( + "Attribute Missing", + `min_gb is missing from object`) + + return NewFlavorsValueUnknown(), diags + } + + minGbVal, ok := minGbAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`min_gb expected to be basetypes.Int64Value, was: %T`, minGbAttribute)) + } + + nodeTypeAttribute, ok := attributes["node_type"] + + if !ok { + diags.AddError( + "Attribute Missing", + `node_type is missing from object`) + + return NewFlavorsValueUnknown(), diags + } + + nodeTypeVal, ok := nodeTypeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`node_type expected to be basetypes.StringValue, was: %T`, nodeTypeAttribute)) + } + + storageClassesAttribute, ok := attributes["storage_classes"] + + if !ok { + diags.AddError( + "Attribute Missing", + `storage_classes is missing from object`) + + return NewFlavorsValueUnknown(), diags + } + + storageClassesVal, ok := storageClassesAttribute.(basetypes.ListValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`storage_classes expected to be basetypes.ListValue, was: %T`, storageClassesAttribute)) + } + + if diags.HasError() { + return NewFlavorsValueUnknown(), diags + } + + return FlavorsValue{ + Cpu: cpuVal, + Description: descriptionVal, + Id: idVal, + MaxGb: maxGbVal, + Memory: memoryVal, + MinGb: minGbVal, + NodeType: nodeTypeVal, + StorageClasses: storageClassesVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewFlavorsValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) FlavorsValue { + object, diags := NewFlavorsValue(attributeTypes, attributes) + + if diags.HasError() { + // This could 
potentially be added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewFlavorsValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t FlavorsType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewFlavorsValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewFlavorsValueUnknown(), nil + } + + if in.IsNull() { + return NewFlavorsValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewFlavorsValueMust(FlavorsValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t FlavorsType) ValueType(ctx context.Context) attr.Value { + return FlavorsValue{} +} + +var _ basetypes.ObjectValuable = FlavorsValue{} + +type FlavorsValue struct { + Cpu basetypes.Int64Value `tfsdk:"cpu"` + Description basetypes.StringValue `tfsdk:"description"` + Id basetypes.StringValue `tfsdk:"id"` + MaxGb basetypes.Int64Value `tfsdk:"max_gb"` + Memory basetypes.Int64Value `tfsdk:"memory"` + MinGb basetypes.Int64Value `tfsdk:"min_gb"` + NodeType basetypes.StringValue `tfsdk:"node_type"` + StorageClasses basetypes.ListValue `tfsdk:"storage_classes"` + state attr.ValueState +} + +func (v FlavorsValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 8) + + var val tftypes.Value + var err error + + attrTypes["cpu"] = 
basetypes.Int64Type{}.TerraformType(ctx) + attrTypes["description"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["id"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["max_gb"] = basetypes.Int64Type{}.TerraformType(ctx) + attrTypes["memory"] = basetypes.Int64Type{}.TerraformType(ctx) + attrTypes["min_gb"] = basetypes.Int64Type{}.TerraformType(ctx) + attrTypes["node_type"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["storage_classes"] = basetypes.ListType{ + ElemType: StorageClassesValue{}.Type(ctx), + }.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 8) + + val, err = v.Cpu.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["cpu"] = val + + val, err = v.Description.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["description"] = val + + val, err = v.Id.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["id"] = val + + val, err = v.MaxGb.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["max_gb"] = val + + val, err = v.Memory.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["memory"] = val + + val, err = v.MinGb.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["min_gb"] = val + + val, err = v.NodeType.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["node_type"] = val + + val, err = v.StorageClasses.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + 
vals["storage_classes"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v FlavorsValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v FlavorsValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v FlavorsValue) String() string { + return "FlavorsValue" +} + +func (v FlavorsValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + storageClasses := types.ListValueMust( + StorageClassesType{ + basetypes.ObjectType{ + AttrTypes: StorageClassesValue{}.AttributeTypes(ctx), + }, + }, + v.StorageClasses.Elements(), + ) + + if v.StorageClasses.IsNull() { + storageClasses = types.ListNull( + StorageClassesType{ + basetypes.ObjectType{ + AttrTypes: StorageClassesValue{}.AttributeTypes(ctx), + }, + }, + ) + } + + if v.StorageClasses.IsUnknown() { + storageClasses = types.ListUnknown( + StorageClassesType{ + basetypes.ObjectType{ + AttrTypes: StorageClassesValue{}.AttributeTypes(ctx), + }, + }, + ) + } + + attributeTypes := map[string]attr.Type{ + "cpu": basetypes.Int64Type{}, + "description": basetypes.StringType{}, + "id": basetypes.StringType{}, + "max_gb": basetypes.Int64Type{}, + "memory": basetypes.Int64Type{}, + "min_gb": basetypes.Int64Type{}, + "node_type": basetypes.StringType{}, + "storage_classes": basetypes.ListType{ + ElemType: StorageClassesValue{}.Type(ctx), + }, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags 
:= types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "cpu": v.Cpu, + "description": v.Description, + "id": v.Id, + "max_gb": v.MaxGb, + "memory": v.Memory, + "min_gb": v.MinGb, + "node_type": v.NodeType, + "storage_classes": storageClasses, + }) + + return objVal, diags +} + +func (v FlavorsValue) Equal(o attr.Value) bool { + other, ok := o.(FlavorsValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.Cpu.Equal(other.Cpu) { + return false + } + + if !v.Description.Equal(other.Description) { + return false + } + + if !v.Id.Equal(other.Id) { + return false + } + + if !v.MaxGb.Equal(other.MaxGb) { + return false + } + + if !v.Memory.Equal(other.Memory) { + return false + } + + if !v.MinGb.Equal(other.MinGb) { + return false + } + + if !v.NodeType.Equal(other.NodeType) { + return false + } + + if !v.StorageClasses.Equal(other.StorageClasses) { + return false + } + + return true +} + +func (v FlavorsValue) Type(ctx context.Context) attr.Type { + return FlavorsType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v FlavorsValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "cpu": basetypes.Int64Type{}, + "description": basetypes.StringType{}, + "id": basetypes.StringType{}, + "max_gb": basetypes.Int64Type{}, + "memory": basetypes.Int64Type{}, + "min_gb": basetypes.Int64Type{}, + "node_type": basetypes.StringType{}, + "storage_classes": basetypes.ListType{ + ElemType: StorageClassesValue{}.Type(ctx), + }, + } +} + +var _ basetypes.ObjectTypable = StorageClassesType{} + +type StorageClassesType struct { + basetypes.ObjectType +} + +func (t StorageClassesType) Equal(o attr.Type) bool { + other, ok := o.(StorageClassesType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t StorageClassesType) String() string { + return 
"StorageClassesType" +} + +func (t StorageClassesType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + classAttribute, ok := attributes["class"] + + if !ok { + diags.AddError( + "Attribute Missing", + `class is missing from object`) + + return nil, diags + } + + classVal, ok := classAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`class expected to be basetypes.StringValue, was: %T`, classAttribute)) + } + + maxIoPerSecAttribute, ok := attributes["max_io_per_sec"] + + if !ok { + diags.AddError( + "Attribute Missing", + `max_io_per_sec is missing from object`) + + return nil, diags + } + + maxIoPerSecVal, ok := maxIoPerSecAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`max_io_per_sec expected to be basetypes.Int64Value, was: %T`, maxIoPerSecAttribute)) + } + + maxThroughInMbAttribute, ok := attributes["max_through_in_mb"] + + if !ok { + diags.AddError( + "Attribute Missing", + `max_through_in_mb is missing from object`) + + return nil, diags + } + + maxThroughInMbVal, ok := maxThroughInMbAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`max_through_in_mb expected to be basetypes.Int64Value, was: %T`, maxThroughInMbAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return StorageClassesValue{ + Class: classVal, + MaxIoPerSec: maxIoPerSecVal, + MaxThroughInMb: maxThroughInMbVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewStorageClassesValueNull() StorageClassesValue { + return StorageClassesValue{ + state: attr.ValueStateNull, + } +} + +func NewStorageClassesValueUnknown() StorageClassesValue { + return StorageClassesValue{ + state: attr.ValueStateUnknown, + } +} + +func NewStorageClassesValue(attributeTypes map[string]attr.Type, attributes 
map[string]attr.Value) (StorageClassesValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing StorageClassesValue Attribute Value", + "While creating a StorageClassesValue value, a missing attribute value was detected. "+ + "A StorageClassesValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("StorageClassesValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid StorageClassesValue Attribute Type", + "While creating a StorageClassesValue value, an invalid attribute value was detected. "+ + "A StorageClassesValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("StorageClassesValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("StorageClassesValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra StorageClassesValue Attribute Value", + "While creating a StorageClassesValue value, an extra attribute value was detected. "+ + "A StorageClassesValue must not contain values beyond the expected attribute types. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra StorageClassesValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewStorageClassesValueUnknown(), diags + } + + classAttribute, ok := attributes["class"] + + if !ok { + diags.AddError( + "Attribute Missing", + `class is missing from object`) + + return NewStorageClassesValueUnknown(), diags + } + + classVal, ok := classAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`class expected to be basetypes.StringValue, was: %T`, classAttribute)) + } + + maxIoPerSecAttribute, ok := attributes["max_io_per_sec"] + + if !ok { + diags.AddError( + "Attribute Missing", + `max_io_per_sec is missing from object`) + + return NewStorageClassesValueUnknown(), diags + } + + maxIoPerSecVal, ok := maxIoPerSecAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`max_io_per_sec expected to be basetypes.Int64Value, was: %T`, maxIoPerSecAttribute)) + } + + maxThroughInMbAttribute, ok := attributes["max_through_in_mb"] + + if !ok { + diags.AddError( + "Attribute Missing", + `max_through_in_mb is missing from object`) + + return NewStorageClassesValueUnknown(), diags + } + + maxThroughInMbVal, ok := maxThroughInMbAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`max_through_in_mb expected to be basetypes.Int64Value, was: %T`, maxThroughInMbAttribute)) + } + + if diags.HasError() { + return NewStorageClassesValueUnknown(), diags + } + + return StorageClassesValue{ + Class: classVal, + MaxIoPerSec: maxIoPerSecVal, + MaxThroughInMb: maxThroughInMbVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewStorageClassesValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) StorageClassesValue { + object, diags := NewStorageClassesValue(attributeTypes, 
attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewStorageClassesValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t StorageClassesType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewStorageClassesValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewStorageClassesValueUnknown(), nil + } + + if in.IsNull() { + return NewStorageClassesValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewStorageClassesValueMust(StorageClassesValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t StorageClassesType) ValueType(ctx context.Context) attr.Value { + return StorageClassesValue{} +} + +var _ basetypes.ObjectValuable = StorageClassesValue{} + +type StorageClassesValue struct { + Class basetypes.StringValue `tfsdk:"class"` + MaxIoPerSec basetypes.Int64Value `tfsdk:"max_io_per_sec"` + MaxThroughInMb basetypes.Int64Value `tfsdk:"max_through_in_mb"` + state attr.ValueState +} + +func (v StorageClassesValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 3) + + var val tftypes.Value + var err error + + attrTypes["class"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["max_io_per_sec"] = 
basetypes.Int64Type{}.TerraformType(ctx) + attrTypes["max_through_in_mb"] = basetypes.Int64Type{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 3) + + val, err = v.Class.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["class"] = val + + val, err = v.MaxIoPerSec.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["max_io_per_sec"] = val + + val, err = v.MaxThroughInMb.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["max_through_in_mb"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v StorageClassesValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v StorageClassesValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v StorageClassesValue) String() string { + return "StorageClassesValue" +} + +func (v StorageClassesValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + attributeTypes := map[string]attr.Type{ + "class": basetypes.StringType{}, + "max_io_per_sec": basetypes.Int64Type{}, + "max_through_in_mb": basetypes.Int64Type{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := 
types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "class": v.Class, + "max_io_per_sec": v.MaxIoPerSec, + "max_through_in_mb": v.MaxThroughInMb, + }) + + return objVal, diags +} + +func (v StorageClassesValue) Equal(o attr.Value) bool { + other, ok := o.(StorageClassesValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.Class.Equal(other.Class) { + return false + } + + if !v.MaxIoPerSec.Equal(other.MaxIoPerSec) { + return false + } + + if !v.MaxThroughInMb.Equal(other.MaxThroughInMb) { + return false + } + + return true +} + +func (v StorageClassesValue) Type(ctx context.Context) attr.Type { + return StorageClassesType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v StorageClassesValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "class": basetypes.StringType{}, + "max_io_per_sec": basetypes.Int64Type{}, + "max_through_in_mb": basetypes.Int64Type{}, + } +} + +var _ basetypes.ObjectTypable = PaginationType{} + +type PaginationType struct { + basetypes.ObjectType +} + +func (t PaginationType) Equal(o attr.Type) bool { + other, ok := o.(PaginationType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t PaginationType) String() string { + return "PaginationType" +} + +func (t PaginationType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + pageAttribute, ok := attributes["page"] + + if !ok { + diags.AddError( + "Attribute Missing", + `page is missing from object`) + + return nil, diags + } + + pageVal, ok := pageAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`page expected to be basetypes.Int64Value, was: %T`, pageAttribute)) + } + + 
sizeAttribute, ok := attributes["size"] + + if !ok { + diags.AddError( + "Attribute Missing", + `size is missing from object`) + + return nil, diags + } + + sizeVal, ok := sizeAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`size expected to be basetypes.Int64Value, was: %T`, sizeAttribute)) + } + + sortAttribute, ok := attributes["sort"] + + if !ok { + diags.AddError( + "Attribute Missing", + `sort is missing from object`) + + return nil, diags + } + + sortVal, ok := sortAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`sort expected to be basetypes.StringValue, was: %T`, sortAttribute)) + } + + totalPagesAttribute, ok := attributes["total_pages"] + + if !ok { + diags.AddError( + "Attribute Missing", + `total_pages is missing from object`) + + return nil, diags + } + + totalPagesVal, ok := totalPagesAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`total_pages expected to be basetypes.Int64Value, was: %T`, totalPagesAttribute)) + } + + totalRowsAttribute, ok := attributes["total_rows"] + + if !ok { + diags.AddError( + "Attribute Missing", + `total_rows is missing from object`) + + return nil, diags + } + + totalRowsVal, ok := totalRowsAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`total_rows expected to be basetypes.Int64Value, was: %T`, totalRowsAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return PaginationValue{ + Page: pageVal, + Size: sizeVal, + Sort: sortVal, + TotalPages: totalPagesVal, + TotalRows: totalRowsVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewPaginationValueNull() PaginationValue { + return PaginationValue{ + state: attr.ValueStateNull, + } +} + +func NewPaginationValueUnknown() PaginationValue { + return PaginationValue{ + state: attr.ValueStateUnknown, + } +} + +func 
NewPaginationValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (PaginationValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing PaginationValue Attribute Value", + "While creating a PaginationValue value, a missing attribute value was detected. "+ + "A PaginationValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("PaginationValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid PaginationValue Attribute Type", + "While creating a PaginationValue value, an invalid attribute value was detected. "+ + "A PaginationValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("PaginationValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("PaginationValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra PaginationValue Attribute Value", + "While creating a PaginationValue value, an extra attribute value was detected. "+ + "A PaginationValue must not contain values beyond the expected attribute types. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra PaginationValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewPaginationValueUnknown(), diags + } + + pageAttribute, ok := attributes["page"] + + if !ok { + diags.AddError( + "Attribute Missing", + `page is missing from object`) + + return NewPaginationValueUnknown(), diags + } + + pageVal, ok := pageAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`page expected to be basetypes.Int64Value, was: %T`, pageAttribute)) + } + + sizeAttribute, ok := attributes["size"] + + if !ok { + diags.AddError( + "Attribute Missing", + `size is missing from object`) + + return NewPaginationValueUnknown(), diags + } + + sizeVal, ok := sizeAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`size expected to be basetypes.Int64Value, was: %T`, sizeAttribute)) + } + + sortAttribute, ok := attributes["sort"] + + if !ok { + diags.AddError( + "Attribute Missing", + `sort is missing from object`) + + return NewPaginationValueUnknown(), diags + } + + sortVal, ok := sortAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`sort expected to be basetypes.StringValue, was: %T`, sortAttribute)) + } + + totalPagesAttribute, ok := attributes["total_pages"] + + if !ok { + diags.AddError( + "Attribute Missing", + `total_pages is missing from object`) + + return NewPaginationValueUnknown(), diags + } + + totalPagesVal, ok := totalPagesAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`total_pages expected to be basetypes.Int64Value, was: %T`, totalPagesAttribute)) + } + + totalRowsAttribute, ok := attributes["total_rows"] + + if !ok { + diags.AddError( + "Attribute Missing", + `total_rows is missing from object`) + + return 
NewPaginationValueUnknown(), diags + } + + totalRowsVal, ok := totalRowsAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`total_rows expected to be basetypes.Int64Value, was: %T`, totalRowsAttribute)) + } + + if diags.HasError() { + return NewPaginationValueUnknown(), diags + } + + return PaginationValue{ + Page: pageVal, + Size: sizeVal, + Sort: sortVal, + TotalPages: totalPagesVal, + TotalRows: totalRowsVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewPaginationValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) PaginationValue { + object, diags := NewPaginationValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewPaginationValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t PaginationType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewPaginationValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewPaginationValueUnknown(), nil + } + + if in.IsNull() { + return NewPaginationValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewPaginationValueMust(PaginationValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t PaginationType) 
ValueType(ctx context.Context) attr.Value { + return PaginationValue{} +} + +var _ basetypes.ObjectValuable = PaginationValue{} + +type PaginationValue struct { + Page basetypes.Int64Value `tfsdk:"page"` + Size basetypes.Int64Value `tfsdk:"size"` + Sort basetypes.StringValue `tfsdk:"sort"` + TotalPages basetypes.Int64Value `tfsdk:"total_pages"` + TotalRows basetypes.Int64Value `tfsdk:"total_rows"` + state attr.ValueState +} + +func (v PaginationValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 5) + + var val tftypes.Value + var err error + + attrTypes["page"] = basetypes.Int64Type{}.TerraformType(ctx) + attrTypes["size"] = basetypes.Int64Type{}.TerraformType(ctx) + attrTypes["sort"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["total_pages"] = basetypes.Int64Type{}.TerraformType(ctx) + attrTypes["total_rows"] = basetypes.Int64Type{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 5) + + val, err = v.Page.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["page"] = val + + val, err = v.Size.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["size"] = val + + val, err = v.Sort.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["sort"] = val + + val, err = v.TotalPages.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["total_pages"] = val + + val, err = v.TotalRows.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["total_rows"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return 
tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v PaginationValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v PaginationValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v PaginationValue) String() string { + return "PaginationValue" +} + +func (v PaginationValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + attributeTypes := map[string]attr.Type{ + "page": basetypes.Int64Type{}, + "size": basetypes.Int64Type{}, + "sort": basetypes.StringType{}, + "total_pages": basetypes.Int64Type{}, + "total_rows": basetypes.Int64Type{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "page": v.Page, + "size": v.Size, + "sort": v.Sort, + "total_pages": v.TotalPages, + "total_rows": v.TotalRows, + }) + + return objVal, diags +} + +func (v PaginationValue) Equal(o attr.Value) bool { + other, ok := o.(PaginationValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.Page.Equal(other.Page) { + return false + } + + if !v.Size.Equal(other.Size) { + return false + } + + if !v.Sort.Equal(other.Sort) { + return false + } + + if !v.TotalPages.Equal(other.TotalPages) { + return false + } + + if !v.TotalRows.Equal(other.TotalRows) { + return false + } + + return true +} + +func (v PaginationValue) Type(ctx context.Context) attr.Type { + return 
PaginationType{
		basetypes.ObjectType{
			AttrTypes: v.AttributeTypes(ctx),
		},
	}
}

// AttributeTypes returns the attr.Type map describing the pagination object.
func (v PaginationValue) AttributeTypes(ctx context.Context) map[string]attr.Type {
	return map[string]attr.Type{
		"page":        basetypes.Int64Type{},
		"size":        basetypes.Int64Type{},
		"sort":        basetypes.StringType{},
		"total_pages": basetypes.Int64Type{},
		"total_rows":  basetypes.Int64Type{},
	}
}
diff --git a/stackit/internal/services/sqlserverflexalpha/flavor/functions.go b/stackit/internal/services/sqlserverflexalpha/flavor/functions.go
new file mode 100644
index 00000000..469b7bce
--- /dev/null
+++ b/stackit/internal/services/sqlserverflexalpha/flavor/functions.go
@@ -0,0 +1,65 @@
package sqlserverflexalphaFlavor

import (
	"context"
	"fmt"

	"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexalpha"
)

// flavorsClientReader is the consumer-side seam over the generated API
// client, allowing the flavor listing helpers to be unit-tested with mocks.
type flavorsClientReader interface {
	GetFlavorsRequest(
		ctx context.Context,
		projectId, region string,
	) sqlserverflexalpha.ApiGetFlavorsRequestRequest
}

// getAllFlavors returns every flavor of the given project and region by
// paging through the list endpoint with a filter that accepts everything.
func getAllFlavors(ctx context.Context, client flavorsClientReader, projectId, region string) (
	[]sqlserverflexalpha.ListFlavors,
	error,
) {
	getAllFilter := func(_ sqlserverflexalpha.ListFlavors) bool { return true }
	flavorList, err := getFlavorsByFilter(ctx, client, projectId, region, getAllFilter)
	if err != nil {
		return nil, err
	}
	return flavorList, nil
}

// getFlavorsByFilter is a helper function to retrieve flavors using a filter function.
// Hint: The API does not have a GetFlavors endpoint, only ListFlavors
//
// NOTE(review): the loop below terminates only when the API returns an empty
// page; a misbehaving server that keeps echoing items would loop forever.
// Consider adding a max-page safety bound — confirm expected API behavior.
func getFlavorsByFilter(
	ctx context.Context,
	client flavorsClientReader,
	projectId, region string,
	filter func(db sqlserverflexalpha.ListFlavors) bool,
) ([]sqlserverflexalpha.ListFlavors, error) {
	// Both path parameters are mandatory for building the request.
	if projectId == "" || region == "" {
		return nil, fmt.Errorf("listing sqlserverflexalpha flavors: projectId and region are required")
	}

	// Fixed page size used for every request.
	const pageSize = 25

	var result = make([]sqlserverflexalpha.ListFlavors, 0)

	// Pages are 1-based; iterate until the API reports an empty page.
	for page := int64(1); ; page++ {
		res, err := client.GetFlavorsRequest(ctx, projectId, region).
			Page(page).Size(pageSize).Sort(sqlserverflexalpha.FLAVORSORT_INDEX_ASC).Execute()
		if err != nil {
			return nil, fmt.Errorf("requesting flavors list (page %d): %w", page, err)
		}

		// If the API returns no flavors, we have reached the end of the list.
		if res.Flavors == nil || len(*res.Flavors) == 0 {
			break
		}

		for _, flavor := range *res.Flavors {
			if filter(flavor) {
				result = append(result, flavor)
			}
		}
	}

	return result, nil
}
diff --git a/stackit/internal/services/sqlserverflexalpha/flavor/functions_test.go b/stackit/internal/services/sqlserverflexalpha/flavor/functions_test.go
new file mode 100644
index 00000000..bed6462c
--- /dev/null
+++ b/stackit/internal/services/sqlserverflexalpha/flavor/functions_test.go
@@ -0,0 +1,135 @@
package sqlserverflexalphaFlavor

import (
	"context"
	"testing"

	"github.com/stackitcloud/stackit-sdk-go/core/utils"

	"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexalpha"
)

// mockRequest is a test double for the generated request builder: each
// builder method returns the mock itself and Execute delegates to executeFunc.
type mockRequest struct {
	executeFunc func() (*sqlserverflexalpha.GetFlavorsResponse, error)
}

func (m *mockRequest) Page(_ int64) sqlserverflexalpha.ApiGetFlavorsRequestRequest { return m }
func (m *mockRequest) Size(_ int64) sqlserverflexalpha.ApiGetFlavorsRequestRequest { return m }
func (m *mockRequest) Sort(_ sqlserverflexalpha.FlavorSort)
sqlserverflexalpha.ApiGetFlavorsRequestRequest { + return m +} +func (m *mockRequest) Execute() (*sqlserverflexalpha.GetFlavorsResponse, error) { + return m.executeFunc() +} + +type mockFlavorsClient struct { + executeRequest func() sqlserverflexalpha.ApiGetFlavorsRequestRequest +} + +func (m *mockFlavorsClient) GetFlavorsRequest(_ context.Context, _, _ string) sqlserverflexalpha.ApiGetFlavorsRequestRequest { + return m.executeRequest() +} + +var mockResp = func(page int64) (*sqlserverflexalpha.GetFlavorsResponse, error) { + if page == 1 { + return &sqlserverflexalpha.GetFlavorsResponse{ + Flavors: &[]sqlserverflexalpha.ListFlavors{ + {Id: utils.Ptr("flavor-1"), Description: utils.Ptr("first")}, + {Id: utils.Ptr("flavor-2"), Description: utils.Ptr("second")}, + }, + }, nil + } + if page == 2 { + return &sqlserverflexalpha.GetFlavorsResponse{ + Flavors: &[]sqlserverflexalpha.ListFlavors{ + {Id: utils.Ptr("flavor-3"), Description: utils.Ptr("three")}, + }, + }, nil + } + + return &sqlserverflexalpha.GetFlavorsResponse{ + Flavors: &[]sqlserverflexalpha.ListFlavors{}, + }, nil +} + +func TestGetFlavorsByFilter(t *testing.T) { + tests := []struct { + description string + projectId string + region string + mockErr error + filter func(sqlserverflexalpha.ListFlavors) bool + wantCount int + wantErr bool + }{ + { + description: "Success - Get all flavors (2 pages)", + projectId: "pid", region: "reg", + filter: func(_ sqlserverflexalpha.ListFlavors) bool { return true }, + wantCount: 3, + wantErr: false, + }, + { + description: "Success - Filter flavors by description", + projectId: "pid", region: "reg", + filter: func(f sqlserverflexalpha.ListFlavors) bool { return *f.Description == "first" }, + wantCount: 1, + wantErr: false, + }, + { + description: "Error - Missing parameters", + projectId: "", region: "reg", + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run( + tt.description, func(t *testing.T) { + var currentPage int64 + client := &mockFlavorsClient{ + 
executeRequest: func() sqlserverflexalpha.ApiGetFlavorsRequestRequest { + return &mockRequest{ + executeFunc: func() (*sqlserverflexalpha.GetFlavorsResponse, error) { + currentPage++ + return mockResp(currentPage) + }, + } + }, + } + actual, err := getFlavorsByFilter(context.Background(), client, tt.projectId, tt.region, tt.filter) + + if (err != nil) != tt.wantErr { + t.Errorf("getFlavorsByFilter() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if !tt.wantErr && len(actual) != tt.wantCount { + t.Errorf("getFlavorsByFilter() got %d flavors, want %d", len(actual), tt.wantCount) + } + }, + ) + } +} + +func TestGetAllFlavors(t *testing.T) { + var currentPage int64 + client := &mockFlavorsClient{ + executeRequest: func() sqlserverflexalpha.ApiGetFlavorsRequestRequest { + return &mockRequest{ + executeFunc: func() (*sqlserverflexalpha.GetFlavorsResponse, error) { + currentPage++ + return mockResp(currentPage) + }, + } + }, + } + + res, err := getAllFlavors(context.Background(), client, "pid", "reg") + if err != nil { + t.Errorf("getAllFlavors() unexpected error: %v", err) + } + if len(res) != 3 { + t.Errorf("getAllFlavors() expected 3 flavor, got %d", len(res)) + } +} diff --git a/stackit/internal/services/sqlserverflexalpha/flavors/datasource.go b/stackit/internal/services/sqlserverflexalpha/flavors/datasource.go new file mode 100644 index 00000000..2286e81b --- /dev/null +++ b/stackit/internal/services/sqlserverflexalpha/flavors/datasource.go @@ -0,0 +1,156 @@ +package sqlserverflexalpha + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/stackit-sdk-go/core/config" + + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/conversion" + 
"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core" + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/utils" + + sqlserverflexalphaPkg "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexalpha" + + sqlserverflexalphaGen "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/flavors/datasources_gen" +) + +var _ datasource.DataSource = (*flavorsDataSource)(nil) + +const errorPrefix = "[sqlserverflexalpha - Flavors]" + +func NewFlavorsDataSource() datasource.DataSource { + return &flavorsDataSource{} +} + +type dataSourceModel struct { + sqlserverflexalphaGen.FlavorsModel + TerraformId types.String `tfsdk:"id"` +} + +type flavorsDataSource struct { + client *sqlserverflexalphaPkg.APIClient + providerData core.ProviderData +} + +func (d *flavorsDataSource) Metadata( + _ context.Context, + req datasource.MetadataRequest, + resp *datasource.MetadataResponse, +) { + resp.TypeName = req.ProviderTypeName + "_sqlserverflexalpha_flavors" +} + +func (d *flavorsDataSource) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = sqlserverflexalphaGen.FlavorsDataSourceSchema(ctx) + resp.Schema.Attributes["id"] = schema.StringAttribute{ + Computed: true, + Description: "The terraform internal identifier.", + MarkdownDescription: "The terraform internal identifier.", + } +} + +// Configure adds the provider configured client to the data source. 
+func (d *flavorsDataSource) Configure( + ctx context.Context, + req datasource.ConfigureRequest, + resp *datasource.ConfigureResponse, +) { + var ok bool + d.providerData, ok = conversion.ParseProviderData(ctx, req.ProviderData, &resp.Diagnostics) + if !ok { + return + } + + apiClientConfigOptions := []config.ConfigurationOption{ + config.WithCustomAuth(d.providerData.RoundTripper), + utils.UserAgentConfigOption(d.providerData.Version), + } + if d.providerData.SQLServerFlexCustomEndpoint != "" { + apiClientConfigOptions = append( + apiClientConfigOptions, + config.WithEndpoint(d.providerData.SQLServerFlexCustomEndpoint), + ) + } else { + apiClientConfigOptions = append( + apiClientConfigOptions, + config.WithRegion(d.providerData.GetRegion()), + ) + } + apiClient, err := sqlserverflexalphaPkg.NewAPIClient(apiClientConfigOptions...) + if err != nil { + resp.Diagnostics.AddError( + "Error configuring API client", + fmt.Sprintf( + "Configuring client: %v. This is an error related to the provider configuration, not to the resource configuration", + err, + ), + ) + return + } + d.client = apiClient + tflog.Info(ctx, fmt.Sprintf("%s client configured", errorPrefix)) +} + +func (d *flavorsDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data dataSourceModel + + // Read Terraform configuration data into the model + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) 
+ + if resp.Diagnostics.HasError() { + return + } + + ctx = core.InitProviderContext(ctx) + + projectId := data.ProjectId.ValueString() + region := d.providerData.GetRegionWithOverride(data.Region) + // TODO: implement right identifier for flavors + flavorsId := data.Flavors + + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "region", region) + + // TODO: implement needed fields + ctx = tflog.SetField(ctx, "flavors_id", flavorsId) + + // TODO: refactor to correct implementation + _, err := d.client.GetFlavorsRequest(ctx, projectId, region).Execute() + if err != nil { + utils.LogError( + ctx, + &resp.Diagnostics, + err, + "Reading flavors", + fmt.Sprintf("flavors with ID %q does not exist in project %q.", flavorsId, projectId), + map[int]string{ + http.StatusForbidden: fmt.Sprintf("Project with ID %q not found or forbidden access", projectId), + }, + ) + resp.State.RemoveResource(ctx) + return + } + + ctx = core.LogResponse(ctx) + + // TODO: refactor to correct implementation of internal tf id + data.TerraformId = utils.BuildInternalTerraformId(projectId, region) + + // TODO: fill remaining fields + // data.Flavors = types.Sometype(apiResponse.GetFlavors()) + // data.Page = types.Sometype(apiResponse.GetPage()) + // data.Pagination = types.Sometype(apiResponse.GetPagination()) + // data.ProjectId = types.Sometype(apiResponse.GetProjectId()) + // data.Region = types.Sometype(apiResponse.GetRegion()) + // data.Size = types.Sometype(apiResponse.GetSize()) + // data.Sort = types.Sometype(apiResponse.GetSort())// Save data into Terraform state + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
+ + tflog.Info(ctx, fmt.Sprintf("%s read successful", errorPrefix)) +} diff --git a/stackit/internal/services/sqlserverflexalpha/instance/datasource.go b/stackit/internal/services/sqlserverflexalpha/instance/datasource.go new file mode 100644 index 00000000..123b1fe8 --- /dev/null +++ b/stackit/internal/services/sqlserverflexalpha/instance/datasource.go @@ -0,0 +1,146 @@ +package sqlserverflexalpha + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/stackit-sdk-go/core/config" + + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/conversion" + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core" + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/utils" + + sqlserverflexalphaPkg "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexalpha" + + sqlserverflexalphaGen "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/instance/datasources_gen" +) + +var _ datasource.DataSource = (*instanceDataSource)(nil) + +const errorPrefix = "[sqlserverflexalpha - Instance]" + +func NewInstanceDataSource() datasource.DataSource { + return &instanceDataSource{} +} + +// dataSourceModel maps the data source schema data. 
+type dataSourceModel struct { + sqlserverflexalphaGen.InstanceModel + TerraformID types.String `tfsdk:"id"` +} + +type instanceDataSource struct { + client *sqlserverflexalphaPkg.APIClient + providerData core.ProviderData +} + +func (d *instanceDataSource) Metadata( + _ context.Context, + req datasource.MetadataRequest, + resp *datasource.MetadataResponse, +) { + resp.TypeName = req.ProviderTypeName + "_sqlserverflexalpha_instance" +} + +func (d *instanceDataSource) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = sqlserverflexalphaGen.InstanceDataSourceSchema(ctx) +} + +// Configure adds the provider configured client to the data source. +func (d *instanceDataSource) Configure( + ctx context.Context, + req datasource.ConfigureRequest, + resp *datasource.ConfigureResponse, +) { + var ok bool + d.providerData, ok = conversion.ParseProviderData(ctx, req.ProviderData, &resp.Diagnostics) + if !ok { + return + } + + apiClientConfigOptions := []config.ConfigurationOption{ + config.WithCustomAuth(d.providerData.RoundTripper), + utils.UserAgentConfigOption(d.providerData.Version), + } + if d.providerData.SQLServerFlexCustomEndpoint != "" { + apiClientConfigOptions = append( + apiClientConfigOptions, + config.WithEndpoint(d.providerData.SQLServerFlexCustomEndpoint), + ) + } else { + apiClientConfigOptions = append( + apiClientConfigOptions, + config.WithRegion(d.providerData.GetRegion()), + ) + } + apiClient, err := sqlserverflexalphaPkg.NewAPIClient(apiClientConfigOptions...) + if err != nil { + resp.Diagnostics.AddError( + "Error configuring API client", + fmt.Sprintf( + "Configuring client: %v. 
This is an error related to the provider configuration, not to the resource configuration", + err, + ), + ) + return + } + d.client = apiClient + tflog.Info(ctx, fmt.Sprintf("%s client configured", errorPrefix)) +} + +func (d *instanceDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data dataSourceModel + + // Read Terraform configuration data into the model + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + + if resp.Diagnostics.HasError() { + return + } + + ctx = core.InitProviderContext(ctx) + + projectId := data.ProjectId.ValueString() + region := d.providerData.GetRegionWithOverride(data.Region) + instanceId := data.InstanceId.ValueString() + + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "region", region) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + instanceResp, err := d.client.GetInstanceRequest(ctx, projectId, region, instanceId).Execute() + if err != nil { + utils.LogError( + ctx, + &resp.Diagnostics, + err, + "Reading instance", + fmt.Sprintf("instance with ID %q does not exist in project %q.", instanceId, projectId), + map[int]string{ + http.StatusForbidden: fmt.Sprintf("Project with ID %q not found or forbidden access", projectId), + }, + ) + resp.State.RemoveResource(ctx) + return + } + + ctx = core.LogResponse(ctx) + + err = mapDataResponseToModel(ctx, instanceResp, &data, resp.Diagnostics) + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + fmt.Sprintf("%s Read", errorPrefix), + fmt.Sprintf("Processing API payload: %v", err), + ) + return + } + + // Save data into Terraform state + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
}
diff --git a/stackit/internal/services/sqlserverflexalpha/instance/functions.go b/stackit/internal/services/sqlserverflexalpha/instance/functions.go
new file mode 100644
index 00000000..a8567903
--- /dev/null
+++ b/stackit/internal/services/sqlserverflexalpha/instance/functions.go
@@ -0,0 +1,283 @@
package sqlserverflexalpha

import (
	"context"
	"errors"
	"fmt"
	"math"

	"github.com/hashicorp/terraform-plugin-framework/attr"
	"github.com/hashicorp/terraform-plugin-framework/diag"
	"github.com/hashicorp/terraform-plugin-framework/resource"
	"github.com/hashicorp/terraform-plugin-framework/types"

	"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexalpha"
	"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/conversion"
	sqlserverflexalphaDataGen "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/instance/datasources_gen"
	sqlserverflexalphaResGen "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/instance/resources_gen"
)

// mapResponseToModel copies a GetInstanceResponse into the resource model m.
// Returns a non-nil error when converting the network ACL, network object, or
// storage object fails.
//
// NOTE(review): tfDiags is passed by value; diag.Diagnostics is slice-backed,
// so the Append calls below mutate only the local copy and any warnings never
// reach the caller. Failures do surface through the error return. Consider
// switching to *diag.Diagnostics — confirm with the call sites first.
func mapResponseToModel(
	ctx context.Context,
	resp *sqlserverflexalpha.GetInstanceResponse,
	m *sqlserverflexalphaResGen.InstanceModel,
	tfDiags diag.Diagnostics,
) error {
	m.BackupSchedule = types.StringValue(resp.GetBackupSchedule())
	m.Edition = types.StringValue(string(resp.GetEdition()))
	m.Encryption = handleEncryption(m, resp)
	m.FlavorId = types.StringValue(resp.GetFlavorId())
	// The API id doubles as the instance id in the model.
	m.Id = types.StringValue(resp.GetId())
	m.InstanceId = types.StringValue(resp.GetId())
	m.IsDeletable = types.BoolValue(resp.GetIsDeletable())
	m.Name = types.StringValue(resp.GetName())
	netAcl, diags := types.ListValueFrom(ctx, types.StringType, resp.Network.GetAcl())
	tfDiags.Append(diags...)
	if diags.HasError() {
		return fmt.Errorf(
			"error converting network acl response value",
		)
	}
	net, diags := sqlserverflexalphaResGen.NewNetworkValue(
		sqlserverflexalphaResGen.NetworkValue{}.AttributeTypes(ctx),
		map[string]attr.Value{
			"access_scope":     types.StringValue(string(resp.Network.GetAccessScope())),
			"acl":              netAcl,
			"instance_address": types.StringValue(resp.Network.GetInstanceAddress()),
			"router_address":   types.StringValue(resp.Network.GetRouterAddress()),
		},
	)
	tfDiags.Append(diags...)
	if diags.HasError() {
		return errors.New("error converting network response value")
	}
	m.Network = net
	m.Replicas = types.Int64Value(int64(resp.GetReplicas()))
	m.RetentionDays = types.Int64Value(resp.GetRetentionDays())
	m.Status = types.StringValue(string(resp.GetStatus()))

	stor, diags := sqlserverflexalphaResGen.NewStorageValue(
		sqlserverflexalphaResGen.StorageValue{}.AttributeTypes(ctx),
		map[string]attr.Value{
			"class": types.StringValue(resp.Storage.GetClass()),
			"size":  types.Int64Value(resp.Storage.GetSize()),
		},
	)
	tfDiags.Append(diags...)
	if diags.HasError() {
		return fmt.Errorf("error converting storage response value")
	}
	m.Storage = stor

	m.Version = types.StringValue(string(resp.GetVersion()))
	return nil
}

// mapDataResponseToModel copies a GetInstanceResponse into the data-source
// model. It mirrors mapResponseToModel but targets the datasources_gen types.
//
// NOTE(review): same caveat as mapResponseToModel — tfDiags is a value copy,
// so appended diagnostics are lost; only the error return is observable.
func mapDataResponseToModel(
	ctx context.Context,
	resp *sqlserverflexalpha.GetInstanceResponse,
	m *dataSourceModel,
	tfDiags diag.Diagnostics,
) error {
	m.BackupSchedule = types.StringValue(resp.GetBackupSchedule())
	m.Edition = types.StringValue(string(resp.GetEdition()))
	m.Encryption = handleDSEncryption(m, resp)
	m.FlavorId = types.StringValue(resp.GetFlavorId())
	m.Id = types.StringValue(resp.GetId())
	m.InstanceId = types.StringValue(resp.GetId())
	m.IsDeletable = types.BoolValue(resp.GetIsDeletable())
	m.Name = types.StringValue(resp.GetName())
	netAcl, diags := types.ListValueFrom(ctx, types.StringType, resp.Network.GetAcl())
	tfDiags.Append(diags...)
	if diags.HasError() {
		return fmt.Errorf(
			"error converting network acl response value",
		)
	}
	net, diags := sqlserverflexalphaDataGen.NewNetworkValue(
		sqlserverflexalphaDataGen.NetworkValue{}.AttributeTypes(ctx),
		map[string]attr.Value{
			"access_scope":     types.StringValue(string(resp.Network.GetAccessScope())),
			"acl":              netAcl,
			"instance_address": types.StringValue(resp.Network.GetInstanceAddress()),
			"router_address":   types.StringValue(resp.Network.GetRouterAddress()),
		},
	)
	tfDiags.Append(diags...)
	if diags.HasError() {
		return errors.New("error converting network response value")
	}
	m.Network = net
	m.Replicas = types.Int64Value(int64(resp.GetReplicas()))
	m.RetentionDays = types.Int64Value(resp.GetRetentionDays())
	m.Status = types.StringValue(string(resp.GetStatus()))

	stor, diags := sqlserverflexalphaDataGen.NewStorageValue(
		sqlserverflexalphaDataGen.StorageValue{}.AttributeTypes(ctx),
		map[string]attr.Value{
			"class": types.StringValue(resp.Storage.GetClass()),
			"size":  types.Int64Value(resp.Storage.GetSize()),
		},
	)
	tfDiags.Append(diags...)
	if diags.HasError() {
		return fmt.Errorf("error converting storage response value")
	}
	m.Storage = stor

	m.Version = types.StringValue(string(resp.GetVersion()))
	return nil
}

// handleEncryption maps the response encryption block to the resource model.
// When the response carries no complete encryption block (any field nil), the
// value already held in m is returned unchanged, unless it is null/unknown,
// in which case an explicit null value is returned.
func handleEncryption(
	m *sqlserverflexalphaResGen.InstanceModel,
	resp *sqlserverflexalpha.GetInstanceResponse,
) sqlserverflexalphaResGen.EncryptionValue {
	if !resp.HasEncryption() ||
		resp.Encryption == nil ||
		resp.Encryption.KekKeyId == nil ||
		resp.Encryption.KekKeyRingId == nil ||
		resp.Encryption.KekKeyVersion == nil ||
		resp.Encryption.ServiceAccount == nil {
		if m.Encryption.IsNull() || m.Encryption.IsUnknown() {
			return sqlserverflexalphaResGen.NewEncryptionValueNull()
		}
		return m.Encryption
	}

	// Start from a null value and set each field that the response provides.
	enc := sqlserverflexalphaResGen.NewEncryptionValueNull()
	if kVal, ok := resp.Encryption.GetKekKeyIdOk(); ok {
		enc.KekKeyId = types.StringValue(kVal)
	}
	if kkVal, ok := resp.Encryption.GetKekKeyRingIdOk(); ok {
		enc.KekKeyRingId = types.StringValue(kkVal)
	}
	if kkvVal, ok := resp.Encryption.GetKekKeyVersionOk(); ok {
		enc.KekKeyVersion = types.StringValue(kkvVal)
	}
	if sa, ok := resp.Encryption.GetServiceAccountOk(); ok {
		enc.ServiceAccount = types.StringValue(sa)
	}
	return enc
}

// handleDSEncryption is the data-source twin of handleEncryption, targeting
// the datasources_gen EncryptionValue type.
func handleDSEncryption(
	m *dataSourceModel,
	resp *sqlserverflexalpha.GetInstanceResponse,
) sqlserverflexalphaDataGen.EncryptionValue {
	if !resp.HasEncryption() ||
		resp.Encryption == nil ||
		resp.Encryption.KekKeyId == nil ||
		resp.Encryption.KekKeyRingId == nil ||
		resp.Encryption.KekKeyVersion == nil ||
		resp.Encryption.ServiceAccount == nil {
		if m.Encryption.IsNull() || m.Encryption.IsUnknown() {
			return sqlserverflexalphaDataGen.NewEncryptionValueNull()
		}
		return m.Encryption
	}

	enc := sqlserverflexalphaDataGen.NewEncryptionValueNull()
	if kVal, ok := resp.Encryption.GetKekKeyIdOk(); ok {
		enc.KekKeyId = types.StringValue(kVal)
	}
	if kkVal, ok := resp.Encryption.GetKekKeyRingIdOk(); ok {
		enc.KekKeyRingId =
types.StringValue(kkVal)
	}
	if kkvVal, ok := resp.Encryption.GetKekKeyVersionOk(); ok {
		enc.KekKeyVersion = types.StringValue(kkvVal)
	}
	if sa, ok := resp.Encryption.GetServiceAccountOk(); ok {
		enc.ServiceAccount = types.StringValue(sa)
	}
	return enc
}

// toCreatePayload builds the CreateInstanceRequestPayload from the planned
// resource model. Returns an error when model is nil or when the network ACL
// list cannot be converted.
//
// The encryption block is included only when the whole block and every one of
// its four fields are known, non-null, and non-empty; otherwise it is omitted
// (nil) from the payload.
func toCreatePayload(
	ctx context.Context,
	model *sqlserverflexalphaResGen.InstanceModel,
) (*sqlserverflexalpha.CreateInstanceRequestPayload, error) {
	if model == nil {
		return nil, fmt.Errorf("nil model")
	}

	storagePayload := &sqlserverflexalpha.CreateInstanceRequestPayloadGetStorageArgType{}
	if !model.Storage.IsNull() && !model.Storage.IsUnknown() {
		storagePayload.Class = model.Storage.Class.ValueStringPointer()
		storagePayload.Size = model.Storage.Size.ValueInt64Pointer()
	}

	var encryptionPayload *sqlserverflexalpha.CreateInstanceRequestPayloadGetEncryptionArgType = nil
	// BUGFIX: the KekKeyId unknown-check was missing its negation (the
	// condition read `model.Encryption.KekKeyId.IsUnknown()`), so the
	// encryption payload was only built when kek_key_id was unknown —
	// effectively never for a fully-known plan.
	if !model.Encryption.IsNull() && !model.Encryption.IsUnknown() &&
		!model.Encryption.KekKeyId.IsNull() && !model.Encryption.KekKeyId.IsUnknown() && model.Encryption.KekKeyId.ValueString() != "" &&
		!model.Encryption.KekKeyRingId.IsNull() && !model.Encryption.KekKeyRingId.IsUnknown() && model.Encryption.KekKeyRingId.ValueString() != "" &&
		!model.Encryption.KekKeyVersion.IsNull() && !model.Encryption.KekKeyVersion.IsUnknown() && model.Encryption.KekKeyVersion.ValueString() != "" &&
		!model.Encryption.ServiceAccount.IsNull() && !model.Encryption.ServiceAccount.IsUnknown() && model.Encryption.ServiceAccount.ValueString() != "" {
		// BUGFIX: KekKeyRingId and KekKeyVersion were previously assigned
		// from each other's model fields (swapped); each payload field now
		// maps to its identically-named model field.
		encryptionPayload = &sqlserverflexalpha.CreateInstanceRequestPayloadGetEncryptionArgType{
			KekKeyId:       model.Encryption.KekKeyId.ValueStringPointer(),
			KekKeyRingId:   model.Encryption.KekKeyRingId.ValueStringPointer(),
			KekKeyVersion:  model.Encryption.KekKeyVersion.ValueStringPointer(),
			ServiceAccount: model.Encryption.ServiceAccount.ValueStringPointer(),
		}
	}

	networkPayload := &sqlserverflexalpha.CreateInstanceRequestPayloadGetNetworkArgType{}
	if !model.Network.IsNull() && !model.Network.IsUnknown() {
		networkPayload.AccessScope = sqlserverflexalpha.CreateInstanceRequestPayloadNetworkGetAccessScopeAttributeType(
			model.Network.AccessScope.ValueStringPointer(),
		)

		var resList []string
		diags := model.Network.Acl.ElementsAs(ctx, &resList, false)
		if diags.HasError() {
			return nil, fmt.Errorf("error converting network acl list")
		}
		networkPayload.Acl = &resList
	}

	return &sqlserverflexalpha.CreateInstanceRequestPayload{
		BackupSchedule: conversion.StringValueToPointer(model.BackupSchedule),
		Encryption:     encryptionPayload,
		FlavorId:       conversion.StringValueToPointer(model.FlavorId),
		Name:           conversion.StringValueToPointer(model.Name),
		Network:        networkPayload,
		RetentionDays:  conversion.Int64ValueToPointer(model.RetentionDays),
		Storage:        storagePayload,
		Version: sqlserverflexalpha.CreateInstanceRequestPayloadGetVersionAttributeType(
			conversion.StringValueToPointer(model.Version),
		),
	}, nil
}

// toUpdatePayload builds the UpdateInstanceRequestPayload from the planned
// resource model. Returns an error when m is nil, when replicas is outside
// the uint32 range, or when the network ACL list cannot be converted.
func toUpdatePayload(
	ctx context.Context,
	m *sqlserverflexalphaResGen.InstanceModel,
	resp *resource.UpdateResponse,
) (*sqlserverflexalpha.UpdateInstanceRequestPayload, error) {
	if m == nil {
		return nil, fmt.Errorf("nil model")
	}
	// ROBUSTNESS: also reject negative values; the previous check only
	// guarded the upper bound, so a negative int64 would have wrapped in the
	// uint32 conversion below despite the nolint justification.
	if m.Replicas.ValueInt64() < 0 || m.Replicas.ValueInt64() > math.MaxUint32 {
		return nil, fmt.Errorf("replicas value is out of range for uint32")
	}
	replVal := sqlserverflexalpha.Replicas(uint32(m.Replicas.ValueInt64())) // nolint:gosec // range check is performed above

	var netAcl []string
	diags := m.Network.Acl.ElementsAs(ctx, &netAcl, false)
	resp.Diagnostics.Append(diags...)
+ if diags.HasError() { + return nil, fmt.Errorf("error converting model network acl value") + } + return &sqlserverflexalpha.UpdateInstanceRequestPayload{ + BackupSchedule: m.BackupSchedule.ValueStringPointer(), + FlavorId: m.FlavorId.ValueStringPointer(), + Name: m.Name.ValueStringPointer(), + Network: sqlserverflexalpha.NewUpdateInstanceRequestPayloadNetwork(netAcl), + Replicas: &replVal, + RetentionDays: m.RetentionDays.ValueInt64Pointer(), + Storage: &sqlserverflexalpha.StorageUpdate{Size: m.Storage.Size.ValueInt64Pointer()}, + Version: sqlserverflexalpha.UpdateInstanceRequestPayloadGetVersionAttributeType( + m.Version.ValueStringPointer(), + ), + }, nil +} diff --git a/stackit/internal/services/sqlserverflexalpha/instance/planModifiers.yaml b/stackit/internal/services/sqlserverflexalpha/instance/planModifiers.yaml new file mode 100644 index 00000000..71d4cbe4 --- /dev/null +++ b/stackit/internal/services/sqlserverflexalpha/instance/planModifiers.yaml @@ -0,0 +1,124 @@ +fields: + - name: 'id' + modifiers: + - 'UseStateForUnknown' + + - name: 'instance_id' + validators: + - validate.NoSeparator + - validate.UUID + modifiers: + - 'UseStateForUnknown' + + - name: 'project_id' + validators: + - validate.NoSeparator + - validate.UUID + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' + + - name: 'name' + modifiers: + - 'UseStateForUnknown' + + - name: 'backup_schedule' + modifiers: + - 'UseStateForUnknown' + + - name: 'encryption.kek_key_id' + validators: + - validate.NoSeparator + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' + + - name: 'encryption.kek_key_version' + validators: + - validate.NoSeparator + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' + + - name: 'encryption.kek_key_ring_id' + validators: + - validate.NoSeparator + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' + + - name: 'encryption.service_account' + validators: + - validate.NoSeparator + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' + + - 
name: 'network.access_scope' + validators: + - validate.NoSeparator + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' + + - name: 'network.acl' + modifiers: + - 'UseStateForUnknown' + + - name: 'network.instance_address' + modifiers: + - 'UseStateForUnknown' + + - name: 'network.router_address' + modifiers: + - 'UseStateForUnknown' + + - name: 'status' + modifiers: + - 'UseStateForUnknown' + + - name: 'region' + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' + + - name: 'retention_days' + modifiers: + - 'UseStateForUnknown' + + - name: 'edition' + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' + + - name: 'version' + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' + + - name: 'replicas' + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' + + - name: 'storage' + modifiers: + - 'UseStateForUnknown' + + - name: 'storage.class' + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' + + - name: 'storage.size' + modifiers: + - 'UseStateForUnknown' + + - name: 'flavor_id' + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' + + - name: 'is_deletable' + modifiers: + - 'UseStateForUnknown' diff --git a/stackit/internal/services/sqlserverflexalpha/instance/resource.go b/stackit/internal/services/sqlserverflexalpha/instance/resource.go new file mode 100644 index 00000000..3b1f4fd3 --- /dev/null +++ b/stackit/internal/services/sqlserverflexalpha/instance/resource.go @@ -0,0 +1,554 @@ +package sqlserverflexalpha + +import ( + "context" + _ "embed" + "fmt" + "net/http" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/identityschema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/core/oapierror" + + 
"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/conversion" + wait "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/wait/sqlserverflexalpha" + + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexalpha" + + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core" + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/utils" + + sqlserverflexalphaResGen "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/instance/resources_gen" +) + +var ( + _ resource.Resource = &instanceResource{} + _ resource.ResourceWithConfigure = &instanceResource{} + _ resource.ResourceWithImportState = &instanceResource{} + _ resource.ResourceWithModifyPlan = &instanceResource{} + _ resource.ResourceWithIdentity = &instanceResource{} +) + +func NewInstanceResource() resource.Resource { + return &instanceResource{} +} + +type instanceResource struct { + client *sqlserverflexalpha.APIClient + providerData core.ProviderData +} + +// resourceModel describes the resource data model. 
+type resourceModel = sqlserverflexalphaResGen.InstanceModel
+
+type InstanceResourceIdentityModel struct {
+	ProjectID  types.String `tfsdk:"project_id"`
+	Region     types.String `tfsdk:"region"`
+	InstanceID types.String `tfsdk:"instance_id"`
+}
+
+func (r *instanceResource) Metadata(
+	_ context.Context,
+	req resource.MetadataRequest,
+	resp *resource.MetadataResponse,
+) {
+	resp.TypeName = req.ProviderTypeName + "_sqlserverflexalpha_instance"
+}
+
+//go:embed planModifiers.yaml
+var modifiersFileByte []byte
+
+func (r *instanceResource) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
+	s := sqlserverflexalphaResGen.InstanceResourceSchema(ctx)
+
+	fields, err := utils.ReadModifiersConfig(modifiersFileByte)
+	if err != nil {
+		resp.Diagnostics.AddError("error during read modifiers config file", err.Error())
+		return
+	}
+
+	err = utils.AddPlanModifiersToResourceSchema(fields, &s)
+	if err != nil {
+		resp.Diagnostics.AddError("error adding plan modifiers", err.Error())
+		return
+	}
+	resp.Schema = s
+}
+
+func (r *instanceResource) IdentitySchema(
+	_ context.Context,
+	_ resource.IdentitySchemaRequest,
+	resp *resource.IdentitySchemaResponse,
+) {
+	resp.IdentitySchema = identityschema.Schema{
+		Attributes: map[string]identityschema.Attribute{
+			"project_id": identityschema.StringAttribute{
+				RequiredForImport: true, // must be set during import by the practitioner
+			},
+			"region": identityschema.StringAttribute{
+				RequiredForImport: true, // can be defaulted by the provider configuration
+			},
+			"instance_id": identityschema.StringAttribute{
+				RequiredForImport: true, // must be set during import by the practitioner
+			},
+		},
+	}
+}
+
+// Configure adds the provider configured client to the resource.
+func (r *instanceResource) Configure( + ctx context.Context, + req resource.ConfigureRequest, + resp *resource.ConfigureResponse, +) { + var ok bool + r.providerData, ok = conversion.ParseProviderData(ctx, req.ProviderData, &resp.Diagnostics) + if !ok { + return + } + + apiClientConfigOptions := []config.ConfigurationOption{ + config.WithCustomAuth(r.providerData.RoundTripper), + utils.UserAgentConfigOption(r.providerData.Version), + } + if r.providerData.SQLServerFlexCustomEndpoint != "" { + apiClientConfigOptions = append( + apiClientConfigOptions, + config.WithEndpoint(r.providerData.SQLServerFlexCustomEndpoint), + ) + } else { + apiClientConfigOptions = append(apiClientConfigOptions, config.WithRegion(r.providerData.GetRegion())) + } + apiClient, err := sqlserverflexalpha.NewAPIClient(apiClientConfigOptions...) + if err != nil { + resp.Diagnostics.AddError( + "Error configuring API client", + fmt.Sprintf( + "Configuring client: %v. This is an error related to the provider configuration, not to the resource configuration", + err, + ), + ) + return + } + r.client = apiClient + tflog.Info(ctx, "sqlserverflexalpha.Instance client configured") +} + +// ModifyPlan implements resource.ResourceWithModifyPlan. +// Use the modifier to set the effective region in the current plan. +func (r *instanceResource) ModifyPlan( + ctx context.Context, + req resource.ModifyPlanRequest, + resp *resource.ModifyPlanResponse, +) { // nolint:gocritic // function signature required by Terraform + // skip initial empty configuration to avoid follow-up errors + if req.Config.Raw.IsNull() { + return + } + var configModel resourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &configModel)...) + if resp.Diagnostics.HasError() { + return + } + + if req.Plan.Raw.IsNull() { + return + } + var planModel resourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &planModel)...) 
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	utils.AdaptRegion(ctx, configModel.Region, &planModel.Region, r.providerData.GetRegion(), resp)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	resp.Diagnostics.Append(resp.Plan.Set(ctx, planModel)...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+}
+
+func (r *instanceResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
+	var data resourceModel
+	crateErr := "[SQL Server Flex ALPHA - Create] error"
+
+	// Read Terraform plan data into the model
+	resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...)
+
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	ctx = core.InitProviderContext(ctx)
+
+	projectId := data.ProjectId.ValueString()
+	region := data.Region.ValueString()
+	ctx = tflog.SetField(ctx, "project_id", projectId)
+	ctx = tflog.SetField(ctx, "region", region)
+
+	// Generate API request body from model
+	payload, err := toCreatePayload(ctx, &data)
+	if err != nil {
+		core.LogAndAddError(
+			ctx,
+			&resp.Diagnostics,
+			crateErr,
+			fmt.Sprintf("Creating API payload: %v", err),
+		)
+		return
+	}
+	// Create new Instance
+	createResp, err := r.client.CreateInstanceRequest(
+		ctx,
+		projectId,
+		region,
+	).CreateInstanceRequestPayload(*payload).Execute()
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, crateErr, fmt.Sprintf("Calling API: %v", err))
+		return
+	}
+
+	ctx = core.LogResponse(ctx)
+
+	InstanceId := *createResp.Id
+
+	// Propagate the instance id returned by the create call into the model
+	data.InstanceId = types.StringValue(InstanceId)
+
+	identity := InstanceResourceIdentityModel{
+		ProjectID:  types.StringValue(projectId),
+		Region:     types.StringValue(region),
+		InstanceID: types.StringValue(InstanceId),
+	}
+	resp.Diagnostics.Append(resp.Identity.Set(ctx, identity)...)
+ if resp.Diagnostics.HasError() { + return + } + + waitResp, err := wait.CreateInstanceWaitHandler( + ctx, + r.client, + projectId, + InstanceId, + region, + ).SetSleepBeforeWait( + 10 * time.Second, + ).SetTimeout( + 90 * time.Minute, + ).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + crateErr, + fmt.Sprintf("Instance creation waiting: %v", err), + ) + return + } + + if waitResp.Id == nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + crateErr, + "Instance creation waiting: returned id is nil", + ) + return + } + + // Map response body to schema + err = mapResponseToModel(ctx, waitResp, &data, resp.Diagnostics) + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + crateErr, + fmt.Sprintf("processing API payload: %v", err), + ) + return + } + + // Save data into Terraform state + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) + + tflog.Info(ctx, "sqlserverflexalpha.Instance created") +} + +func (r *instanceResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + var data resourceModel + + // Read Terraform prior state data into the model + resp.Diagnostics.Append(req.State.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + // Read identity data + var identityData InstanceResourceIdentityModel + resp.Diagnostics.Append(req.Identity.Get(ctx, &identityData)...) 
+ if resp.Diagnostics.HasError() { + return + } + + ctx = core.InitProviderContext(ctx) + + projectId := data.ProjectId.ValueString() + region := data.Region.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "region", region) + + instanceId := data.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + instanceResp, err := r.client.GetInstanceRequest(ctx, projectId, region, instanceId).Execute() + if err != nil { + oapiErr, ok := err.(*oapierror.GenericOpenAPIError) //nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped + if ok && oapiErr.StatusCode == http.StatusNotFound { + resp.State.RemoveResource(ctx) + return + } + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", err.Error()) + return + } + + ctx = core.LogResponse(ctx) + + // Map response body to schema + err = mapResponseToModel(ctx, instanceResp, &data, resp.Diagnostics) + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + "Error reading instance", + fmt.Sprintf("Processing API payload: %v", err), + ) + return + } + + // Save identity into Terraform state + identity := InstanceResourceIdentityModel{ + ProjectID: types.StringValue(projectId), + Region: types.StringValue(region), + InstanceID: types.StringValue(instanceId), + } + resp.Diagnostics.Append(resp.Identity.Set(ctx, identity)...) + if resp.Diagnostics.HasError() { + return + } + + // Save updated data into Terraform state + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + tflog.Info(ctx, "sqlserverflexalpha.Instance read") +} + +func (r *instanceResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + var data resourceModel + updateInstanceError := "Error updating instance" + + resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) 
+ if resp.Diagnostics.HasError() { + return + } + + ctx = core.InitProviderContext(ctx) + + projectId := data.ProjectId.ValueString() + region := data.Region.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "region", region) + + instanceId := data.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + // Generate API request body from model + payload, err := toUpdatePayload(ctx, &data, resp) + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + updateInstanceError, + fmt.Sprintf("Creating API payload: %v", err), + ) + return + } + // Update existing instance + err = r.client.UpdateInstanceRequest( + ctx, + projectId, + region, + instanceId, + ).UpdateInstanceRequestPayload(*payload).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, updateInstanceError, err.Error()) + return + } + + ctx = core.LogResponse(ctx) + + waitResp, err := wait. + UpdateInstanceWaitHandler(ctx, r.client, projectId, instanceId, region). + SetSleepBeforeWait(15 * time.Second). + SetTimeout(45 * time.Minute). + WaitWithContext(ctx) + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + updateInstanceError, + fmt.Sprintf("Instance update waiting: %v", err), + ) + return + } + + // Map response body to schema + err = mapResponseToModel(ctx, waitResp, &data, resp.Diagnostics) + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + updateInstanceError, + fmt.Sprintf("Processing API payload: %v", err), + ) + return + } + + identity := InstanceResourceIdentityModel{ + ProjectID: types.StringValue(projectId), + Region: types.StringValue(region), + InstanceID: types.StringValue(instanceId), + } + resp.Diagnostics.Append(resp.Identity.Set(ctx, identity)...) + if resp.Diagnostics.HasError() { + return + } + + // Save updated data into Terraform state + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
+ if resp.Diagnostics.HasError() { + return + } + + tflog.Info(ctx, "sqlserverflexalpha.Instance updated") +} + +func (r *instanceResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + var data resourceModel + + // Read Terraform prior state data into the model + resp.Diagnostics.Append(req.State.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + // Read identity data + var identityData InstanceResourceIdentityModel + resp.Diagnostics.Append(req.Identity.Get(ctx, &identityData)...) + if resp.Diagnostics.HasError() { + return + } + + ctx = core.InitProviderContext(ctx) + + projectId := identityData.ProjectID.ValueString() + region := identityData.Region.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "region", region) + + instanceId := identityData.InstanceID.ValueString() + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + // Delete existing instance + err := r.client.DeleteInstanceRequest(ctx, projectId, region, instanceId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting instance", fmt.Sprintf("Calling API: %v", err)) + return + } + + ctx = core.LogResponse(ctx) + + delResp, err := wait.DeleteInstanceWaitHandler(ctx, r.client, projectId, instanceId, region).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + "Error deleting instance", + fmt.Sprintf("Instance deletion waiting: %v", err), + ) + return + } + + if delResp != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + "Error deleting instance", + "wait handler returned non nil result", + ) + return + } + + resp.State.RemoveResource(ctx) + + tflog.Info(ctx, "sqlserverflexalpha.Instance deleted") +} + +// ImportState imports a resource into the Terraform state on success. 
+// The expected format of the resource import identifier is: project_id,region,instance_id
+func (r *instanceResource) ImportState(
+	ctx context.Context,
+	req resource.ImportStateRequest,
+	resp *resource.ImportStateResponse,
+) {
+	ctx = core.InitProviderContext(ctx)
+
+	if req.ID != "" {
+		idParts := strings.Split(req.ID, core.Separator)
+
+		if len(idParts) != 3 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" {
+			core.LogAndAddError(
+				ctx, &resp.Diagnostics,
+				"Error importing instance",
+				fmt.Sprintf(
+					"Expected import identifier with format [project_id],[region],[instance_id] Got: %q",
+					req.ID,
+				),
+			)
+			return
+		}
+
+		resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...)
+		resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("region"), idParts[1])...)
+		resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), idParts[2])...)
+		return
+	}
+
+	// If no ID is provided, attempt to read identity attributes from the import configuration
+	var identityData InstanceResourceIdentityModel
+	resp.Diagnostics.Append(req.Identity.Get(ctx, &identityData)...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	projectId := identityData.ProjectID.ValueString()
+	region := identityData.Region.ValueString()
+	instanceId := identityData.InstanceID.ValueString()
+
+	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), projectId)...)
+	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("region"), region)...)
+	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), instanceId)...)
+ + tflog.Info(ctx, "sqlserverflexalpha instance state imported") +} diff --git a/stackit/internal/services/sqlserverflexalpha/sqlserverflex_acc_test.go b/stackit/internal/services/sqlserverflexalpha/sqlserverflex_acc_test.go new file mode 100644 index 00000000..9eebac99 --- /dev/null +++ b/stackit/internal/services/sqlserverflexalpha/sqlserverflex_acc_test.go @@ -0,0 +1,433 @@ +package sqlserverflexalpha_test + +import ( + "context" + _ "embed" + "fmt" + "log" + "os" + "strconv" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/stackitcloud/stackit-sdk-go/core/config" + + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/internal/testutils" + sqlserverflexalphaPkgGen "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexalpha" + sqlserverflexalpha "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/instance" + + // The fwresource import alias is so there is no collision + // with the more typical acceptance testing import: + // "github.com/hashicorp/terraform-plugin-testing/helper/resource" + fwresource "github.com/hashicorp/terraform-plugin-framework/resource" +) + +const providerPrefix = "stackitprivatepreview_sqlserverflexalpha" + +var testInstances []string + +func init() { + sweeperName := fmt.Sprintf("%s_%s", providerPrefix, "sweeper") + + resource.AddTestSweepers(sweeperName, &resource.Sweeper{ + Name: sweeperName, + F: func(region string) error { + ctx := context.Background() + apiClientConfigOptions := []config.ConfigurationOption{} + apiClient, err := sqlserverflexalphaPkgGen.NewAPIClient(apiClientConfigOptions...) + if err != nil { + log.Fatalln(err) + } + + instances, err := apiClient.ListInstancesRequest(ctx, testutils.ProjectId, region). 
+ Size(100). + Execute() + if err != nil { + log.Fatalln(err) + } + + for _, inst := range instances.GetInstances() { + if strings.HasPrefix(inst.GetName(), "tf-acc-") { + for _, item := range testInstances { + if inst.GetName() == item { + delErr := apiClient.DeleteInstanceRequestExecute(ctx, testutils.ProjectId, region, inst.GetId()) + if delErr != nil { + // TODO: maybe just warn? + log.Fatalln(delErr) + } + } + } + } + } + return nil + }, + }) +} + +func TestInstanceResourceSchema(t *testing.T) { + t.Parallel() + + ctx := context.Background() + schemaRequest := fwresource.SchemaRequest{} + schemaResponse := &fwresource.SchemaResponse{} + + // Instantiate the resource.Resource and call its Schema method + sqlserverflexalpha.NewInstanceResource().Schema(ctx, schemaRequest, schemaResponse) + + if schemaResponse.Diagnostics.HasError() { + t.Fatalf("Schema method diagnostics: %+v", schemaResponse.Diagnostics) + } + + // Validate the schema + diagnostics := schemaResponse.Schema.ValidateImplementation(ctx) + + if diagnostics.HasError() { + t.Fatalf("Schema validation diagnostics: %+v", diagnostics) + } +} + +func TestMain(m *testing.M) { + testutils.Setup() + code := m.Run() + // shutdown() + os.Exit(code) +} + +func testAccPreCheck(t *testing.T) { + if _, ok := os.LookupEnv("TF_ACC_PROJECT_ID"); !ok { + t.Fatalf("could not find env var TF_ACC_PROJECT_ID") + } +} + +type resData struct { + ServiceAccountFilePath string + ProjectId string + Region string + Name string + TfName string + FlavorId string + BackupSchedule string + UseEncryption bool + KekKeyId string + KekKeyRingId string + KekKeyVersion uint8 + KekServiceAccount string + PerformanceClass string + Size uint32 + AclString string + AccessScope string + RetentionDays uint32 + Version string + Users []User + Databases []Database +} + +type User struct { + Name string + ProjectId string + Roles []string +} + +type Database struct { + Name string + ProjectId string + Owner string + Collation string + 
Compatibility string +} + +func resName(res, name string) string { + return fmt.Sprintf("%s_%s.%s", providerPrefix, res, name) +} + +func getExample() resData { + name := acctest.RandomWithPrefix("tf-acc") + return resData{ + Region: os.Getenv("TF_ACC_REGION"), + ServiceAccountFilePath: os.Getenv("TF_ACC_SERVICE_ACCOUNT_FILE"), + ProjectId: os.Getenv("TF_ACC_PROJECT_ID"), + Name: name, + TfName: name, + FlavorId: "4.16-Single", + BackupSchedule: "0 0 * * *", + UseEncryption: false, + RetentionDays: 33, + PerformanceClass: "premium-perf2-stackit", + Size: 10, + AclString: "0.0.0.0/0", + AccessScope: "PUBLIC", + Version: "2022", + } +} + +func TestAccInstance(t *testing.T) { + exData := getExample() + + updNameData := exData + updNameData.Name = "name-updated" + + updSizeData := exData + updSizeData.Size = 25 + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + t.Logf(" ... working on instance %s", exData.TfName) + testInstances = append(testInstances, exData.TfName) + }, + ProtoV6ProviderFactories: testutils.TestAccProtoV6ProviderFactories, + Steps: []resource.TestStep{ + // Create and verify + { + Config: testutils.StringFromTemplateMust( + "testdata/instance_template.gompl", + exData, + ), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(resName("instance", exData.TfName), "name", exData.Name), + resource.TestCheckResourceAttrSet(resName("instance", exData.TfName), "id"), + // TODO: check all fields + ), + }, + // Update name and verify + { + Config: testutils.StringFromTemplateMust( + "testdata/instance_template.gompl", + updNameData, + ), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resName("instance", exData.TfName), "name", updNameData.Name), + ), + }, + // Update size and verify + { + Config: testutils.StringFromTemplateMust( + "testdata/instance_template.gompl", + updSizeData, + ), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + 
testutils.ResStr(providerPrefix, "instance", exData.TfName), + "storage.size", + strconv.Itoa(int(updSizeData.Size)), + ), + ), + }, + { + RefreshState: true, + }, + //// Import test + //{ + // ResourceName: resName("instance", exData.TfName), + // ImportState: true, + // ImportStateVerify: true, + // }, + }, + }) +} + +func TestAccInstanceNoEncryption(t *testing.T) { + data := getExample() + + dbName := "testDb" + userName := "testUser" + data.Users = []User{ + { + Name: userName, + ProjectId: os.Getenv("TF_ACC_PROJECT_ID"), + Roles: []string{ + "##STACKIT_DatabaseManager##", + "##STACKIT_LoginManager##", + "##STACKIT_ProcessManager##", + "##STACKIT_SQLAgentManager##", + "##STACKIT_SQLAgentUser##", + "##STACKIT_ServerManager##", + }, + }, + } + data.Databases = []Database{ + { + Name: dbName, + ProjectId: os.Getenv("TF_ACC_PROJECT_ID"), + Owner: userName, + }, + } + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + t.Logf(" ... working on instance %s", data.TfName) + testInstances = append(testInstances, data.TfName) + }, + ProtoV6ProviderFactories: testutils.TestAccProtoV6ProviderFactories, + Steps: []resource.TestStep{ + // Create and verify + { + Config: testutils.StringFromTemplateMust( + "testdata/instance_template.gompl", + data, + ), + Check: resource.ComposeAggregateTestCheckFunc( + // check instance values are set + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "id"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "backup_schedule"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "edition"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "flavor_id"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "instance_id"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "is_deletable"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "name"), + 
resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "replicas"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "retention_days"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "status"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "version"), + + resource.TestCheckNoResourceAttr(resName("instance", data.TfName), "encryption"), + + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "encryption"), + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "encryption.kek_key_id"), + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "encryption.kek_key_version"), + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "encryption.kek_key_ring_id"), + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "encryption.service_account"), + + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "network.access_scope"), + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "network.acl"), + + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "network.instance_address"), + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "network.router_address"), + + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "storage.class"), + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "storage.size"), + + // check instance values are correct + resource.TestCheckResourceAttr(resName("instance", data.TfName), "name", data.Name), + + // check user values are set + resource.TestCheckResourceAttrSet(resName("user", userName), "id"), + resource.TestCheckResourceAttrSet(resName("user", userName), "username"), + // resource.TestCheckResourceAttrSet(resName("user", userName), "roles"), + + // func(s *terraform.State) error { + // return nil + // }, + + // check user values are correct + 
resource.TestCheckResourceAttr(resName("user", userName), "username", userName), + resource.TestCheckResourceAttr(resName("user", userName), "roles.#", strconv.Itoa(len(data.Users[0].Roles))), + + // check database values are set + resource.TestCheckResourceAttrSet(resName("database", dbName), "id"), + resource.TestCheckResourceAttrSet(resName("database", dbName), "name"), + resource.TestCheckResourceAttrSet(resName("database", dbName), "owner"), + resource.TestCheckResourceAttrSet(resName("database", dbName), "compatibility"), + resource.TestCheckResourceAttrSet(resName("database", dbName), "collation"), + + // check database values are correct + resource.TestCheckResourceAttr(resName("database", dbName), "name", dbName), + resource.TestCheckResourceAttr(resName("database", dbName), "owner", userName), + ), + }, + }, + }) +} + +func TestAccInstanceEncryption(t *testing.T) { + data := getExample() + + dbName := "testDb" + userName := "testUser" + data.Users = []User{ + { + Name: userName, + ProjectId: os.Getenv("TF_ACC_PROJECT_ID"), + Roles: []string{"##STACKIT_DatabaseManager##", "##STACKIT_LoginManager##"}, + }, + } + data.Databases = []Database{ + { + Name: dbName, + ProjectId: os.Getenv("TF_ACC_PROJECT_ID"), + Owner: userName, + }, + } + + data.UseEncryption = true + data.KekKeyId = "fe039bcf-8d7b-431a-801d-9e81371a6b7b" + data.KekKeyRingId = "6a2d95ab-3c4c-4963-a2bb-08d17a320e27" + data.KekKeyVersion = 1 + data.KekServiceAccount = "henselinm-u2v3ex1@sa.stackit.cloud" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + t.Logf(" ... 
working on instance %s", data.TfName) + testInstances = append(testInstances, data.TfName) + }, + ProtoV6ProviderFactories: testutils.TestAccProtoV6ProviderFactories, + Steps: []resource.TestStep{ + // Create and verify + { + Config: testutils.StringFromTemplateMust( + "testdata/instance_template.gompl", + data, + ), + Check: resource.ComposeAggregateTestCheckFunc( + // check instance values are set + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "id"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "backup_schedule"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "edition"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "flavor_id"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "instance_id"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "is_deletable"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "name"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "replicas"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "retention_days"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "status"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "version"), + + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "encryption"), + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "encryption.kek_key_id"), + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "encryption.kek_key_version"), + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "encryption.kek_key_ring_id"), + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "encryption.service_account"), + + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "network.access_scope"), + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), 
"network.acl"), + + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "network.instance_address"), + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "network.router_address"), + + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "storage.class"), + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "storage.size"), + + // check instance values are correct + resource.TestCheckResourceAttr(resName("instance", data.TfName), "name", data.Name), + + // check user values are set + resource.TestCheckResourceAttrSet(resName("user", userName), "id"), + resource.TestCheckResourceAttrSet(resName("user", userName), "username"), + + // func(s *terraform.State) error { + // return nil + // }, + + // check user values are correct + resource.TestCheckResourceAttr(resName("user", userName), "username", userName), + resource.TestCheckResourceAttr(resName("user", userName), "roles.#", "2"), + + // check database values are set + resource.TestCheckResourceAttrSet(resName("database", dbName), "id"), + resource.TestCheckResourceAttrSet(resName("database", dbName), "name"), + resource.TestCheckResourceAttrSet(resName("database", dbName), "owner"), + resource.TestCheckResourceAttrSet(resName("database", dbName), "compatibility"), + resource.TestCheckResourceAttrSet(resName("database", dbName), "collation"), + + // check database values are correct + resource.TestCheckResourceAttr(resName("database", dbName), "name", dbName), + resource.TestCheckResourceAttr(resName("database", dbName), "owner", userName), + ), + }, + }, + }) +} diff --git a/stackit/internal/services/sqlserverflexalpha/testdata/instance_template.gompl b/stackit/internal/services/sqlserverflexalpha/testdata/instance_template.gompl new file mode 100644 index 00000000..0bf11c9c --- /dev/null +++ b/stackit/internal/services/sqlserverflexalpha/testdata/instance_template.gompl @@ -0,0 +1,60 @@ +provider "stackitprivatepreview" { + 
default_region = "{{ .Region }}" + service_account_key_path = "{{ .ServiceAccountFilePath }}" +} + +resource "stackitprivatepreview_sqlserverflexalpha_instance" "{{ .TfName }}" { + project_id = "{{ .ProjectId }}" + name = "{{ .Name }}" + backup_schedule = "{{ .BackupSchedule }}" + retention_days = {{ .RetentionDays }} + flavor_id = "{{ .FlavorId }}" + storage = { + class = "{{ .PerformanceClass }}" + size = {{ .Size }} + } +{{ if .UseEncryption }} + encryption = { + kek_key_id = "{{ .KekKeyId }}" + kek_key_ring_id = "{{ .KekKeyRingId }}" + kek_key_version = {{ .KekKeyVersion }} + service_account = "{{ .KekServiceAccount }}" + } +{{ end }} + network = { + acl = ["{{ .AclString }}"] + access_scope = "{{ .AccessScope }}" + } + version = "{{ .Version }}" +} + +{{ if .Users }} +{{ $tfName := .TfName }} +{{ range $user := .Users }} +resource "stackitprivatepreview_sqlserverflexalpha_user" "{{ $user.Name }}" { + project_id = "{{ $user.ProjectId }}" + instance_id = stackitprivatepreview_sqlserverflexalpha_instance.{{ $tfName }}.instance_id + username = "{{ $user.Name }}" + roles = [{{ range $i, $v := $user.Roles }}{{if $i}},{{end}}"{{$v}}"{{end}}] +} +{{ end }} +{{ end }} + +{{ if .Databases }} +{{ $tfName := .TfName }} +{{ range $db := .Databases }} +resource "stackitprivatepreview_sqlserverflexalpha_database" "{{ $db.Name }}" { + depends_on = [stackitprivatepreview_sqlserverflexalpha_user.{{ $db.Owner }}] + project_id = "{{ $db.ProjectId }}" + instance_id = stackitprivatepreview_sqlserverflexalpha_instance.{{ $tfName }}.instance_id + name = "{{ $db.Name }}" + owner = "{{ $db.Owner }}" +{{ if $db.Collation }} + collation = "{{ $db.Collation }}" +{{ end }} +{{ if $db.Compatibility }} + compatibility = "{{ $db.Compatibility }}" +{{ end }} +} +{{ end }} +{{ end }} diff --git a/stackit/internal/services/sqlserverflexalpha/user/datasource.go b/stackit/internal/services/sqlserverflexalpha/user/datasource.go new file mode 100644 index 00000000..e191e5a7 --- /dev/null +++ 
b/stackit/internal/services/sqlserverflexalpha/user/datasource.go @@ -0,0 +1,139 @@ +package sqlserverflexalpha + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/conversion" + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core" + sqlserverflexUtils "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/utils" + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/utils" + + sqlserverflexalphaPkg "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexalpha" + sqlserverflexalphaGen "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/user/datasources_gen" +) + +var _ datasource.DataSource = (*userDataSource)(nil) + +func NewUserDataSource() datasource.DataSource { + return &userDataSource{} +} + +type dataSourceModel struct { + DefaultDatabase types.String `tfsdk:"default_database"` + Host types.String `tfsdk:"host"` + Id types.String `tfsdk:"id"` + InstanceId types.String `tfsdk:"instance_id"` + Port types.Int64 `tfsdk:"port"` + ProjectId types.String `tfsdk:"project_id"` + Region types.String `tfsdk:"region"` + Roles types.List `tfsdk:"roles"` + Status types.String `tfsdk:"status"` + UserId types.Int64 `tfsdk:"user_id"` + Username types.String `tfsdk:"username"` +} + +type userDataSource struct { + client *sqlserverflexalphaPkg.APIClient + providerData core.ProviderData +} + +func (d *userDataSource) Metadata( + _ context.Context, + req 
datasource.MetadataRequest, + resp *datasource.MetadataResponse, +) { + resp.TypeName = req.ProviderTypeName + "_sqlserverflexalpha_user" +} + +func (d *userDataSource) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = sqlserverflexalphaGen.UserDataSourceSchema(ctx) +} + +// Configure adds the provider configured client to the data source. +func (d *userDataSource) Configure( + ctx context.Context, + req datasource.ConfigureRequest, + resp *datasource.ConfigureResponse, +) { + var ok bool + d.providerData, ok = conversion.ParseProviderData(ctx, req.ProviderData, &resp.Diagnostics) + if !ok { + return + } + + apiClient := sqlserverflexUtils.ConfigureClient(ctx, &d.providerData, &resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + d.client = apiClient + tflog.Info(ctx, "SQL SERVER Flex alpha database client configured") +} + +func (d *userDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var model dataSourceModel + diags := req.Config.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + ctx = core.InitProviderContext(ctx) + + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + userId := model.UserId.ValueInt64() + region := d.providerData.GetRegionWithOverride(model.Region) + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + ctx = tflog.SetField(ctx, "user_id", userId) + ctx = tflog.SetField(ctx, "region", region) + + recordSetResp, err := d.client.GetUserRequest(ctx, projectId, region, instanceId, userId).Execute() + if err != nil { + utils.LogError( + ctx, + &resp.Diagnostics, + err, + "Reading user", + fmt.Sprintf( + "User with ID %q or instance with ID %q does not exist in project %q.", + userId, + instanceId, + projectId, + ), + map[int]string{ + http.StatusForbidden: fmt.Sprintf("Project with ID %q not found or forbidden access", projectId), + }, + ) + resp.State.RemoveResource(ctx) + return + } + + ctx = core.LogResponse(ctx) + + // Map response body to schema and populate Computed attribute values + err = mapDataSourceFields(recordSetResp, &model, region) + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + "Error reading user", + fmt.Sprintf("Processing API payload: %v", err), + ) + return + } + + // Set refreshed state + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + tflog.Info(ctx, "SQLServer Flex beta instance read") +} diff --git a/stackit/internal/services/sqlserverflexalpha/user/mapper.go b/stackit/internal/services/sqlserverflexalpha/user/mapper.go new file mode 100644 index 00000000..8e522d59 --- /dev/null +++ b/stackit/internal/services/sqlserverflexalpha/user/mapper.go @@ -0,0 +1,193 @@ +package sqlserverflexalpha + +import ( + "fmt" + "slices" + "strconv" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexalpha" + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/conversion" + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core" + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/utils" +) + +// mapDataSourceFields maps the API response to a dataSourceModel. 
+func mapDataSourceFields(userResp *sqlserverflexalpha.GetUserResponse, model *dataSourceModel, region string) error { + if userResp == nil { + return fmt.Errorf("response is nil") + } + if model == nil { + return fmt.Errorf("model input is nil") + } + user := userResp + + // Handle user ID + var userId int64 + if model.UserId.ValueInt64() != 0 { + userId = model.UserId.ValueInt64() + } else if user.Id != nil { + userId = *user.Id + } else { + return fmt.Errorf("user id not present") + } + + // Set main attributes + model.Id = utils.BuildInternalTerraformId( + model.ProjectId.ValueString(), region, model.InstanceId.ValueString(), strconv.FormatInt(userId, 10), + ) + model.UserId = types.Int64Value(userId) + model.Username = types.StringPointerValue(user.Username) + + // Map roles + if user.Roles == nil { + model.Roles = types.List(types.SetNull(types.StringType)) + } else { + resRoles := *user.Roles + slices.Sort(resRoles) + + var roles []attr.Value + for _, role := range resRoles { + roles = append(roles, types.StringValue(string(role))) + } + rolesSet, diags := types.SetValue(types.StringType, roles) + if diags.HasError() { + return fmt.Errorf("failed to map roles: %w", core.DiagsToError(diags)) + } + model.Roles = types.List(rolesSet) + } + + // Set remaining attributes + model.Host = types.StringPointerValue(user.Host) + model.Port = types.Int64PointerValue(user.Port) + model.Region = types.StringValue(region) + model.Status = types.StringPointerValue(user.Status) + model.DefaultDatabase = types.StringPointerValue(user.DefaultDatabase) + + return nil +} + +// mapFields maps the API response to a resourceModel. 
+func mapFields(userResp *sqlserverflexalpha.GetUserResponse, model *resourceModel, region string) error { + if userResp == nil { + return fmt.Errorf("response is nil") + } + if model == nil { + return fmt.Errorf("model input is nil") + } + user := userResp + + // Handle user ID + var userId int64 + if model.UserId.ValueInt64() != 0 { + userId = model.UserId.ValueInt64() + } else if user.Id != nil { + userId = *user.Id + } else { + return fmt.Errorf("user id not present") + } + + // Set main attributes + model.Id = types.Int64Value(userId) + model.UserId = types.Int64Value(userId) + model.Username = types.StringPointerValue(user.Username) + + // Map roles + if user.Roles != nil { + resRoles := *user.Roles + slices.Sort(resRoles) + + var roles []attr.Value + for _, role := range resRoles { + roles = append(roles, types.StringValue(string(role))) + } + rolesSet, diags := types.SetValue(types.StringType, roles) + if diags.HasError() { + return fmt.Errorf("failed to map roles: %w", core.DiagsToError(diags)) + } + model.Roles = types.List(rolesSet) + } + + // Ensure roles is not null + if model.Roles.IsNull() || model.Roles.IsUnknown() { + model.Roles = types.List(types.SetNull(types.StringType)) + } + + // Set connection details + model.Host = types.StringPointerValue(user.Host) + model.Port = types.Int64PointerValue(user.Port) + model.Region = types.StringValue(region) + return nil +} + +// mapFieldsCreate maps the API response from creating a user to a resourceModel. 
+func mapFieldsCreate(userResp *sqlserverflexalpha.CreateUserResponse, model *resourceModel, region string) error { + if userResp == nil { + return fmt.Errorf("response is nil") + } + if model == nil { + return fmt.Errorf("model input is nil") + } + user := userResp + + if user.Id == nil { + return fmt.Errorf("user id not present") + } + userId := *user.Id + model.Id = types.Int64Value(userId) + model.UserId = types.Int64Value(userId) + model.Username = types.StringPointerValue(user.Username) + + if user.Password == nil { + return fmt.Errorf("user password not present") + } + model.Password = types.StringValue(*user.Password) + + if user.Roles != nil { + resRoles := *user.Roles + slices.Sort(resRoles) + + var roles []attr.Value + for _, role := range resRoles { + roles = append(roles, types.StringValue(string(role))) + } + rolesSet, diags := types.SetValue(types.StringType, roles) + if diags.HasError() { + return fmt.Errorf("failed to map roles: %w", core.DiagsToError(diags)) + } + model.Roles = types.List(rolesSet) + } + + if model.Roles.IsNull() || model.Roles.IsUnknown() { + model.Roles = types.List(types.SetNull(types.StringType)) + } + + model.Password = types.StringPointerValue(user.Password) + model.Uri = types.StringPointerValue(user.Uri) + + model.Host = types.StringPointerValue(user.Host) + model.Port = types.Int64PointerValue(user.Port) + model.Region = types.StringValue(region) + model.Status = types.StringPointerValue(user.Status) + model.DefaultDatabase = types.StringPointerValue(user.DefaultDatabase) + + return nil +} + +// toCreatePayload converts a resourceModel to an API CreateUserRequestPayload. 
+func toCreatePayload( + model *resourceModel, + roles []string, +) (*sqlserverflexalpha.CreateUserRequestPayload, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + + return &sqlserverflexalpha.CreateUserRequestPayload{ + Username: conversion.StringValueToPointer(model.Username), + DefaultDatabase: conversion.StringValueToPointer(model.DefaultDatabase), + Roles: &roles, + }, nil +} diff --git a/stackit/internal/services/sqlserverflexalpha/user/mapper_test.go b/stackit/internal/services/sqlserverflexalpha/user/mapper_test.go new file mode 100644 index 00000000..4dbe7d03 --- /dev/null +++ b/stackit/internal/services/sqlserverflexalpha/user/mapper_test.go @@ -0,0 +1,533 @@ +package sqlserverflexalpha + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexalpha" +) + +func TestMapDataSourceFields(t *testing.T) { + const testRegion = "region" + tests := []struct { + description string + input *sqlserverflexalpha.GetUserResponse + region string + expected dataSourceModel + isValid bool + }{ + { + "default_values", + &sqlserverflexalpha.GetUserResponse{}, + testRegion, + dataSourceModel{ + Id: types.StringValue("pid,region,iid,1"), + UserId: types.Int64Value(1), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Username: types.StringNull(), + Roles: types.List(types.SetNull(types.StringType)), + Host: types.StringNull(), + Port: types.Int64Null(), + Region: types.StringValue(testRegion), + Status: types.StringNull(), + DefaultDatabase: types.StringNull(), + }, + true, + }, + { + "simple_values", + &sqlserverflexalpha.GetUserResponse{ + Roles: &[]string{ + "##STACKIT_SQLAgentUser##", + "##STACKIT_DatabaseManager##", + 
"##STACKIT_LoginManager##", + "##STACKIT_SQLAgentManager##", + "##STACKIT_ProcessManager##", + "##STACKIT_ServerManager##", + }, + Username: utils.Ptr("username"), + Host: utils.Ptr("host"), + Port: utils.Ptr(int64(1234)), + Status: utils.Ptr("active"), + DefaultDatabase: utils.Ptr("default_db"), + }, + testRegion, + dataSourceModel{ + Id: types.StringValue("pid,region,iid,1"), + UserId: types.Int64Value(1), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Username: types.StringValue("username"), + Roles: types.List( + types.SetValueMust( + types.StringType, []attr.Value{ + types.StringValue("##STACKIT_DatabaseManager##"), + types.StringValue("##STACKIT_LoginManager##"), + types.StringValue("##STACKIT_ProcessManager##"), + types.StringValue("##STACKIT_SQLAgentManager##"), + types.StringValue("##STACKIT_SQLAgentUser##"), + types.StringValue("##STACKIT_ServerManager##"), + }, + ), + ), + Host: types.StringValue("host"), + Port: types.Int64Value(1234), + Region: types.StringValue(testRegion), + Status: types.StringValue("active"), + DefaultDatabase: types.StringValue("default_db"), + }, + true, + }, + { + "null_fields_and_int_conversions", + &sqlserverflexalpha.GetUserResponse{ + Id: utils.Ptr(int64(1)), + Roles: &[]string{}, + Username: nil, + Host: nil, + Port: utils.Ptr(int64(2123456789)), + }, + testRegion, + dataSourceModel{ + Id: types.StringValue("pid,region,iid,1"), + UserId: types.Int64Value(1), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Username: types.StringNull(), + Roles: types.List(types.SetValueMust(types.StringType, []attr.Value{})), + Host: types.StringNull(), + Port: types.Int64Value(2123456789), + Region: types.StringValue(testRegion), + }, + true, + }, + { + "nil_response", + nil, + testRegion, + dataSourceModel{}, + false, + }, + { + "nil_response_2", + &sqlserverflexalpha.GetUserResponse{}, + testRegion, + dataSourceModel{}, + false, + }, + { + "no_resource_id", + 
&sqlserverflexalpha.GetUserResponse{}, + testRegion, + dataSourceModel{}, + false, + }, + } + for _, tt := range tests { + t.Run( + tt.description, func(t *testing.T) { + state := &dataSourceModel{ + ProjectId: tt.expected.ProjectId, + InstanceId: tt.expected.InstanceId, + UserId: tt.expected.UserId, + } + err := mapDataSourceFields(tt.input, state, tt.region) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(&tt.expected, state) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }, + ) + } +} + +func TestMapFieldsCreate(t *testing.T) { + const testRegion = "region" + tests := []struct { + description string + input *sqlserverflexalpha.CreateUserResponse + region string + expected resourceModel + isValid bool + }{ + { + "default_values", + &sqlserverflexalpha.CreateUserResponse{ + Id: utils.Ptr(int64(1)), + Password: utils.Ptr(""), + }, + testRegion, + resourceModel{ + Id: types.Int64Value(1), + UserId: types.Int64Value(1), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Username: types.StringNull(), + Roles: types.List(types.SetNull(types.StringType)), + Password: types.StringValue(""), + Host: types.StringNull(), + Port: types.Int64Null(), + Region: types.StringValue(testRegion), + }, + true, + }, + { + "simple_values", + &sqlserverflexalpha.CreateUserResponse{ + Id: utils.Ptr(int64(2)), + Roles: &[]string{ + "role_2", + "role_1", + "", + }, + Username: utils.Ptr("username"), + Password: utils.Ptr("password"), + Host: utils.Ptr("host"), + Port: utils.Ptr(int64(1234)), + Status: utils.Ptr("status"), + DefaultDatabase: utils.Ptr("default_db"), + }, + testRegion, + resourceModel{ + Id: types.Int64Value(2), + UserId: types.Int64Value(2), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Username: types.StringValue("username"), + Roles: types.List( 
+ types.SetValueMust( + types.StringType, []attr.Value{ + types.StringValue(""), + types.StringValue("role_1"), + types.StringValue("role_2"), + }, + ), + ), + Password: types.StringValue("password"), + Host: types.StringValue("host"), + Port: types.Int64Value(1234), + Region: types.StringValue(testRegion), + Status: types.StringValue("status"), + DefaultDatabase: types.StringValue("default_db"), + }, + true, + }, + { + "null_fields_and_int_conversions", + &sqlserverflexalpha.CreateUserResponse{ + Id: utils.Ptr(int64(3)), + Roles: &[]string{}, + Username: nil, + Password: utils.Ptr(""), + Host: nil, + Port: utils.Ptr(int64(2123456789)), + }, + testRegion, + resourceModel{ + Id: types.Int64Value(3), + UserId: types.Int64Value(3), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Username: types.StringNull(), + Roles: types.List(types.SetValueMust(types.StringType, []attr.Value{})), + Password: types.StringValue(""), + Host: types.StringNull(), + Port: types.Int64Value(2123456789), + Region: types.StringValue(testRegion), + DefaultDatabase: types.StringNull(), + Status: types.StringNull(), + }, + true, + }, + { + "nil_response", + nil, + testRegion, + resourceModel{}, + false, + }, + { + "nil_response_2", + &sqlserverflexalpha.CreateUserResponse{}, + testRegion, + resourceModel{}, + false, + }, + { + "no_resource_id", + &sqlserverflexalpha.CreateUserResponse{}, + testRegion, + resourceModel{}, + false, + }, + { + "no_password", + &sqlserverflexalpha.CreateUserResponse{ + Id: utils.Ptr(int64(1)), + }, + testRegion, + resourceModel{}, + false, + }, + } + for _, tt := range tests { + t.Run( + tt.description, func(t *testing.T) { + state := &resourceModel{ + ProjectId: tt.expected.ProjectId, + InstanceId: tt.expected.InstanceId, + } + err := mapFieldsCreate(tt.input, state, tt.region) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if 
tt.isValid { + diff := cmp.Diff(&tt.expected, state) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }, + ) + } +} + +func TestMapFields(t *testing.T) { + const testRegion = "region" + tests := []struct { + description string + input *sqlserverflexalpha.GetUserResponse + region string + expected resourceModel + isValid bool + }{ + { + "default_values", + &sqlserverflexalpha.GetUserResponse{}, + testRegion, + resourceModel{ + Id: types.Int64Value(1), + UserId: types.Int64Value(1), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Username: types.StringNull(), + Roles: types.List(types.SetNull(types.StringType)), + Host: types.StringNull(), + Port: types.Int64Null(), + Region: types.StringValue(testRegion), + }, + true, + }, + { + "simple_values", + &sqlserverflexalpha.GetUserResponse{ + Roles: &[]string{ + "role_2", + "role_1", + "", + }, + Username: utils.Ptr("username"), + Host: utils.Ptr("host"), + Port: utils.Ptr(int64(1234)), + }, + testRegion, + resourceModel{ + Id: types.Int64Value(2), + UserId: types.Int64Value(2), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Username: types.StringValue("username"), + Roles: types.List( + types.SetValueMust( + types.StringType, []attr.Value{ + types.StringValue(""), + types.StringValue("role_1"), + types.StringValue("role_2"), + }, + ), + ), + Host: types.StringValue("host"), + Port: types.Int64Value(1234), + Region: types.StringValue(testRegion), + }, + true, + }, + { + "null_fields_and_int_conversions", + &sqlserverflexalpha.GetUserResponse{ + Id: utils.Ptr(int64(1)), + Roles: &[]string{}, + Username: nil, + Host: nil, + Port: utils.Ptr(int64(2123456789)), + }, + testRegion, + resourceModel{ + Id: types.Int64Value(1), + UserId: types.Int64Value(1), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Username: types.StringNull(), + Roles: types.List(types.SetValueMust(types.StringType, []attr.Value{})), + 
Host: types.StringNull(), + Port: types.Int64Value(2123456789), + Region: types.StringValue(testRegion), + }, + true, + }, + { + "nil_response", + nil, + testRegion, + resourceModel{}, + false, + }, + { + "nil_response_2", + &sqlserverflexalpha.GetUserResponse{}, + testRegion, + resourceModel{}, + false, + }, + { + "no_resource_id", + &sqlserverflexalpha.GetUserResponse{}, + testRegion, + resourceModel{}, + false, + }, + } + for _, tt := range tests { + t.Run( + tt.description, func(t *testing.T) { + state := &resourceModel{ + ProjectId: tt.expected.ProjectId, + InstanceId: tt.expected.InstanceId, + UserId: tt.expected.UserId, + } + err := mapFields(tt.input, state, tt.region) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(&tt.expected, state) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }, + ) + } +} + +func TestToCreatePayload(t *testing.T) { + tests := []struct { + description string + input *resourceModel + inputRoles []string + expected *sqlserverflexalpha.CreateUserRequestPayload + isValid bool + }{ + { + "default_values", + &resourceModel{}, + []string{}, + &sqlserverflexalpha.CreateUserRequestPayload{ + Roles: &[]string{}, + Username: nil, + }, + true, + }, + { + "default_values", + &resourceModel{ + Username: types.StringValue("username"), + }, + []string{ + "role_1", + "role_2", + }, + &sqlserverflexalpha.CreateUserRequestPayload{ + Roles: &[]string{ + "role_1", + "role_2", + }, + Username: utils.Ptr("username"), + }, + true, + }, + { + "null_fields_and_int_conversions", + &resourceModel{ + Username: types.StringNull(), + }, + []string{ + "", + }, + &sqlserverflexalpha.CreateUserRequestPayload{ + Roles: &[]string{ + "", + }, + Username: nil, + }, + true, + }, + { + "nil_model", + nil, + []string{}, + nil, + false, + }, + { + "nil_roles", + &resourceModel{ + Username: 
types.StringValue("username"), + }, + []string{}, + &sqlserverflexalpha.CreateUserRequestPayload{ + Roles: &[]string{}, + Username: utils.Ptr("username"), + }, + true, + }, + } + for _, tt := range tests { + t.Run( + tt.description, func(t *testing.T) { + output, err := toCreatePayload(tt.input, tt.inputRoles) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(tt.expected, output) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }, + ) + } +} diff --git a/stackit/internal/services/sqlserverflexalpha/user/planModifiers.yaml b/stackit/internal/services/sqlserverflexalpha/user/planModifiers.yaml new file mode 100644 index 00000000..8ff346ab --- /dev/null +++ b/stackit/internal/services/sqlserverflexalpha/user/planModifiers.yaml @@ -0,0 +1,49 @@ +fields: + - name: 'id' + modifiers: + - 'UseStateForUnknown' + + - name: 'instance_id' + validators: + - validate.NoSeparator + - validate.UUID + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' + + - name: 'project_id' + validators: + - validate.NoSeparator + - validate.UUID + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' + + - name: 'region' + modifiers: + - 'RequiresReplace' + + - name: 'user_id' + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' + + - name: 'username' + modifiers: + - 'UseStateForUnknown' + + - name: 'roles' + modifiers: + - 'UseStateForUnknown' + + - name: 'password' + modifiers: + - 'UseStateForUnknown' + + - name: 'uri' + modifiers: + - 'UseStateForUnknown' + + - name: 'status' + modifiers: + - 'UseStateForUnknown' diff --git a/stackit/internal/services/sqlserverflexalpha/user/resource.go b/stackit/internal/services/sqlserverflexalpha/user/resource.go new file mode 100644 index 00000000..ee322fab --- /dev/null +++ b/stackit/internal/services/sqlserverflexalpha/user/resource.go @@ -0,0 +1,553 @@ +package 
sqlserverflexalpha + +import ( + "context" + _ "embed" + "errors" + "fmt" + "net/http" + "slices" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/identityschema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/stackit-sdk-go/core/oapierror" + + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexalpha" + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/conversion" + sqlserverflexalphaUtils "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/utils" + sqlserverflexalphaWait "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/wait/sqlserverflexalpha" + + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core" + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/utils" + + sqlserverflexalphaResGen "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/user/resources_gen" +) + +var ( + _ resource.Resource = &userResource{} + _ resource.ResourceWithConfigure = &userResource{} + _ resource.ResourceWithImportState = &userResource{} + _ resource.ResourceWithModifyPlan = &userResource{} + _ resource.ResourceWithIdentity = &userResource{} + _ resource.ResourceWithValidateConfig = &userResource{} +) + +func NewUserResource() resource.Resource { + return &userResource{} +} + +// resourceModel describes the resource data model. 
+type resourceModel = sqlserverflexalphaResGen.UserModel + +// UserResourceIdentityModel describes the resource's identity attributes. +type UserResourceIdentityModel struct { + ProjectID types.String `tfsdk:"project_id"` + Region types.String `tfsdk:"region"` + InstanceID types.String `tfsdk:"instance_id"` + UserID types.Int64 `tfsdk:"user_id"` +} + +type userResource struct { + client *sqlserverflexalpha.APIClient + providerData core.ProviderData +} + +func (r *userResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_sqlserverflexalpha_user" +} + +// Configure adds the provider configured client to the resource. +func (r *userResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + var ok bool + r.providerData, ok = conversion.ParseProviderData(ctx, req.ProviderData, &resp.Diagnostics) + if !ok { + return + } + + apiClient := sqlserverflexalphaUtils.ConfigureClient(ctx, &r.providerData, &resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + r.client = apiClient + tflog.Info(ctx, "SQLServer Beta Flex user client configured") +} + +// ModifyPlan implements resource.ResourceWithModifyPlan. +// Use the modifier to set the effective region in the current plan. +func (r *userResource) ModifyPlan( + ctx context.Context, + req resource.ModifyPlanRequest, + resp *resource.ModifyPlanResponse, +) { // nolint:gocritic // function signature required by Terraform + var configModel resourceModel + // skip initial empty configuration to avoid follow-up errors + if req.Config.Raw.IsNull() { + return + } + resp.Diagnostics.Append(req.Config.Get(ctx, &configModel)...) + if resp.Diagnostics.HasError() { + return + } + + var planModel resourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &planModel)...) 
+ if resp.Diagnostics.HasError() { + return + } + + utils.AdaptRegion(ctx, configModel.Region, &planModel.Region, r.providerData.GetRegion(), resp) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(resp.Plan.Set(ctx, planModel)...) + if resp.Diagnostics.HasError() { + return + } +} + +//go:embed planModifiers.yaml +var modifiersFileByte []byte + +// Schema defines the schema for the resource. +func (r *userResource) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + s := sqlserverflexalphaResGen.UserResourceSchema(ctx) + + fields, err := utils.ReadModifiersConfig(modifiersFileByte) + if err != nil { + resp.Diagnostics.AddError("error during read modifiers config file", err.Error()) + return + } + + err = utils.AddPlanModifiersToResourceSchema(fields, &s) + if err != nil { + resp.Diagnostics.AddError("error adding plan modifiers", err.Error()) + return + } + resp.Schema = s +} + +// IdentitySchema defines the schema for the resource's identity attributes. 
+func (r *userResource) IdentitySchema(
+	_ context.Context,
+	_ resource.IdentitySchemaRequest,
+	response *resource.IdentitySchemaResponse,
+) {
+	// All four identifiers are needed to address a single user, so every
+	// attribute is required for identity-based import.
+	response.IdentitySchema = identityschema.Schema{
+		Attributes: map[string]identityschema.Attribute{
+			"project_id": identityschema.StringAttribute{
+				RequiredForImport: true, // must be set during import by the practitioner
+			},
+			"region": identityschema.StringAttribute{
+				RequiredForImport: true, // must be set during import by the practitioner
+			},
+			"instance_id": identityschema.StringAttribute{
+				RequiredForImport: true, // must be set during import by the practitioner
+			},
+			"user_id": identityschema.Int64Attribute{
+				RequiredForImport: true, // must be set during import by the practitioner
+			},
+		},
+	}
+}
+
+// ValidateConfig rejects configurations whose "roles" list contains the same
+// role more than once.
+func (r *userResource) ValidateConfig(
+	ctx context.Context,
+	req resource.ValidateConfigRequest,
+	resp *resource.ValidateConfigResponse,
+) {
+	var data resourceModel
+
+	resp.Diagnostics.Append(req.Config.Get(ctx, &data)...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	var roles []string
+	diags := data.Roles.ElementsAs(ctx, &roles, false)
+	resp.Diagnostics.Append(diags...)
+	if diags.HasError() {
+		return
+	}
+
+	// Set-based membership check instead of a quadratic slices.Contains scan.
+	seen := make(map[string]struct{}, len(roles))
+	for _, role := range roles {
+		if _, dup := seen[role]; dup {
+			resp.Diagnostics.AddAttributeError(
+				path.Root("roles"),
+				"Attribute Configuration Error",
+				"defined roles MUST NOT contain duplicates",
+			)
+			return
+		}
+		seen[role] = struct{}{}
+	}
+}
+
+// Create creates the user, waits for the creation to complete, and stores the
+// resulting attributes in state together with the resource identity.
+func (r *userResource) Create(
+	ctx context.Context,
+	req resource.CreateRequest,
+	resp *resource.CreateResponse,
+) { // nolint:gocritic // function signature required by Terraform
+	var model resourceModel
+	diags := req.Plan.Get(ctx, &model)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	ctx = core.InitProviderContext(ctx)
+
+	projectId := model.ProjectId.ValueString()
+	instanceId := model.InstanceId.ValueString()
+	region := model.Region.ValueString()
+
+	ctx = tflog.SetField(ctx, "project_id", projectId)
+	ctx = tflog.SetField(ctx, "instance_id", instanceId)
+	ctx = tflog.SetField(ctx, "region", region)
+
+	// Roles are sorted so the request payload is stable regardless of the
+	// order in which the practitioner declared them.
+	var roles []string
+	if !model.Roles.IsNull() && !model.Roles.IsUnknown() {
+		diags = model.Roles.ElementsAs(ctx, &roles, false)
+		resp.Diagnostics.Append(diags...)
+		if resp.Diagnostics.HasError() {
+			return
+		}
+
+		slices.Sort(roles)
+	}
+
+	// Generate API request body from model
+	payload, err := toCreatePayload(&model, roles)
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating user", fmt.Sprintf("Creating API payload: %v", err))
+		return
+	}
+	// Create new user
+	userResp, err := r.client.CreateUserRequest(
+		ctx,
+		projectId,
+		region,
+		instanceId,
+	).CreateUserRequestPayload(*payload).Execute()
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating user", fmt.Sprintf("Calling API: %v", err))
+		return
+	}
+
+	ctx = core.LogResponse(ctx)
+
+	// Without an id we cannot track (or later delete) the user, so fail loudly.
+	if userResp == nil || userResp.Id == nil || *userResp.Id == 0 {
+		core.LogAndAddError(
+			ctx,
+			&resp.Diagnostics,
+			"Error creating user",
+			"API didn't return user Id. A user might have been created",
+		)
+		return
+	}
+
+	userId := *userResp.Id
+	ctx = tflog.SetField(ctx, "user_id", userId)
+
+	// Set data returned by API in identity
+	identity := UserResourceIdentityModel{
+		ProjectID:  types.StringValue(projectId),
+		Region:     types.StringValue(region),
+		InstanceID: types.StringValue(instanceId),
+		UserID:     types.Int64Value(userId),
+	}
+	resp.Diagnostics.Append(resp.Identity.Set(ctx, identity)...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	// Map the create response early so partial state exists even if the wait
+	// below fails (the password is only available in the create response).
+	err = mapFieldsCreate(userResp, &model, region)
+	if err != nil {
+		core.LogAndAddError(
+			ctx,
+			&resp.Diagnostics,
+			"Error creating user",
+			fmt.Sprintf("Processing API payload: %v", err),
+		)
+		return
+	}
+
+	waitResp, err := sqlserverflexalphaWait.CreateUserWaitHandler(
+		ctx,
+		r.client,
+		projectId,
+		instanceId,
+		region,
+		userId,
+	).SetSleepBeforeWait(
+		90 * time.Second,
+	).SetTimeout(
+		90 * time.Minute,
+	).WaitWithContext(ctx)
+
+	if err != nil {
+		core.LogAndAddError(
+			ctx,
+			&resp.Diagnostics,
+			"Error creating user",
+			fmt.Sprintf("User creation waiting: %v", err),
+		)
+		return
+	}
+
+	if waitResp.Id == nil {
+		core.LogAndAddError(
+			ctx,
+			&resp.Diagnostics,
+			"Error creating user",
+			"User creation waiting: returned id is nil",
+		)
+		return
+	}
+
+	// Map response body to schema
+	err = mapFields(waitResp, &model, region)
+	if err != nil {
+		core.LogAndAddError(
+			ctx,
+			&resp.Diagnostics,
+			"Error creating user",
+			fmt.Sprintf("Processing API payload: %v", err),
+		)
+		return
+	}
+	// Set state to fully populated data
+	diags = resp.State.Set(ctx, model)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+	tflog.Info(ctx, "SQLServer Flex user created")
+}
+
+// Read refreshes the Terraform state with the latest data.
+func (r *userResource) Read(
+	ctx context.Context,
+	req resource.ReadRequest,
+	resp *resource.ReadResponse,
+) { // nolint:gocritic // function signature required by Terraform
+	var model resourceModel
+	diags := req.State.Get(ctx, &model)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	ctx = core.InitProviderContext(ctx)
+
+	projectId := model.ProjectId.ValueString()
+	instanceId := model.InstanceId.ValueString()
+	userId := model.UserId.ValueInt64()
+	region := r.providerData.GetRegionWithOverride(model.Region)
+	ctx = tflog.SetField(ctx, "project_id", projectId)
+	ctx = tflog.SetField(ctx, "instance_id", instanceId)
+	ctx = tflog.SetField(ctx, "user_id", userId)
+	ctx = tflog.SetField(ctx, "region", region)
+
+	userResp, err := r.client.GetUserRequest(ctx, projectId, region, instanceId, userId).Execute()
+	if err != nil {
+		// A 404 means the user is gone upstream: drop it from state so
+		// Terraform plans a re-create instead of failing the refresh.
+		var oapiErr *oapierror.GenericOpenAPIError
+		if errors.As(err, &oapiErr) && oapiErr.StatusCode == http.StatusNotFound {
+			resp.State.RemoveResource(ctx)
+			return
+		}
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading user", fmt.Sprintf("Calling API: %v", err))
+		return
+	}
+
+	ctx = core.LogResponse(ctx)
+
+	// Map response body to schema
+	err = mapFields(userResp, &model, region)
+	if err != nil {
+		core.LogAndAddError(
+			ctx,
+			&resp.Diagnostics,
+			"Error reading user",
+			fmt.Sprintf("Processing API payload: %v", err),
+		)
+		return
+	}
+
+	// Set data returned by API in identity
+	identity := UserResourceIdentityModel{
+		ProjectID:  types.StringValue(projectId),
+		Region:     types.StringValue(region),
+		InstanceID: types.StringValue(instanceId),
+		UserID:     types.Int64Value(userId),
+	}
+	resp.Diagnostics.Append(resp.Identity.Set(ctx, identity)...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	// Set refreshed state
+	diags = resp.State.Set(ctx, model)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+	tflog.Info(ctx, "SQLServer Flex user read")
+}
+
+// Update updates the resource and sets the updated Terraform state on success.
+func (r *userResource) Update(
+	ctx context.Context,
+	_ resource.UpdateRequest,
+	resp *resource.UpdateResponse,
+) { // nolint:gocritic // function signature required by Terraform
+	// Update shouldn't be called: every attribute change forces replacement,
+	// so reaching this method indicates a schema/plan-modifier bug.
+	core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating user", "User can't be updated")
+}
+
+// Delete deletes the resource and removes the Terraform state on success.
+func (r *userResource) Delete(
+	ctx context.Context,
+	req resource.DeleteRequest,
+	resp *resource.DeleteResponse,
+) { // nolint:gocritic // function signature required by Terraform
+	// Retrieve values from the current state (Delete operates on state, not plan)
+	var model resourceModel
+	diags := req.State.Get(ctx, &model)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	ctx = core.InitProviderContext(ctx)
+
+	projectId := model.ProjectId.ValueString()
+	instanceId := model.InstanceId.ValueString()
+	userId := model.UserId.ValueInt64()
+	region := model.Region.ValueString()
+	ctx = tflog.SetField(ctx, "project_id", projectId)
+	ctx = tflog.SetField(ctx, "instance_id", instanceId)
+	ctx = tflog.SetField(ctx, "user_id", userId)
+	ctx = tflog.SetField(ctx, "region", region)
+
+	// Delete the existing user
+	// err := r.client.DeleteUserRequest(ctx, projectId, region, instanceId, userId).Execute()
+	err := r.client.DeleteUserRequestExecute(ctx, projectId, region, instanceId, userId)
+	if err != nil {
+		var oapiErr *oapierror.GenericOpenAPIError
+		ok := errors.As(err, &oapiErr)
+		if !ok {
+			// TODO err handling
+			// NOTE(review): the error is silently swallowed here — Terraform
+			// reports success while the delete failed and the resource stays
+			// in state; add a diagnostic before returning.
+			return
+		}
+
+		switch oapiErr.StatusCode {
+		case http.StatusNotFound:
+			// Already gone upstream: treat as a successful delete.
+			resp.State.RemoveResource(ctx)
+			return
+		// case http.StatusInternalServerError:
+		// 	tflog.Warn(ctx, "[delete user] Wait handler got error 500")
+		// 	return false, nil, nil
+		default:
+			// TODO err handling
+			// NOTE(review): same silent swallow as above — any non-404 API
+			// error is dropped without a diagnostic.
+			return
+		}
+	}
+	// Wait until the user deletion has completed before removing state
+	_, err = sqlserverflexalphaWait.DeleteUserWaitHandler(ctx, r.client, projectId, region, instanceId, userId).
+		WaitWithContext(ctx)
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "User Delete Error", fmt.Sprintf("Calling API: %v", err))
+		return
+	}
+
+	ctx = core.LogResponse(ctx)
+
+	resp.State.RemoveResource(ctx)
+
+	tflog.Info(ctx, "SQLServer Flex user deleted")
+}
+
+// ImportState imports a resource into the Terraform state on success.
+// The expected format of the resource import identifier is: project_id,region,instance_id,user_id
+func (r *userResource) ImportState(
+	ctx context.Context,
+	req resource.ImportStateRequest,
+	resp *resource.ImportStateResponse,
+) {
+	ctx = core.InitProviderContext(ctx)
+
+	if req.ID != "" {
+		idParts := strings.Split(req.ID, core.Separator)
+
+		if len(idParts) != 4 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" || idParts[3] == "" {
+			core.LogAndAddError(
+				ctx, &resp.Diagnostics,
+				"Error importing user",
+				fmt.Sprintf(
+					"Expected import identifier with format [project_id],[region],[instance_id],[user_id], got %q",
+					req.ID,
+				),
+			)
+			return
+		}
+
+		// user_id is numeric in the API; reject non-integer import ids early.
+		userId, err := strconv.ParseInt(idParts[3], 10, 64)
+		if err != nil {
+			core.LogAndAddError(
+				ctx,
+				&resp.Diagnostics,
+				"Error importing user",
+				fmt.Sprintf("Invalid user_id format: %q. It must be a valid integer.", idParts[3]),
+			)
+			return
+		}
+
+		resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...)
+		resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("region"), idParts[1])...)
+		resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), idParts[2])...)
+		resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("user_id"), userId)...)
+
+		tflog.Info(ctx, "SQLServer Flex user state imported")
+
+		return
+	}
+
+	// If no ID is provided, attempt to read identity attributes from the import configuration
+	var identityData UserResourceIdentityModel
+	resp.Diagnostics.Append(req.Identity.Get(ctx, &identityData)...)
+ if resp.Diagnostics.HasError() { + return + } + + projectId := identityData.ProjectID.ValueString() + region := identityData.Region.ValueString() + instanceId := identityData.InstanceID.ValueString() + userId := identityData.UserID.ValueInt64() + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), projectId)...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("region"), region)...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), instanceId)...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("user_id"), userId)...) + + core.LogAndAddWarning( + ctx, + &resp.Diagnostics, + "SQLServer Flex user imported with empty password", + "The user password is not imported as it is only available upon creation of a new user. The password field will be empty.", + ) + tflog.Info(ctx, "SQLServer Flex user state imported") +} diff --git a/stackit/internal/services/sqlserverflexalpha/utils/util.go b/stackit/internal/services/sqlserverflexalpha/utils/util.go new file mode 100644 index 00000000..7fbf0901 --- /dev/null +++ b/stackit/internal/services/sqlserverflexalpha/utils/util.go @@ -0,0 +1,48 @@ +package utils + +import ( + "context" + "fmt" + + sqlserverflex "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexalpha" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/stackitcloud/stackit-sdk-go/core/config" + + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core" + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/utils" +) + +func ConfigureClient( + ctx context.Context, + providerData *core.ProviderData, + diags *diag.Diagnostics, +) *sqlserverflex.APIClient { + apiClientConfigOptions := []config.ConfigurationOption{ + config.WithCustomAuth(providerData.RoundTripper), + 
utils.UserAgentConfigOption(providerData.Version), + } + if providerData.SQLServerFlexCustomEndpoint != "" { + apiClientConfigOptions = append( + apiClientConfigOptions, + config.WithEndpoint(providerData.SQLServerFlexCustomEndpoint), + ) + } else { + apiClientConfigOptions = append(apiClientConfigOptions, config.WithRegion(providerData.GetRegion())) + } + apiClient, err := sqlserverflex.NewAPIClient(apiClientConfigOptions...) + if err != nil { + core.LogAndAddError( + ctx, + diags, + "Error configuring API client", + fmt.Sprintf( + "Configuring client: %v. This is an error related to the provider configuration, not to the resource configuration", + err, + ), + ) + return nil + } + + return apiClient +} diff --git a/stackit/internal/services/sqlserverflexalpha/utils/util_test.go b/stackit/internal/services/sqlserverflexalpha/utils/util_test.go new file mode 100644 index 00000000..91f90030 --- /dev/null +++ b/stackit/internal/services/sqlserverflexalpha/utils/util_test.go @@ -0,0 +1,98 @@ +package utils + +import ( + "context" + "os" + "reflect" + "testing" + + "github.com/hashicorp/terraform-plugin-framework/diag" + sdkClients "github.com/stackitcloud/stackit-sdk-go/core/clients" + "github.com/stackitcloud/stackit-sdk-go/core/config" + + sqlserverflex "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexalpha" + + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core" + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/utils" +) + +const ( + testVersion = "1.2.3" + testCustomEndpoint = "https://sqlserverflex-custom-endpoint.api.stackit.cloud" +) + +func TestConfigureClient(t *testing.T) { + /* mock authentication by setting service account token env variable */ + os.Clearenv() + err := os.Setenv(sdkClients.ServiceAccountToken, "mock-val") + if err != nil { + t.Errorf("error setting env 
variable: %v", err) + } + + type args struct { + providerData *core.ProviderData + } + tests := []struct { + name string + args args + wantErr bool + expected *sqlserverflex.APIClient + }{ + { + name: "default endpoint", + args: args{ + providerData: &core.ProviderData{ + Version: testVersion, + }, + }, + expected: func() *sqlserverflex.APIClient { + apiClient, err := sqlserverflex.NewAPIClient( + config.WithRegion("eu01"), + utils.UserAgentConfigOption(testVersion), + ) + if err != nil { + t.Errorf("error configuring client: %v", err) + } + return apiClient + }(), + wantErr: false, + }, + { + name: "custom endpoint", + args: args{ + providerData: &core.ProviderData{ + Version: testVersion, + SQLServerFlexCustomEndpoint: testCustomEndpoint, + }, + }, + expected: func() *sqlserverflex.APIClient { + apiClient, err := sqlserverflex.NewAPIClient( + utils.UserAgentConfigOption(testVersion), + config.WithEndpoint(testCustomEndpoint), + ) + if err != nil { + t.Errorf("error configuring client: %v", err) + } + return apiClient + }(), + wantErr: false, + }, + } + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + ctx := context.Background() + diags := diag.Diagnostics{} + + actual := ConfigureClient(ctx, tt.args.providerData, &diags) + if diags.HasError() != tt.wantErr { + t.Errorf("ConfigureClient() error = %v, want %v", diags.HasError(), tt.wantErr) + } + + if !reflect.DeepEqual(actual, tt.expected) { + t.Errorf("ConfigureClient() = %v, want %v", actual, tt.expected) + } + }, + ) + } +} diff --git a/stackit/internal/services/sqlserverflexalpha/versions/datasources_gen/version_data_source_gen.go b/stackit/internal/services/sqlserverflexalpha/versions/datasources_gen/version_data_source_gen.go new file mode 100644 index 00000000..cb9008f1 --- /dev/null +++ b/stackit/internal/services/sqlserverflexalpha/versions/datasources_gen/version_data_source_gen.go @@ -0,0 +1,569 @@ +// Code generated by terraform-plugin-framework-generator DO NOT EDIT. 
+ +package sqlserverflexalpha + +import ( + "context" + "fmt" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" + "strings" + + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func VersionDataSourceSchema(ctx context.Context) schema.Schema { + return schema.Schema{ + Attributes: map[string]schema.Attribute{ + "project_id": schema.StringAttribute{ + Required: true, + Description: "The STACKIT project ID.", + MarkdownDescription: "The STACKIT project ID.", + }, + "region": schema.StringAttribute{ + Required: true, + Description: "The region which should be addressed", + MarkdownDescription: "The region which should be addressed", + Validators: []validator.String{ + stringvalidator.OneOf( + "eu01", + ), + }, + }, + "versions": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "beta": schema.BoolAttribute{ + Computed: true, + Description: "Flag if the version is a beta version. If set the version may contain bugs and is not fully tested.", + MarkdownDescription: "Flag if the version is a beta version. 
If set the version may contain bugs and is not fully tested.", + }, + "deprecated": schema.StringAttribute{ + Computed: true, + Description: "Timestamp in RFC3339 format which says when the version will no longer be supported by STACKIT.", + MarkdownDescription: "Timestamp in RFC3339 format which says when the version will no longer be supported by STACKIT.", + }, + "recommend": schema.BoolAttribute{ + Computed: true, + Description: "Flag if the version is recommend by the STACKIT Team.", + MarkdownDescription: "Flag if the version is recommend by the STACKIT Team.", + }, + "version": schema.StringAttribute{ + Computed: true, + Description: "The sqlserver version used for the instance.", + MarkdownDescription: "The sqlserver version used for the instance.", + }, + }, + CustomType: VersionsType{ + ObjectType: types.ObjectType{ + AttrTypes: VersionsValue{}.AttributeTypes(ctx), + }, + }, + }, + Computed: true, + Description: "A list containing available sqlserver versions.", + MarkdownDescription: "A list containing available sqlserver versions.", + }, + }, + } +} + +type VersionModel struct { + ProjectId types.String `tfsdk:"project_id"` + Region types.String `tfsdk:"region"` + Versions types.List `tfsdk:"versions"` +} + +var _ basetypes.ObjectTypable = VersionsType{} + +type VersionsType struct { + basetypes.ObjectType +} + +func (t VersionsType) Equal(o attr.Type) bool { + other, ok := o.(VersionsType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t VersionsType) String() string { + return "VersionsType" +} + +func (t VersionsType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + betaAttribute, ok := attributes["beta"] + + if !ok { + diags.AddError( + "Attribute Missing", + `beta is missing from object`) + + return nil, diags + } + + betaVal, ok := betaAttribute.(basetypes.BoolValue) + + if !ok 
{ + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`beta expected to be basetypes.BoolValue, was: %T`, betaAttribute)) + } + + deprecatedAttribute, ok := attributes["deprecated"] + + if !ok { + diags.AddError( + "Attribute Missing", + `deprecated is missing from object`) + + return nil, diags + } + + deprecatedVal, ok := deprecatedAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`deprecated expected to be basetypes.StringValue, was: %T`, deprecatedAttribute)) + } + + recommendAttribute, ok := attributes["recommend"] + + if !ok { + diags.AddError( + "Attribute Missing", + `recommend is missing from object`) + + return nil, diags + } + + recommendVal, ok := recommendAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`recommend expected to be basetypes.BoolValue, was: %T`, recommendAttribute)) + } + + versionAttribute, ok := attributes["version"] + + if !ok { + diags.AddError( + "Attribute Missing", + `version is missing from object`) + + return nil, diags + } + + versionVal, ok := versionAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`version expected to be basetypes.StringValue, was: %T`, versionAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return VersionsValue{ + Beta: betaVal, + Deprecated: deprecatedVal, + Recommend: recommendVal, + Version: versionVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewVersionsValueNull() VersionsValue { + return VersionsValue{ + state: attr.ValueStateNull, + } +} + +func NewVersionsValueUnknown() VersionsValue { + return VersionsValue{ + state: attr.ValueStateUnknown, + } +} + +func NewVersionsValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (VersionsValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := 
context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing VersionsValue Attribute Value", + "While creating a VersionsValue value, a missing attribute value was detected. "+ + "A VersionsValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("VersionsValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid VersionsValue Attribute Type", + "While creating a VersionsValue value, an invalid attribute value was detected. "+ + "A VersionsValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("VersionsValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("VersionsValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra VersionsValue Attribute Value", + "While creating a VersionsValue value, an extra attribute value was detected. "+ + "A VersionsValue must not contain values beyond the expected attribute types. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra VersionsValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewVersionsValueUnknown(), diags + } + + betaAttribute, ok := attributes["beta"] + + if !ok { + diags.AddError( + "Attribute Missing", + `beta is missing from object`) + + return NewVersionsValueUnknown(), diags + } + + betaVal, ok := betaAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`beta expected to be basetypes.BoolValue, was: %T`, betaAttribute)) + } + + deprecatedAttribute, ok := attributes["deprecated"] + + if !ok { + diags.AddError( + "Attribute Missing", + `deprecated is missing from object`) + + return NewVersionsValueUnknown(), diags + } + + deprecatedVal, ok := deprecatedAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`deprecated expected to be basetypes.StringValue, was: %T`, deprecatedAttribute)) + } + + recommendAttribute, ok := attributes["recommend"] + + if !ok { + diags.AddError( + "Attribute Missing", + `recommend is missing from object`) + + return NewVersionsValueUnknown(), diags + } + + recommendVal, ok := recommendAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`recommend expected to be basetypes.BoolValue, was: %T`, recommendAttribute)) + } + + versionAttribute, ok := attributes["version"] + + if !ok { + diags.AddError( + "Attribute Missing", + `version is missing from object`) + + return NewVersionsValueUnknown(), diags + } + + versionVal, ok := versionAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`version expected to be basetypes.StringValue, was: %T`, versionAttribute)) + } + + if diags.HasError() { + return NewVersionsValueUnknown(), diags + } + + return VersionsValue{ + Beta: betaVal, + Deprecated: 
deprecatedVal, + Recommend: recommendVal, + Version: versionVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewVersionsValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) VersionsValue { + object, diags := NewVersionsValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewVersionsValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t VersionsType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewVersionsValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewVersionsValueUnknown(), nil + } + + if in.IsNull() { + return NewVersionsValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewVersionsValueMust(VersionsValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t VersionsType) ValueType(ctx context.Context) attr.Value { + return VersionsValue{} +} + +var _ basetypes.ObjectValuable = VersionsValue{} + +type VersionsValue struct { + Beta basetypes.BoolValue `tfsdk:"beta"` + Deprecated basetypes.StringValue `tfsdk:"deprecated"` + Recommend basetypes.BoolValue `tfsdk:"recommend"` + Version basetypes.StringValue `tfsdk:"version"` + state attr.ValueState +} + +func (v VersionsValue) ToTerraformValue(ctx 
context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 4) + + var val tftypes.Value + var err error + + attrTypes["beta"] = basetypes.BoolType{}.TerraformType(ctx) + attrTypes["deprecated"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["recommend"] = basetypes.BoolType{}.TerraformType(ctx) + attrTypes["version"] = basetypes.StringType{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 4) + + val, err = v.Beta.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["beta"] = val + + val, err = v.Deprecated.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["deprecated"] = val + + val, err = v.Recommend.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["recommend"] = val + + val, err = v.Version.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["version"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v VersionsValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v VersionsValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v VersionsValue) String() string { + return "VersionsValue" +} + +func (v VersionsValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, 
diag.Diagnostics) { + var diags diag.Diagnostics + + attributeTypes := map[string]attr.Type{ + "beta": basetypes.BoolType{}, + "deprecated": basetypes.StringType{}, + "recommend": basetypes.BoolType{}, + "version": basetypes.StringType{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "beta": v.Beta, + "deprecated": v.Deprecated, + "recommend": v.Recommend, + "version": v.Version, + }) + + return objVal, diags +} + +func (v VersionsValue) Equal(o attr.Value) bool { + other, ok := o.(VersionsValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.Beta.Equal(other.Beta) { + return false + } + + if !v.Deprecated.Equal(other.Deprecated) { + return false + } + + if !v.Recommend.Equal(other.Recommend) { + return false + } + + if !v.Version.Equal(other.Version) { + return false + } + + return true +} + +func (v VersionsValue) Type(ctx context.Context) attr.Type { + return VersionsType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v VersionsValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "beta": basetypes.BoolType{}, + "deprecated": basetypes.StringType{}, + "recommend": basetypes.BoolType{}, + "version": basetypes.StringType{}, + } +} diff --git a/stackit/internal/services/sqlserverflexbeta/database/datasource.go b/stackit/internal/services/sqlserverflexbeta/database/datasource.go new file mode 100644 index 00000000..e3ecd20f --- /dev/null +++ b/stackit/internal/services/sqlserverflexbeta/database/datasource.go @@ -0,0 +1,174 @@ +package sqlserverflexbeta + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + 
"github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/stackit-sdk-go/core/config" + + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/conversion" + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core" + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/utils" + + sqlserverflexbetaPkg "github.com/stackitcloud/stackit-sdk-go/services/sqlserverflex/v3beta1api" + sqlserverflexbetaGen "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexbeta/database/datasources_gen" +) + +var _ datasource.DataSource = (*databaseDataSource)(nil) + +const errorPrefix = "[Sqlserverflexbeta - Database]" + +func NewDatabaseDataSource() datasource.DataSource { + return &databaseDataSource{} +} + +type dataSourceModel struct { + sqlserverflexbetaGen.DatabaseModel + TerraformId types.String `tfsdk:"id"` +} + +type databaseDataSource struct { + client *sqlserverflexbetaPkg.APIClient + providerData core.ProviderData +} + +func (d *databaseDataSource) Metadata( + _ context.Context, + req datasource.MetadataRequest, + resp *datasource.MetadataResponse, +) { + resp.TypeName = req.ProviderTypeName + "_sqlserverflexbeta_database" +} + +func (d *databaseDataSource) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = sqlserverflexbetaGen.DatabaseDataSourceSchema(ctx) + resp.Schema.Attributes["id"] = schema.StringAttribute{ + Computed: true, + Description: "The terraform internal identifier.", + MarkdownDescription: "The terraform internal identifier.", + } +} + +// Configure adds the 
provider configured client to the data source.
func (d *databaseDataSource) Configure(
	ctx context.Context,
	req datasource.ConfigureRequest,
	resp *datasource.ConfigureResponse,
) {
	var ok bool
	d.providerData, ok = conversion.ParseProviderData(ctx, req.ProviderData, &resp.Diagnostics)
	if !ok {
		return
	}

	// A custom endpoint, when configured, takes precedence over region-based routing.
	apiClientConfigOptions := []config.ConfigurationOption{
		config.WithCustomAuth(d.providerData.RoundTripper),
		utils.UserAgentConfigOption(d.providerData.Version),
	}
	if d.providerData.SQLServerFlexCustomEndpoint != "" {
		apiClientConfigOptions = append(
			apiClientConfigOptions,
			config.WithEndpoint(d.providerData.SQLServerFlexCustomEndpoint),
		)
	} else {
		apiClientConfigOptions = append(
			apiClientConfigOptions,
			config.WithRegion(d.providerData.GetRegion()),
		)
	}
	apiClient, err := sqlserverflexbetaPkg.NewAPIClient(apiClientConfigOptions...)
	if err != nil {
		resp.Diagnostics.AddError(
			"Error configuring API client",
			fmt.Sprintf(
				"Configuring client: %v. This is an error related to the provider configuration, not to the resource configuration",
				err,
			),
		)
		return
	}
	d.client = apiClient
	tflog.Info(ctx, fmt.Sprintf("%s client configured", errorPrefix))
}

// Read fetches a single database by project/region/instance/name and mirrors it
// into the Terraform state, including the synthetic internal "id" attribute.
func (d *databaseDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
	var data dataSourceModel
	// Read Terraform configuration data into the model
	resp.Diagnostics.Append(req.Config.Get(ctx, &data)...)

	if resp.Diagnostics.HasError() {
		return
	}

	ctx = core.InitProviderContext(ctx)

	// Extract identifiers from the plan
	projectId := data.ProjectId.ValueString()
	region := d.providerData.GetRegionWithOverride(data.Region)
	instanceId := data.InstanceId.ValueString()

	ctx = tflog.SetField(ctx, "project_id", projectId)
	ctx = tflog.SetField(ctx, "region", region)
	ctx = tflog.SetField(ctx, "instance_id", instanceId)

	databaseName := data.DatabaseName.ValueString()

	databaseResp, err := d.client.DefaultAPI.GetDatabaseRequest(ctx, projectId, region, instanceId, databaseName).Execute()
	if err != nil {
		// NOTE(review): state is removed on *every* API error, not only 404s —
		// confirm this is intended for a data source (transient errors included).
		handleReadError(ctx, &resp.Diagnostics, err, projectId, instanceId)
		resp.State.RemoveResource(ctx)
		return
	}

	ctx = core.LogResponse(ctx)
	// Map response body to schema and populate Computed attribute values
	err = mapFields(databaseResp, &data, region)
	if err != nil {
		core.LogAndAddError(
			ctx,
			&resp.Diagnostics,
			"Error reading database",
			fmt.Sprintf("Processing API payload: %v", err),
		)
		return
	}

	// Save data into Terraform state
	resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)

	tflog.Info(ctx, "SQL Server Flex beta database read")
}

// handleReadError centralizes API error handling for the Read operation.
// It logs the error and adds a user-facing diagnostic, with tailored messages
// for the HTTP status codes the database endpoint is known to return.
func handleReadError(ctx context.Context, diags *diag.Diagnostics, err error, projectId, instanceId string) {
	utils.LogError(
		ctx,
		diags,
		err,
		"Reading database",
		fmt.Sprintf(
			"Could not retrieve database for instance %q in project %q.",
			instanceId,
			projectId,
		),
		// Per-status-code overrides for the generic message above.
		map[int]string{
			http.StatusBadRequest: fmt.Sprintf(
				"Invalid request parameters for project %q and instance %q.",
				projectId,
				instanceId,
			),
			http.StatusNotFound: fmt.Sprintf(
				"Database, instance %q, or project %q not found.",
				instanceId,
				projectId,
			),
			http.StatusForbidden: fmt.Sprintf("Forbidden access to project %q.", projectId),
		},
	)
}
diff --git a/stackit/internal/services/sqlserverflexbeta/database/mapper.go b/stackit/internal/services/sqlserverflexbeta/database/mapper.go
new file mode 100644
index 00000000..39bd7da0
--- /dev/null
+++ b/stackit/internal/services/sqlserverflexbeta/database/mapper.go
@@ -0,0 +1,104 @@
package sqlserverflexbeta

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-framework/types"
	utils2 "github.com/stackitcloud/stackit-sdk-go/core/utils"

	sqlserverflexbeta "github.com/stackitcloud/stackit-sdk-go/services/sqlserverflex/v3beta1api"
	"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/utils"
)

// mapFields maps fields from a GetDatabase API response into the data source model.
+func mapFields(source *sqlserverflexbeta.GetDatabaseResponse, model *dataSourceModel, region string) error { + if source == nil { + return fmt.Errorf("response is nil") + } + if source.Id == 0 { + return fmt.Errorf("id not present") + } + if model == nil { + return fmt.Errorf("model given is nil") + } + + var databaseId int64 + if model.Id.ValueInt64() != 0 { + databaseId = model.Id.ValueInt64() + } else if source.Id != 0 { + databaseId = source.Id + } else { + return fmt.Errorf("database id not present") + } + + model.Id = types.Int64Value(databaseId) + model.DatabaseName = types.StringValue(source.GetName()) + model.Name = types.StringValue(source.GetName()) + model.Owner = types.StringValue(source.GetOwner()) + model.Region = types.StringValue(region) + model.ProjectId = types.StringValue(model.ProjectId.ValueString()) + model.InstanceId = types.StringValue(model.InstanceId.ValueString()) + model.CompatibilityLevel = types.Int64Value(int64(source.GetCompatibilityLevel())) + model.CollationName = types.StringValue(source.GetCollationName()) + + model.TerraformId = utils.BuildInternalTerraformId( + model.ProjectId.ValueString(), + region, + model.InstanceId.ValueString(), + model.DatabaseName.ValueString(), + ) + + return nil +} + +// mapResourceFields maps fields from a ListDatabase API response to a resourceModel for the resource. 
+func mapResourceFields(source *sqlserverflexbeta.GetDatabaseResponse, model *resourceModel, region string) error { + if source == nil { + return fmt.Errorf("response is nil") + } + if source.Id == 0 { + return fmt.Errorf("id not present") + } + if model == nil { + return fmt.Errorf("model input is nil") + } + + var databaseId int64 + if model.Id.ValueInt64() != 0 { + databaseId = model.Id.ValueInt64() + } else if source.Id != 0 { + databaseId = source.Id + } else { + return fmt.Errorf("database id not present") + } + + model.Id = types.Int64Value(databaseId) + model.DatabaseName = types.StringValue(source.GetName()) + model.Name = types.StringValue(source.GetName()) + model.Owner = types.StringValue(source.GetOwner()) + model.Region = types.StringValue(region) + model.ProjectId = types.StringValue(model.ProjectId.ValueString()) + model.InstanceId = types.StringValue(model.InstanceId.ValueString()) + + model.Compatibility = types.Int64Value(int64(source.GetCompatibilityLevel())) + model.CompatibilityLevel = types.Int64Value(int64(source.GetCompatibilityLevel())) + + model.Collation = types.StringValue(source.GetCollationName()) // it does not come back from api + model.CollationName = types.StringValue(source.GetCollationName()) + + return nil +} + +// toCreatePayload converts the resource model to an API create payload. 
+func toCreatePayload(model *resourceModel) (*sqlserverflexbeta.CreateDatabaseRequestPayload, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + + return &sqlserverflexbeta.CreateDatabaseRequestPayload{ + Name: model.Name.ValueString(), + Owner: model.Owner.ValueString(), + Collation: model.Collation.ValueStringPointer(), + Compatibility: utils2.Ptr(int32(model.Compatibility.ValueInt64())), + }, nil +} diff --git a/stackit/internal/services/sqlserverflexbeta/database/mapper_test.go b/stackit/internal/services/sqlserverflexbeta/database/mapper_test.go new file mode 100644 index 00000000..f865f22f --- /dev/null +++ b/stackit/internal/services/sqlserverflexbeta/database/mapper_test.go @@ -0,0 +1,232 @@ +package sqlserverflexbeta + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexbeta" + datasource "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexbeta/database/datasources_gen" +) + +func TestMapFields(t *testing.T) { + type given struct { + source *sqlserverflexbeta.GetDatabaseResponse + model *dataSourceModel + region string + } + type expected struct { + model *dataSourceModel + err bool + } + + testcases := []struct { + name string + given given + expected expected + }{ + { + name: "should map fields correctly", + given: given{ + source: &sqlserverflexbeta.GetDatabaseResponse{ + Id: utils.Ptr(int64(1)), + Name: utils.Ptr("my-db"), + CollationName: utils.Ptr("collation"), + CompatibilityLevel: utils.Ptr(int64(150)), + Owner: utils.Ptr("my-owner"), + }, + model: &dataSourceModel{ + DatabaseModel: datasource.DatabaseModel{ + ProjectId: types.StringValue("my-project"), + InstanceId: types.StringValue("my-instance"), + }, + }, + region: 
"eu01", + }, + expected: expected{ + model: &dataSourceModel{ + DatabaseModel: datasource.DatabaseModel{ + Id: types.Int64Value(1), + Name: types.StringValue("my-db"), + DatabaseName: types.StringValue("my-db"), + Owner: types.StringValue("my-owner"), + Region: types.StringValue("eu01"), + InstanceId: types.StringValue("my-instance"), + ProjectId: types.StringValue("my-project"), + CompatibilityLevel: types.Int64Value(150), + CollationName: types.StringValue("collation"), + }, + TerraformId: types.StringValue("my-project,eu01,my-instance,my-db"), + }, + }, + }, + { + name: "should fail on nil source", + given: given{ + source: nil, + model: &dataSourceModel{}, + }, + expected: expected{err: true}, + }, + { + name: "should fail on nil source ID", + given: given{ + source: &sqlserverflexbeta.GetDatabaseResponse{Id: nil}, + model: &dataSourceModel{}, + }, + expected: expected{err: true}, + }, + { + name: "should fail on nil model", + given: given{ + source: &sqlserverflexbeta.GetDatabaseResponse{Id: utils.Ptr(int64(1))}, + model: nil, + }, + expected: expected{err: true}, + }, + } + + for _, tc := range testcases { + t.Run( + tc.name, func(t *testing.T) { + err := mapFields(tc.given.source, tc.given.model, tc.given.region) + if (err != nil) != tc.expected.err { + t.Fatalf("expected error: %v, got: %v", tc.expected.err, err) + } + if err == nil { + if diff := cmp.Diff(tc.expected.model, tc.given.model); diff != "" { + t.Errorf("model mismatch (-want +got):\n%s", diff) + } + } + }, + ) + } +} + +func TestMapResourceFields(t *testing.T) { + type given struct { + source *sqlserverflexbeta.GetDatabaseResponse + model *resourceModel + region string + } + type expected struct { + model *resourceModel + err bool + } + + testcases := []struct { + name string + given given + expected expected + }{ + { + name: "should map fields correctly", + given: given{ + source: &sqlserverflexbeta.GetDatabaseResponse{ + Id: utils.Ptr(int64(1)), + Name: utils.Ptr("my-db"), + Owner: 
utils.Ptr("my-owner"), + }, + model: &resourceModel{ + ProjectId: types.StringValue("my-project"), + InstanceId: types.StringValue("my-instance"), + }, + region: "eu01", + }, + expected: expected{ + model: &resourceModel{ + Id: types.Int64Value(1), + Name: types.StringValue("my-db"), + Compatibility: types.Int64Value(0), + CompatibilityLevel: types.Int64Value(0), + Collation: types.StringValue(""), + CollationName: types.StringValue(""), + DatabaseName: types.StringValue("my-db"), + InstanceId: types.StringValue("my-instance"), + ProjectId: types.StringValue("my-project"), + Region: types.StringValue("eu01"), + Owner: types.StringValue("my-owner"), + }, + }, + }, + { + name: "should fail on nil source", + given: given{ + source: nil, + model: &resourceModel{}, + }, + expected: expected{err: true}, + }, + } + + for _, tc := range testcases { + t.Run( + tc.name, func(t *testing.T) { + err := mapResourceFields(tc.given.source, tc.given.model, tc.given.region) + if (err != nil) != tc.expected.err { + t.Fatalf("expected error: %v, got: %v", tc.expected.err, err) + } + if err == nil { + if diff := cmp.Diff(tc.expected.model, tc.given.model); diff != "" { + t.Errorf("model mismatch (-want +got):\n%s", diff) + } + } + }, + ) + } +} + +func TestToCreatePayload(t *testing.T) { + type given struct { + model *resourceModel + } + type expected struct { + payload *sqlserverflexbeta.CreateDatabaseRequestPayload + err bool + } + + testcases := []struct { + name string + given given + expected expected + }{ + { + name: "should convert model to payload", + given: given{ + model: &resourceModel{ + Name: types.StringValue("my-db"), + Owner: types.StringValue("my-owner"), + }, + }, + expected: expected{ + payload: &sqlserverflexbeta.CreateDatabaseRequestPayload{ + Name: utils.Ptr("my-db"), + Owner: utils.Ptr("my-owner"), + }, + }, + }, + { + name: "should fail on nil model", + given: given{model: nil}, + expected: expected{err: true}, + }, + } + + for _, tc := range testcases { + 
t.Run( + tc.name, func(t *testing.T) { + actual, err := toCreatePayload(tc.given.model) + if (err != nil) != tc.expected.err { + t.Fatalf("expected error: %v, got: %v", tc.expected.err, err) + } + if err == nil { + if diff := cmp.Diff(tc.expected.payload, actual); diff != "" { + t.Errorf("payload mismatch (-want +got):\n%s", diff) + } + } + }, + ) + } +} diff --git a/stackit/internal/services/sqlserverflexbeta/database/planModifiers.yaml b/stackit/internal/services/sqlserverflexbeta/database/planModifiers.yaml new file mode 100644 index 00000000..08d7e6cf --- /dev/null +++ b/stackit/internal/services/sqlserverflexbeta/database/planModifiers.yaml @@ -0,0 +1,56 @@ +fields: + - name: 'id' + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' + + - name: 'instance_id' + validators: + - validate.NoSeparator + - validate.UUID + modifiers: + - 'RequiresReplace' + + - name: 'project_id' + validators: + - validate.NoSeparator + - validate.UUID + modifiers: + - 'RequiresReplace' + + - name: 'region' + modifiers: + - 'RequiresReplace' + + - name: 'name' + modifiers: + - 'RequiresReplace' + + - name: 'collation' + modifiers: + - 'RequiresReplace' + + - name: 'owner' + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' + + - name: 'database_name' + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' + + - name: 'collation_name' + modifiers: + - 'RequiresReplace' + - 'UseStateForUnknown' + + - name: 'compatibility' + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' + + - name: 'compatibility_level' + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' diff --git a/stackit/internal/services/sqlserverflexbeta/database/resource.go b/stackit/internal/services/sqlserverflexbeta/database/resource.go new file mode 100644 index 00000000..ae991c0d --- /dev/null +++ b/stackit/internal/services/sqlserverflexbeta/database/resource.go @@ -0,0 +1,558 @@ +package sqlserverflexbeta + +import ( + "context" + _ "embed" + "errors" + "fmt" + "net/http" + "strings" 
+ "time" + + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/identityschema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/core/oapierror" + utils2 "github.com/stackitcloud/stackit-sdk-go/core/utils" + + sqlserverflexbeta "github.com/stackitcloud/stackit-sdk-go/services/sqlserverflex/v3beta1api" + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/conversion" + wait "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/wait/sqlserverflexbeta" + + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core" + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/utils" + + sqlserverflexbetaResGen "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexbeta/database/resources_gen" +) + +var ( + _ resource.Resource = &databaseResource{} + _ resource.ResourceWithConfigure = &databaseResource{} + _ resource.ResourceWithImportState = &databaseResource{} + _ resource.ResourceWithModifyPlan = &databaseResource{} + _ resource.ResourceWithIdentity = &databaseResource{} + + // Define errors + errDatabaseNotFound = errors.New("database not found") +) + +func NewDatabaseResource() resource.Resource { + return &databaseResource{} +} + +// resourceModel describes the resource data model. 
+type resourceModel = sqlserverflexbetaResGen.DatabaseModel + +type databaseResource struct { + client *sqlserverflexbeta.APIClient + providerData core.ProviderData +} + +type DatabaseResourceIdentityModel struct { + ProjectID types.String `tfsdk:"project_id"` + Region types.String `tfsdk:"region"` + InstanceID types.String `tfsdk:"instance_id"` + DatabaseName types.String `tfsdk:"database_name"` +} + +func (r *databaseResource) Metadata( + _ context.Context, + req resource.MetadataRequest, + resp *resource.MetadataResponse, +) { + resp.TypeName = req.ProviderTypeName + "_sqlserverflexbeta_database" +} + +//go:embed planModifiers.yaml +var modifiersFileByte []byte + +func (r *databaseResource) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + s := sqlserverflexbetaResGen.DatabaseResourceSchema(ctx) + + fields, err := utils.ReadModifiersConfig(modifiersFileByte) + if err != nil { + resp.Diagnostics.AddError("error during read modifiers config file", err.Error()) + return + } + + err = utils.AddPlanModifiersToResourceSchema(fields, &s) + if err != nil { + resp.Diagnostics.AddError("error adding plan modifiers", err.Error()) + return + } + resp.Schema = s +} + +func (r *databaseResource) IdentitySchema( + _ context.Context, + _ resource.IdentitySchemaRequest, + resp *resource.IdentitySchemaResponse, +) { + resp.IdentitySchema = identityschema.Schema{ + Attributes: map[string]identityschema.Attribute{ + "project_id": identityschema.StringAttribute{ + RequiredForImport: true, // must be set during import by the practitioner + }, + "region": identityschema.StringAttribute{ + RequiredForImport: true, // can be defaulted by the provider configuration + }, + "instance_id": identityschema.StringAttribute{ + RequiredForImport: true, // can be defaulted by the provider configuration + }, + "database_name": identityschema.StringAttribute{ + RequiredForImport: true, // can be defaulted by the provider configuration + }, + }, + } +} + +// 
Configure adds the provider configured client to the resource. +func (r *databaseResource) Configure( + ctx context.Context, + req resource.ConfigureRequest, + resp *resource.ConfigureResponse, +) { + var ok bool + r.providerData, ok = conversion.ParseProviderData(ctx, req.ProviderData, &resp.Diagnostics) + if !ok { + return + } + + apiClientConfigOptions := []config.ConfigurationOption{ + config.WithCustomAuth(r.providerData.RoundTripper), + utils.UserAgentConfigOption(r.providerData.Version), + } + if r.providerData.SQLServerFlexCustomEndpoint != "" { + apiClientConfigOptions = append( + apiClientConfigOptions, + config.WithEndpoint(r.providerData.SQLServerFlexCustomEndpoint), + ) + } else { + apiClientConfigOptions = append(apiClientConfigOptions, config.WithRegion(r.providerData.GetRegion())) + } + apiClient, err := sqlserverflexbeta.NewAPIClient(apiClientConfigOptions...) + if err != nil { + resp.Diagnostics.AddError( + "Error configuring API client", + fmt.Sprintf( + "Configuring client: %v. This is an error related to the provider configuration, not to the resource configuration", + err, + ), + ) + return + } + r.client = apiClient + tflog.Info(ctx, "sqlserverflexbeta.Database client configured") +} + +func (r *databaseResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var data resourceModel + createErr := "DB create error" + + // Read Terraform plan data into the model + resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) 
+ + if resp.Diagnostics.HasError() { + return + } + + ctx = core.InitProviderContext(ctx) + + projectId := data.ProjectId.ValueString() + region := data.Region.ValueString() + instanceId := data.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "region", region) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + databaseName := data.Name.ValueString() + ctx = tflog.SetField(ctx, "database_name", databaseName) + + payLoad := sqlserverflexbeta.CreateDatabaseRequestPayload{} + if !data.Collation.IsNull() && !data.Collation.IsUnknown() { + payLoad.Collation = data.Collation.ValueStringPointer() + } + + if !data.Compatibility.IsNull() && !data.Compatibility.IsUnknown() { + payLoad.Compatibility = utils2.Ptr(int32(data.Compatibility.ValueInt64())) + } + + payLoad.Name = data.Name.ValueString() + payLoad.Owner = data.Owner.ValueString() + + _, err := wait.WaitForUserWaitHandler( + ctx, + r.client.DefaultAPI, + projectId, + instanceId, + region, + data.Owner.ValueString(), + ). + SetSleepBeforeWait(10 * time.Second). + WaitWithContext(ctx) + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + createErr, + fmt.Sprintf("Calling API: %v", err), + ) + return + } + + createResp, err := r.client.DefaultAPI.CreateDatabaseRequest(ctx, projectId, region, instanceId). + CreateDatabaseRequestPayload(payLoad). + Execute() + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + createErr, + fmt.Sprintf("Calling API: %v", err), + ) + return + } + + if createResp == nil || createResp.Id == 0 { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + "Error creating database", + "API didn't return database Id. 
A database might have been created", + ) + return + } + + databaseId := createResp.Id + + ctx = tflog.SetField(ctx, "database_id", databaseId) + + ctx = core.LogResponse(ctx) + + // Set data returned by API in identity + identity := DatabaseResourceIdentityModel{ + ProjectID: types.StringValue(projectId), + Region: types.StringValue(region), + InstanceID: types.StringValue(instanceId), + DatabaseName: types.StringValue(databaseName), + } + resp.Diagnostics.Append(resp.Identity.Set(ctx, identity)...) + if resp.Diagnostics.HasError() { + return + } + + waitResp, err := wait.CreateDatabaseWaitHandler( + ctx, + r.client.DefaultAPI, + projectId, + instanceId, + region, + databaseName, + ).SetSleepBeforeWait( + 30 * time.Second, + ).SetTimeout( + 15 * time.Minute, + ).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + createErr, + fmt.Sprintf("Database creation waiting: %v", err), + ) + return + } + + if waitResp.Id == 0 { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + createErr, + "Database creation waiting: returned id is nil", + ) + return + } + + if waitResp.Id != databaseId { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + createErr, + "Database creation waiting: returned id is different", + ) + return + } + + if waitResp.Owner != data.Owner.ValueString() { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + createErr, + "Database creation waiting: returned owner is different", + ) + return + } + + if waitResp.Name != data.Name.ValueString() { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + createErr, + "Database creation waiting: returned name is different", + ) + return + } + + // Map response body to schema + err = mapResourceFields(waitResp, &data, region) + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + "Error creating database", + fmt.Sprintf("Processing API payload: %v", err), + ) + return + } + + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, 
data)...) + if resp.Diagnostics.HasError() { + return + } + + // Save data into Terraform state + + tflog.Info(ctx, "sqlserverflexbeta.Database created") +} + +func (r *databaseResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + var model resourceModel + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + ctx = core.InitProviderContext(ctx) + + projectId := model.ProjectId.ValueString() + region := model.Region.ValueString() + instanceId := model.InstanceId.ValueString() + databaseName := model.DatabaseName.ValueString() + + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + ctx = tflog.SetField(ctx, "region", region) + ctx = tflog.SetField(ctx, "database_name", databaseName) + + databaseResp, err := r.client.DefaultAPI.GetDatabaseRequest(ctx, projectId, region, instanceId, databaseName).Execute() + if err != nil { + oapiErr, ok := err.(*oapierror.GenericOpenAPIError) //nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped + if (ok && oapiErr.StatusCode == http.StatusNotFound) || errors.Is(err, errDatabaseNotFound) { + resp.State.RemoveResource(ctx) + return + } + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading database", fmt.Sprintf("Calling API: %v", err)) + return + } + + ctx = core.LogResponse(ctx) + + // Map response body to schema + err = mapResourceFields(databaseResp, &model, region) + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + "Error reading database", + fmt.Sprintf("Processing API payload: %v", err), + ) + return + } + + // Save identity into Terraform state + identity := DatabaseResourceIdentityModel{ + ProjectID: types.StringValue(projectId), + Region: types.StringValue(region), + InstanceID: types.StringValue(instanceId), + DatabaseName: types.StringValue(databaseName), + } + 
	resp.Diagnostics.Append(resp.Identity.Set(ctx, identity)...)
	if resp.Diagnostics.HasError() {
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &model)...)
	if resp.Diagnostics.HasError() {
		return
	}

	tflog.Info(ctx, "sqlserverflexbeta.Database read")
}

// Update always fails: the API currently exposes no endpoint to update a
// database, and every updatable-looking attribute requires replacement.
func (r *databaseResource) Update(ctx context.Context, _ resource.UpdateRequest, resp *resource.UpdateResponse) {
	// TODO: Check update api endpoint - not available at the moment, so return an error for now
	core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating database", "there is no way to update a database")
}

// Delete removes the database identified by project/region/instance/name and
// drops it from the Terraform state.
func (r *databaseResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
	// nolint:gocritic // function signature required by Terraform
	var model resourceModel
	diags := req.State.Get(ctx, &model)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}

	projectId := model.ProjectId.ValueString()
	region := model.Region.ValueString()
	instanceId := model.InstanceId.ValueString()
	databaseName := model.DatabaseName.ValueString()

	ctx = tflog.SetField(ctx, "project_id", projectId)
	ctx = tflog.SetField(ctx, "instance_id", instanceId)
	ctx = tflog.SetField(ctx, "region", region)
	ctx = tflog.SetField(ctx, "database_name", databaseName)

	// Delete the existing database.
	err := r.client.DefaultAPI.DeleteDatabaseRequest(ctx, projectId, region, instanceId, databaseName).Execute()
	if err != nil {
		core.LogAndAddError(
			ctx,
			&resp.Diagnostics,
			"Error deleting database",
			fmt.Sprintf(
				"Calling API: %v\nname: %s, region: %s, instanceId: %s", err, databaseName, region, instanceId,
			),
		)
		return
	}

	// TODO: wait handler?? — deletion is not awaited; the resource is removed
	// from state as soon as the API accepts the request.

	ctx = core.LogResponse(ctx)
	resp.State.RemoveResource(ctx)

	tflog.Info(ctx, "sqlserverflexbeta.Database deleted")
}

// ModifyPlan implements resource.ResourceWithModifyPlan.
// Use the modifier to set the effective region in the current plan, and to
// pre-populate the resource identity from whatever plan values are already known.
func (r *databaseResource) ModifyPlan(
	ctx context.Context,
	req resource.ModifyPlanRequest,
	resp *resource.ModifyPlanResponse,
) { // nolint:gocritic // function signature required by Terraform
	// skip initial empty configuration to avoid follow-up errors
	if req.Config.Raw.IsNull() {
		return
	}

	var configModel resourceModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &configModel)...)
	if resp.Diagnostics.HasError() {
		return
	}

	var planModel resourceModel
	resp.Diagnostics.Append(req.Plan.Get(ctx, &planModel)...)
	if resp.Diagnostics.HasError() {
		return
	}

	// Resolve the effective region: explicit configuration wins over the
	// provider-level default.
	utils.AdaptRegion(ctx, configModel.Region, &planModel.Region, r.providerData.GetRegion(), resp)
	if resp.Diagnostics.HasError() {
		return
	}

	var identityModel DatabaseResourceIdentityModel
	identityModel.ProjectID = planModel.ProjectId
	identityModel.Region = planModel.Region

	// Instance id and name may still be unknown during planning (e.g. derived
	// from another resource); only copy them once they are concrete.
	if !planModel.InstanceId.IsNull() && !planModel.InstanceId.IsUnknown() {
		identityModel.InstanceID = planModel.InstanceId
	}

	if !planModel.Name.IsNull() && !planModel.Name.IsUnknown() {
		identityModel.DatabaseName = planModel.Name
	}

	resp.Diagnostics.Append(resp.Identity.Set(ctx, identityModel)...)
	if resp.Diagnostics.HasError() {
		return
	}

	resp.Diagnostics.Append(resp.Plan.Set(ctx, planModel)...)
	if resp.Diagnostics.HasError() {
		return
	}
}

// ImportState imports a resource into the Terraform state on success.
// The expected format of the resource import identifier is:
// project_id,region,instance_id,database_name
// (a previous comment wrongly described a DNS record-set identifier).
// When no ID string is given, the identity attributes from the import
// configuration are used instead.
func (r *databaseResource) ImportState(
	ctx context.Context,
	req resource.ImportStateRequest,
	resp *resource.ImportStateResponse,
) {
	ctx = core.InitProviderContext(ctx)

	if req.ID != "" {
		idParts := strings.Split(req.ID, core.Separator)

		if len(idParts) != 4 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" || idParts[3] == "" {
			core.LogAndAddError(
				ctx, &resp.Diagnostics,
				"Error importing database",
				fmt.Sprintf(
					"Expected import identifier with format [project_id],[region],[instance_id],[database_name] Got: %q",
					req.ID,
				),
			)
			return
		}

		resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...)
		resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("region"), idParts[1])...)
		resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), idParts[2])...)
		resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("database_name"), idParts[3])...)

		var identityData DatabaseResourceIdentityModel
		identityData.ProjectID = types.StringValue(idParts[0])
		identityData.Region = types.StringValue(idParts[1])
		identityData.InstanceID = types.StringValue(idParts[2])
		identityData.DatabaseName = types.StringValue(idParts[3])

		resp.Diagnostics.Append(resp.Identity.Set(ctx, &identityData)...)
		if resp.Diagnostics.HasError() {
			return
		}

		tflog.Info(ctx, "Sqlserverflexbeta database state imported")
		return
	}

	// If no ID is provided, attempt to read identity attributes from the import configuration
	var identityData DatabaseResourceIdentityModel
	resp.Diagnostics.Append(req.Identity.Get(ctx, &identityData)...)
	if resp.Diagnostics.HasError() {
		return
	}

	projectId := identityData.ProjectID.ValueString()
	region := identityData.Region.ValueString()
	instanceId := identityData.InstanceID.ValueString()
	databaseName := identityData.DatabaseName.ValueString()

	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), projectId)...)
	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("region"), region)...)
	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), instanceId)...)
	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("database_name"), databaseName)...)

	tflog.Info(ctx, "Sqlserverflexbeta database state imported")
}
diff --git a/stackit/internal/services/sqlserverflexbeta/flavor/datasource.go b/stackit/internal/services/sqlserverflexbeta/flavor/datasource.go
new file mode 100644
index 00000000..06e055f2
--- /dev/null
+++ b/stackit/internal/services/sqlserverflexbeta/flavor/datasource.go
@@ -0,0 +1,355 @@
package sqlserverFlexBetaFlavor

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-plugin-framework/attr"
	"github.com/hashicorp/terraform-plugin-framework/datasource"
	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
	"github.com/hashicorp/terraform-plugin-framework/types"
	"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	"github.com/stackitcloud/stackit-sdk-go/core/config"

	"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/conversion"
	"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core"
	"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/utils"

	sqlserverflexbetaPkg "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexbeta"
	sqlserverflexbetaGen "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexbeta/flavor/datasources_gen"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource              = &flavorDataSource{}
	_ datasource.DataSourceWithConfigure = &flavorDataSource{}
)

// FlavorModel is the Terraform state/config model of the
// stackit_sqlserverflexbeta_flavor data source. The lookup inputs are
// project_id, region, cpu, ram, storage_class and node_type; the remaining
// fields are computed from the matching flavor returned by the API.
type FlavorModel struct {
	ProjectId    types.String `tfsdk:"project_id"`
	Region       types.String `tfsdk:"region"`
	StorageClass types.String `tfsdk:"storage_class"`
	Cpu          types.Int64  `tfsdk:"cpu"`
	Description  types.String `tfsdk:"description"`
	Id           types.String `tfsdk:"id"`
	FlavorId     types.String `tfsdk:"flavor_id"`
	MaxGb        types.Int64  `tfsdk:"max_gb"`
	// NOTE: the Go field is called Memory but is exposed to Terraform as "ram".
	Memory         types.Int64 `tfsdk:"ram"`
	MinGb          types.Int64 `tfsdk:"min_gb"`
	NodeType       types.String `tfsdk:"node_type"`
	StorageClasses types.List   `tfsdk:"storage_classes"`
}

// NewFlavorDataSource is a helper function to simplify the provider implementation.
func NewFlavorDataSource() datasource.DataSource {
	return &flavorDataSource{}
}

// flavorDataSource is the data source implementation. client is populated in
// Configure from the provider-level configuration.
type flavorDataSource struct {
	client       *sqlserverflexbetaPkg.APIClient
	providerData core.ProviderData
}

// Metadata returns the data source type name.
func (r *flavorDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_sqlserverflexbeta_flavor"
}

// Configure adds the provider configured client to the data source.
+func (r *flavorDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + var ok bool + r.providerData, ok = conversion.ParseProviderData(ctx, req.ProviderData, &resp.Diagnostics) + if !ok { + return + } + + apiClientConfigOptions := []config.ConfigurationOption{ + config.WithCustomAuth(r.providerData.RoundTripper), + utils.UserAgentConfigOption(r.providerData.Version), + } + if r.providerData.SQLServerFlexCustomEndpoint != "" { + apiClientConfigOptions = append( + apiClientConfigOptions, + config.WithEndpoint(r.providerData.SQLServerFlexCustomEndpoint), + ) + } else { + apiClientConfigOptions = append( + apiClientConfigOptions, + config.WithRegion(r.providerData.GetRegion()), + ) + } + apiClient, err := sqlserverflexbetaPkg.NewAPIClient(apiClientConfigOptions...) + if err != nil { + resp.Diagnostics.AddError( + "Error configuring API client", + fmt.Sprintf( + "Configuring client: %v. This is an error related to the provider configuration, not to the resource configuration", + err, + ), + ) + return + } + r.client = apiClient + tflog.Info(ctx, "SQL Server Flex instance client configured") +} + +func (r *flavorDataSource) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "project_id": schema.StringAttribute{ + Required: true, + Description: "The project ID of the flavor.", + MarkdownDescription: "The project ID of the flavor.", + }, + "region": schema.StringAttribute{ + Required: true, + Description: "The region of the flavor.", + MarkdownDescription: "The region of the flavor.", + }, + "cpu": schema.Int64Attribute{ + Required: true, + Description: "The cpu count of the instance.", + MarkdownDescription: "The cpu count of the instance.", + }, + "ram": schema.Int64Attribute{ + Required: true, + Description: "The memory of the instance in Gibibyte.", + MarkdownDescription: "The memory of the 
instance in Gibibyte.", + }, + "storage_class": schema.StringAttribute{ + Required: true, + Description: "The memory of the instance in Gibibyte.", + MarkdownDescription: "The memory of the instance in Gibibyte.", + }, + "node_type": schema.StringAttribute{ + Required: true, + Description: "defines the nodeType it can be either single or HA", + MarkdownDescription: "defines the nodeType it can be either single or HA", + }, + "flavor_id": schema.StringAttribute{ + Computed: true, + Description: "The id of the instance flavor.", + MarkdownDescription: "The id of the instance flavor.", + }, + "description": schema.StringAttribute{ + Computed: true, + Description: "The flavor description.", + MarkdownDescription: "The flavor description.", + }, + "id": schema.StringAttribute{ + Computed: true, + Description: "The id of the instance flavor.", + MarkdownDescription: "The id of the instance flavor.", + }, + "max_gb": schema.Int64Attribute{ + Computed: true, + Description: "maximum storage which can be ordered for the flavor in Gigabyte.", + MarkdownDescription: "maximum storage which can be ordered for the flavor in Gigabyte.", + }, + "min_gb": schema.Int64Attribute{ + Computed: true, + Description: "minimum storage which is required to order in Gigabyte.", + MarkdownDescription: "minimum storage which is required to order in Gigabyte.", + }, + "storage_classes": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "class": schema.StringAttribute{ + Computed: true, + }, + "max_io_per_sec": schema.Int64Attribute{ + Computed: true, + }, + "max_through_in_mb": schema.Int64Attribute{ + Computed: true, + }, + }, + CustomType: sqlserverflexbetaGen.StorageClassesType{ + ObjectType: types.ObjectType{ + AttrTypes: sqlserverflexbetaGen.StorageClassesValue{}.AttributeTypes(ctx), + }, + }, + }, + Computed: true, + Description: "maximum storage which can be ordered for the flavor in Gigabyte.", + MarkdownDescription: 
"maximum storage which can be ordered for the flavor in Gigabyte.", + }, + }, + //Attributes: map[string]schema.Attribute{ + // "project_id": schema.StringAttribute{ + // Required: true, + // Description: "The cpu count of the instance.", + // MarkdownDescription: "The cpu count of the instance.", + // }, + // "region": schema.StringAttribute{ + // Required: true, + // Description: "The flavor description.", + // MarkdownDescription: "The flavor description.", + // }, + // "cpu": schema.Int64Attribute{ + // Required: true, + // Description: "The cpu count of the instance.", + // MarkdownDescription: "The cpu count of the instance.", + // }, + // "ram": schema.Int64Attribute{ + // Required: true, + // Description: "The memory of the instance in Gibibyte.", + // MarkdownDescription: "The memory of the instance in Gibibyte.", + // }, + // "storage_class": schema.StringAttribute{ + // Required: true, + // Description: "The memory of the instance in Gibibyte.", + // MarkdownDescription: "The memory of the instance in Gibibyte.", + // }, + // "description": schema.StringAttribute{ + // Computed: true, + // Description: "The flavor description.", + // MarkdownDescription: "The flavor description.", + // }, + // "id": schema.StringAttribute{ + // Computed: true, + // Description: "The terraform id of the instance flavor.", + // MarkdownDescription: "The terraform id of the instance flavor.", + // }, + // "flavor_id": schema.StringAttribute{ + // Computed: true, + // Description: "The flavor id of the instance flavor.", + // MarkdownDescription: "The flavor id of the instance flavor.", + // }, + // "max_gb": schema.Int64Attribute{ + // Computed: true, + // Description: "maximum storage which can be ordered for the flavor in Gigabyte.", + // MarkdownDescription: "maximum storage which can be ordered for the flavor in Gigabyte.", + // }, + // "min_gb": schema.Int64Attribute{ + // Computed: true, + // Description: "minimum storage which is required to order in Gigabyte.", + // 
MarkdownDescription: "minimum storage which is required to order in Gigabyte.", + // }, + // "node_type": schema.StringAttribute{ + // Required: true, + // Description: "defines the nodeType it can be either single or replica", + // MarkdownDescription: "defines the nodeType it can be either single or replica", + // }, + // "storage_classes": schema.ListNestedAttribute{ + // Computed: true, + // NestedObject: schema.NestedAttributeObject{ + // Attributes: map[string]schema.Attribute{ + // "class": schema.StringAttribute{ + // Computed: true, + // }, + // "max_io_per_sec": schema.Int64Attribute{ + // Computed: true, + // }, + // "max_through_in_mb": schema.Int64Attribute{ + // Computed: true, + // }, + // }, + // CustomType: sqlserverflexalphaGen.StorageClassesType{ + // ObjectType: types.ObjectType{ + // AttrTypes: sqlserverflexalphaGen.StorageClassesValue{}.AttributeTypes(ctx), + // }, + // }, + // }, + // }, + // }, + } +} + +func (r *flavorDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var model FlavorModel + diags := req.Config.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + ctx = core.InitProviderContext(ctx) + + projectId := model.ProjectId.ValueString() + region := r.providerData.GetRegionWithOverride(model.Region) + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "region", region) + + flavors, err := getAllFlavors(ctx, r.client, projectId, region) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading flavors", fmt.Sprintf("getAllFlavors: %v", err)) + return + } + + var foundFlavors []sqlserverflexbetaPkg.ListFlavors + for _, flavor := range flavors { + if model.Cpu.ValueInt64() != *flavor.Cpu { + continue + } + if model.Memory.ValueInt64() != *flavor.Memory { + continue + } + if model.NodeType.ValueString() != *flavor.NodeType { + continue + } + for _, sc := range *flavor.StorageClasses { + if model.StorageClass.ValueString() != *sc.Class { + continue + } + foundFlavors = append(foundFlavors, flavor) + } + } + if len(foundFlavors) == 0 { + resp.Diagnostics.AddError("get flavor", "could not find requested flavor") + return + } + if len(foundFlavors) > 1 { + resp.Diagnostics.AddError("get flavor", "found too many matching flavors") + return + } + + f := foundFlavors[0] + model.Description = types.StringValue(*f.Description) + model.Id = utils.BuildInternalTerraformId(model.ProjectId.ValueString(), region, *f.Id) + model.FlavorId = types.StringValue(*f.Id) + model.MaxGb = types.Int64Value(*f.MaxGB) + model.MinGb = types.Int64Value(*f.MinGB) + + if f.StorageClasses == nil { + model.StorageClasses = types.ListNull(sqlserverflexbetaGen.StorageClassesType{ + ObjectType: basetypes.ObjectType{ + AttrTypes: sqlserverflexbetaGen.StorageClassesValue{}.AttributeTypes(ctx), + }, + }) + } else { + var scList []attr.Value + for _, sc := range *f.StorageClasses { + scList = append( + scList, + sqlserverflexbetaGen.NewStorageClassesValueMust( + sqlserverflexbetaGen.StorageClassesValue{}.AttributeTypes(ctx), + map[string]attr.Value{ + "class": 
types.StringValue(*sc.Class), + "max_io_per_sec": types.Int64Value(*sc.MaxIoPerSec), + "max_through_in_mb": types.Int64Value(*sc.MaxThroughInMb), + }, + ), + ) + } + storageClassesList := types.ListValueMust( + sqlserverflexbetaGen.StorageClassesType{ + ObjectType: basetypes.ObjectType{ + AttrTypes: sqlserverflexbetaGen.StorageClassesValue{}.AttributeTypes(ctx), + }, + }, + scList, + ) + model.StorageClasses = storageClassesList + } + + // Set refreshed state + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + tflog.Info(ctx, "SQL Server Flex flavors read") +} diff --git a/stackit/internal/services/sqlserverflexbeta/flavor/datasources_gen/flavor_data_source_gen.go b/stackit/internal/services/sqlserverflexbeta/flavor/datasources_gen/flavor_data_source_gen.go new file mode 100644 index 00000000..a766197e --- /dev/null +++ b/stackit/internal/services/sqlserverflexbeta/flavor/datasources_gen/flavor_data_source_gen.go @@ -0,0 +1,1909 @@ +// Code generated by terraform-plugin-framework-generator DO NOT EDIT. 
+ +package sqlserverflexbeta + +import ( + "context" + "fmt" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" + "strings" + + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func FlavorDataSourceSchema(ctx context.Context) schema.Schema { + return schema.Schema{ + Attributes: map[string]schema.Attribute{ + "flavors": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "cpu": schema.Int64Attribute{ + Computed: true, + Description: "The cpu count of the instance.", + MarkdownDescription: "The cpu count of the instance.", + }, + "description": schema.StringAttribute{ + Computed: true, + Description: "The flavor description.", + MarkdownDescription: "The flavor description.", + }, + "id": schema.StringAttribute{ + Computed: true, + Description: "The id of the instance flavor.", + MarkdownDescription: "The id of the instance flavor.", + }, + "max_gb": schema.Int64Attribute{ + Computed: true, + Description: "maximum storage which can be ordered for the flavor in Gigabyte.", + MarkdownDescription: "maximum storage which can be ordered for the flavor in Gigabyte.", + }, + "memory": schema.Int64Attribute{ + Computed: true, + Description: "The memory of the instance in Gibibyte.", + MarkdownDescription: "The memory of the instance in Gibibyte.", + }, + "min_gb": schema.Int64Attribute{ + Computed: true, + Description: "minimum storage which is required to order in Gigabyte.", + MarkdownDescription: "minimum storage which is required to order in Gigabyte.", + }, + "node_type": schema.StringAttribute{ 
+ Computed: true, + Description: "defines the nodeType it can be either single or HA", + MarkdownDescription: "defines the nodeType it can be either single or HA", + }, + "storage_classes": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "class": schema.StringAttribute{ + Computed: true, + }, + "max_io_per_sec": schema.Int64Attribute{ + Computed: true, + }, + "max_through_in_mb": schema.Int64Attribute{ + Computed: true, + }, + }, + CustomType: StorageClassesType{ + ObjectType: types.ObjectType{ + AttrTypes: StorageClassesValue{}.AttributeTypes(ctx), + }, + }, + }, + Computed: true, + Description: "maximum storage which can be ordered for the flavor in Gigabyte.", + MarkdownDescription: "maximum storage which can be ordered for the flavor in Gigabyte.", + }, + }, + CustomType: FlavorsType{ + ObjectType: types.ObjectType{ + AttrTypes: FlavorsValue{}.AttributeTypes(ctx), + }, + }, + }, + Computed: true, + Description: "List of flavors available for the project.", + MarkdownDescription: "List of flavors available for the project.", + }, + "page": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Number of the page of items list to be returned.", + MarkdownDescription: "Number of the page of items list to be returned.", + }, + "pagination": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "page": schema.Int64Attribute{ + Computed: true, + }, + "size": schema.Int64Attribute{ + Computed: true, + }, + "sort": schema.StringAttribute{ + Computed: true, + }, + "total_pages": schema.Int64Attribute{ + Computed: true, + }, + "total_rows": schema.Int64Attribute{ + Computed: true, + }, + }, + CustomType: PaginationType{ + ObjectType: types.ObjectType{ + AttrTypes: PaginationValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + }, + "project_id": schema.StringAttribute{ + Required: true, + Description: "The STACKIT project ID.", + MarkdownDescription: "The 
STACKIT project ID.", + }, + "region": schema.StringAttribute{ + Required: true, + Description: "The region which should be addressed", + MarkdownDescription: "The region which should be addressed", + Validators: []validator.String{ + stringvalidator.OneOf( + "eu01", + ), + }, + }, + "size": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Number of items to be returned on each page.", + MarkdownDescription: "Number of items to be returned on each page.", + }, + "sort": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Sorting of the flavors to be returned on each page.", + MarkdownDescription: "Sorting of the flavors to be returned on each page.", + Validators: []validator.String{ + stringvalidator.OneOf( + "index.desc", + "index.asc", + "cpu.desc", + "cpu.asc", + "flavor_description.asc", + "flavor_description.desc", + "id.desc", + "id.asc", + "size_max.desc", + "size_max.asc", + "ram.desc", + "ram.asc", + "size_min.desc", + "size_min.asc", + "storage_class.asc", + "storage_class.desc", + "node_type.asc", + "node_type.desc", + ), + }, + }, + }, + } +} + +type FlavorModel struct { + Flavors types.List `tfsdk:"flavors"` + Page types.Int64 `tfsdk:"page"` + Pagination PaginationValue `tfsdk:"pagination"` + ProjectId types.String `tfsdk:"project_id"` + Region types.String `tfsdk:"region"` + Size types.Int64 `tfsdk:"size"` + Sort types.String `tfsdk:"sort"` +} + +var _ basetypes.ObjectTypable = FlavorsType{} + +type FlavorsType struct { + basetypes.ObjectType +} + +func (t FlavorsType) Equal(o attr.Type) bool { + other, ok := o.(FlavorsType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t FlavorsType) String() string { + return "FlavorsType" +} + +func (t FlavorsType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + cpuAttribute, ok := 
attributes["cpu"] + + if !ok { + diags.AddError( + "Attribute Missing", + `cpu is missing from object`) + + return nil, diags + } + + cpuVal, ok := cpuAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`cpu expected to be basetypes.Int64Value, was: %T`, cpuAttribute)) + } + + descriptionAttribute, ok := attributes["description"] + + if !ok { + diags.AddError( + "Attribute Missing", + `description is missing from object`) + + return nil, diags + } + + descriptionVal, ok := descriptionAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`description expected to be basetypes.StringValue, was: %T`, descriptionAttribute)) + } + + idAttribute, ok := attributes["id"] + + if !ok { + diags.AddError( + "Attribute Missing", + `id is missing from object`) + + return nil, diags + } + + idVal, ok := idAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`id expected to be basetypes.StringValue, was: %T`, idAttribute)) + } + + maxGbAttribute, ok := attributes["max_gb"] + + if !ok { + diags.AddError( + "Attribute Missing", + `max_gb is missing from object`) + + return nil, diags + } + + maxGbVal, ok := maxGbAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`max_gb expected to be basetypes.Int64Value, was: %T`, maxGbAttribute)) + } + + memoryAttribute, ok := attributes["memory"] + + if !ok { + diags.AddError( + "Attribute Missing", + `memory is missing from object`) + + return nil, diags + } + + memoryVal, ok := memoryAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`memory expected to be basetypes.Int64Value, was: %T`, memoryAttribute)) + } + + minGbAttribute, ok := attributes["min_gb"] + + if !ok { + diags.AddError( + "Attribute Missing", + `min_gb is missing from object`) + + return nil, diags + } + + minGbVal, ok := 
minGbAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`min_gb expected to be basetypes.Int64Value, was: %T`, minGbAttribute)) + } + + nodeTypeAttribute, ok := attributes["node_type"] + + if !ok { + diags.AddError( + "Attribute Missing", + `node_type is missing from object`) + + return nil, diags + } + + nodeTypeVal, ok := nodeTypeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`node_type expected to be basetypes.StringValue, was: %T`, nodeTypeAttribute)) + } + + storageClassesAttribute, ok := attributes["storage_classes"] + + if !ok { + diags.AddError( + "Attribute Missing", + `storage_classes is missing from object`) + + return nil, diags + } + + storageClassesVal, ok := storageClassesAttribute.(basetypes.ListValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`storage_classes expected to be basetypes.ListValue, was: %T`, storageClassesAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return FlavorsValue{ + Cpu: cpuVal, + Description: descriptionVal, + Id: idVal, + MaxGb: maxGbVal, + Memory: memoryVal, + MinGb: minGbVal, + NodeType: nodeTypeVal, + StorageClasses: storageClassesVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewFlavorsValueNull() FlavorsValue { + return FlavorsValue{ + state: attr.ValueStateNull, + } +} + +func NewFlavorsValueUnknown() FlavorsValue { + return FlavorsValue{ + state: attr.ValueStateUnknown, + } +} + +func NewFlavorsValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (FlavorsValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing FlavorsValue Attribute Value", + "While creating a FlavorsValue value, a 
missing attribute value was detected. "+ + "A FlavorsValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("FlavorsValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid FlavorsValue Attribute Type", + "While creating a FlavorsValue value, an invalid attribute value was detected. "+ + "A FlavorsValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("FlavorsValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("FlavorsValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra FlavorsValue Attribute Value", + "While creating a FlavorsValue value, an extra attribute value was detected. "+ + "A FlavorsValue must not contain values beyond the expected attribute types. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra FlavorsValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewFlavorsValueUnknown(), diags + } + + cpuAttribute, ok := attributes["cpu"] + + if !ok { + diags.AddError( + "Attribute Missing", + `cpu is missing from object`) + + return NewFlavorsValueUnknown(), diags + } + + cpuVal, ok := cpuAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`cpu expected to be basetypes.Int64Value, was: %T`, cpuAttribute)) + } + + descriptionAttribute, ok := attributes["description"] + + if !ok { + diags.AddError( + "Attribute Missing", + `description is missing from object`) + + return NewFlavorsValueUnknown(), diags + } + + descriptionVal, ok := descriptionAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`description expected to be basetypes.StringValue, was: %T`, descriptionAttribute)) + } + + idAttribute, ok := attributes["id"] + + if !ok { + diags.AddError( + "Attribute Missing", + `id is missing from object`) + + return NewFlavorsValueUnknown(), diags + } + + idVal, ok := idAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`id expected to be basetypes.StringValue, was: %T`, idAttribute)) + } + + maxGbAttribute, ok := attributes["max_gb"] + + if !ok { + diags.AddError( + "Attribute Missing", + `max_gb is missing from object`) + + return NewFlavorsValueUnknown(), diags + } + + maxGbVal, ok := maxGbAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`max_gb expected to be basetypes.Int64Value, was: %T`, maxGbAttribute)) + } + + memoryAttribute, ok := attributes["memory"] + + if !ok { + diags.AddError( + "Attribute Missing", + `memory is missing from object`) + + return NewFlavorsValueUnknown(), diags + } + + 
memoryVal, ok := memoryAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`memory expected to be basetypes.Int64Value, was: %T`, memoryAttribute)) + } + + minGbAttribute, ok := attributes["min_gb"] + + if !ok { + diags.AddError( + "Attribute Missing", + `min_gb is missing from object`) + + return NewFlavorsValueUnknown(), diags + } + + minGbVal, ok := minGbAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`min_gb expected to be basetypes.Int64Value, was: %T`, minGbAttribute)) + } + + nodeTypeAttribute, ok := attributes["node_type"] + + if !ok { + diags.AddError( + "Attribute Missing", + `node_type is missing from object`) + + return NewFlavorsValueUnknown(), diags + } + + nodeTypeVal, ok := nodeTypeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`node_type expected to be basetypes.StringValue, was: %T`, nodeTypeAttribute)) + } + + storageClassesAttribute, ok := attributes["storage_classes"] + + if !ok { + diags.AddError( + "Attribute Missing", + `storage_classes is missing from object`) + + return NewFlavorsValueUnknown(), diags + } + + storageClassesVal, ok := storageClassesAttribute.(basetypes.ListValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`storage_classes expected to be basetypes.ListValue, was: %T`, storageClassesAttribute)) + } + + if diags.HasError() { + return NewFlavorsValueUnknown(), diags + } + + return FlavorsValue{ + Cpu: cpuVal, + Description: descriptionVal, + Id: idVal, + MaxGb: maxGbVal, + Memory: memoryVal, + MinGb: minGbVal, + NodeType: nodeTypeVal, + StorageClasses: storageClassesVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewFlavorsValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) FlavorsValue { + object, diags := NewFlavorsValue(attributeTypes, attributes) + + if diags.HasError() { + // This could 
potentially be added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewFlavorsValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t FlavorsType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewFlavorsValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewFlavorsValueUnknown(), nil + } + + if in.IsNull() { + return NewFlavorsValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewFlavorsValueMust(FlavorsValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t FlavorsType) ValueType(ctx context.Context) attr.Value { + return FlavorsValue{} +} + +var _ basetypes.ObjectValuable = FlavorsValue{} + +type FlavorsValue struct { + Cpu basetypes.Int64Value `tfsdk:"cpu"` + Description basetypes.StringValue `tfsdk:"description"` + Id basetypes.StringValue `tfsdk:"id"` + MaxGb basetypes.Int64Value `tfsdk:"max_gb"` + Memory basetypes.Int64Value `tfsdk:"memory"` + MinGb basetypes.Int64Value `tfsdk:"min_gb"` + NodeType basetypes.StringValue `tfsdk:"node_type"` + StorageClasses basetypes.ListValue `tfsdk:"storage_classes"` + state attr.ValueState +} + +func (v FlavorsValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 8) + + var val tftypes.Value + var err error + + attrTypes["cpu"] = 
basetypes.Int64Type{}.TerraformType(ctx) + attrTypes["description"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["id"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["max_gb"] = basetypes.Int64Type{}.TerraformType(ctx) + attrTypes["memory"] = basetypes.Int64Type{}.TerraformType(ctx) + attrTypes["min_gb"] = basetypes.Int64Type{}.TerraformType(ctx) + attrTypes["node_type"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["storage_classes"] = basetypes.ListType{ + ElemType: StorageClassesValue{}.Type(ctx), + }.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 8) + + val, err = v.Cpu.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["cpu"] = val + + val, err = v.Description.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["description"] = val + + val, err = v.Id.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["id"] = val + + val, err = v.MaxGb.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["max_gb"] = val + + val, err = v.Memory.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["memory"] = val + + val, err = v.MinGb.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["min_gb"] = val + + val, err = v.NodeType.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["node_type"] = val + + val, err = v.StorageClasses.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + 
vals["storage_classes"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v FlavorsValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v FlavorsValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v FlavorsValue) String() string { + return "FlavorsValue" +} + +func (v FlavorsValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + storageClasses := types.ListValueMust( + StorageClassesType{ + basetypes.ObjectType{ + AttrTypes: StorageClassesValue{}.AttributeTypes(ctx), + }, + }, + v.StorageClasses.Elements(), + ) + + if v.StorageClasses.IsNull() { + storageClasses = types.ListNull( + StorageClassesType{ + basetypes.ObjectType{ + AttrTypes: StorageClassesValue{}.AttributeTypes(ctx), + }, + }, + ) + } + + if v.StorageClasses.IsUnknown() { + storageClasses = types.ListUnknown( + StorageClassesType{ + basetypes.ObjectType{ + AttrTypes: StorageClassesValue{}.AttributeTypes(ctx), + }, + }, + ) + } + + attributeTypes := map[string]attr.Type{ + "cpu": basetypes.Int64Type{}, + "description": basetypes.StringType{}, + "id": basetypes.StringType{}, + "max_gb": basetypes.Int64Type{}, + "memory": basetypes.Int64Type{}, + "min_gb": basetypes.Int64Type{}, + "node_type": basetypes.StringType{}, + "storage_classes": basetypes.ListType{ + ElemType: StorageClassesValue{}.Type(ctx), + }, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags 
:= types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "cpu": v.Cpu, + "description": v.Description, + "id": v.Id, + "max_gb": v.MaxGb, + "memory": v.Memory, + "min_gb": v.MinGb, + "node_type": v.NodeType, + "storage_classes": storageClasses, + }) + + return objVal, diags +} + +func (v FlavorsValue) Equal(o attr.Value) bool { + other, ok := o.(FlavorsValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.Cpu.Equal(other.Cpu) { + return false + } + + if !v.Description.Equal(other.Description) { + return false + } + + if !v.Id.Equal(other.Id) { + return false + } + + if !v.MaxGb.Equal(other.MaxGb) { + return false + } + + if !v.Memory.Equal(other.Memory) { + return false + } + + if !v.MinGb.Equal(other.MinGb) { + return false + } + + if !v.NodeType.Equal(other.NodeType) { + return false + } + + if !v.StorageClasses.Equal(other.StorageClasses) { + return false + } + + return true +} + +func (v FlavorsValue) Type(ctx context.Context) attr.Type { + return FlavorsType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v FlavorsValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "cpu": basetypes.Int64Type{}, + "description": basetypes.StringType{}, + "id": basetypes.StringType{}, + "max_gb": basetypes.Int64Type{}, + "memory": basetypes.Int64Type{}, + "min_gb": basetypes.Int64Type{}, + "node_type": basetypes.StringType{}, + "storage_classes": basetypes.ListType{ + ElemType: StorageClassesValue{}.Type(ctx), + }, + } +} + +var _ basetypes.ObjectTypable = StorageClassesType{} + +type StorageClassesType struct { + basetypes.ObjectType +} + +func (t StorageClassesType) Equal(o attr.Type) bool { + other, ok := o.(StorageClassesType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t StorageClassesType) String() string { + return 
"StorageClassesType" +} + +func (t StorageClassesType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + classAttribute, ok := attributes["class"] + + if !ok { + diags.AddError( + "Attribute Missing", + `class is missing from object`) + + return nil, diags + } + + classVal, ok := classAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`class expected to be basetypes.StringValue, was: %T`, classAttribute)) + } + + maxIoPerSecAttribute, ok := attributes["max_io_per_sec"] + + if !ok { + diags.AddError( + "Attribute Missing", + `max_io_per_sec is missing from object`) + + return nil, diags + } + + maxIoPerSecVal, ok := maxIoPerSecAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`max_io_per_sec expected to be basetypes.Int64Value, was: %T`, maxIoPerSecAttribute)) + } + + maxThroughInMbAttribute, ok := attributes["max_through_in_mb"] + + if !ok { + diags.AddError( + "Attribute Missing", + `max_through_in_mb is missing from object`) + + return nil, diags + } + + maxThroughInMbVal, ok := maxThroughInMbAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`max_through_in_mb expected to be basetypes.Int64Value, was: %T`, maxThroughInMbAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return StorageClassesValue{ + Class: classVal, + MaxIoPerSec: maxIoPerSecVal, + MaxThroughInMb: maxThroughInMbVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewStorageClassesValueNull() StorageClassesValue { + return StorageClassesValue{ + state: attr.ValueStateNull, + } +} + +func NewStorageClassesValueUnknown() StorageClassesValue { + return StorageClassesValue{ + state: attr.ValueStateUnknown, + } +} + +func NewStorageClassesValue(attributeTypes map[string]attr.Type, attributes 
map[string]attr.Value) (StorageClassesValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing StorageClassesValue Attribute Value", + "While creating a StorageClassesValue value, a missing attribute value was detected. "+ + "A StorageClassesValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("StorageClassesValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid StorageClassesValue Attribute Type", + "While creating a StorageClassesValue value, an invalid attribute value was detected. "+ + "A StorageClassesValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("StorageClassesValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("StorageClassesValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra StorageClassesValue Attribute Value", + "While creating a StorageClassesValue value, an extra attribute value was detected. "+ + "A StorageClassesValue must not contain values beyond the expected attribute types. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra StorageClassesValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewStorageClassesValueUnknown(), diags + } + + classAttribute, ok := attributes["class"] + + if !ok { + diags.AddError( + "Attribute Missing", + `class is missing from object`) + + return NewStorageClassesValueUnknown(), diags + } + + classVal, ok := classAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`class expected to be basetypes.StringValue, was: %T`, classAttribute)) + } + + maxIoPerSecAttribute, ok := attributes["max_io_per_sec"] + + if !ok { + diags.AddError( + "Attribute Missing", + `max_io_per_sec is missing from object`) + + return NewStorageClassesValueUnknown(), diags + } + + maxIoPerSecVal, ok := maxIoPerSecAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`max_io_per_sec expected to be basetypes.Int64Value, was: %T`, maxIoPerSecAttribute)) + } + + maxThroughInMbAttribute, ok := attributes["max_through_in_mb"] + + if !ok { + diags.AddError( + "Attribute Missing", + `max_through_in_mb is missing from object`) + + return NewStorageClassesValueUnknown(), diags + } + + maxThroughInMbVal, ok := maxThroughInMbAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`max_through_in_mb expected to be basetypes.Int64Value, was: %T`, maxThroughInMbAttribute)) + } + + if diags.HasError() { + return NewStorageClassesValueUnknown(), diags + } + + return StorageClassesValue{ + Class: classVal, + MaxIoPerSec: maxIoPerSecVal, + MaxThroughInMb: maxThroughInMbVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewStorageClassesValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) StorageClassesValue { + object, diags := NewStorageClassesValue(attributeTypes, 
attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewStorageClassesValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t StorageClassesType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewStorageClassesValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewStorageClassesValueUnknown(), nil + } + + if in.IsNull() { + return NewStorageClassesValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewStorageClassesValueMust(StorageClassesValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t StorageClassesType) ValueType(ctx context.Context) attr.Value { + return StorageClassesValue{} +} + +var _ basetypes.ObjectValuable = StorageClassesValue{} + +type StorageClassesValue struct { + Class basetypes.StringValue `tfsdk:"class"` + MaxIoPerSec basetypes.Int64Value `tfsdk:"max_io_per_sec"` + MaxThroughInMb basetypes.Int64Value `tfsdk:"max_through_in_mb"` + state attr.ValueState +} + +func (v StorageClassesValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 3) + + var val tftypes.Value + var err error + + attrTypes["class"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["max_io_per_sec"] = 
basetypes.Int64Type{}.TerraformType(ctx) + attrTypes["max_through_in_mb"] = basetypes.Int64Type{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 3) + + val, err = v.Class.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["class"] = val + + val, err = v.MaxIoPerSec.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["max_io_per_sec"] = val + + val, err = v.MaxThroughInMb.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["max_through_in_mb"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v StorageClassesValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v StorageClassesValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v StorageClassesValue) String() string { + return "StorageClassesValue" +} + +func (v StorageClassesValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + attributeTypes := map[string]attr.Type{ + "class": basetypes.StringType{}, + "max_io_per_sec": basetypes.Int64Type{}, + "max_through_in_mb": basetypes.Int64Type{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := 
types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "class": v.Class, + "max_io_per_sec": v.MaxIoPerSec, + "max_through_in_mb": v.MaxThroughInMb, + }) + + return objVal, diags +} + +func (v StorageClassesValue) Equal(o attr.Value) bool { + other, ok := o.(StorageClassesValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.Class.Equal(other.Class) { + return false + } + + if !v.MaxIoPerSec.Equal(other.MaxIoPerSec) { + return false + } + + if !v.MaxThroughInMb.Equal(other.MaxThroughInMb) { + return false + } + + return true +} + +func (v StorageClassesValue) Type(ctx context.Context) attr.Type { + return StorageClassesType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v StorageClassesValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "class": basetypes.StringType{}, + "max_io_per_sec": basetypes.Int64Type{}, + "max_through_in_mb": basetypes.Int64Type{}, + } +} + +var _ basetypes.ObjectTypable = PaginationType{} + +type PaginationType struct { + basetypes.ObjectType +} + +func (t PaginationType) Equal(o attr.Type) bool { + other, ok := o.(PaginationType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t PaginationType) String() string { + return "PaginationType" +} + +func (t PaginationType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + pageAttribute, ok := attributes["page"] + + if !ok { + diags.AddError( + "Attribute Missing", + `page is missing from object`) + + return nil, diags + } + + pageVal, ok := pageAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`page expected to be basetypes.Int64Value, was: %T`, pageAttribute)) + } + + 
sizeAttribute, ok := attributes["size"] + + if !ok { + diags.AddError( + "Attribute Missing", + `size is missing from object`) + + return nil, diags + } + + sizeVal, ok := sizeAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`size expected to be basetypes.Int64Value, was: %T`, sizeAttribute)) + } + + sortAttribute, ok := attributes["sort"] + + if !ok { + diags.AddError( + "Attribute Missing", + `sort is missing from object`) + + return nil, diags + } + + sortVal, ok := sortAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`sort expected to be basetypes.StringValue, was: %T`, sortAttribute)) + } + + totalPagesAttribute, ok := attributes["total_pages"] + + if !ok { + diags.AddError( + "Attribute Missing", + `total_pages is missing from object`) + + return nil, diags + } + + totalPagesVal, ok := totalPagesAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`total_pages expected to be basetypes.Int64Value, was: %T`, totalPagesAttribute)) + } + + totalRowsAttribute, ok := attributes["total_rows"] + + if !ok { + diags.AddError( + "Attribute Missing", + `total_rows is missing from object`) + + return nil, diags + } + + totalRowsVal, ok := totalRowsAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`total_rows expected to be basetypes.Int64Value, was: %T`, totalRowsAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return PaginationValue{ + Page: pageVal, + Size: sizeVal, + Sort: sortVal, + TotalPages: totalPagesVal, + TotalRows: totalRowsVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewPaginationValueNull() PaginationValue { + return PaginationValue{ + state: attr.ValueStateNull, + } +} + +func NewPaginationValueUnknown() PaginationValue { + return PaginationValue{ + state: attr.ValueStateUnknown, + } +} + +func 
NewPaginationValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (PaginationValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing PaginationValue Attribute Value", + "While creating a PaginationValue value, a missing attribute value was detected. "+ + "A PaginationValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("PaginationValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid PaginationValue Attribute Type", + "While creating a PaginationValue value, an invalid attribute value was detected. "+ + "A PaginationValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("PaginationValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("PaginationValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra PaginationValue Attribute Value", + "While creating a PaginationValue value, an extra attribute value was detected. "+ + "A PaginationValue must not contain values beyond the expected attribute types. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra PaginationValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewPaginationValueUnknown(), diags + } + + pageAttribute, ok := attributes["page"] + + if !ok { + diags.AddError( + "Attribute Missing", + `page is missing from object`) + + return NewPaginationValueUnknown(), diags + } + + pageVal, ok := pageAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`page expected to be basetypes.Int64Value, was: %T`, pageAttribute)) + } + + sizeAttribute, ok := attributes["size"] + + if !ok { + diags.AddError( + "Attribute Missing", + `size is missing from object`) + + return NewPaginationValueUnknown(), diags + } + + sizeVal, ok := sizeAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`size expected to be basetypes.Int64Value, was: %T`, sizeAttribute)) + } + + sortAttribute, ok := attributes["sort"] + + if !ok { + diags.AddError( + "Attribute Missing", + `sort is missing from object`) + + return NewPaginationValueUnknown(), diags + } + + sortVal, ok := sortAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`sort expected to be basetypes.StringValue, was: %T`, sortAttribute)) + } + + totalPagesAttribute, ok := attributes["total_pages"] + + if !ok { + diags.AddError( + "Attribute Missing", + `total_pages is missing from object`) + + return NewPaginationValueUnknown(), diags + } + + totalPagesVal, ok := totalPagesAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`total_pages expected to be basetypes.Int64Value, was: %T`, totalPagesAttribute)) + } + + totalRowsAttribute, ok := attributes["total_rows"] + + if !ok { + diags.AddError( + "Attribute Missing", + `total_rows is missing from object`) + + return 
NewPaginationValueUnknown(), diags + } + + totalRowsVal, ok := totalRowsAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`total_rows expected to be basetypes.Int64Value, was: %T`, totalRowsAttribute)) + } + + if diags.HasError() { + return NewPaginationValueUnknown(), diags + } + + return PaginationValue{ + Page: pageVal, + Size: sizeVal, + Sort: sortVal, + TotalPages: totalPagesVal, + TotalRows: totalRowsVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewPaginationValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) PaginationValue { + object, diags := NewPaginationValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewPaginationValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t PaginationType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewPaginationValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewPaginationValueUnknown(), nil + } + + if in.IsNull() { + return NewPaginationValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewPaginationValueMust(PaginationValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t PaginationType) 
ValueType(ctx context.Context) attr.Value { + return PaginationValue{} +} + +var _ basetypes.ObjectValuable = PaginationValue{} + +type PaginationValue struct { + Page basetypes.Int64Value `tfsdk:"page"` + Size basetypes.Int64Value `tfsdk:"size"` + Sort basetypes.StringValue `tfsdk:"sort"` + TotalPages basetypes.Int64Value `tfsdk:"total_pages"` + TotalRows basetypes.Int64Value `tfsdk:"total_rows"` + state attr.ValueState +} + +func (v PaginationValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 5) + + var val tftypes.Value + var err error + + attrTypes["page"] = basetypes.Int64Type{}.TerraformType(ctx) + attrTypes["size"] = basetypes.Int64Type{}.TerraformType(ctx) + attrTypes["sort"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["total_pages"] = basetypes.Int64Type{}.TerraformType(ctx) + attrTypes["total_rows"] = basetypes.Int64Type{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 5) + + val, err = v.Page.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["page"] = val + + val, err = v.Size.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["size"] = val + + val, err = v.Sort.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["sort"] = val + + val, err = v.TotalPages.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["total_pages"] = val + + val, err = v.TotalRows.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["total_rows"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return 
tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v PaginationValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v PaginationValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v PaginationValue) String() string { + return "PaginationValue" +} + +func (v PaginationValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + attributeTypes := map[string]attr.Type{ + "page": basetypes.Int64Type{}, + "size": basetypes.Int64Type{}, + "sort": basetypes.StringType{}, + "total_pages": basetypes.Int64Type{}, + "total_rows": basetypes.Int64Type{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "page": v.Page, + "size": v.Size, + "sort": v.Sort, + "total_pages": v.TotalPages, + "total_rows": v.TotalRows, + }) + + return objVal, diags +} + +func (v PaginationValue) Equal(o attr.Value) bool { + other, ok := o.(PaginationValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.Page.Equal(other.Page) { + return false + } + + if !v.Size.Equal(other.Size) { + return false + } + + if !v.Sort.Equal(other.Sort) { + return false + } + + if !v.TotalPages.Equal(other.TotalPages) { + return false + } + + if !v.TotalRows.Equal(other.TotalRows) { + return false + } + + return true +} + +func (v PaginationValue) Type(ctx context.Context) attr.Type { + return 
PaginationType{
+		basetypes.ObjectType{
+			AttrTypes: v.AttributeTypes(ctx),
+		},
+	}
+}
+
+func (v PaginationValue) AttributeTypes(ctx context.Context) map[string]attr.Type {
+	return map[string]attr.Type{
+		"page":        basetypes.Int64Type{},
+		"size":        basetypes.Int64Type{},
+		"sort":        basetypes.StringType{},
+		"total_pages": basetypes.Int64Type{},
+		"total_rows":  basetypes.Int64Type{},
+	}
+}
diff --git a/stackit/internal/services/sqlserverflexbeta/flavor/functions.go b/stackit/internal/services/sqlserverflexbeta/flavor/functions.go
new file mode 100644
index 00000000..8c06da73
--- /dev/null
+++ b/stackit/internal/services/sqlserverflexbeta/flavor/functions.go
@@ -0,0 +1,65 @@
+package sqlserverFlexBetaFlavor
+
+import (
+	"context"
+	"fmt"
+
+	"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexbeta"
+)
+
+type flavorsClientReader interface {
+	GetFlavorsRequest(
+		ctx context.Context,
+		projectId, region string,
+	) sqlserverflexbeta.ApiGetFlavorsRequestRequest
+}
+
+func getAllFlavors(ctx context.Context, client flavorsClientReader, projectId, region string) (
+	[]sqlserverflexbeta.ListFlavors,
+	error,
+) {
+	getAllFilter := func(_ sqlserverflexbeta.ListFlavors) bool { return true }
+	flavorList, err := getFlavorsByFilter(ctx, client, projectId, region, getAllFilter)
+	if err != nil {
+		return nil, err
+	}
+	return flavorList, nil
+}
+
+// getFlavorsByFilter is a helper function to retrieve flavors using a filter function.
+// Hint: The API does not have a GetFlavors endpoint, only ListFlavors +func getFlavorsByFilter( + ctx context.Context, + client flavorsClientReader, + projectId, region string, + filter func(db sqlserverflexbeta.ListFlavors) bool, +) ([]sqlserverflexbeta.ListFlavors, error) { + if projectId == "" || region == "" { + return nil, fmt.Errorf("listing sqlserverflexbeta flavors: projectId and region are required") + } + + const pageSize = 25 + + var result = make([]sqlserverflexbeta.ListFlavors, 0) + + for page := int64(1); ; page++ { + res, err := client.GetFlavorsRequest(ctx, projectId, region). + Page(page).Size(pageSize).Sort(sqlserverflexbeta.FLAVORSORT_INDEX_ASC).Execute() + if err != nil { + return nil, fmt.Errorf("requesting flavors list (page %d): %w", page, err) + } + + // If the API returns no flavors, we have reached the end of the list. + if res.Flavors == nil || len(*res.Flavors) == 0 { + break + } + + for _, flavor := range *res.Flavors { + if filter(flavor) { + result = append(result, flavor) + } + } + } + + return result, nil +} diff --git a/stackit/internal/services/sqlserverflexbeta/flavor/functions_test.go b/stackit/internal/services/sqlserverflexbeta/flavor/functions_test.go new file mode 100644 index 00000000..fb666253 --- /dev/null +++ b/stackit/internal/services/sqlserverflexbeta/flavor/functions_test.go @@ -0,0 +1,135 @@ +package sqlserverFlexBetaFlavor + +import ( + "context" + "testing" + + "github.com/stackitcloud/stackit-sdk-go/core/utils" + + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexbeta" +) + +type mockRequest struct { + executeFunc func() (*sqlserverflexbeta.GetFlavorsResponse, error) +} + +func (m *mockRequest) Page(_ int64) sqlserverflexbeta.ApiGetFlavorsRequestRequest { return m } +func (m *mockRequest) Size(_ int64) sqlserverflexbeta.ApiGetFlavorsRequestRequest { return m } +func (m *mockRequest) Sort(_ sqlserverflexbeta.FlavorSort) 
sqlserverflexbeta.ApiGetFlavorsRequestRequest { + return m +} +func (m *mockRequest) Execute() (*sqlserverflexbeta.GetFlavorsResponse, error) { + return m.executeFunc() +} + +type mockFlavorsClient struct { + executeRequest func() sqlserverflexbeta.ApiGetFlavorsRequestRequest +} + +func (m *mockFlavorsClient) GetFlavorsRequest(_ context.Context, _, _ string) sqlserverflexbeta.ApiGetFlavorsRequestRequest { + return m.executeRequest() +} + +var mockResp = func(page int64) (*sqlserverflexbeta.GetFlavorsResponse, error) { + if page == 1 { + return &sqlserverflexbeta.GetFlavorsResponse{ + Flavors: &[]sqlserverflexbeta.ListFlavors{ + {Id: utils.Ptr("flavor-1"), Description: utils.Ptr("first")}, + {Id: utils.Ptr("flavor-2"), Description: utils.Ptr("second")}, + }, + }, nil + } + if page == 2 { + return &sqlserverflexbeta.GetFlavorsResponse{ + Flavors: &[]sqlserverflexbeta.ListFlavors{ + {Id: utils.Ptr("flavor-3"), Description: utils.Ptr("three")}, + }, + }, nil + } + + return &sqlserverflexbeta.GetFlavorsResponse{ + Flavors: &[]sqlserverflexbeta.ListFlavors{}, + }, nil +} + +func TestGetFlavorsByFilter(t *testing.T) { + tests := []struct { + description string + projectId string + region string + mockErr error + filter func(sqlserverflexbeta.ListFlavors) bool + wantCount int + wantErr bool + }{ + { + description: "Success - Get all flavors (2 pages)", + projectId: "pid", region: "reg", + filter: func(_ sqlserverflexbeta.ListFlavors) bool { return true }, + wantCount: 3, + wantErr: false, + }, + { + description: "Success - Filter flavors by description", + projectId: "pid", region: "reg", + filter: func(f sqlserverflexbeta.ListFlavors) bool { return *f.Description == "first" }, + wantCount: 1, + wantErr: false, + }, + { + description: "Error - Missing parameters", + projectId: "", region: "reg", + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run( + tt.description, func(t *testing.T) { + var currentPage int64 + client := &mockFlavorsClient{ + executeRequest: 
func() sqlserverflexbeta.ApiGetFlavorsRequestRequest { + return &mockRequest{ + executeFunc: func() (*sqlserverflexbeta.GetFlavorsResponse, error) { + currentPage++ + return mockResp(currentPage) + }, + } + }, + } + actual, err := getFlavorsByFilter(context.Background(), client, tt.projectId, tt.region, tt.filter) + + if (err != nil) != tt.wantErr { + t.Errorf("getFlavorsByFilter() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if !tt.wantErr && len(actual) != tt.wantCount { + t.Errorf("getFlavorsByFilter() got %d flavors, want %d", len(actual), tt.wantCount) + } + }, + ) + } +} + +func TestGetAllFlavors(t *testing.T) { + var currentPage int64 + client := &mockFlavorsClient{ + executeRequest: func() sqlserverflexbeta.ApiGetFlavorsRequestRequest { + return &mockRequest{ + executeFunc: func() (*sqlserverflexbeta.GetFlavorsResponse, error) { + currentPage++ + return mockResp(currentPage) + }, + } + }, + } + + res, err := getAllFlavors(context.Background(), client, "pid", "reg") + if err != nil { + t.Errorf("getAllFlavors() unexpected error: %v", err) + } + if len(res) != 3 { + t.Errorf("getAllFlavors() expected 3 flavor, got %d", len(res)) + } +} diff --git a/stackit/internal/services/sqlserverflexbeta/flavors/datasource.go b/stackit/internal/services/sqlserverflexbeta/flavors/datasource.go new file mode 100644 index 00000000..b6be1dd4 --- /dev/null +++ b/stackit/internal/services/sqlserverflexbeta/flavors/datasource.go @@ -0,0 +1,156 @@ +package sqlserverflexbeta + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/stackit-sdk-go/core/config" + + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/conversion" + 
"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core" + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/utils" + + sqlserverflexbetaPkg "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexbeta" + + sqlserverflexbetaGen "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexbeta/flavors/datasources_gen" +) + +var _ datasource.DataSource = (*flavorsDataSource)(nil) + +const errorPrefix = "[Sqlserverflexbeta - Flavors]" + +func NewFlavorsDataSource() datasource.DataSource { + return &flavorsDataSource{} +} + +type dataSourceModel struct { + sqlserverflexbetaGen.FlavorsModel + TerraformId types.String `tfsdk:"id"` +} + +type flavorsDataSource struct { + client *sqlserverflexbetaPkg.APIClient + providerData core.ProviderData +} + +func (d *flavorsDataSource) Metadata( + _ context.Context, + req datasource.MetadataRequest, + resp *datasource.MetadataResponse, +) { + resp.TypeName = req.ProviderTypeName + "_sqlserverflexbeta_flavors" +} + +func (d *flavorsDataSource) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = sqlserverflexbetaGen.FlavorsDataSourceSchema(ctx) + resp.Schema.Attributes["id"] = schema.StringAttribute{ + Computed: true, + Description: "The terraform internal identifier.", + MarkdownDescription: "The terraform internal identifier.", + } +} + +// Configure adds the provider configured client to the data source. 
+func (d *flavorsDataSource) Configure( + ctx context.Context, + req datasource.ConfigureRequest, + resp *datasource.ConfigureResponse, +) { + var ok bool + d.providerData, ok = conversion.ParseProviderData(ctx, req.ProviderData, &resp.Diagnostics) + if !ok { + return + } + + apiClientConfigOptions := []config.ConfigurationOption{ + config.WithCustomAuth(d.providerData.RoundTripper), + utils.UserAgentConfigOption(d.providerData.Version), + } + if d.providerData.SQLServerFlexCustomEndpoint != "" { + apiClientConfigOptions = append( + apiClientConfigOptions, + config.WithEndpoint(d.providerData.SQLServerFlexCustomEndpoint), + ) + } else { + apiClientConfigOptions = append( + apiClientConfigOptions, + config.WithRegion(d.providerData.GetRegion()), + ) + } + apiClient, err := sqlserverflexbetaPkg.NewAPIClient(apiClientConfigOptions...) + if err != nil { + resp.Diagnostics.AddError( + "Error configuring API client", + fmt.Sprintf( + "Configuring client: %v. This is an error related to the provider configuration, not to the resource configuration", + err, + ), + ) + return + } + d.client = apiClient + tflog.Info(ctx, fmt.Sprintf("%s client configured", errorPrefix)) +} + +func (d *flavorsDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data dataSourceModel + + // Read Terraform configuration data into the model + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) 
+ + if resp.Diagnostics.HasError() { + return + } + + ctx = core.InitProviderContext(ctx) + + projectId := data.ProjectId.ValueString() + region := d.providerData.GetRegionWithOverride(data.Region) + // TODO: implement right identifier for flavors + flavorsId := data.Flavors + + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "region", region) + + // TODO: implement needed fields + ctx = tflog.SetField(ctx, "flavors_id", flavorsId) + + // TODO: refactor to correct implementation + _, err := d.client.GetFlavorsRequest(ctx, projectId, region).Execute() + if err != nil { + utils.LogError( + ctx, + &resp.Diagnostics, + err, + "Reading flavors", + fmt.Sprintf("flavors with ID %q does not exist in project %q.", flavorsId, projectId), + map[int]string{ + http.StatusForbidden: fmt.Sprintf("Project with ID %q not found or forbidden access", projectId), + }, + ) + resp.State.RemoveResource(ctx) + return + } + + ctx = core.LogResponse(ctx) + + // TODO: refactor to correct implementation of internal tf id + data.TerraformId = utils.BuildInternalTerraformId(projectId, region) + + // TODO: fill remaining fields + // data.Flavors = types.Sometype(apiResponse.GetFlavors()) + // data.Page = types.Sometype(apiResponse.GetPage()) + // data.Pagination = types.Sometype(apiResponse.GetPagination()) + // data.ProjectId = types.Sometype(apiResponse.GetProjectId()) + // data.Region = types.Sometype(apiResponse.GetRegion()) + // data.Size = types.Sometype(apiResponse.GetSize()) + // data.Sort = types.Sometype(apiResponse.GetSort())// Save data into Terraform state + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
+ + tflog.Info(ctx, fmt.Sprintf("%s read successful", errorPrefix)) +} diff --git a/stackit/internal/services/sqlserverflexbeta/instance/datasource.go b/stackit/internal/services/sqlserverflexbeta/instance/datasource.go new file mode 100644 index 00000000..d2fd7bc3 --- /dev/null +++ b/stackit/internal/services/sqlserverflexbeta/instance/datasource.go @@ -0,0 +1,146 @@ +package sqlserverflexbeta + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/stackit-sdk-go/core/config" + + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/conversion" + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core" + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/utils" + + "github.com/stackitcloud/stackit-sdk-go/services/sqlserverflex/v3beta1api" + + sqlserverflexbetaGen "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexbeta/instance/datasources_gen" +) + +var _ datasource.DataSource = (*instanceDataSource)(nil) + +const errorPrefix = "[Sqlserverflexbeta - Instance]" + +func NewInstanceDataSource() datasource.DataSource { + return &instanceDataSource{} +} + +// dataSourceModel maps the data source schema data. 
+type dataSourceModel struct { + sqlserverflexbetaGen.InstanceModel + TerraformID types.String `tfsdk:"id"` +} + +type instanceDataSource struct { + client *v3beta1api.APIClient + providerData core.ProviderData +} + +func (d *instanceDataSource) Metadata( + _ context.Context, + req datasource.MetadataRequest, + resp *datasource.MetadataResponse, +) { + resp.TypeName = req.ProviderTypeName + "_sqlserverflexbeta_instance" +} + +func (d *instanceDataSource) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = sqlserverflexbetaGen.InstanceDataSourceSchema(ctx) +} + +// Configure adds the provider configured client to the data source. +func (d *instanceDataSource) Configure( + ctx context.Context, + req datasource.ConfigureRequest, + resp *datasource.ConfigureResponse, +) { + var ok bool + d.providerData, ok = conversion.ParseProviderData(ctx, req.ProviderData, &resp.Diagnostics) + if !ok { + return + } + + apiClientConfigOptions := []config.ConfigurationOption{ + config.WithCustomAuth(d.providerData.RoundTripper), + utils.UserAgentConfigOption(d.providerData.Version), + } + if d.providerData.SQLServerFlexCustomEndpoint != "" { + apiClientConfigOptions = append( + apiClientConfigOptions, + config.WithEndpoint(d.providerData.SQLServerFlexCustomEndpoint), + ) + } else { + apiClientConfigOptions = append( + apiClientConfigOptions, + config.WithRegion(d.providerData.GetRegion()), + ) + } + apiClient, err := v3beta1api.NewAPIClient(apiClientConfigOptions...) + if err != nil { + resp.Diagnostics.AddError( + "Error configuring API client", + fmt.Sprintf( + "Configuring client: %v. 
This is an error related to the provider configuration, not to the resource configuration", + err, + ), + ) + return + } + d.client = apiClient + tflog.Info(ctx, fmt.Sprintf("%s client configured", errorPrefix)) +} + +func (d *instanceDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data dataSourceModel + + // Read Terraform configuration data into the model + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + + if resp.Diagnostics.HasError() { + return + } + + ctx = core.InitProviderContext(ctx) + + projectId := data.ProjectId.ValueString() + region := d.providerData.GetRegionWithOverride(data.Region) + instanceId := data.InstanceId.ValueString() + + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "region", region) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + instanceResp, err := d.client.DefaultAPI.GetInstanceRequest(ctx, projectId, region, instanceId).Execute() + if err != nil { + utils.LogError( + ctx, + &resp.Diagnostics, + err, + "Reading instance", + fmt.Sprintf("instance with ID %q does not exist in project %q.", instanceId, projectId), + map[int]string{ + http.StatusForbidden: fmt.Sprintf("Project with ID %q not found or forbidden access", projectId), + }, + ) + resp.State.RemoveResource(ctx) + return + } + + ctx = core.LogResponse(ctx) + + err = mapDataResponseToModel(ctx, instanceResp, &data, resp.Diagnostics) + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + fmt.Sprintf("%s Read", errorPrefix), + fmt.Sprintf("Processing API payload: %v", err), + ) + return + } + + // Save data into Terraform state + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
+} diff --git a/stackit/internal/services/sqlserverflexbeta/instance/functions.go b/stackit/internal/services/sqlserverflexbeta/instance/functions.go new file mode 100644 index 00000000..55a81a3b --- /dev/null +++ b/stackit/internal/services/sqlserverflexbeta/instance/functions.go @@ -0,0 +1,270 @@ +package sqlserverflexbeta + +import ( + "context" + "errors" + "fmt" + "math" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/stackitcloud/stackit-sdk-go/services/sqlserverflex/v3beta1api" + sqlserverflexbetaDataGen "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexbeta/instance/datasources_gen" + sqlserverflexbetaResGen "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexbeta/instance/resources_gen" +) + +func mapResponseToModel( + ctx context.Context, + resp *v3beta1api.GetInstanceResponse, + m *sqlserverflexbetaResGen.InstanceModel, + tfDiags diag.Diagnostics, +) error { + m.BackupSchedule = types.StringValue(resp.GetBackupSchedule()) + m.Edition = types.StringValue(string(resp.GetEdition())) + m.Encryption = handleEncryption(ctx, m, resp) + m.FlavorId = types.StringValue(resp.GetFlavorId()) + m.Id = types.StringValue(resp.GetId()) + m.InstanceId = types.StringValue(resp.GetId()) + m.IsDeletable = types.BoolValue(resp.GetIsDeletable()) + m.Name = types.StringValue(resp.GetName()) + netAcl, diags := types.ListValueFrom(ctx, types.StringType, resp.Network.GetAcl()) + tfDiags.Append(diags...) 
+ if diags.HasError() { + return fmt.Errorf( + "error converting network acl response value", + ) + } + net, diags := sqlserverflexbetaResGen.NewNetworkValue( + sqlserverflexbetaResGen.NetworkValue{}.AttributeTypes(ctx), + map[string]attr.Value{ + "access_scope": types.StringValue(string(resp.Network.GetAccessScope())), + "acl": netAcl, + "instance_address": types.StringValue(resp.Network.GetInstanceAddress()), + "router_address": types.StringValue(resp.Network.GetRouterAddress()), + }, + ) + tfDiags.Append(diags...) + if diags.HasError() { + return errors.New("error converting network response value") + } + m.Network = net + m.Replicas = types.Int64Value(int64(resp.GetReplicas())) + m.RetentionDays = types.Int64Value(int64(resp.GetRetentionDays())) + m.Status = types.StringValue(string(resp.GetStatus())) + + stor, diags := sqlserverflexbetaResGen.NewStorageValue( + sqlserverflexbetaResGen.StorageValue{}.AttributeTypes(ctx), + map[string]attr.Value{ + "class": types.StringValue(resp.Storage.GetClass()), + "size": types.Int64Value(resp.Storage.GetSize()), + }, + ) + tfDiags.Append(diags...) + if diags.HasError() { + return fmt.Errorf("error converting storage response value") + } + m.Storage = stor + + m.Version = types.StringValue(string(resp.GetVersion())) + return nil +} + +func mapDataResponseToModel( + ctx context.Context, + resp *v3beta1api.GetInstanceResponse, + m *dataSourceModel, + tfDiags diag.Diagnostics, +) error { + m.BackupSchedule = types.StringValue(resp.GetBackupSchedule()) + m.Edition = types.StringValue(string(resp.GetEdition())) + m.Encryption = handleDSEncryption(ctx, m, resp) + m.FlavorId = types.StringValue(resp.GetFlavorId()) + m.Id = types.StringValue(resp.GetId()) + m.InstanceId = types.StringValue(resp.GetId()) + m.IsDeletable = types.BoolValue(resp.GetIsDeletable()) + m.Name = types.StringValue(resp.GetName()) + netAcl, diags := types.ListValueFrom(ctx, types.StringType, resp.Network.GetAcl()) + tfDiags.Append(diags...) 
+ if diags.HasError() { + return fmt.Errorf( + "error converting network acl response value", + ) + } + net, diags := sqlserverflexbetaDataGen.NewNetworkValue( + sqlserverflexbetaDataGen.NetworkValue{}.AttributeTypes(ctx), + map[string]attr.Value{ + "access_scope": types.StringValue(string(resp.Network.GetAccessScope())), + "acl": netAcl, + "instance_address": types.StringValue(resp.Network.GetInstanceAddress()), + "router_address": types.StringValue(resp.Network.GetRouterAddress()), + }, + ) + tfDiags.Append(diags...) + if diags.HasError() { + return errors.New("error converting network response value") + } + m.Network = net + m.Replicas = types.Int64Value(int64(resp.GetReplicas())) + m.RetentionDays = types.Int64Value(int64(resp.GetRetentionDays())) + m.Status = types.StringValue(string(resp.GetStatus())) + + stor, diags := sqlserverflexbetaDataGen.NewStorageValue( + sqlserverflexbetaDataGen.StorageValue{}.AttributeTypes(ctx), + map[string]attr.Value{ + "class": types.StringValue(resp.Storage.GetClass()), + "size": types.Int64Value(resp.Storage.GetSize()), + }, + ) + tfDiags.Append(diags...) 
+ if diags.HasError() { + return fmt.Errorf("error converting storage response value") + } + m.Storage = stor + + m.Version = types.StringValue(string(resp.GetVersion())) + return nil +} + +func handleEncryption( + ctx context.Context, + m *sqlserverflexbetaResGen.InstanceModel, + resp *v3beta1api.GetInstanceResponse, +) sqlserverflexbetaResGen.EncryptionValue { + if !resp.HasEncryption() || + resp.Encryption == nil || + resp.Encryption.KekKeyId == "" || + resp.Encryption.KekKeyRingId == "" || + resp.Encryption.KekKeyVersion == "" || + resp.Encryption.ServiceAccount == "" { + if m.Encryption.IsNull() || m.Encryption.IsUnknown() { + return sqlserverflexbetaResGen.NewEncryptionValueNull() + } + return m.Encryption + } + + enc := sqlserverflexbetaResGen.NewEncryptionValueMust( + sqlserverflexbetaResGen.EncryptionValue{}.AttributeTypes(ctx), + map[string]attr.Value{ + "kek_key_id": types.StringValue(resp.Encryption.GetKekKeyId()), + "kek_key_ring_id": types.StringValue(resp.Encryption.GetKekKeyRingId()), + "kek_key_version": types.StringValue(resp.Encryption.GetKekKeyVersion()), + "service_account": types.StringValue(resp.Encryption.GetServiceAccount()), + }, + ) + return enc +} + +func handleDSEncryption( + ctx context.Context, + m *dataSourceModel, + resp *v3beta1api.GetInstanceResponse, +) sqlserverflexbetaDataGen.EncryptionValue { + if !resp.HasEncryption() || + resp.Encryption == nil || + resp.Encryption.KekKeyId == "" || + resp.Encryption.KekKeyRingId == "" || + resp.Encryption.KekKeyVersion == "" || + resp.Encryption.ServiceAccount == "" { + if m.Encryption.IsNull() || m.Encryption.IsUnknown() { + return sqlserverflexbetaDataGen.NewEncryptionValueNull() + } + return m.Encryption + } + + enc := sqlserverflexbetaDataGen.NewEncryptionValueMust( + sqlserverflexbetaDataGen.EncryptionValue{}.AttributeTypes(ctx), + map[string]attr.Value{ + "kek_key_id": types.StringValue(resp.Encryption.GetKekKeyId()), + "kek_key_ring_id": 
types.StringValue(resp.Encryption.GetKekKeyRingId()), + "kek_key_version": types.StringValue(resp.Encryption.GetKekKeyVersion()), + "service_account": types.StringValue(resp.Encryption.GetServiceAccount()), + }, + ) + return enc +} + +func toCreatePayload( + ctx context.Context, + model *sqlserverflexbetaResGen.InstanceModel, +) (*v3beta1api.CreateInstanceRequestPayload, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + + storagePayload := v3beta1api.StorageCreate{} + if !model.Storage.IsNull() && !model.Storage.IsUnknown() { + storagePayload.Class = model.Storage.Class.ValueString() + storagePayload.Size = model.Storage.Size.ValueInt64() + } + + var encryptionPayload *v3beta1api.InstanceEncryption = nil + if !model.Encryption.IsNull() && !model.Encryption.IsUnknown() { + encryptionPayload = &v3beta1api.InstanceEncryption{} + encryptionPayload.KekKeyId = model.Encryption.KekKeyId.ValueString() + encryptionPayload.KekKeyRingId = model.Encryption.KekKeyRingId.ValueString() + encryptionPayload.KekKeyVersion = model.Encryption.KekKeyVersion.ValueString() + encryptionPayload.ServiceAccount = model.Encryption.ServiceAccount.ValueString() + } + + networkPayload := v3beta1api.CreateInstanceRequestPayloadNetwork{} + if !model.Network.IsNull() && !model.Network.IsUnknown() { + accScope := v3beta1api.InstanceNetworkAccessScope( + model.Network.AccessScope.ValueString(), + ) + networkPayload.AccessScope = &accScope + + var resList []string + diags := model.Network.Acl.ElementsAs(ctx, &resList, false) + if diags.HasError() { + return nil, fmt.Errorf("error converting network acl list") + } + networkPayload.Acl = resList + } + + return &v3beta1api.CreateInstanceRequestPayload{ + BackupSchedule: model.BackupSchedule.ValueString(), + Encryption: encryptionPayload, + FlavorId: model.FlavorId.ValueString(), + Name: model.Name.ValueString(), + Network: networkPayload, + RetentionDays: int32(model.RetentionDays.ValueInt64()), + Storage: storagePayload, + 
Version: v3beta1api.InstanceVersion(model.Version.ValueString()), + }, nil +} + +func toUpdatePayload( + ctx context.Context, + m *sqlserverflexbetaResGen.InstanceModel, + resp *resource.UpdateResponse, +) (*v3beta1api.UpdateInstanceRequestPayload, error) { + if m == nil { + return nil, fmt.Errorf("nil model") + } + if m.Replicas.ValueInt64() > math.MaxUint32 { + return nil, fmt.Errorf("replicas value is too big for uint32") + } + replVal := v3beta1api.Replicas(uint32(m.Replicas.ValueInt64())) // nolint:gosec // check is performed above + + var netAcl []string + diags := m.Network.Acl.ElementsAs(ctx, &netAcl, false) + resp.Diagnostics.Append(diags...) + if diags.HasError() { + return nil, fmt.Errorf("error converting model network acl value") + } + return &v3beta1api.UpdateInstanceRequestPayload{ + BackupSchedule: m.BackupSchedule.ValueString(), + FlavorId: m.FlavorId.ValueString(), + Name: m.Name.ValueString(), + Network: v3beta1api.UpdateInstanceRequestPayloadNetwork{ + Acl: netAcl, + }, + Replicas: replVal, + RetentionDays: int32(m.RetentionDays.ValueInt64()), + Storage: v3beta1api.StorageUpdate{Size: m.Storage.Size.ValueInt64Pointer()}, + Version: v3beta1api.InstanceVersion(m.Version.ValueString()), + }, nil +} diff --git a/stackit/internal/services/sqlserverflexbeta/instance/functions_test.go b/stackit/internal/services/sqlserverflexbeta/instance/functions_test.go new file mode 100644 index 00000000..e9728b80 --- /dev/null +++ b/stackit/internal/services/sqlserverflexbeta/instance/functions_test.go @@ -0,0 +1,283 @@ +package sqlserverflexbeta + +import ( + "context" + "reflect" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + + sqlserverflexbetaPkgGen 
"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexbeta" + sqlserverflexbetaRs "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexbeta/instance/resources_gen" +) + +func Test_handleDSEncryption(t *testing.T) { + type args struct { + m *dataSourceModel + resp *sqlserverflexbetaPkgGen.GetInstanceResponse + } + tests := []struct { + name string + args args + want sqlserverflexbetaRs.EncryptionValue + }{ + // TODO: Add test cases. + } + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + if got := handleDSEncryption(t.Context(), tt.args.m, tt.args.resp); !reflect.DeepEqual(got, tt.want) { + t.Errorf("handleDSEncryption() = %v, want %v", got, tt.want) + } + }, + ) + } +} + +func Test_handleEncryption(t *testing.T) { + type args struct { + m *sqlserverflexbetaRs.InstanceModel + resp *sqlserverflexbetaPkgGen.GetInstanceResponse + } + tests := []struct { + name string + args args + want sqlserverflexbetaRs.EncryptionValue + }{ + { + name: "nil response", + args: args{ + m: &sqlserverflexbetaRs.InstanceModel{}, + resp: &sqlserverflexbetaPkgGen.GetInstanceResponse{}, + }, + want: sqlserverflexbetaRs.EncryptionValue{}, + }, + { + name: "nil response", + args: args{ + m: &sqlserverflexbetaRs.InstanceModel{}, + resp: &sqlserverflexbetaPkgGen.GetInstanceResponse{ + Encryption: &sqlserverflexbetaPkgGen.InstanceEncryption{}, + }, + }, + want: sqlserverflexbetaRs.NewEncryptionValueNull(), + }, + { + name: "response with values", + args: args{ + m: &sqlserverflexbetaRs.InstanceModel{}, + resp: &sqlserverflexbetaPkgGen.GetInstanceResponse{ + Encryption: &sqlserverflexbetaPkgGen.InstanceEncryption{ + KekKeyId: utils.Ptr("kek_key_id"), + KekKeyRingId: utils.Ptr("kek_key_ring_id"), + KekKeyVersion: utils.Ptr("kek_key_version"), + ServiceAccount: utils.Ptr("kek_svc_acc"), + }, + }, + }, + want: 
sqlserverflexbetaRs.NewEncryptionValueMust( + sqlserverflexbetaRs.EncryptionValue{}.AttributeTypes(context.TODO()), + map[string]attr.Value{ + "kek_key_id": types.StringValue("kek_key_id"), + "kek_key_ring_id": types.StringValue("kek_key_ring_id"), + "kek_key_version": types.StringValue("kek_key_version"), + "service_account": types.StringValue("kek_svc_acc"), + }, + ), + }, + } + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + got := handleEncryption(t.Context(), tt.args.m, tt.args.resp) + + diff := cmp.Diff(tt.want, got) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + + //if !reflect.DeepEqual(got, tt.want) { + // t.Errorf("handleEncryption() = %v, want %v", got, tt.want) + //} + }, + ) + } +} + +func Test_mapDataResponseToModel(t *testing.T) { + type args struct { + ctx context.Context + resp *sqlserverflexbetaPkgGen.GetInstanceResponse + m *dataSourceModel + tfDiags diag.Diagnostics + } + tests := []struct { + name string + args args + wantErr bool + }{ + // TODO: Add test cases. + } + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + if err := mapDataResponseToModel( + tt.args.ctx, + tt.args.resp, + tt.args.m, + tt.args.tfDiags, + ); (err != nil) != tt.wantErr { + t.Errorf("mapDataResponseToModel() error = %v, wantErr %v", err, tt.wantErr) + } + }, + ) + } +} + +func Test_mapResponseToModel(t *testing.T) { + type args struct { + ctx context.Context + resp *sqlserverflexbetaPkgGen.GetInstanceResponse + m *sqlserverflexbetaRs.InstanceModel + tfDiags diag.Diagnostics + } + tests := []struct { + name string + args args + wantErr bool + }{ + // TODO: Add test cases. 
+ } + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + if err := mapResponseToModel( + tt.args.ctx, + tt.args.resp, + tt.args.m, + tt.args.tfDiags, + ); (err != nil) != tt.wantErr { + t.Errorf("mapResponseToModel() error = %v, wantErr %v", err, tt.wantErr) + } + }, + ) + } +} + +func Test_toCreatePayload(t *testing.T) { + type args struct { + ctx context.Context + model *sqlserverflexbetaRs.InstanceModel + } + tests := []struct { + name string + args args + want *sqlserverflexbetaPkgGen.CreateInstanceRequestPayload + wantErr bool + }{ + { + name: "simple", + args: args{ + ctx: context.Background(), + model: &sqlserverflexbetaRs.InstanceModel{ + Encryption: sqlserverflexbetaRs.NewEncryptionValueMust( + sqlserverflexbetaRs.EncryptionValue{}.AttributeTypes(context.Background()), + map[string]attr.Value{ + "kek_key_id": types.StringValue("kek_key_id"), + "kek_key_ring_id": types.StringValue("kek_key_ring_id"), + "kek_key_version": types.StringValue("kek_key_version"), + "service_account": types.StringValue("sacc"), + }, + ), + Storage: sqlserverflexbetaRs.StorageValue{}, + }, + }, + want: &sqlserverflexbetaPkgGen.CreateInstanceRequestPayload{ + BackupSchedule: nil, + Encryption: &sqlserverflexbetaPkgGen.InstanceEncryption{ + KekKeyId: utils.Ptr("kek_key_id"), + KekKeyRingId: utils.Ptr("kek_key_ring_id"), + KekKeyVersion: utils.Ptr("kek_key_version"), + ServiceAccount: utils.Ptr("sacc"), + }, + FlavorId: nil, + Name: nil, + Network: &sqlserverflexbetaPkgGen.CreateInstanceRequestPayloadNetwork{}, + RetentionDays: nil, + Storage: &sqlserverflexbetaPkgGen.CreateInstanceRequestPayloadGetStorageArgType{}, + Version: nil, + }, + wantErr: false, + }, + { + name: "nil object", + args: args{ + ctx: context.Background(), + model: &sqlserverflexbetaRs.InstanceModel{ + Encryption: sqlserverflexbetaRs.NewEncryptionValueNull(), + Storage: sqlserverflexbetaRs.StorageValue{}, + }, + }, + want: &sqlserverflexbetaPkgGen.CreateInstanceRequestPayload{ + BackupSchedule: 
nil, + Encryption: nil, + FlavorId: nil, + Name: nil, + Network: &sqlserverflexbetaPkgGen.CreateInstanceRequestPayloadNetwork{}, + RetentionDays: nil, + Storage: &sqlserverflexbetaPkgGen.CreateInstanceRequestPayloadGetStorageArgType{}, + Version: nil, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + got, err := toCreatePayload(tt.args.ctx, tt.args.model) + if (err != nil) != tt.wantErr { + t.Errorf("toCreatePayload() error = %v, wantErr %v", err, tt.wantErr) + return + } + if diff := cmp.Diff(tt.want, got); diff != "" { + t.Errorf("model mismatch (-want +got):\n%s", diff) + } + }, + ) + } +} + +func Test_toUpdatePayload(t *testing.T) { + type args struct { + ctx context.Context + m *sqlserverflexbetaRs.InstanceModel + resp *resource.UpdateResponse + } + tests := []struct { + name string + args args + want *sqlserverflexbetaPkgGen.UpdateInstanceRequestPayload + wantErr bool + }{ + // TODO: Add test cases. + } + for _, tt := range tests { + t.Run( + tt.name, func(t *testing.T) { + got, err := toUpdatePayload(tt.args.ctx, tt.args.m, tt.args.resp) + if (err != nil) != tt.wantErr { + t.Errorf("toUpdatePayload() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("toUpdatePayload() got = %v, want %v", got, tt.want) + } + }, + ) + } +} diff --git a/stackit/internal/services/sqlserverflexbeta/instance/planModifiers.yaml b/stackit/internal/services/sqlserverflexbeta/instance/planModifiers.yaml new file mode 100644 index 00000000..71d4cbe4 --- /dev/null +++ b/stackit/internal/services/sqlserverflexbeta/instance/planModifiers.yaml @@ -0,0 +1,124 @@ +fields: + - name: 'id' + modifiers: + - 'UseStateForUnknown' + + - name: 'instance_id' + validators: + - validate.NoSeparator + - validate.UUID + modifiers: + - 'UseStateForUnknown' + + - name: 'project_id' + validators: + - validate.NoSeparator + - validate.UUID + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' + 
+ - name: 'name' + modifiers: + - 'UseStateForUnknown' + + - name: 'backup_schedule' + modifiers: + - 'UseStateForUnknown' + + - name: 'encryption.kek_key_id' + validators: + - validate.NoSeparator + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' + + - name: 'encryption.kek_key_version' + validators: + - validate.NoSeparator + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' + + - name: 'encryption.kek_key_ring_id' + validators: + - validate.NoSeparator + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' + + - name: 'encryption.service_account' + validators: + - validate.NoSeparator + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' + + - name: 'network.access_scope' + validators: + - validate.NoSeparator + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' + + - name: 'network.acl' + modifiers: + - 'UseStateForUnknown' + + - name: 'network.instance_address' + modifiers: + - 'UseStateForUnknown' + + - name: 'network.router_address' + modifiers: + - 'UseStateForUnknown' + + - name: 'status' + modifiers: + - 'UseStateForUnknown' + + - name: 'region' + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' + + - name: 'retention_days' + modifiers: + - 'UseStateForUnknown' + + - name: 'edition' + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' + + - name: 'version' + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' + + - name: 'replicas' + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' + + - name: 'storage' + modifiers: + - 'UseStateForUnknown' + + - name: 'storage.class' + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' + + - name: 'storage.size' + modifiers: + - 'UseStateForUnknown' + + - name: 'flavor_id' + modifiers: + - 'UseStateForUnknown' + - 'RequiresReplace' + + - name: 'is_deletable' + modifiers: + - 'UseStateForUnknown' diff --git a/stackit/internal/services/sqlserverflexbeta/instance/resource.go b/stackit/internal/services/sqlserverflexbeta/instance/resource.go new file 
mode 100644 index 00000000..a824aabf --- /dev/null +++ b/stackit/internal/services/sqlserverflexbeta/instance/resource.go @@ -0,0 +1,546 @@ +package sqlserverflexbeta + +import ( + "context" + _ "embed" + "fmt" + "net/http" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/identityschema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/core/oapierror" + + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/conversion" + wait "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/wait/sqlserverflexbeta" + + "github.com/stackitcloud/stackit-sdk-go/services/sqlserverflex/v3beta1api" + + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core" + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/utils" + + sqlserverflexbetaResGen "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexbeta/instance/resources_gen" +) + +var ( + _ resource.Resource = &instanceResource{} + _ resource.ResourceWithConfigure = &instanceResource{} + _ resource.ResourceWithImportState = &instanceResource{} + _ resource.ResourceWithModifyPlan = &instanceResource{} + _ resource.ResourceWithIdentity = &instanceResource{} +) + +func NewInstanceResource() resource.Resource { + return &instanceResource{} +} + +type instanceResource struct { + client *v3beta1api.APIClient + providerData core.ProviderData +} + +// resourceModel describes the resource data model. 
+type resourceModel = sqlserverflexbetaResGen.InstanceModel + +type InstanceResourceIdentityModel struct { + ProjectID types.String `tfsdk:"project_id"` + Region types.String `tfsdk:"region"` + InstanceID types.String `tfsdk:"instance_id"` +} + +func (r *instanceResource) Metadata( + _ context.Context, + req resource.MetadataRequest, + resp *resource.MetadataResponse, +) { + resp.TypeName = req.ProviderTypeName + "_sqlserverflexbeta_instance" +} + +//go:embed planModifiers.yaml +var modifiersFileByte []byte + +func (r *instanceResource) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + s := sqlserverflexbetaResGen.InstanceResourceSchema(ctx) + + fields, err := utils.ReadModifiersConfig(modifiersFileByte) + if err != nil { + resp.Diagnostics.AddError("error during read modifiers config file", err.Error()) + return + } + + err = utils.AddPlanModifiersToResourceSchema(fields, &s) + if err != nil { + resp.Diagnostics.AddError("error adding plan modifiers", err.Error()) + return + } + resp.Schema = s +} + +func (r *instanceResource) IdentitySchema( + _ context.Context, + _ resource.IdentitySchemaRequest, + resp *resource.IdentitySchemaResponse, +) { + resp.IdentitySchema = identityschema.Schema{ + Attributes: map[string]identityschema.Attribute{ + "project_id": identityschema.StringAttribute{ + RequiredForImport: true, // must be set during import by the practitioner + }, + "region": identityschema.StringAttribute{ + RequiredForImport: true, // must be set during import by the practitioner + }, + "instance_id": identityschema.StringAttribute{ + RequiredForImport: true, // must be set during import by the practitioner + }, + }, + } +} + +// Configure adds the provider configured client to the resource. 
+func (r *instanceResource) Configure( + ctx context.Context, + req resource.ConfigureRequest, + resp *resource.ConfigureResponse, +) { + var ok bool + r.providerData, ok = conversion.ParseProviderData(ctx, req.ProviderData, &resp.Diagnostics) + if !ok { + return + } + + apiClientConfigOptions := []config.ConfigurationOption{ + config.WithCustomAuth(r.providerData.RoundTripper), + utils.UserAgentConfigOption(r.providerData.Version), + } + if r.providerData.SQLServerFlexCustomEndpoint != "" { + apiClientConfigOptions = append( + apiClientConfigOptions, + config.WithEndpoint(r.providerData.SQLServerFlexCustomEndpoint), + ) + } else { + apiClientConfigOptions = append(apiClientConfigOptions, config.WithRegion(r.providerData.GetRegion())) + } + apiClient, err := v3beta1api.NewAPIClient(apiClientConfigOptions...) + if err != nil { + resp.Diagnostics.AddError( + "Error configuring API client", + fmt.Sprintf( + "Configuring client: %v. This is an error related to the provider configuration, not to the resource configuration", + err, + ), + ) + return + } + r.client = apiClient + tflog.Info(ctx, "sqlserverflexbeta.Instance client configured") +} + +// ModifyPlan implements resource.ResourceWithModifyPlan. +// Use the modifier to set the effective region in the current plan. +func (r *instanceResource) ModifyPlan( + ctx context.Context, + req resource.ModifyPlanRequest, + resp *resource.ModifyPlanResponse, +) { // nolint:gocritic // function signature required by Terraform + // skip initial empty configuration to avoid follow-up errors + if req.Config.Raw.IsNull() { + return + } + var configModel resourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &configModel)...) + if resp.Diagnostics.HasError() { + return + } + + if req.Plan.Raw.IsNull() { + return + } + var planModel resourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &planModel)...) 
+ if resp.Diagnostics.HasError() { + return + } + + utils.AdaptRegion(ctx, configModel.Region, &planModel.Region, r.providerData.GetRegion(), resp) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(resp.Plan.Set(ctx, planModel)...) + if resp.Diagnostics.HasError() { + return + } +} + +func (r *instanceResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var data resourceModel + crateErr := "[SQL Server Flex BETA - Create] error" + + // Read Terraform plan data into the model + resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) + + if resp.Diagnostics.HasError() { + return + } + + ctx = core.InitProviderContext(ctx) + + projectId := data.ProjectId.ValueString() + region := data.Region.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "region", region) + + // Generate API request body from model + payload, err := toCreatePayload(ctx, &data) + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + crateErr, + fmt.Sprintf("Creating API payload: %v", err), + ) + return + } + + // Create new Instance + createResp, err := r.client.DefaultAPI.CreateInstanceRequest( + ctx, + projectId, + region, + ).CreateInstanceRequestPayload(*payload).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, crateErr, fmt.Sprintf("Calling API: %v", err)) + return + } + + ctx = core.LogResponse(ctx) + + instanceId := createResp.Id + data.InstanceId = types.StringValue(instanceId) + + identity := InstanceResourceIdentityModel{ + ProjectID: types.StringValue(projectId), + Region: types.StringValue(region), + InstanceID: types.StringValue(instanceId), + } + resp.Diagnostics.Append(resp.Identity.Set(ctx, identity)...) 
+ if resp.Diagnostics.HasError() { + return + } + + waitResp, err := wait.CreateInstanceWaitHandler( + ctx, + r.client.DefaultAPI, + projectId, + instanceId, + region, + ).SetSleepBeforeWait( + 10 * time.Second, + ).SetTimeout( + 90 * time.Minute, + ).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + crateErr, + fmt.Sprintf("Instance creation waiting: %v", err), + ) + return + } + + if waitResp.Id == "" { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + crateErr, + "Instance creation waiting: returned id is nil", + ) + return + } + + // Map response body to schema + err = mapResponseToModel(ctx, waitResp, &data, resp.Diagnostics) + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + crateErr, + fmt.Sprintf("processing API payload: %v", err), + ) + return + } + + // Save data into Terraform state + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) + + tflog.Info(ctx, "sqlserverflexbeta.Instance created") +} + +func (r *instanceResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + var data resourceModel + + // Read Terraform prior state data into the model + resp.Diagnostics.Append(req.State.Get(ctx, &data)...) 
+ if resp.Diagnostics.HasError() { + return + } + + ctx = core.InitProviderContext(ctx) + + projectId := data.ProjectId.ValueString() + region := data.Region.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "region", region) + + instanceId := data.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + instanceResp, err := r.client.DefaultAPI.GetInstanceRequest(ctx, projectId, region, instanceId).Execute() + if err != nil { + oapiErr, ok := err.(*oapierror.GenericOpenAPIError) //nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped + if ok && oapiErr.StatusCode == http.StatusNotFound { + resp.State.RemoveResource(ctx) + return + } + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", err.Error()) + return + } + + ctx = core.LogResponse(ctx) + + // Map response body to schema + err = mapResponseToModel(ctx, instanceResp, &data, resp.Diagnostics) + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + "Error reading instance", + fmt.Sprintf("Processing API payload: %v", err), + ) + return + } + + // Save identity into Terraform state + identity := InstanceResourceIdentityModel{ + ProjectID: types.StringValue(projectId), + Region: types.StringValue(region), + InstanceID: types.StringValue(instanceId), + } + resp.Diagnostics.Append(resp.Identity.Set(ctx, identity)...) + if resp.Diagnostics.HasError() { + return + } + + // Save updated data into Terraform state + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + tflog.Info(ctx, "sqlserverflexbeta.Instance read") +} + +func (r *instanceResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + var data resourceModel + updateInstanceError := "Error updating instance" + + resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) 
+ if resp.Diagnostics.HasError() { + return + } + + ctx = core.InitProviderContext(ctx) + + projectId := data.ProjectId.ValueString() + region := data.Region.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "region", region) + + instanceId := data.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + // Generate API request body from model + payload, err := toUpdatePayload(ctx, &data, resp) + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + updateInstanceError, + fmt.Sprintf("Creating API payload: %v", err), + ) + return + } + // Update existing instance + err = r.client.DefaultAPI.UpdateInstanceRequest( + ctx, + projectId, + region, + instanceId, + ).UpdateInstanceRequestPayload(*payload).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, updateInstanceError, err.Error()) + return + } + + ctx = core.LogResponse(ctx) + + waitResp, err := wait. + UpdateInstanceWaitHandler(ctx, r.client.DefaultAPI, projectId, instanceId, region). + SetSleepBeforeWait(15 * time.Second). + SetTimeout(45 * time.Minute). + WaitWithContext(ctx) + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + updateInstanceError, + fmt.Sprintf("Instance update waiting: %v", err), + ) + return + } + + // Map response body to schema + err = mapResponseToModel(ctx, waitResp, &data, resp.Diagnostics) + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + updateInstanceError, + fmt.Sprintf("Processing API payload: %v", err), + ) + return + } + + identity := InstanceResourceIdentityModel{ + ProjectID: types.StringValue(projectId), + Region: types.StringValue(region), + InstanceID: types.StringValue(instanceId), + } + resp.Diagnostics.Append(resp.Identity.Set(ctx, identity)...) + if resp.Diagnostics.HasError() { + return + } + + // Save updated data into Terraform state + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
+ if resp.Diagnostics.HasError() { + return + } + + tflog.Info(ctx, "sqlserverflexbeta.Instance updated") +} + +func (r *instanceResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + var data resourceModel + + // Read Terraform prior state data into the model + resp.Diagnostics.Append(req.State.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + // Read identity data + var identityData InstanceResourceIdentityModel + resp.Diagnostics.Append(req.Identity.Get(ctx, &identityData)...) + if resp.Diagnostics.HasError() { + return + } + + ctx = core.InitProviderContext(ctx) + + projectId := identityData.ProjectID.ValueString() + region := identityData.Region.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "region", region) + + instanceId := identityData.InstanceID.ValueString() + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + // Delete existing instance + err := r.client.DefaultAPI.DeleteInstanceRequest(ctx, projectId, region, instanceId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting instance", fmt.Sprintf("Calling API: %v", err)) + return + } + + ctx = core.LogResponse(ctx) + + delResp, err := wait.DeleteInstanceWaitHandler(ctx, r.client.DefaultAPI, projectId, instanceId, region).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + "Error deleting instance", + fmt.Sprintf("Instance deletion waiting: %v", err), + ) + return + } + + if delResp != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + "Error deleting instance", + "wait handler returned non nil result", + ) + return + } + + resp.State.RemoveResource(ctx) + + tflog.Info(ctx, "sqlserverflexbeta.Instance deleted") +} + +// ImportState imports a resource into the Terraform state on success. 
+// The expected format of the resource import identifier is: [project_id],[region],[instance_id] +func (r *instanceResource) ImportState( + ctx context.Context, + req resource.ImportStateRequest, + resp *resource.ImportStateResponse, +) { + ctx = core.InitProviderContext(ctx) + + if req.ID != "" { + idParts := strings.Split(req.ID, core.Separator) + + if len(idParts) != 3 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" { + core.LogAndAddError( + ctx, &resp.Diagnostics, + "Error importing instance", + fmt.Sprintf( + "Expected import identifier with format [project_id],[region],[instance_id] Got: %q", + req.ID, + ), + ) + return + } + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("region"), idParts[1])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), idParts[2])...) + return + } + + // If no ID is provided, attempt to read identity attributes from the import configuration + var identityData InstanceResourceIdentityModel + resp.Diagnostics.Append(req.Identity.Get(ctx, &identityData)...) + if resp.Diagnostics.HasError() { + return + } + + projectId := identityData.ProjectID.ValueString() + region := identityData.Region.ValueString() + instanceId := identityData.InstanceID.ValueString() + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), projectId)...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("region"), region)...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), instanceId)...) 
+ + tflog.Info(ctx, "Sqlserverflexbeta instance state imported") +} diff --git a/stackit/internal/services/sqlserverflexbeta/sqlserverflex_acc_test.go b/stackit/internal/services/sqlserverflexbeta/sqlserverflex_acc_test.go new file mode 100644 index 00000000..246623ea --- /dev/null +++ b/stackit/internal/services/sqlserverflexbeta/sqlserverflex_acc_test.go @@ -0,0 +1,448 @@ +package sqlserverflexbeta_test + +import ( + "context" + _ "embed" + "fmt" + "os" + "strconv" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/internal/testutils" + sqlserverflexbeta "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexbeta/instance" + + // The fwresource import alias is so there is no collision + // with the more typical acceptance testing import: + // "github.com/hashicorp/terraform-plugin-testing/helper/resource" + fwresource "github.com/hashicorp/terraform-plugin-framework/resource" +) + +const providerPrefix = "stackitprivatepreview_sqlserverflexbeta" + +func TestInstanceResourceSchema(t *testing.T) { + t.Parallel() + + ctx := context.Background() + schemaRequest := fwresource.SchemaRequest{} + schemaResponse := &fwresource.SchemaResponse{} + + // Instantiate the resource.Resource and call its Schema method + sqlserverflexbeta.NewInstanceResource().Schema(ctx, schemaRequest, schemaResponse) + + if schemaResponse.Diagnostics.HasError() { + t.Fatalf("Schema method diagnostics: %+v", schemaResponse.Diagnostics) + } + + // Validate the schema + diagnostics := schemaResponse.Schema.ValidateImplementation(ctx) + + if diagnostics.HasError() { + t.Fatalf("Schema validation diagnostics: %+v", diagnostics) + } +} + +func TestMain(m *testing.M) { + testutils.Setup() + code := m.Run() + // shutdown() + os.Exit(code) +} + 
+func testAccPreCheck(t *testing.T) { + if _, ok := os.LookupEnv("TF_ACC_PROJECT_ID"); !ok { + t.Fatalf("could not find env var TF_ACC_PROJECT_ID") + } +} + +type resData struct { + ServiceAccountFilePath string + ProjectId string + Region string + Name string + TfName string + FlavorId string + BackupSchedule string + UseEncryption bool + KekKeyId string + KekKeyRingId string + KekKeyVersion uint8 + KekServiceAccount string + PerformanceClass string + Size uint32 + AclString string + AccessScope string + RetentionDays uint32 + Version string + Users []User + Databases []Database +} + +type User struct { + Name string + ProjectId string + Roles []string +} + +type Database struct { + Name string + ProjectId string + Owner string + Collation string + Compatibility string +} + +func resName(res, name string) string { + return fmt.Sprintf("%s_%s.%s", providerPrefix, res, name) +} + +func getExample() resData { + name := acctest.RandomWithPrefix("tf-acc") + return resData{ + Region: os.Getenv("TF_ACC_REGION"), + ServiceAccountFilePath: os.Getenv("TF_ACC_SERVICE_ACCOUNT_FILE"), + ProjectId: os.Getenv("TF_ACC_PROJECT_ID"), + Name: name, + TfName: name, + FlavorId: "4.16-Single", + BackupSchedule: "0 0 * * *", + UseEncryption: false, + RetentionDays: 33, + PerformanceClass: "premium-perf2-stackit", + Size: 10, + AclString: "0.0.0.0/0", + AccessScope: "PUBLIC", + Version: "2022", + } +} + +func TestAccInstance(t *testing.T) { + exData := getExample() + + updNameData := exData + updNameData.Name = "name-updated" + + updSizeData := exData + updSizeData.Size = 25 + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + t.Logf(" ... 
working on instance %s", exData.TfName) + }, + ProtoV6ProviderFactories: testutils.TestAccProtoV6ProviderFactories, + Steps: []resource.TestStep{ + // Create and verify + { + Config: testutils.StringFromTemplateMust( + "testdata/instance_template.gompl", + exData, + ), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(resName("instance", exData.TfName), "name", exData.Name), + resource.TestCheckResourceAttrSet(resName("instance", exData.TfName), "id"), + // TODO: check all fields + ), + }, + // Update name and verify + { + Config: testutils.StringFromTemplateMust( + "testdata/instance_template.gompl", + updNameData, + ), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resName("instance", exData.TfName), "name", updNameData.Name), + ), + }, + // Update size and verify + { + Config: testutils.StringFromTemplateMust( + "testdata/instance_template.gompl", + updSizeData, + ), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + testutils.ResStr(providerPrefix, "instance", exData.TfName), + "storage.size", + strconv.Itoa(int(updSizeData.Size)), + ), + ), + }, + { + RefreshState: true, + }, + //// Import test + //{ + // ResourceName: resName("instance", exData.TfName), + // ImportState: true, + // ImportStateVerify: true, + // }, + }, + }) +} + +func TestAccInstanceReApply(t *testing.T) { + exData := getExample() + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + t.Logf(" ... 
working on instance %s", exData.TfName) + testInstances = append(testInstances, exData.TfName) + }, + ProtoV6ProviderFactories: testutils.TestAccProtoV6ProviderFactories, + Steps: []resource.TestStep{ + // Create and verify + { + Config: testutils.StringFromTemplateMust( + "testdata/instance_template.gompl", + exData, + ), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(resName("instance", exData.TfName), "name", exData.Name), + resource.TestCheckResourceAttrSet(resName("instance", exData.TfName), "id"), + // TODO: check all fields + ), + }, + // Create and verify + { + Config: testutils.StringFromTemplateMust( + "testdata/instance_template.gompl", + exData, + ), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(resName("instance", exData.TfName), "name", exData.Name), + resource.TestCheckResourceAttrSet(resName("instance", exData.TfName), "id"), + // TODO: check all fields + ), + }, + { + RefreshState: true, + }, + // Create and verify + { + Config: testutils.StringFromTemplateMust( + "testdata/instance_template.gompl", + exData, + ), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(resName("instance", exData.TfName), "name", exData.Name), + resource.TestCheckResourceAttrSet(resName("instance", exData.TfName), "id"), + // TODO: check all fields + ), + }, + // Import test + { + ResourceName: resName("instance", exData.TfName), + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportState: true, + // ImportStateVerify is not supported with plannable import blocks + // ImportStateVerify: true, + }, + }, + }) +} + +func TestAccInstanceNoEncryption(t *testing.T) { + data := getExample() + + dbName := "testDb" + userName := "testUser" + data.Users = []User{ + { + Name: userName, + ProjectId: os.Getenv("TF_ACC_PROJECT_ID"), + Roles: []string{ + "##STACKIT_DatabaseManager##", + "##STACKIT_LoginManager##", + "##STACKIT_ProcessManager##", + 
"##STACKIT_SQLAgentManager##", + "##STACKIT_SQLAgentUser##", + "##STACKIT_ServerManager##", + }, + }, + } + data.Databases = []Database{ + { + Name: dbName, + ProjectId: os.Getenv("TF_ACC_PROJECT_ID"), + Owner: userName, + }, + } + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + t.Logf(" ... working on instance %s", data.TfName) + testInstances = append(testInstances, data.TfName) + }, + ProtoV6ProviderFactories: testutils.TestAccProtoV6ProviderFactories, + Steps: []resource.TestStep{ + // Create and verify + { + Config: testutils.StringFromTemplateMust( + "testdata/instance_template.gompl", + data, + ), + Check: resource.ComposeAggregateTestCheckFunc( + // check instance values are set + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "id"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "backup_schedule"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "edition"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "flavor_id"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "instance_id"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "is_deletable"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "name"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "replicas"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "retention_days"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "status"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "version"), + + resource.TestCheckNoResourceAttr(resName("instance", data.TfName), "encryption"), + + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "encryption"), + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "encryption.kek_key_id"), + // resource.TestCheckResourceAttrSet(resName("instance", 
data.TfName), "encryption.kek_key_version"), + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "encryption.kek_key_ring_id"), + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "encryption.service_account"), + + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "network.access_scope"), + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "network.acl"), + + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "network.instance_address"), + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "network.router_address"), + + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "storage.class"), + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "storage.size"), + + // check instance values are correct + resource.TestCheckResourceAttr(resName("instance", data.TfName), "name", data.Name), + + // check user values are set + resource.TestCheckResourceAttrSet(resName("user", userName), "id"), + resource.TestCheckResourceAttrSet(resName("user", userName), "username"), + // resource.TestCheckResourceAttrSet(resName("user", userName), "roles"), + + // func(s *terraform.State) error { + // return nil + // }, + + // check user values are correct + resource.TestCheckResourceAttr(resName("user", userName), "username", userName), + resource.TestCheckResourceAttr(resName("user", userName), "roles.#", strconv.Itoa(len(data.Users[0].Roles))), + + // check database values are set + resource.TestCheckResourceAttrSet(resName("database", dbName), "id"), + resource.TestCheckResourceAttrSet(resName("database", dbName), "name"), + resource.TestCheckResourceAttrSet(resName("database", dbName), "owner"), + resource.TestCheckResourceAttrSet(resName("database", dbName), "compatibility"), + resource.TestCheckResourceAttrSet(resName("database", dbName), "collation"), + + // check database values are correct + 
resource.TestCheckResourceAttr(resName("database", dbName), "name", dbName), + resource.TestCheckResourceAttr(resName("database", dbName), "owner", userName), + ), + }, + }, + }) +} + +func TestAccInstanceEncryption(t *testing.T) { + data := getExample() + + dbName := "testDb" + userName := "testUser" + data.Users = []User{ + { + Name: userName, + ProjectId: os.Getenv("TF_ACC_PROJECT_ID"), + Roles: []string{"##STACKIT_DatabaseManager##", "##STACKIT_LoginManager##"}, + }, + } + data.Databases = []Database{ + { + Name: dbName, + ProjectId: os.Getenv("TF_ACC_PROJECT_ID"), + Owner: userName, + }, + } + + data.UseEncryption = true + data.KekKeyId = "fe039bcf-8d7b-431a-801d-9e81371a6b7b" + data.KekKeyRingId = "6a2d95ab-3c4c-4963-a2bb-08d17a320e27" + data.KekKeyVersion = 1 + data.KekServiceAccount = "henselinm-u2v3ex1@sa.stackit.cloud" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + t.Logf(" ... working on instance %s", data.TfName) + testInstances = append(testInstances, data.TfName) + }, + ProtoV6ProviderFactories: testutils.TestAccProtoV6ProviderFactories, + Steps: []resource.TestStep{ + // Create and verify + { + Config: testutils.StringFromTemplateMust( + "testdata/instance_template.gompl", + data, + ), + Check: resource.ComposeAggregateTestCheckFunc( + // check instance values are set + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "id"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "backup_schedule"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "edition"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "flavor_id"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "instance_id"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "is_deletable"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "name"), + resource.TestCheckResourceAttrSet(resName("instance", 
data.TfName), "replicas"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "retention_days"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "status"), + resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "version"), + + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "encryption"), + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "encryption.kek_key_id"), + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "encryption.kek_key_version"), + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "encryption.kek_key_ring_id"), + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "encryption.service_account"), + + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "network.access_scope"), + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "network.acl"), + + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "network.instance_address"), + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "network.router_address"), + + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "storage.class"), + // resource.TestCheckResourceAttrSet(resName("instance", data.TfName), "storage.size"), + + // check instance values are correct + resource.TestCheckResourceAttr(resName("instance", data.TfName), "name", data.Name), + + // check user values are set + resource.TestCheckResourceAttrSet(resName("user", userName), "id"), + resource.TestCheckResourceAttrSet(resName("user", userName), "username"), + + // func(s *terraform.State) error { + // return nil + // }, + + // check user values are correct + resource.TestCheckResourceAttr(resName("user", userName), "username", userName), + resource.TestCheckResourceAttr(resName("user", userName), "roles.#", "2"), + + // check database values are set + 
resource.TestCheckResourceAttrSet(resName("database", dbName), "id"), + resource.TestCheckResourceAttrSet(resName("database", dbName), "name"), + resource.TestCheckResourceAttrSet(resName("database", dbName), "owner"), + resource.TestCheckResourceAttrSet(resName("database", dbName), "compatibility"), + resource.TestCheckResourceAttrSet(resName("database", dbName), "collation"), + + // check database values are correct + resource.TestCheckResourceAttr(resName("database", dbName), "name", dbName), + resource.TestCheckResourceAttr(resName("database", dbName), "owner", userName), + ), + }, + }, + }) +} diff --git a/stackit/internal/services/sqlserverflexbeta/testdata/instance_template.gompl b/stackit/internal/services/sqlserverflexbeta/testdata/instance_template.gompl new file mode 100644 index 00000000..e71f3fa0 --- /dev/null +++ b/stackit/internal/services/sqlserverflexbeta/testdata/instance_template.gompl @@ -0,0 +1,60 @@ +provider "stackitprivatepreview" { + default_region = "{{ .Region }}" + service_account_key_path = "{{ .ServiceAccountFilePath }}" +} + +resource "stackitprivatepreview_sqlserverflexbeta_instance" "{{ .TfName }}" { + project_id = "{{ .ProjectId }}" + name = "{{ .Name }}" + backup_schedule = "{{ .BackupSchedule }}" + retention_days = {{ .RetentionDays }} + flavor_id = "{{ .FlavorId }}" + storage = { + class = "{{ .PerformanceClass }}" + size = {{ .Size }} + } +{{ if .UseEncryption }} + encryption = { + kek_key_id = "{{ .KekKeyId }}" + kek_key_ring_id = "{{ .KekKeyRingId }}" + kek_key_version = {{ .KekKeyVersion }} + service_account = "{{ .KekServiceAccount }}" + } +{{ end }} + network = { + acl = ["{{ .AclString }}"] + access_scope = "{{ .AccessScope }}" + } + version = "{{ .Version }}" +} + +{{ if .Users }} +{{ $tfName := .TfName }} +{{ range $user := .Users }} +resource "stackitprivatepreview_sqlserverflexbeta_user" "{{ $user.Name }}" { + project_id = "{{ $user.ProjectId }}" + instance_id = stackitprivatepreview_sqlserverflexbeta_instance.{{ 
$tfName }}.instance_id + username = "{{ $user.Name }}" + roles = [{{ range $i, $v := $user.Roles }}{{if $i}},{{end}}"{{$v}}"{{end}}] +} +{{ end }} +{{ end }} + +{{ if .Databases }} +{{ $tfName := .TfName }} +{{ range $db := .Databases }} +resource "stackitprivatepreview_sqlserverflexbeta_database" "{{ $db.Name }}" { + depends_on = [stackitprivatepreview_sqlserverflexbeta_user.{{ $db.Owner }}] + project_id = "{{ $db.ProjectId }}" + instance_id = stackitprivatepreview_sqlserverflexbeta_instance.{{ $tfName }}.instance_id + name = "{{ $db.Name }}" + owner = "{{ $db.Owner }}" +{{ if $db.Collation }} + collation = "{{ $db.Collation }}" +{{ end }} +{{ if $db.Compatibility }} + compatibility = "{{ $db.Compatibility }}" +{{ end }} +} +{{ end }} +{{ end }} diff --git a/stackit/internal/services/sqlserverflexbeta/user/datasource.go b/stackit/internal/services/sqlserverflexbeta/user/datasource.go new file mode 100644 index 00000000..a1cd5f96 --- /dev/null +++ b/stackit/internal/services/sqlserverflexbeta/user/datasource.go @@ -0,0 +1,139 @@ +package sqlserverflexbeta + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/conversion" + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core" + sqlserverflexUtils "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexbeta/utils" + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/utils" + + "github.com/stackitcloud/stackit-sdk-go/services/sqlserverflex/v3beta1api" + sqlserverflexbetaGen 
"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexbeta/user/datasources_gen" +) + +var _ datasource.DataSource = (*userDataSource)(nil) + +func NewUserDataSource() datasource.DataSource { + return &userDataSource{} +} + +type dataSourceModel struct { + DefaultDatabase types.String `tfsdk:"default_database"` + Host types.String `tfsdk:"host"` + Id types.String `tfsdk:"id"` + InstanceId types.String `tfsdk:"instance_id"` + Port types.Int64 `tfsdk:"port"` + ProjectId types.String `tfsdk:"project_id"` + Region types.String `tfsdk:"region"` + Roles types.List `tfsdk:"roles"` + Status types.String `tfsdk:"status"` + UserId types.Int64 `tfsdk:"user_id"` + Username types.String `tfsdk:"username"` +} + +type userDataSource struct { + client *v3beta1api.APIClient + providerData core.ProviderData +} + +func (d *userDataSource) Metadata( + _ context.Context, + req datasource.MetadataRequest, + resp *datasource.MetadataResponse, +) { + resp.TypeName = req.ProviderTypeName + "_sqlserverflexbeta_user" +} + +func (d *userDataSource) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = sqlserverflexbetaGen.UserDataSourceSchema(ctx) +} + +// Configure adds the provider configured client to the data source. 
+func (d *userDataSource) Configure(
+	ctx context.Context,
+	req datasource.ConfigureRequest,
+	resp *datasource.ConfigureResponse,
+) {
+	var ok bool
+	d.providerData, ok = conversion.ParseProviderData(ctx, req.ProviderData, &resp.Diagnostics)
+	if !ok {
+		return
+	}
+
+	apiClient := sqlserverflexUtils.ConfigureClient(ctx, &d.providerData, &resp.Diagnostics)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+	d.client = apiClient
+	tflog.Info(ctx, "SQLServer Flex beta user client configured")
+}
+
+func (d *userDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
+	var model dataSourceModel
+	diags := req.Config.Get(ctx, &model)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	ctx = core.InitProviderContext(ctx)
+
+	projectId := model.ProjectId.ValueString()
+	instanceId := model.InstanceId.ValueString()
+	userId := model.UserId.ValueInt64()
+	region := d.providerData.GetRegionWithOverride(model.Region)
+	ctx = tflog.SetField(ctx, "project_id", projectId)
+	ctx = tflog.SetField(ctx, "instance_id", instanceId)
+	ctx = tflog.SetField(ctx, "user_id", userId)
+	ctx = tflog.SetField(ctx, "region", region)
+
+	recordSetResp, err := d.client.DefaultAPI.GetUserRequest(ctx, projectId, region, instanceId, userId).Execute()
+	if err != nil {
+		utils.LogError(
+			ctx,
+			&resp.Diagnostics,
+			err,
+			"Reading user",
+			fmt.Sprintf(
+				"User with ID %d or instance with ID %q does not exist in project %q.",
+				userId,
+				instanceId,
+				projectId,
+			),
+			map[int]string{
+				http.StatusForbidden: fmt.Sprintf("Project with ID %q not found or forbidden access", projectId),
+			},
+		)
+		resp.State.RemoveResource(ctx)
+		return
+	}
+
+	ctx = core.LogResponse(ctx)
+
+	// Map response body to schema and populate Computed attribute values
+	err = mapDataSourceFields(recordSetResp, &model, region)
+	if err != nil {
+		core.LogAndAddError(
+			ctx,
+			&resp.Diagnostics,
+			"Error reading user",
+
fmt.Sprintf("Processing API payload: %v", err),
+		)
+		return
+	}
+
+	// Set refreshed state
+	diags = resp.State.Set(ctx, model)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+	tflog.Info(ctx, "SQLServer Flex beta user read")
+}
diff --git a/stackit/internal/services/sqlserverflexbeta/user/mapper.go b/stackit/internal/services/sqlserverflexbeta/user/mapper.go
new file mode 100644
index 00000000..cf4c3284
--- /dev/null
+++ b/stackit/internal/services/sqlserverflexbeta/user/mapper.go
@@ -0,0 +1,192 @@
+package sqlserverflexbeta
+
+import (
+	"fmt"
+	"slices"
+	"strconv"
+
+	"github.com/hashicorp/terraform-plugin-framework/attr"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+
+	"github.com/stackitcloud/stackit-sdk-go/services/sqlserverflex/v3beta1api"
+	"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/conversion"
+	"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core"
+	"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/utils"
+)
+
+// mapDataSourceFields maps the API response to a dataSourceModel.
+func mapDataSourceFields(userResp *v3beta1api.GetUserResponse, model *dataSourceModel, region string) error { + if userResp == nil { + return fmt.Errorf("response is nil") + } + if model == nil { + return fmt.Errorf("model input is nil") + } + user := userResp + + // Handle user ID + var userId int64 + if model.UserId.ValueInt64() != 0 { + userId = model.UserId.ValueInt64() + } else if user.Id != 0 { + userId = user.Id + } else { + return fmt.Errorf("user id not present") + } + + // Set main attributes + model.Id = utils.BuildInternalTerraformId( + model.ProjectId.ValueString(), region, model.InstanceId.ValueString(), strconv.FormatInt(userId, 10), + ) + model.UserId = types.Int64Value(userId) + model.Username = types.StringValue(user.Username) + + // Map roles + if user.Roles == nil { + model.Roles = types.List(types.SetNull(types.StringType)) + } else { + var roles []attr.Value + resRoles := user.Roles + slices.Sort(resRoles) + for _, role := range resRoles { + roles = append(roles, types.StringValue(string(role))) + } + rolesSet, diags := types.SetValue(types.StringType, roles) + if diags.HasError() { + return fmt.Errorf("failed to map roles: %w", core.DiagsToError(diags)) + } + model.Roles = types.List(rolesSet) + } + + // Set remaining attributes + model.Host = types.StringValue(user.Host) + model.Port = types.Int64Value(int64(user.Port)) + model.Region = types.StringValue(region) + model.Status = types.StringValue(user.Status) + model.DefaultDatabase = types.StringValue(user.DefaultDatabase) + + return nil +} + +// mapFields maps the API response to a resourceModel. 
+func mapFields(userResp *v3beta1api.GetUserResponse, model *resourceModel, region string) error { + if userResp == nil { + return fmt.Errorf("response is nil") + } + if model == nil { + return fmt.Errorf("model input is nil") + } + user := userResp + + // Handle user ID + var userId int64 + if model.UserId.ValueInt64() != 0 { + userId = model.UserId.ValueInt64() + } else if user.Id != 0 { + userId = user.Id + } else { + return fmt.Errorf("user id not present") + } + + // Set main attributes + model.Id = types.Int64Value(userId) + model.UserId = types.Int64Value(userId) + model.Username = types.StringValue(user.Username) + + // Map roles + if userResp.Roles != nil { + resRoles := userResp.Roles + slices.Sort(resRoles) + + var roles []attr.Value + for _, role := range resRoles { + roles = append(roles, types.StringValue(string(role))) + } + + rolesSet, diags := types.ListValue(types.StringType, roles) + if diags.HasError() { + return fmt.Errorf("failed to map roles: %w", core.DiagsToError(diags)) + } + model.Roles = rolesSet + } + + // Ensure roles is not null + if model.Roles.IsNull() || model.Roles.IsUnknown() { + model.Roles = types.List(types.SetNull(types.StringType)) + } + + // Set connection details + model.Host = types.StringValue(user.Host) + model.Port = types.Int64Value(int64(user.Port)) + model.Region = types.StringValue(region) + return nil +} + +// mapFieldsCreate maps the API response from creating a user to a resourceModel. 
+func mapFieldsCreate(userResp *v3beta1api.CreateUserResponse, model *resourceModel, region string) error { + if userResp == nil { + return fmt.Errorf("response is nil") + } + if model == nil { + return fmt.Errorf("model input is nil") + } + user := userResp + + userId := user.Id + model.Id = types.Int64Value(userId) + model.UserId = types.Int64Value(userId) + model.Username = types.StringValue(user.Username) + + model.Password = types.StringValue(user.Password) + + if user.Roles != nil { + resRoles := user.Roles + slices.Sort(resRoles) + + var roles []attr.Value + for _, role := range resRoles { + roles = append(roles, types.StringValue(string(role))) + } + rolesList, diags := types.ListValue(types.StringType, roles) + if diags.HasError() { + return fmt.Errorf("failed to map roles: %w", core.DiagsToError(diags)) + } + model.Roles = rolesList + } + + if model.Roles.IsNull() || model.Roles.IsUnknown() { + model.Roles = types.List(types.SetNull(types.StringType)) + } + + model.Password = types.StringValue(user.Password) + model.Uri = types.StringValue(user.Uri) + + model.Host = types.StringValue(user.Host) + model.Port = types.Int64Value(int64(user.Port)) + model.Region = types.StringValue(region) + model.Status = types.StringValue(user.Status) + model.DefaultDatabase = types.StringValue(user.DefaultDatabase) + + return nil +} + +// toCreatePayload converts a resourceModel to an API CreateUserRequestPayload. 
+func toCreatePayload(
+	model *resourceModel,
+	roles []string,
+) (*v3beta1api.CreateUserRequestPayload, error) {
+	if model == nil {
+		return nil, fmt.Errorf("nil model")
+	}
+
+	pl := v3beta1api.CreateUserRequestPayload{
+		Username: model.Username.ValueString(),
+		Roles:    roles,
+	}
+	slices.Sort(roles)
+	if !model.DefaultDatabase.IsNull() && !model.DefaultDatabase.IsUnknown() {
+		pl.DefaultDatabase = conversion.StringValueToPointer(model.DefaultDatabase)
+	}
+
+	return &pl, nil
+}
diff --git a/stackit/internal/services/sqlserverflexbeta/user/mapper_test.go b/stackit/internal/services/sqlserverflexbeta/user/mapper_test.go
new file mode 100644
index 00000000..c0e09bda
--- /dev/null
+++ b/stackit/internal/services/sqlserverflexbeta/user/mapper_test.go
@@ -0,0 +1,527 @@
+package sqlserverflexbeta
+
+import (
+	"testing"
+
+	"github.com/google/go-cmp/cmp"
+	"github.com/hashicorp/terraform-plugin-framework/attr"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+	"github.com/stackitcloud/stackit-sdk-go/core/utils"
+
+	"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexbeta"
+)
+
+func TestMapDataSourceFields(t *testing.T) {
+	const testRegion = "region"
+	tests := []struct {
+		description string
+		input       *sqlserverflexbeta.GetUserResponse
+		region      string
+		expected    dataSourceModel
+		isValid     bool
+	}{
+		{
+			"default_values",
+			&sqlserverflexbeta.GetUserResponse{},
+			testRegion,
+			dataSourceModel{
+				Id:              types.StringValue("pid,region,iid,1"),
+				UserId:          types.Int64Value(1),
+				InstanceId:      types.StringValue("iid"),
+				ProjectId:       types.StringValue("pid"),
+				Username:        types.StringNull(),
+				Roles:           types.List(types.SetNull(types.StringType)),
+				Host:            types.StringNull(),
+				Port:            types.Int64Null(),
+				Region:          types.StringValue(testRegion),
+				Status:          types.StringNull(),
+				DefaultDatabase: types.StringNull(),
+			},
+			true,
+		},
+		{
+			"simple_values",
+			&sqlserverflexbeta.GetUserResponse{
+				Roles: &[]string{
+
"role_1", + "role_2", + "", + }, + Username: utils.Ptr("username"), + Host: utils.Ptr("host"), + Port: utils.Ptr(int64(1234)), + Status: utils.Ptr("active"), + DefaultDatabase: utils.Ptr("default_db"), + }, + testRegion, + dataSourceModel{ + Id: types.StringValue("pid,region,iid,1"), + UserId: types.Int64Value(1), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Username: types.StringValue("username"), + Roles: types.List( + types.SetValueMust( + types.StringType, []attr.Value{ + types.StringValue(""), + types.StringValue("role_1"), + types.StringValue("role_2"), + }, + ), + ), + Host: types.StringValue("host"), + Port: types.Int64Value(1234), + Region: types.StringValue(testRegion), + Status: types.StringValue("active"), + DefaultDatabase: types.StringValue("default_db"), + }, + true, + }, + { + "null_fields_and_int_conversions", + &sqlserverflexbeta.GetUserResponse{ + Id: utils.Ptr(int64(1)), + Roles: &[]string{}, + Username: nil, + Host: nil, + Port: utils.Ptr(int64(2123456789)), + }, + testRegion, + dataSourceModel{ + Id: types.StringValue("pid,region,iid,1"), + UserId: types.Int64Value(1), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Username: types.StringNull(), + Roles: types.List(types.SetValueMust(types.StringType, []attr.Value{})), + Host: types.StringNull(), + Port: types.Int64Value(2123456789), + Region: types.StringValue(testRegion), + }, + true, + }, + { + "nil_response", + nil, + testRegion, + dataSourceModel{}, + false, + }, + { + "nil_response_2", + &sqlserverflexbeta.GetUserResponse{}, + testRegion, + dataSourceModel{}, + false, + }, + { + "no_resource_id", + &sqlserverflexbeta.GetUserResponse{}, + testRegion, + dataSourceModel{}, + false, + }, + } + for _, tt := range tests { + t.Run( + tt.description, func(t *testing.T) { + state := &dataSourceModel{ + ProjectId: tt.expected.ProjectId, + InstanceId: tt.expected.InstanceId, + UserId: tt.expected.UserId, + } + err := 
mapDataSourceFields(tt.input, state, tt.region) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(&tt.expected, state) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }, + ) + } +} + +func TestMapFieldsCreate(t *testing.T) { + const testRegion = "region" + tests := []struct { + description string + input *sqlserverflexbeta.CreateUserResponse + region string + expected resourceModel + isValid bool + }{ + { + "default_values", + &sqlserverflexbeta.CreateUserResponse{ + Id: utils.Ptr(int64(1)), + Password: utils.Ptr(""), + }, + testRegion, + resourceModel{ + Id: types.Int64Value(1), + UserId: types.Int64Value(1), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Username: types.StringNull(), + Roles: types.List(types.SetNull(types.StringType)), + Password: types.StringValue(""), + Host: types.StringNull(), + Port: types.Int64Null(), + Region: types.StringValue(testRegion), + }, + true, + }, + { + "simple_values", + &sqlserverflexbeta.CreateUserResponse{ + Id: utils.Ptr(int64(2)), + Roles: &[]string{ + "role_1", + "role_2", + "", + }, + Username: utils.Ptr("username"), + Password: utils.Ptr("password"), + Host: utils.Ptr("host"), + Port: utils.Ptr(int64(1234)), + Status: utils.Ptr("status"), + DefaultDatabase: utils.Ptr("default_db"), + }, + testRegion, + resourceModel{ + Id: types.Int64Value(2), + UserId: types.Int64Value(2), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Username: types.StringValue("username"), + Roles: types.List( + types.SetValueMust( + types.StringType, []attr.Value{ + types.StringValue(""), + types.StringValue("role_1"), + types.StringValue("role_2"), + }, + ), + ), + Password: types.StringValue("password"), + Host: types.StringValue("host"), + Port: types.Int64Value(1234), + Region: types.StringValue(testRegion), + 
Status: types.StringValue("status"), + DefaultDatabase: types.StringValue("default_db"), + }, + true, + }, + { + "null_fields_and_int_conversions", + &sqlserverflexbeta.CreateUserResponse{ + Id: utils.Ptr(int64(3)), + Roles: &[]string{}, + Username: nil, + Password: utils.Ptr(""), + Host: nil, + Port: utils.Ptr(int64(2123456789)), + }, + testRegion, + resourceModel{ + Id: types.Int64Value(3), + UserId: types.Int64Value(3), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Username: types.StringNull(), + Roles: types.List(types.SetValueMust(types.StringType, []attr.Value{})), + Password: types.StringValue(""), + Host: types.StringNull(), + Port: types.Int64Value(2123456789), + Region: types.StringValue(testRegion), + DefaultDatabase: types.StringNull(), + Status: types.StringNull(), + }, + true, + }, + { + "nil_response", + nil, + testRegion, + resourceModel{}, + false, + }, + { + "nil_response_2", + &sqlserverflexbeta.CreateUserResponse{}, + testRegion, + resourceModel{}, + false, + }, + { + "no_resource_id", + &sqlserverflexbeta.CreateUserResponse{}, + testRegion, + resourceModel{}, + false, + }, + { + "no_password", + &sqlserverflexbeta.CreateUserResponse{ + Id: utils.Ptr(int64(1)), + }, + testRegion, + resourceModel{}, + false, + }, + } + for _, tt := range tests { + t.Run( + tt.description, func(t *testing.T) { + state := &resourceModel{ + ProjectId: tt.expected.ProjectId, + InstanceId: tt.expected.InstanceId, + } + err := mapFieldsCreate(tt.input, state, tt.region) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(&tt.expected, state) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }, + ) + } +} + +func TestMapFields(t *testing.T) { + const testRegion = "region" + tests := []struct { + description string + input *sqlserverflexbeta.GetUserResponse + region string + 
expected resourceModel + isValid bool + }{ + { + "default_values", + &sqlserverflexbeta.GetUserResponse{}, + testRegion, + resourceModel{ + Id: types.Int64Value(1), + UserId: types.Int64Value(1), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Username: types.StringNull(), + Roles: types.List(types.SetNull(types.StringType)), + Host: types.StringNull(), + Port: types.Int64Null(), + Region: types.StringValue(testRegion), + }, + true, + }, + { + "simple_values", + &sqlserverflexbeta.GetUserResponse{ + Roles: &[]string{ + "role_2", + "role_1", + "", + }, + Username: utils.Ptr("username"), + Host: utils.Ptr("host"), + Port: utils.Ptr(int64(1234)), + }, + testRegion, + resourceModel{ + Id: types.Int64Value(2), + UserId: types.Int64Value(2), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Username: types.StringValue("username"), + Roles: types.List( + types.SetValueMust( + types.StringType, []attr.Value{ + types.StringValue(""), + types.StringValue("role_1"), + types.StringValue("role_2"), + }, + ), + ), + Host: types.StringValue("host"), + Port: types.Int64Value(1234), + Region: types.StringValue(testRegion), + }, + true, + }, + { + "null_fields_and_int_conversions", + &sqlserverflexbeta.GetUserResponse{ + Id: utils.Ptr(int64(1)), + Roles: &[]string{}, + Username: nil, + Host: nil, + Port: utils.Ptr(int64(2123456789)), + }, + testRegion, + resourceModel{ + Id: types.Int64Value(1), + UserId: types.Int64Value(1), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Username: types.StringNull(), + Roles: types.List(types.SetValueMust(types.StringType, []attr.Value{})), + Host: types.StringNull(), + Port: types.Int64Value(2123456789), + Region: types.StringValue(testRegion), + }, + true, + }, + { + "nil_response", + nil, + testRegion, + resourceModel{}, + false, + }, + { + "nil_response_2", + &sqlserverflexbeta.GetUserResponse{}, + testRegion, + resourceModel{}, + false, + }, + { + 
"no_resource_id", + &sqlserverflexbeta.GetUserResponse{}, + testRegion, + resourceModel{}, + false, + }, + } + for _, tt := range tests { + t.Run( + tt.description, func(t *testing.T) { + state := &resourceModel{ + ProjectId: tt.expected.ProjectId, + InstanceId: tt.expected.InstanceId, + UserId: tt.expected.UserId, + } + err := mapFields(tt.input, state, tt.region) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(&tt.expected, state) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }, + ) + } +} + +func TestToCreatePayload(t *testing.T) { + tests := []struct { + description string + input *resourceModel + inputRoles []string + expected *sqlserverflexbeta.CreateUserRequestPayload + isValid bool + }{ + { + "default_values", + &resourceModel{}, + []string{}, + &sqlserverflexbeta.CreateUserRequestPayload{ + Roles: &[]string{}, + Username: nil, + }, + true, + }, + { + "default_values", + &resourceModel{ + Username: types.StringValue("username"), + }, + []string{ + "role_1", + "role_2", + }, + &sqlserverflexbeta.CreateUserRequestPayload{ + Roles: &[]string{ + "role_1", + "role_2", + }, + Username: utils.Ptr("username"), + }, + true, + }, + { + "null_fields_and_int_conversions", + &resourceModel{ + Username: types.StringNull(), + }, + []string{ + "", + }, + &sqlserverflexbeta.CreateUserRequestPayload{ + Roles: &[]string{ + "", + }, + Username: nil, + }, + true, + }, + { + "nil_model", + nil, + []string{}, + nil, + false, + }, + { + "nil_roles", + &resourceModel{ + Username: types.StringValue("username"), + }, + []string{}, + &sqlserverflexbeta.CreateUserRequestPayload{ + Roles: &[]string{}, + Username: utils.Ptr("username"), + }, + true, + }, + } + for _, tt := range tests { + t.Run( + tt.description, func(t *testing.T) { + output, err := toCreatePayload(tt.input, tt.inputRoles) + if !tt.isValid && err == nil { 
+				t.Fatalf("Should have failed")
+			}
+			if tt.isValid && err != nil {
+				t.Fatalf("Should not have failed: %v", err)
+			}
+			if tt.isValid {
+				diff := cmp.Diff(output, tt.expected)
+				if diff != "" {
+					t.Fatalf("Data does not match: %s", diff)
+				}
+			}
+			},
+		)
+	}
+}
diff --git a/stackit/internal/services/sqlserverflexbeta/user/planModifiers.yaml b/stackit/internal/services/sqlserverflexbeta/user/planModifiers.yaml
new file mode 100644
index 00000000..43b029e8
--- /dev/null
+++ b/stackit/internal/services/sqlserverflexbeta/user/planModifiers.yaml
@@ -0,0 +1,53 @@
+fields:
+  - name: 'id'
+    modifiers:
+      - 'UseStateForUnknown'
+      - 'RequiresReplace'
+
+  - name: 'instance_id'
+    validators:
+      - validate.NoSeparator
+      - validate.UUID
+    modifiers:
+      - 'UseStateForUnknown'
+      - 'RequiresReplace'
+
+  - name: 'project_id'
+    validators:
+      - validate.NoSeparator
+      - validate.UUID
+    modifiers:
+      - 'UseStateForUnknown'
+      - 'RequiresReplace'
+
+  - name: 'region'
+    modifiers:
+      - 'UseStateForUnknown'
+      - 'RequiresReplace'
+
+  - name: 'user_id'
+    modifiers:
+      - 'UseStateForUnknown'
+      - 'RequiresReplace'
+
+  - name: 'username'
+    modifiers:
+      - 'UseStateForUnknown'
+      - 'RequiresReplace'
+
+  - name: 'roles'
+    modifiers:
+      - 'UseStateForUnknown'
+      - 'RequiresReplace'
+
+  - name: 'password'
+    modifiers:
+      - 'UseStateForUnknown'
+
+  - name: 'uri'
+    modifiers:
+      - 'UseStateForUnknown'
+
+  - name: 'status'
+    modifiers:
+      - 'UseStateForUnknown'
diff --git a/stackit/internal/services/sqlserverflexbeta/user/resource.go b/stackit/internal/services/sqlserverflexbeta/user/resource.go
new file mode 100644
index 00000000..0c04f31b
--- /dev/null
+++ b/stackit/internal/services/sqlserverflexbeta/user/resource.go
@@ -0,0 +1,578 @@
+package sqlserverflexbeta
+
+import (
+	"context"
+	_ "embed"
+	"errors"
+	"fmt"
+	"net/http"
+	"slices"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/terraform-plugin-framework/path"
+	"github.com/hashicorp/terraform-plugin-framework/resource"
+
"github.com/hashicorp/terraform-plugin-framework/resource/identityschema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/stackit-sdk-go/core/oapierror" + + sqlserverflexbeta "github.com/stackitcloud/stackit-sdk-go/services/sqlserverflex/v3beta1api" + + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/conversion" + sqlserverflexbetaUtils "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexbeta/utils" + sqlserverflexbetaWait "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/wait/sqlserverflexbeta" + + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core" + "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/utils" + + sqlserverflexbetaResGen "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexbeta/user/resources_gen" +) + +var ( + _ resource.Resource = &userResource{} + _ resource.ResourceWithConfigure = &userResource{} + _ resource.ResourceWithImportState = &userResource{} + _ resource.ResourceWithModifyPlan = &userResource{} + _ resource.ResourceWithIdentity = &userResource{} + _ resource.ResourceWithValidateConfig = &userResource{} +) + +func NewUserResource() resource.Resource { + return &userResource{} +} + +// resourceModel describes the resource data model. +type resourceModel = sqlserverflexbetaResGen.UserModel + +// UserResourceIdentityModel describes the resource's identity attributes. 
+type UserResourceIdentityModel struct { + ProjectID types.String `tfsdk:"project_id"` + Region types.String `tfsdk:"region"` + InstanceID types.String `tfsdk:"instance_id"` + UserID types.Int64 `tfsdk:"user_id"` +} + +type userResource struct { + client *sqlserverflexbeta.APIClient + providerData core.ProviderData +} + +func (r *userResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_sqlserverflexbeta_user" +} + +// Configure adds the provider configured client to the resource. +func (r *userResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + var ok bool + r.providerData, ok = conversion.ParseProviderData(ctx, req.ProviderData, &resp.Diagnostics) + if !ok { + return + } + + apiClient := sqlserverflexbetaUtils.ConfigureClient(ctx, &r.providerData, &resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + r.client = apiClient + tflog.Info(ctx, "SQLServer Beta Flex user client configured") +} + +// ModifyPlan implements resource.ResourceWithModifyPlan. +// Use the modifier to set the effective region in the current plan. +func (r *userResource) ModifyPlan( + ctx context.Context, + req resource.ModifyPlanRequest, + resp *resource.ModifyPlanResponse, +) { // nolint:gocritic // function signature required by Terraform + var configModel resourceModel + // skip initial empty configuration to avoid follow-up errors + if req.Config.Raw.IsNull() { + return + } + resp.Diagnostics.Append(req.Config.Get(ctx, &configModel)...) + if resp.Diagnostics.HasError() { + return + } + + var planModel resourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &planModel)...) 
+ if resp.Diagnostics.HasError() { + return + } + + utils.AdaptRegion(ctx, configModel.Region, &planModel.Region, r.providerData.GetRegion(), resp) + if resp.Diagnostics.HasError() { + return + } + + //// TODO: verify if this is needed - START + // var planRoles []string + // diags := planModel.Roles.ElementsAs(ctx, &planRoles, false) + // resp.Diagnostics.Append(diags...) + // if diags.HasError() { + // return + //} + // slices.Sort(planRoles) + // var roles []attr.Value + // for _, role := range planRoles { + // roles = append(roles, types.StringValue(string(role))) + //} + // rolesSet, diags := types.ListValue(types.StringType, roles) + // resp.Diagnostics.Append(diags...) + // if diags.HasError() { + // return + //} + // planModel.Roles = rolesSet + //// TODO: verify if this is needed - END + + resp.Diagnostics.Append(resp.Plan.Set(ctx, planModel)...) + if resp.Diagnostics.HasError() { + return + } +} + +//go:embed planModifiers.yaml +var modifiersFileByte []byte + +// Schema defines the schema for the resource. +func (r *userResource) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + s := sqlserverflexbetaResGen.UserResourceSchema(ctx) + + fields, err := utils.ReadModifiersConfig(modifiersFileByte) + if err != nil { + resp.Diagnostics.AddError("error during read modifiers config file", err.Error()) + return + } + + err = utils.AddPlanModifiersToResourceSchema(fields, &s) + if err != nil { + resp.Diagnostics.AddError("error adding plan modifiers", err.Error()) + return + } + resp.Schema = s +} + +// IdentitySchema defines the schema for the resource's identity attributes. 
+func (r *userResource) IdentitySchema( + _ context.Context, + _ resource.IdentitySchemaRequest, + response *resource.IdentitySchemaResponse, +) { + response.IdentitySchema = identityschema.Schema{ + Attributes: map[string]identityschema.Attribute{ + "project_id": identityschema.StringAttribute{ + RequiredForImport: true, // must be set during import by the practitioner + }, + "region": identityschema.StringAttribute{ + RequiredForImport: true, // can be defaulted by the provider configuration + }, + "instance_id": identityschema.StringAttribute{ + RequiredForImport: true, // can be defaulted by the provider configuration + }, + "user_id": identityschema.Int64Attribute{ + RequiredForImport: true, // can be defaulted by the provider configuration + }, + }, + } +} + +func (r *userResource) ValidateConfig( + ctx context.Context, + req resource.ValidateConfigRequest, + resp *resource.ValidateConfigResponse, +) { + var data resourceModel + + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + var roles []string + diags := data.Roles.ElementsAs(ctx, &roles, false) + resp.Diagnostics.Append(diags...) + if diags.HasError() { + return + } + + var resRoles []string + for _, role := range roles { + if slices.Contains(resRoles, role) { + resp.Diagnostics.AddAttributeError( + path.Root("roles"), + "Attribute Configuration Error", + "defined roles MUST NOT contain duplicates", + ) + return + } + resRoles = append(resRoles, role) + } +} + +// Create creates the resource and sets the initial Terraform state. +func (r *userResource) Create( + ctx context.Context, + req resource.CreateRequest, + resp *resource.CreateResponse, +) { // nolint:gocritic // function signature required by Terraform + var model resourceModel + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + ctx = core.InitProviderContext(ctx) + + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + region := model.Region.ValueString() + + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + ctx = tflog.SetField(ctx, "region", region) + + var roles []string + if !model.Roles.IsNull() && !model.Roles.IsUnknown() { + diags = model.Roles.ElementsAs(ctx, &roles, false) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + slices.Sort(roles) + } + + // Generate API request body from model + payload, err := toCreatePayload(&model, roles) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating user", fmt.Sprintf("Creating API payload: %v", err)) + return + } + // Create new user + userResp, err := r.client.DefaultAPI.CreateUserRequest( + ctx, + projectId, + region, + instanceId, + ).CreateUserRequestPayload(*payload).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating user", fmt.Sprintf("Calling API: %v", err)) + return + } + + ctx = core.LogResponse(ctx) + + if userResp == nil || userResp.Id == 0 { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + "Error creating user", + "API didn't return user Id. A user might have been created", + ) + return + } + + userId := userResp.Id + ctx = tflog.SetField(ctx, "user_id", userId) + + // Set data returned by API in identity + identity := UserResourceIdentityModel{ + ProjectID: types.StringValue(projectId), + Region: types.StringValue(region), + InstanceID: types.StringValue(instanceId), + UserID: types.Int64Value(userId), + } + resp.Diagnostics.Append(resp.Identity.Set(ctx, identity)...) 
+ if resp.Diagnostics.HasError() { + return + } + + err = mapFieldsCreate(userResp, &model, region) + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + "Error creating user", + fmt.Sprintf("Processing API payload: %v", err), + ) + return + } + + waitResp, err := sqlserverflexbetaWait.CreateUserWaitHandler( + ctx, + r.client.DefaultAPI, + projectId, + instanceId, + region, + userId, + ).SetSleepBeforeWait( + 90 * time.Second, + ).SetTimeout( + 90 * time.Minute, + ).WaitWithContext(ctx) + + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + "create user", + fmt.Sprintf("Instance creation waiting: %v", err), + ) + return + } + + if waitResp.Id == 0 { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + "create user", + "Instance creation waiting: returned id is nil", + ) + return + } + + // Map response body to schema + err = mapFields(waitResp, &model, region) + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + "Error creating user", + fmt.Sprintf("Processing API payload: %v", err), + ) + return + } + // Set state to fully populated data + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + tflog.Info(ctx, "SQLServer Flex user created") +} + +// Read refreshes the Terraform state with the latest data. +func (r *userResource) Read( + ctx context.Context, + req resource.ReadRequest, + resp *resource.ReadResponse, +) { // nolint:gocritic // function signature required by Terraform + var model resourceModel + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	ctx = core.InitProviderContext(ctx)
+
+	projectId := model.ProjectId.ValueString()
+	instanceId := model.InstanceId.ValueString()
+	userId := model.UserId.ValueInt64()
+	region := r.providerData.GetRegionWithOverride(model.Region)
+	ctx = tflog.SetField(ctx, "project_id", projectId)
+	ctx = tflog.SetField(ctx, "instance_id", instanceId)
+	ctx = tflog.SetField(ctx, "user_id", userId)
+	ctx = tflog.SetField(ctx, "region", region)
+
+	// NOTE(review): "recordSetResp" holds the user lookup response; the name
+	// looks copied from a DNS record-set resource — consider renaming to userResp.
+	recordSetResp, err := r.client.DefaultAPI.GetUserRequest(ctx, projectId, region, instanceId, userId).Execute()
+	if err != nil {
+		var oapiErr *oapierror.GenericOpenAPIError
+		ok := errors.As(
+			err,
+			&oapiErr,
+		)
+		// NOTE(review): errors.As IS used above, so this suppression looks stale
+		// and is placed after the call it presumably targets — TODO confirm/remove.
+		//nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped
+		if ok && oapiErr.StatusCode == http.StatusNotFound {
+			// User was deleted outside of Terraform: drop it from state so a
+			// subsequent plan recreates it instead of erroring.
+			resp.State.RemoveResource(ctx)
+			return
+		}
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading user", fmt.Sprintf("Calling API: %v", err))
+		return
+	}
+
+	ctx = core.LogResponse(ctx)
+
+	// Map response body to schema
+	err = mapFields(recordSetResp, &model, region)
+	if err != nil {
+		core.LogAndAddError(
+			ctx,
+			&resp.Diagnostics,
+			"Error reading user",
+			fmt.Sprintf("Processing API payload: %v", err),
+		)
+		return
+	}
+
+	// Set data returned by API in identity
+	identity := UserResourceIdentityModel{
+		ProjectID:  types.StringValue(projectId),
+		Region:     types.StringValue(region),
+		InstanceID: types.StringValue(instanceId),
+		UserID:     types.Int64Value(userId),
+	}
+	resp.Diagnostics.Append(resp.Identity.Set(ctx, identity)...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	// Set refreshed state
+	diags = resp.State.Set(ctx, model)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+	tflog.Info(ctx, "SQLServer Flex user read")
+}
+
+// Update updates the resource and sets the updated Terraform state on success.
+func (r *userResource) Update( + ctx context.Context, + _ resource.UpdateRequest, + resp *resource.UpdateResponse, +) { // nolint:gocritic // function signature required by Terraform + // Update shouldn't be called + core.LogAndAddError( + ctx, + &resp.Diagnostics, + "Error updating user", + "an SQL server user can not be updated, only created", + ) +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *userResource) Delete( + ctx context.Context, + req resource.DeleteRequest, + resp *resource.DeleteResponse, +) { // nolint:gocritic // function signature required by Terraform + // Retrieve values from plan + var model resourceModel + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + ctx = core.InitProviderContext(ctx) + + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + userId := model.UserId.ValueInt64() + region := model.Region.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + ctx = tflog.SetField(ctx, "user_id", userId) + ctx = tflog.SetField(ctx, "region", region) + + // Delete existing record set + // err := r.client.DeleteUserRequest(ctx, projectId, region, instanceId, userId).Execute() + err := r.client.DefaultAPI.DeleteUserRequest(ctx, projectId, region, instanceId, userId).Execute() + if err != nil { + var oapiErr *oapierror.GenericOpenAPIError + ok := errors.As(err, &oapiErr) + if !ok { + // TODO err handling + return + } + + switch oapiErr.StatusCode { + case http.StatusNotFound: + resp.State.RemoveResource(ctx) + return + // case http.StatusInternalServerError: + // tflog.Warn(ctx, "[delete user] Wait handler got error 500") + // return false, nil, nil + default: + // TODO err handling + return + } + } + // Delete existing record set + _, err = sqlserverflexbetaWait.DeleteUserWaitHandler(ctx, r.client.DefaultAPI, projectId, 
region, instanceId, userId). + WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "User Delete Error", fmt.Sprintf("Calling API: %v", err)) + return + } + + ctx = core.LogResponse(ctx) + + resp.State.RemoveResource(ctx) + + tflog.Info(ctx, "SQLServer Flex user deleted") +} + +// ImportState imports a resource into the Terraform state on success. +// The expected format of the resource import identifier is: project_id,zone_id,record_set_id +func (r *userResource) ImportState( + ctx context.Context, + req resource.ImportStateRequest, + resp *resource.ImportStateResponse, +) { + ctx = core.InitProviderContext(ctx) + + if req.ID != "" { + idParts := strings.Split(req.ID, core.Separator) + + if len(idParts) != 4 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" || idParts[3] == "" { + core.LogAndAddError( + ctx, &resp.Diagnostics, + "Error importing user", + fmt.Sprintf( + "Expected import identifier with format [project_id],[region],[instance_id],[user_id], got %q", + req.ID, + ), + ) + return + } + + userId, err := strconv.ParseInt(idParts[3], 10, 64) + if err != nil { + core.LogAndAddError( + ctx, + &resp.Diagnostics, + "Error importing user", + fmt.Sprintf("Invalid user_id format: %q. It must be a valid integer.", idParts[3]), + ) + return + } + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("region"), idParts[1])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), idParts[2])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("user_id"), userId)...) + + tflog.Info(ctx, "SQLServer Flex user state imported") + + return + } + + // If no ID is provided, attempt to read identity attributes from the import configuration + var identityData UserResourceIdentityModel + resp.Diagnostics.Append(req.Identity.Get(ctx, &identityData)...) 
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	projectId := identityData.ProjectID.ValueString()
+	region := identityData.Region.ValueString()
+	instanceId := identityData.InstanceID.ValueString()
+	userId := identityData.UserID.ValueInt64()
+
+	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), projectId)...)
+	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("region"), region)...)
+	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), instanceId)...)
+	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("user_id"), userId)...)
+
+	core.LogAndAddWarning(
+		ctx,
+		&resp.Diagnostics,
+		"SQLServer Flex user imported with empty password",
+		"The user password is not imported as it is only available upon creation of a new user. The password field will be empty.",
+	)
+	tflog.Info(ctx, "SQLServer Flex user state imported")
+}
diff --git a/stackit/internal/services/sqlserverflexbeta/utils/util.go b/stackit/internal/services/sqlserverflexbeta/utils/util.go
new file mode 100644
index 00000000..cdb3e4d8
--- /dev/null
+++ b/stackit/internal/services/sqlserverflexbeta/utils/util.go
@@ -0,0 +1,48 @@
+package utils
+
+import (
+	"context"
+	"fmt"
+
+	sqlserverflex "github.com/stackitcloud/stackit-sdk-go/services/sqlserverflex/v3beta1api"
+
+	"github.com/hashicorp/terraform-plugin-framework/diag"
+	"github.com/stackitcloud/stackit-sdk-go/core/config"
+
+	"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core"
+	"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/utils"
+)
+
+// ConfigureClient builds a SQLServer Flex API client from the provider
+// configuration: it uses the custom endpoint when one is configured,
+// otherwise the provider's region. On failure it appends an error to diags
+// and returns nil.
+func ConfigureClient(
+	ctx context.Context,
+	providerData *core.ProviderData,
+	diags *diag.Diagnostics,
+) *sqlserverflex.APIClient {
+	apiClientConfigOptions := []config.ConfigurationOption{
+		config.WithCustomAuth(providerData.RoundTripper),
+		utils.UserAgentConfigOption(providerData.Version),
+	}
+	if providerData.SQLServerFlexCustomEndpoint != "" {
+		apiClientConfigOptions = append(
+			apiClientConfigOptions,
+			config.WithEndpoint(providerData.SQLServerFlexCustomEndpoint),
+		)
+	} else {
+		apiClientConfigOptions = append(apiClientConfigOptions, config.WithRegion(providerData.GetRegion()))
+	}
+	apiClient, err := sqlserverflex.NewAPIClient(apiClientConfigOptions...)
+	if err != nil {
+		core.LogAndAddError(
+			ctx,
+			diags,
+			"Error configuring API client",
+			fmt.Sprintf(
+				"Configuring client: %v. This is an error related to the provider configuration, not to the resource configuration",
+				err,
+			),
+		)
+		return nil
+	}
+
+	return apiClient
+}
diff --git a/stackit/internal/services/sqlserverflexbeta/utils/util_test.go b/stackit/internal/services/sqlserverflexbeta/utils/util_test.go
new file mode 100644
index 00000000..92fb1ae9
--- /dev/null
+++ b/stackit/internal/services/sqlserverflexbeta/utils/util_test.go
@@ -0,0 +1,98 @@
+package utils
+
+import (
+	"context"
+	"os"
+	"reflect"
+	"testing"
+
+	"github.com/hashicorp/terraform-plugin-framework/diag"
+	sdkClients "github.com/stackitcloud/stackit-sdk-go/core/clients"
+	"github.com/stackitcloud/stackit-sdk-go/core/config"
+
+	// NOTE(review): this test imports the client from pkg_gen/sqlserverflexbeta
+	// while util.go imports stackit-sdk-go/services/sqlserverflex/v3beta1api —
+	// presumably the same generated package, but verify they match.
+	sqlserverflex "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexbeta"
+
+	"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core"
+	"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/utils"
+)
+
+const (
+	testVersion        = "1.2.3"
+	testCustomEndpoint = "https://sqlserverflex-custom-endpoint.api.stackit.cloud"
+)
+
+// TestConfigureClient checks that ConfigureClient honors the custom endpoint
+// when set and falls back to the default region otherwise, comparing against
+// a client built directly with the expected options.
+func TestConfigureClient(t *testing.T) {
+	// mock authentication by setting service account token env variable
+	os.Clearenv()
+	err := os.Setenv(sdkClients.ServiceAccountToken, "mock-val")
+	if err != nil {
+		t.Errorf("error setting env variable: %v", err)
+	}
+
+	type args struct {
+		providerData *core.ProviderData
+	}
+	tests := []struct {
+		name     string
+		args     args
+		wantErr  bool
+		expected *sqlserverflex.APIClient
+	}{
+		{
+			name: "default endpoint",
+			args: args{
+				providerData: &core.ProviderData{
+					Version: testVersion,
+				},
+			},
+			expected: func() *sqlserverflex.APIClient {
+				apiClient, err := sqlserverflex.NewAPIClient(
+					config.WithRegion("eu01"),
+					utils.UserAgentConfigOption(testVersion),
+				)
+				if err != nil {
+					t.Errorf("error configuring client: %v", err)
+				}
+				return apiClient
+			}(),
+			wantErr: false,
+		},
+		{
+			name: "custom endpoint",
+			args: args{
+				providerData: &core.ProviderData{
+					Version:                     testVersion,
+					SQLServerFlexCustomEndpoint: testCustomEndpoint,
+				},
+			},
+			expected: func() *sqlserverflex.APIClient {
+				apiClient, err := sqlserverflex.NewAPIClient(
+					utils.UserAgentConfigOption(testVersion),
+					config.WithEndpoint(testCustomEndpoint),
+				)
+				if err != nil {
+					t.Errorf("error configuring client: %v", err)
+				}
+				return apiClient
+			}(),
+			wantErr: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(
+			tt.name, func(t *testing.T) {
+				ctx := context.Background()
+				diags := diag.Diagnostics{}
+
+				actual := ConfigureClient(ctx, tt.args.providerData, &diags)
+				if diags.HasError() != tt.wantErr {
+					t.Errorf("ConfigureClient() error = %v, want %v", diags.HasError(), tt.wantErr)
+				}
+
+				if !reflect.DeepEqual(actual, tt.expected) {
+					t.Errorf("ConfigureClient() = %v, want %v", actual, tt.expected)
+				}
+			},
+		)
+	}
+}
diff --git a/stackit/provider.go b/stackit/provider.go
index e6e201a8..b825fc35 100644
--- a/stackit/provider.go
+++ b/stackit/provider.go
@@ -20,6 +20,7 @@ import (
 	"github.com/hashicorp/terraform-plugin-log/tflog"
 	sdkauth "github.com/stackitcloud/stackit-sdk-go/core/auth"
 	"github.com/stackitcloud/stackit-sdk-go/core/config"
+	sqlserverflexbeta "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexbeta/user"
"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core" "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/features" @@ -29,13 +30,9 @@ import ( postgresflexalphaFlavors "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/postgresflexalpha/flavors" postgresFlexAlphaInstance "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/postgresflexalpha/instance" postgresFlexAlphaUser "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/postgresflexalpha/user" - //sqlserverflexalphaDatabase "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/database" - //sqlserverFlexAlphaFlavor "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/flavor" - //sqlServerFlexAlphaInstance "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/instance" - //sqlserverFlexAlphaUser "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/user" - //sqlserverflexBetaDatabase "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexbeta/database" - //sqlserverFlexBetaFlavor "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexbeta/flavor" - //sqlserverflexBetaInstance "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexbeta/instance" + + 
sqlserverFlexBetaDatabase "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexbeta/database" + sqlserverflexBetaInstance "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexbeta/instance" //sqlserverFlexBetaUser "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexbeta/user" ) @@ -536,9 +533,9 @@ func (p *Provider) DataSources(_ context.Context) []func() datasource.DataSource //sqlserverFlexAlphaUser.NewUserDataSource, //sqlserverflexalphaDatabase.NewDatabaseDataSource, - //sqlserverflexBetaDatabase.NewDatabaseDataSource, - //sqlserverflexBetaInstance.NewInstanceDataSource, - //sqlserverFlexBetaUser.NewUserDataSource, + sqlserverFlexBetaDatabase.NewDatabaseDataSource, + sqlserverflexBetaInstance.NewInstanceDataSource, + sqlserverflexbeta.NewUserDataSource, //sqlserverFlexBetaFlavor.NewFlavorDataSource, } } @@ -554,9 +551,9 @@ func (p *Provider) Resources(_ context.Context) []func() resource.Resource { //sqlserverFlexAlphaUser.NewUserResource, //sqlserverflexalphaDatabase.NewDatabaseResource, - //sqlserverflexBetaInstance.NewInstanceResource, - //sqlserverFlexBetaUser.NewUserResource, - //sqlserverflexBetaDatabase.NewDatabaseResource, + sqlserverflexBetaInstance.NewInstanceResource, + sqlserverflexbeta.NewUserResource, + sqlserverFlexBetaDatabase.NewDatabaseResource, } return resources }