// Package postgresflexalpha implements the Terraform resource for a
// PostgresFlex (alpha) database.
package postgresflexalpha

import (
	"context"
	_ "embed" // required for the //go:embed planModifiers.yaml directive below
	"errors"
	"fmt"
	"math"
	"net/http"
	"strconv"
	"strings"

	"github.com/hashicorp/terraform-plugin-framework/path"
	"github.com/hashicorp/terraform-plugin-framework/resource"
	"github.com/hashicorp/terraform-plugin-framework/resource/identityschema"
	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
	"github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier"
	"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
	"github.com/hashicorp/terraform-plugin-framework/schema/validator"
	"github.com/hashicorp/terraform-plugin-framework/types"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	"github.com/stackitcloud/stackit-sdk-go/core/oapierror"

	"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/postgresflexalpha"
	"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/conversion"
	"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core"
	postgresflexalpha2 "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/postgresflexalpha/database/resources_gen"
	postgresflexUtils "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/postgresflexalpha/utils"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ resource.Resource                = &databaseResource{}
	_ resource.ResourceWithConfigure   = &databaseResource{}
	_ resource.ResourceWithImportState = &databaseResource{}
	_ resource.ResourceWithModifyPlan  = &databaseResource{}
	_ resource.ResourceWithIdentity    = &databaseResource{}

	// errDatabaseNotFound is the sentinel returned by getDatabaseById when the
	// API no longer knows the database; Read uses it to drop the resource
	// from state instead of failing.
	errDatabaseNotFound = errors.New("database not found")
)

// ResourceModel describes the resource data model.
type ResourceModel struct { postgresflexalpha2.DatabaseModel TerraformID types.String `tfsdk:"id"` DatabaseID types.Int64 `tfsdk:"database_id"` } // DatabaseResourceIdentityModel describes the resource's identity attributes. type DatabaseResourceIdentityModel struct { ProjectID types.String `tfsdk:"project_id"` Region types.String `tfsdk:"region"` InstanceID types.String `tfsdk:"instance_id"` DatabaseID types.Int64 `tfsdk:"database_id"` } // NewDatabaseResource is a helper function to simplify the provider implementation. func NewDatabaseResource() resource.Resource { return &databaseResource{} } // databaseResource is the resource implementation. type databaseResource struct { client *postgresflexalpha.APIClient providerData core.ProviderData } // ModifyPlan adjusts the plan to set the correct region. func (r *databaseResource) ModifyPlan( ctx context.Context, req resource.ModifyPlanRequest, resp *resource.ModifyPlanResponse, ) { // nolint:gocritic // function signature required by Terraform var configModel ResourceModel // skip initial empty configuration to avoid follow-up errors if req.Config.Raw.IsNull() { return } resp.Diagnostics.Append(req.Config.Get(ctx, &configModel)...) if resp.Diagnostics.HasError() { return } var planModel ResourceModel resp.Diagnostics.Append(req.Plan.Get(ctx, &planModel)...) if resp.Diagnostics.HasError() { return } //TODO utils.AdaptRegion(ctx, configModel.Region, &planModel.Region, r.providerData.GetRegion(), resp) if resp.Diagnostics.HasError() { return } resp.Diagnostics.Append(resp.Plan.Set(ctx, planModel)...) if resp.Diagnostics.HasError() { return } } // Metadata returns the resource type name. func (r *databaseResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { resp.TypeName = req.ProviderTypeName + "_postgresflexalpha_database" } // Configure adds the provider configured client to the resource. 
func (r *databaseResource) Configure( ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse, ) { var ok bool r.providerData, ok = conversion.ParseProviderData(ctx, req.ProviderData, &resp.Diagnostics) if !ok { return } apiClient := postgresflexUtils.ConfigureClient(ctx, &r.providerData, &resp.Diagnostics) if resp.Diagnostics.HasError() { return } r.client = apiClient tflog.Info(ctx, "Postgres Flex database client configured") } //go:embed planModifiers.yaml var modifiersFileByte []byte // Schema defines the schema for the resource. func (r *databaseResource) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { s := postgresflexalpha2.DatabaseResourceSchema(ctx) s.Attributes["id"] = schema.StringAttribute{ Description: "Terraform's internal resource ID. It is structured as \\\"`project_id`,`region`,`instance_id`,`database_id`\\\".\",", Optional: true, Computed: true, } s.Attributes["database_id"] = schema.Int64Attribute{ Description: "ID of the database.", Computed: true, PlanModifiers: []planmodifier.Int64{ int64planmodifier.UseStateForUnknown(), }, Validators: []validator.Int64{}, } fields, err := postgresflexUtils.ReadModifiersConfig(modifiersFileByte) if err != nil { resp.Diagnostics.AddError("error during read modifiers config file", err.Error()) return } err = postgresflexUtils.AddPlanModifiersToResourceSchema(fields, &s) if err != nil { resp.Diagnostics.AddError("error adding plan modifiers", err.Error()) return } resp.Schema = s } // IdentitySchema defines the schema for the resource's identity attributes. 
func (r *databaseResource) IdentitySchema( _ context.Context, _ resource.IdentitySchemaRequest, response *resource.IdentitySchemaResponse, ) { response.IdentitySchema = identityschema.Schema{ Attributes: map[string]identityschema.Attribute{ "project_id": identityschema.StringAttribute{ RequiredForImport: true, }, "region": identityschema.StringAttribute{ RequiredForImport: true, }, "instance_id": identityschema.StringAttribute{ RequiredForImport: true, }, "database_id": identityschema.Int64Attribute{ RequiredForImport: true, }, }, } } // Create creates the resource and sets the initial Terraform state. func (r *databaseResource) Create( ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse, ) { // nolint:gocritic // function signature required by Terraform var model ResourceModel diags := req.Plan.Get(ctx, &model) resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { return } // Read identity data var identityData DatabaseResourceIdentityModel resp.Diagnostics.Append(req.Identity.Get(ctx, &identityData)...) 
if resp.Diagnostics.HasError() { return } ctx = core.InitProviderContext(ctx) projectId := identityData.ProjectID.ValueString() region := identityData.ProjectID.ValueString() instanceId := identityData.InstanceID.ValueString() ctx = tflog.SetField(ctx, "project_id", projectId) ctx = tflog.SetField(ctx, "instance_id", instanceId) ctx = tflog.SetField(ctx, "region", region) // Generate API request body from model payload, err := toCreatePayload(&model) if err != nil { core.LogAndAddError( ctx, &resp.Diagnostics, "Error creating database", fmt.Sprintf("Creating API payload: %v", err), ) return } // Create new database databaseResp, err := r.client.CreateDatabaseRequest( ctx, projectId, region, instanceId, ).CreateDatabaseRequestPayload(*payload).Execute() if err != nil { core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating database", fmt.Sprintf("Calling API: %v", err)) return } ctx = core.LogResponse(ctx) if databaseResp == nil || databaseResp.Id == nil { core.LogAndAddError( ctx, &resp.Diagnostics, "Error creating database", "API didn't return database Id. A database might have been created", ) return } databaseId := *databaseResp.Id ctx = tflog.SetField(ctx, "database_id", databaseId) database, err := getDatabaseById(ctx, r.client, projectId, region, instanceId, databaseId) if err != nil { core.LogAndAddError( ctx, &resp.Diagnostics, "Error creating database", fmt.Sprintf("Getting database details after creation: %v", err), ) return } // Map response body to schema err = mapResourceFields(database, &model) if err != nil { core.LogAndAddError( ctx, &resp.Diagnostics, "Error creating database", fmt.Sprintf("Processing API payload: %v", err), ) return } // Set state to fully populated data diags = resp.State.Set(ctx, model) resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { return } tflog.Info(ctx, "Postgres Flex database created") } // Read refreshes the Terraform state with the latest data. 
func (r *databaseResource) Read( ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse, ) { // nolint:gocritic // function signature required by Terraform var model ResourceModel diags := req.State.Get(ctx, &model) resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { return } // Read identity data var identityData DatabaseResourceIdentityModel resp.Diagnostics.Append(req.Identity.Get(ctx, &identityData)...) if resp.Diagnostics.HasError() { return } ctx = core.InitProviderContext(ctx) projectId := identityData.ProjectID.ValueString() instanceId := identityData.InstanceID.ValueString() databaseId := model.DatabaseID.ValueInt64() region := r.providerData.GetRegionWithOverride(identityData.Region) ctx = tflog.SetField(ctx, "project_id", projectId) ctx = tflog.SetField(ctx, "instance_id", instanceId) ctx = tflog.SetField(ctx, "region", region) ctx = tflog.SetField(ctx, "database_id", databaseId) databaseResp, err := getDatabaseById(ctx, r.client, projectId, region, instanceId, databaseId) if err != nil { oapiErr, ok := err.(*oapierror.GenericOpenAPIError) //nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped if (ok && oapiErr.StatusCode == http.StatusNotFound) || errors.Is(err, errDatabaseNotFound) { resp.State.RemoveResource(ctx) return } core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading database", fmt.Sprintf("Calling API: %v", err)) return } ctx = core.LogResponse(ctx) // Map response body to schema err = mapResourceFields(databaseResp, &model) if err != nil { core.LogAndAddError( ctx, &resp.Diagnostics, "Error reading database", fmt.Sprintf("Processing API payload: %v", err), ) return } // Set refreshed state diags = resp.State.Set(ctx, model) resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { return } tflog.Info(ctx, "Postgres Flex database read") } // Update updates the resource and sets the updated Terraform state on success. 
func (r *databaseResource) Update( ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse, ) { var model ResourceModel diags := req.Plan.Get(ctx, &model) resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { return } // Read identity data var identityData DatabaseResourceIdentityModel resp.Diagnostics.Append(req.Identity.Get(ctx, &identityData)...) if resp.Diagnostics.HasError() { return } ctx = core.InitProviderContext(ctx) projectId := identityData.ProjectID.ValueString() instanceId := identityData.InstanceID.ValueString() region := r.providerData.GetRegionWithOverride(identityData.Region) databaseId64 := model.DatabaseID.ValueInt64() // database id if databaseId64 > math.MaxInt32 { core.LogAndAddError(ctx, &resp.Diagnostics, "Error in type conversion", "int value too large (databaseId)") return } databaseId := int32(databaseId64) ctx = tflog.SetField(ctx, "project_id", projectId) ctx = tflog.SetField(ctx, "instance_id", instanceId) ctx = tflog.SetField(ctx, "region", region) ctx = tflog.SetField(ctx, "database_id", databaseId) // Retrieve values from state var stateModel ResourceModel diags = req.State.Get(ctx, &stateModel) resp.Diagnostics.Append(diags...) 
if resp.Diagnostics.HasError() { return } modified := false var payload postgresflexalpha.UpdateDatabasePartiallyRequestPayload if stateModel.Name != model.Name { payload.Name = model.Name.ValueStringPointer() modified = true } if stateModel.Owner != model.Owner { payload.Owner = model.Owner.ValueStringPointer() modified = true } if !modified { tflog.Info(ctx, "no modification detected") return } // Update existing database res, err := r.client.UpdateDatabasePartiallyRequest( ctx, projectId, region, instanceId, databaseId, ).UpdateDatabasePartiallyRequestPayload(payload).Execute() if err != nil { core.LogAndAddError(ctx, &resp.Diagnostics, "error updating database", err.Error()) return } ctx = core.LogResponse(ctx) // Map response body to schema err = mapResourceFields(res.Database, &model) if err != nil { core.LogAndAddError( ctx, &resp.Diagnostics, "Error updating database", fmt.Sprintf("Processing API payload: %v", err), ) return } // Set state to fully populated data diags = resp.State.Set(ctx, model) resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { return } tflog.Info(ctx, "Postgres Flex database updated") } // Delete deletes the resource and removes the Terraform state on success. func (r *databaseResource) Delete( ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse, ) { // nolint:gocritic // function signature required by Terraform var model ResourceModel diags := req.State.Get(ctx, &model) resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { return } // Read identity data var identityData DatabaseResourceIdentityModel resp.Diagnostics.Append(req.Identity.Get(ctx, &identityData)...) 
if resp.Diagnostics.HasError() { return } ctx = core.InitProviderContext(ctx) projectId := identityData.ProjectID.ValueString() instanceId := identityData.InstanceID.ValueString() region := r.providerData.GetRegionWithOverride(identityData.Region) databaseId64 := model.DatabaseID.ValueInt64() //database id if databaseId64 > math.MaxInt32 { core.LogAndAddError(ctx, &resp.Diagnostics, "Error in type conversion", "int value too large (databaseId)") return } databaseId := int32(databaseId64) ctx = tflog.SetField(ctx, "project_id", projectId) ctx = tflog.SetField(ctx, "instance_id", instanceId) ctx = tflog.SetField(ctx, "region", region) ctx = tflog.SetField(ctx, "database_id", databaseId) // Delete existing record set err := r.client.DeleteDatabaseRequestExecute(ctx, projectId, region, instanceId, databaseId) if err != nil { core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting database", fmt.Sprintf("Calling API: %v", err)) } ctx = core.LogResponse(ctx) tflog.Info(ctx, "Postgres Flex database deleted") } // ImportState imports a resource into the Terraform state on success. // The expected import identifier format is: [project_id],[region],[instance_id],[database_id] func (r *databaseResource) ImportState( ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse, ) { idParts := strings.Split(req.ID, core.Separator) if len(idParts) != 4 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" || idParts[3] == "" { core.LogAndAddError( ctx, &resp.Diagnostics, "Error importing database", fmt.Sprintf( "Expected import identifier with format [project_id],[region],[instance_id],[database_id], got %q", req.ID, ), ) return } databaseId, err := strconv.ParseInt(idParts[3], 10, 64) if err != nil { core.LogAndAddError( ctx, &resp.Diagnostics, "Error importing database", fmt.Sprintf("Invalid database_id format: %q. 
It must be a valid integer.", idParts[3]), ) return } resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...) resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("region"), idParts[1])...) resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), idParts[2])...) resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("database_id"), databaseId)...) //TODO: Investigate if this logic is still required. //core.LogAndAddWarning( // ctx, // &resp.Diagnostics, // "Postgresflex database imported with empty password", // "The database password is not imported as it is only available upon creation of a new database. The password field will be empty.", //) var identityData DatabaseResourceIdentityModel identityData.ProjectID = types.StringValue(idParts[0]) identityData.Region = types.StringValue(idParts[1]) identityData.InstanceID = types.StringValue(idParts[2]) identityData.DatabaseID = types.Int64Value(databaseId) resp.Diagnostics.Append(req.Identity.Set(ctx, &identityData)...) if resp.Diagnostics.HasError() { return } tflog.Info(ctx, "Postgres Flex instance state imported") }