Add regional support for serverbackup resource and data-source (#745)

* feat(serverbackup): add regional support

* fix: corrections for review findings
This commit is contained in:
Rüdiger Schmitz 2025-03-28 09:24:52 +01:00 committed by GitHub
parent 8b57c35712
commit a870b71d0a
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
11 changed files with 220 additions and 89 deletions

View file

@ -32,6 +32,10 @@ data "stackit_server_backup_schedule" "example" {
- `project_id` (String) STACKIT Project ID to which the server is associated.
- `server_id` (String) Server ID for the backup schedule.
### Optional
- `region` (String) The resource region. If not defined, the provider region is used.
### Read-Only
- `backup_properties` (Attributes) Backup schedule details for the backups. (see [below for nested schema](#nestedatt--backup_properties))
@ -48,3 +52,5 @@ Read-Only:
- `name` (String)
- `retention_period` (Number)
- `volume_ids` (List of String)

View file

@ -30,6 +30,10 @@ data "stackit_server_backup_schedules" "example" {
- `project_id` (String) STACKIT Project ID (UUID) to which the server is associated.
- `server_id` (String) Server ID (UUID) to which the backup schedule is associated.
### Optional
- `region` (String) The resource region. If not defined, the provider region is used.
### Read-Only
- `id` (String) Terraform's internal data source identifier. It is structured as "`project_id`,`server_id`".
@ -54,3 +58,5 @@ Read-Only:
- `name` (String)
- `retention_period` (Number)
- `volume_ids` (List of String)

View file

@ -35,6 +35,7 @@ resource "stackit_server_backup_schedule" "example" {
### Required
- `backup_properties` (Attributes) Backup schedule details for the backups. (see [below for nested schema](#nestedatt--backup_properties))
- `enabled` (Boolean) Is the backup schedule enabled or disabled.
- `name` (String) The schedule name.
- `project_id` (String) STACKIT Project ID to which the server is associated.
@ -43,12 +44,12 @@ resource "stackit_server_backup_schedule" "example" {
### Optional
- `backup_properties` (Attributes) Backup schedule details for the backups. (see [below for nested schema](#nestedatt--backup_properties))
- `region` (String) The resource region. If not defined, the provider region is used.
### Read-Only
- `backup_schedule_id` (Number) Backup schedule ID.
- `id` (String) Terraform's internal resource identifier. It is structured as "`project_id`,`server_id`,`backup_schedule_id`".
- `id` (String) Terraform's internal resource identifier. It is structured as "`project_id`,`region`,`server_id`,`backup_schedule_id`".
<a id="nestedatt--backup_properties"></a>
### Nested Schema for `backup_properties`
@ -61,3 +62,5 @@ Required:
Optional:
- `volume_ids` (List of String)

4
go.mod
View file

@ -11,7 +11,7 @@ require (
github.com/hashicorp/terraform-plugin-go v0.26.0
github.com/hashicorp/terraform-plugin-log v0.9.0
github.com/hashicorp/terraform-plugin-testing v1.11.0
github.com/stackitcloud/stackit-sdk-go/core v0.16.0
github.com/stackitcloud/stackit-sdk-go/core v0.16.2
github.com/stackitcloud/stackit-sdk-go/services/argus v0.11.0
github.com/stackitcloud/stackit-sdk-go/services/dns v0.13.0
github.com/stackitcloud/stackit-sdk-go/services/iaas v0.22.0
@ -27,7 +27,7 @@ require (
github.com/stackitcloud/stackit-sdk-go/services/redis v0.21.0
github.com/stackitcloud/stackit-sdk-go/services/resourcemanager v0.13.0
github.com/stackitcloud/stackit-sdk-go/services/secretsmanager v0.11.0
github.com/stackitcloud/stackit-sdk-go/services/serverbackup v0.6.0
github.com/stackitcloud/stackit-sdk-go/services/serverbackup v1.0.1
github.com/stackitcloud/stackit-sdk-go/services/serverupdate v0.5.0
github.com/stackitcloud/stackit-sdk-go/services/serviceaccount v0.6.0
github.com/stackitcloud/stackit-sdk-go/services/serviceenablement v1.0.0

8
go.sum
View file

@ -151,8 +151,8 @@ github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A=
github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
github.com/stackitcloud/stackit-sdk-go/core v0.16.0 h1:9caGZwGLZDyBBe6ojk9VR4B2m3/0H5It6znBz76sH1w=
github.com/stackitcloud/stackit-sdk-go/core v0.16.0/go.mod h1:LRheSoXTFRHWAyA8Q9skWtWBp3ZZ+bFAKiKuf4lTDkE=
github.com/stackitcloud/stackit-sdk-go/core v0.16.2 h1:F8A4P/LLlQSbz0S0+G3m8rb3BUOK6EcR/CKx5UQY5jQ=
github.com/stackitcloud/stackit-sdk-go/core v0.16.2/go.mod h1:8KIw3czdNJ9sdil9QQimxjR6vHjeINFrRv0iZ67wfn0=
github.com/stackitcloud/stackit-sdk-go/services/argus v0.11.0 h1:JVEx/ouHB6PlwGzQa3ywyDym1HTWo3WgrxAyXprCnuM=
github.com/stackitcloud/stackit-sdk-go/services/argus v0.11.0/go.mod h1:nVllQfYODhX1q3bgwVTLO7wHOp+8NMLiKbn3u/Dg5nU=
github.com/stackitcloud/stackit-sdk-go/services/authorization v0.6.0 h1:nSaSvo9o4c9KGc0vct+JWk5T49Ic6i3hzbLKviJtVhA=
@ -185,8 +185,8 @@ github.com/stackitcloud/stackit-sdk-go/services/resourcemanager v0.13.0 h1:fPCdQ
github.com/stackitcloud/stackit-sdk-go/services/resourcemanager v0.13.0/go.mod h1:Hs8RwANfF2ZKqOowPBqMQqLoRHcMobiKAihWWirAdEU=
github.com/stackitcloud/stackit-sdk-go/services/secretsmanager v0.11.0 h1:PwfpDFGUUJ8+Go5hJf+/hA5CHfY+DPS1cqIUYH+zWtE=
github.com/stackitcloud/stackit-sdk-go/services/secretsmanager v0.11.0/go.mod h1:Hb21FmYP95q0fzOb9jk4/9CIxTsHzrSYDQZh6e82XUg=
github.com/stackitcloud/stackit-sdk-go/services/serverbackup v0.6.0 h1:cESGAkm0ftADRBfdbiyx3pp/KVQ8JgmUQdRzpwG61wE=
github.com/stackitcloud/stackit-sdk-go/services/serverbackup v0.6.0/go.mod h1:aYPLsiImzWaYXEfYIZ0wJnV56PwcR+buy8Xu9jjbfGA=
github.com/stackitcloud/stackit-sdk-go/services/serverbackup v1.0.1 h1:qujhShugc1290NQlPoNqsembqzot8aTToAdSsJg5WrM=
github.com/stackitcloud/stackit-sdk-go/services/serverbackup v1.0.1/go.mod h1:e1fsQL24gTPXcMWptuslNscawmXv/PLUAFuw+sOofbc=
github.com/stackitcloud/stackit-sdk-go/services/serverupdate v0.5.0 h1:TMUxDh8XGgWUpnWo7GsawVq2ICDsy/r8dMlfC26MR5g=
github.com/stackitcloud/stackit-sdk-go/services/serverupdate v0.5.0/go.mod h1:giHnHz3kHeLY8Av9MZLsyJlaTXYz+BuGqdP/SKB5Vo0=
github.com/stackitcloud/stackit-sdk-go/services/serviceaccount v0.6.0 h1:y+XzJcntHJ7M+IWWvAUkiVFA8op+jZxwHs3ktW2aLoA=

View file

@ -40,6 +40,7 @@ var (
_ resource.Resource = &scheduleResource{}
_ resource.ResourceWithConfigure = &scheduleResource{}
_ resource.ResourceWithImportState = &scheduleResource{}
_ resource.ResourceWithModifyPlan = &scheduleResource{}
)
type Model struct {
@ -51,6 +52,7 @@ type Model struct {
Rrule types.String `tfsdk:"rrule"`
Enabled types.Bool `tfsdk:"enabled"`
BackupProperties *scheduleBackupPropertiesModel `tfsdk:"backup_properties"`
Region types.String `tfsdk:"region"`
}
// scheduleBackupPropertiesModel maps schedule backup_properties data
@ -67,7 +69,38 @@ func NewScheduleResource() resource.Resource {
// scheduleResource is the resource implementation.
type scheduleResource struct {
client *serverbackup.APIClient
client *serverbackup.APIClient
providerData core.ProviderData
}
// ModifyPlan implements resource.ResourceWithModifyPlan.
// Use the modifier to set the effective region in the current plan.
func (r *scheduleResource) ModifyPlan(ctx context.Context, req resource.ModifyPlanRequest, resp *resource.ModifyPlanResponse) { // nolint:gocritic // function signature required by Terraform
	// A null raw configuration (e.g. during destroy) carries no region to
	// adapt; bail out early to avoid follow-up errors.
	if req.Config.Raw.IsNull() {
		return
	}

	var cfg Model
	if resp.Diagnostics.Append(req.Config.Get(ctx, &cfg)...); resp.Diagnostics.HasError() {
		return
	}

	var plan Model
	if resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...); resp.Diagnostics.HasError() {
		return
	}

	// Resolve the effective region in the plan: an explicitly configured
	// value wins, otherwise the provider-level default region is used.
	if utils.AdaptRegion(ctx, cfg.Region, &plan.Region, r.providerData.GetRegion(), resp); resp.Diagnostics.HasError() {
		return
	}

	if resp.Diagnostics.Append(resp.Plan.Set(ctx, plan)...); resp.Diagnostics.HasError() {
		return
	}
}
// Metadata returns the resource type name.
@ -82,32 +115,34 @@ func (r *scheduleResource) Configure(ctx context.Context, req resource.Configure
return
}
providerData, ok := req.ProviderData.(core.ProviderData)
var ok bool
r.providerData, ok = req.ProviderData.(core.ProviderData)
if !ok {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Expected configure type stackit.ProviderData, got %T", req.ProviderData))
return
}
if !resourceBetaCheckDone {
features.CheckBetaResourcesEnabled(ctx, &providerData, &resp.Diagnostics, "stackit_server_backup_schedule", "resource")
features.CheckBetaResourcesEnabled(ctx, &r.providerData, &resp.Diagnostics, "stackit_server_backup_schedule", "resource")
if resp.Diagnostics.HasError() {
return
}
resourceBetaCheckDone = true
}
var apiClient *serverbackup.APIClient
var err error
if providerData.ServerBackupCustomEndpoint != "" {
ctx = tflog.SetField(ctx, "server_backup_custom_endpoint", providerData.ServerBackupCustomEndpoint)
var (
apiClient *serverbackup.APIClient
err error
)
if r.providerData.ServerBackupCustomEndpoint != "" {
ctx = tflog.SetField(ctx, "server_backup_custom_endpoint", r.providerData.ServerBackupCustomEndpoint)
apiClient, err = serverbackup.NewAPIClient(
config.WithCustomAuth(providerData.RoundTripper),
config.WithEndpoint(providerData.ServerBackupCustomEndpoint),
config.WithCustomAuth(r.providerData.RoundTripper),
config.WithEndpoint(r.providerData.ServerBackupCustomEndpoint),
)
} else {
apiClient, err = serverbackup.NewAPIClient(
config.WithCustomAuth(providerData.RoundTripper),
config.WithRegion(providerData.GetRegion()),
config.WithCustomAuth(r.providerData.RoundTripper),
)
}
@ -127,7 +162,7 @@ func (r *scheduleResource) Schema(_ context.Context, _ resource.SchemaRequest, r
MarkdownDescription: features.AddBetaDescription("Server backup schedule resource schema. Must have a `region` specified in the provider configuration."),
Attributes: map[string]schema.Attribute{
"id": schema.StringAttribute{
Description: "Terraform's internal resource identifier. It is structured as \"`project_id`,`server_id`,`backup_schedule_id`\".",
Description: "Terraform's internal resource identifier. It is structured as \"`project_id`,`region`,`server_id`,`backup_schedule_id`\".",
Computed: true,
PlanModifiers: []planmodifier.String{
stringplanmodifier.UseStateForUnknown(),
@ -196,7 +231,7 @@ func (r *scheduleResource) Schema(_ context.Context, _ resource.SchemaRequest, r
},
"backup_properties": schema.SingleNestedAttribute{
Description: "Backup schedule details for the backups.",
Optional: true,
Required: true,
Attributes: map[string]schema.Attribute{
"volume_ids": schema.ListAttribute{
ElementType: types.StringType,
@ -213,6 +248,15 @@ func (r *scheduleResource) Schema(_ context.Context, _ resource.SchemaRequest, r
},
},
},
"region": schema.StringAttribute{
Optional: true,
// must be computed to allow for storing the override value from the provider
Computed: true,
Description: "The resource region. If not defined, the provider region is used.",
PlanModifiers: []planmodifier.String{
stringplanmodifier.RequiresReplace(),
},
},
},
}
}
@ -227,11 +271,19 @@ func (r *scheduleResource) Create(ctx context.Context, req resource.CreateReques
}
projectId := model.ProjectId.ValueString()
serverId := model.ServerId.ValueString()
var region string
if utils.IsUndefined(model.Region) {
region = r.providerData.GetRegion()
} else {
region = model.Region.ValueString()
}
ctx = tflog.SetField(ctx, "project_id", projectId)
ctx = tflog.SetField(ctx, "server_id", serverId)
ctx = tflog.SetField(ctx, "region", region)
// Enable backups if not already enabled
err := enableBackupsService(ctx, &model, r.client)
err := r.enableBackupsService(ctx, &model)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating server backup schedule", fmt.Sprintf("Enabling server backup project before creation: %v", err))
return
@ -243,7 +295,7 @@ func (r *scheduleResource) Create(ctx context.Context, req resource.CreateReques
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating server backup schedule", fmt.Sprintf("Creating API payload: %v", err))
return
}
scheduleResp, err := r.client.CreateBackupSchedule(ctx, projectId, serverId).CreateBackupSchedulePayload(*payload).Execute()
scheduleResp, err := r.client.CreateBackupSchedule(ctx, projectId, serverId, region).CreateBackupSchedulePayload(*payload).Execute()
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating server backup schedule", fmt.Sprintf("Calling API: %v", err))
return
@ -251,7 +303,7 @@ func (r *scheduleResource) Create(ctx context.Context, req resource.CreateReques
ctx = tflog.SetField(ctx, "backup_schedule_id", *scheduleResp.Id)
// Map response body to schema
err = mapFields(ctx, scheduleResp, &model)
err = mapFields(ctx, scheduleResp, &model, region)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating server backup schedule", fmt.Sprintf("Processing API payload: %v", err))
return
@ -275,11 +327,19 @@ func (r *scheduleResource) Read(ctx context.Context, req resource.ReadRequest, r
projectId := model.ProjectId.ValueString()
serverId := model.ServerId.ValueString()
backupScheduleId := model.BackupScheduleId.ValueInt64()
var region string
if utils.IsUndefined(model.Region) {
region = r.providerData.GetRegion()
} else {
region = model.Region.ValueString()
}
ctx = tflog.SetField(ctx, "project_id", projectId)
ctx = tflog.SetField(ctx, "server_id", serverId)
ctx = tflog.SetField(ctx, "backup_schedule_id", backupScheduleId)
ctx = tflog.SetField(ctx, "region", region)
scheduleResp, err := r.client.GetBackupSchedule(ctx, projectId, serverId, strconv.FormatInt(backupScheduleId, 10)).Execute()
scheduleResp, err := r.client.GetBackupSchedule(ctx, projectId, serverId, region, strconv.FormatInt(backupScheduleId, 10)).Execute()
if err != nil {
oapiErr, ok := err.(*oapierror.GenericOpenAPIError) //nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped
if ok && oapiErr.StatusCode == http.StatusNotFound {
@ -291,7 +351,7 @@ func (r *scheduleResource) Read(ctx context.Context, req resource.ReadRequest, r
}
// Map response body to schema
err = mapFields(ctx, scheduleResp, &model)
err = mapFields(ctx, scheduleResp, &model, region)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading backup schedule", fmt.Sprintf("Processing API payload: %v", err))
return
@ -317,9 +377,17 @@ func (r *scheduleResource) Update(ctx context.Context, req resource.UpdateReques
projectId := model.ProjectId.ValueString()
serverId := model.ServerId.ValueString()
backupScheduleId := model.BackupScheduleId.ValueInt64()
var region string
if utils.IsUndefined(model.Region) {
region = r.providerData.GetRegion()
} else {
region = model.Region.ValueString()
}
ctx = tflog.SetField(ctx, "project_id", projectId)
ctx = tflog.SetField(ctx, "server_id", serverId)
ctx = tflog.SetField(ctx, "backup_schedule_id", backupScheduleId)
ctx = tflog.SetField(ctx, "region", region)
// Update schedule
payload, err := toUpdatePayload(&model)
@ -328,14 +396,14 @@ func (r *scheduleResource) Update(ctx context.Context, req resource.UpdateReques
return
}
scheduleResp, err := r.client.UpdateBackupSchedule(ctx, projectId, serverId, strconv.FormatInt(backupScheduleId, 10)).UpdateBackupSchedulePayload(*payload).Execute()
scheduleResp, err := r.client.UpdateBackupSchedule(ctx, projectId, serverId, region, strconv.FormatInt(backupScheduleId, 10)).UpdateBackupSchedulePayload(*payload).Execute()
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating server backup schedule", fmt.Sprintf("Calling API: %v", err))
return
}
// Map response body to schema
err = mapFields(ctx, scheduleResp, &model)
err = mapFields(ctx, scheduleResp, &model, region)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating server backup schedule", fmt.Sprintf("Processing API payload: %v", err))
return
@ -359,11 +427,19 @@ func (r *scheduleResource) Delete(ctx context.Context, req resource.DeleteReques
projectId := model.ProjectId.ValueString()
serverId := model.ServerId.ValueString()
backupScheduleId := model.BackupScheduleId.ValueInt64()
var region string
if utils.IsUndefined(model.Region) {
region = r.providerData.GetRegion()
} else {
region = model.Region.ValueString()
}
ctx = tflog.SetField(ctx, "project_id", projectId)
ctx = tflog.SetField(ctx, "server_id", serverId)
ctx = tflog.SetField(ctx, "backup_schedule_id", backupScheduleId)
ctx = tflog.SetField(ctx, "region", region)
err := r.client.DeleteBackupSchedule(ctx, projectId, serverId, strconv.FormatInt(backupScheduleId, 10)).Execute()
err := r.client.DeleteBackupSchedule(ctx, projectId, serverId, region, strconv.FormatInt(backupScheduleId, 10)).Execute()
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting server backup schedule", fmt.Sprintf("Calling API: %v", err))
return
@ -371,7 +447,7 @@ func (r *scheduleResource) Delete(ctx context.Context, req resource.DeleteReques
tflog.Info(ctx, "Server backup schedule deleted.")
// Disable backups service in case there are no backups and no backup schedules.
err = disableBackupsService(ctx, &model, r.client)
err = r.disableBackupsService(ctx, &model)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting server backup schedule", fmt.Sprintf("Disabling server backup service after deleting schedule: %v", err))
return
@ -382,15 +458,15 @@ func (r *scheduleResource) Delete(ctx context.Context, req resource.DeleteReques
// The expected format of the resource import identifier is: // project_id,region,server_id,backup_schedule_id
func (r *scheduleResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
idParts := strings.Split(req.ID, core.Separator)
if len(idParts) != 3 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" {
if len(idParts) != 4 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" || idParts[3] == "" {
core.LogAndAddError(ctx, &resp.Diagnostics,
"Error importing server backup schedule",
fmt.Sprintf("Expected import identifier with format [project_id],[server_id],[backup_schedule_id], got %q", req.ID),
fmt.Sprintf("Expected import identifier with format [project_id],[region],[server_id],[backup_schedule_id], got %q", req.ID),
)
return
}
intId, err := strconv.ParseInt(idParts[2], 10, 64)
intId, err := strconv.ParseInt(idParts[3], 10, 64)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics,
"Error importing server backup schedule",
@ -400,12 +476,13 @@ func (r *scheduleResource) ImportState(ctx context.Context, req resource.ImportS
}
resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...)
resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("server_id"), idParts[1])...)
resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("region"), idParts[1])...)
resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("server_id"), idParts[2])...)
resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("backup_schedule_id"), intId)...)
tflog.Info(ctx, "Server backup schedule state imported.")
}
func mapFields(ctx context.Context, schedule *serverbackup.BackupSchedule, model *Model) error {
func mapFields(ctx context.Context, schedule *serverbackup.BackupSchedule, model *Model, region string) error {
if schedule == nil {
return fmt.Errorf("response input is nil")
}
@ -419,6 +496,7 @@ func mapFields(ctx context.Context, schedule *serverbackup.BackupSchedule, model
model.BackupScheduleId = types.Int64PointerValue(schedule.Id)
idParts := []string{
model.ProjectId.ValueString(),
region,
model.ServerId.ValueString(),
strconv.FormatInt(model.BackupScheduleId.ValueInt64(), 10),
}
@ -441,18 +519,26 @@ func mapFields(ctx context.Context, schedule *serverbackup.BackupSchedule, model
RetentionPeriod: types.Int64Value(*schedule.BackupProperties.RetentionPeriod),
VolumeIds: ids,
}
model.Region = types.StringValue(region)
return nil
}
// If already enabled, just continues
func enableBackupsService(ctx context.Context, model *Model, client *serverbackup.APIClient) error {
func (r *scheduleResource) enableBackupsService(ctx context.Context, model *Model) error {
projectId := model.ProjectId.ValueString()
serverId := model.ServerId.ValueString()
enableServicePayload := serverbackup.EnableServicePayload{}
var region string
if utils.IsUndefined(model.Region) {
region = r.providerData.GetRegion()
} else {
region = model.Region.ValueString()
}
tflog.Debug(ctx, "Enabling server backup service")
err := client.EnableService(ctx, projectId, serverId).EnableServicePayload(enableServicePayload).Execute()
if err != nil {
request := r.client.EnableServiceResource(ctx, projectId, serverId, region).
EnableServiceResourcePayload(serverbackup.EnableServiceResourcePayload{})
if err := request.Execute(); err != nil {
if strings.Contains(err.Error(), "Tried to activate already active service") {
tflog.Debug(ctx, "Service for server backup already enabled")
return nil
@ -464,24 +550,20 @@ func enableBackupsService(ctx context.Context, model *Model, client *serverbacku
}
// Disables only if no backup schedules are present and no backups are present
func disableBackupsService(ctx context.Context, model *Model, client *serverbackup.APIClient) error {
func (r *scheduleResource) disableBackupsService(ctx context.Context, model *Model) error {
tflog.Debug(ctx, "Disabling server backup service (in case there are no backups and no backup schedules)")
projectId := model.ProjectId.ValueString()
serverId := model.ServerId.ValueString()
tflog.Debug(ctx, "Checking for existing backup schedules")
schedules, err := client.ListBackupSchedules(ctx, projectId, serverId).Execute()
if err != nil {
return fmt.Errorf("list existing backup schedules: %w", err)
}
if *schedules.Items != nil && len(*schedules.Items) > 0 {
tflog.Debug(ctx, "Backup schedules found - will not disable server backup service")
return nil
var region string
if utils.IsUndefined(model.Region) {
region = r.providerData.GetRegion()
} else {
region = model.Region.ValueString()
}
tflog.Debug(ctx, "Checking for existing backups")
backups, err := client.ListBackups(ctx, projectId, serverId).Execute()
backups, err := r.client.ListBackups(ctx, projectId, serverId, region).Execute()
if err != nil {
return fmt.Errorf("list backups: %w", err)
}
@ -490,7 +572,7 @@ func disableBackupsService(ctx context.Context, model *Model, client *serverback
return nil
}
err = client.DisableService(ctx, projectId, serverId).Execute()
err = r.client.DisableServiceResourceExecute(ctx, projectId, serverId, region)
if err != nil {
return fmt.Errorf("disable server backup service: %w", err)
}

View file

@ -23,7 +23,7 @@ func TestMapFields(t *testing.T) {
Id: utils.Ptr(int64(5)),
},
Model{
ID: types.StringValue("project_uid,server_uid,5"),
ID: types.StringValue("project_uid,eu01,server_uid,5"),
ProjectId: types.StringValue("project_uid"),
ServerId: types.StringValue("server_uid"),
BackupScheduleId: types.Int64Value(5),
@ -47,7 +47,7 @@ func TestMapFields(t *testing.T) {
ServerId: types.StringValue("server_uid"),
ProjectId: types.StringValue("project_uid"),
BackupScheduleId: types.Int64Value(5),
ID: types.StringValue("project_uid,server_uid,5"),
ID: types.StringValue("project_uid,eu01,server_uid,5"),
Name: types.StringValue("backup_schedule_name_1"),
Rrule: types.StringValue("DTSTART;TZID=Europe/Sofia:20200803T023000 RRULE:FREQ=DAILY;INTERVAL=1"),
Enabled: types.BoolValue(true),
@ -56,6 +56,7 @@ func TestMapFields(t *testing.T) {
RetentionPeriod: types.Int64Value(3),
VolumeIds: listValueFrom([]string{"uuid1", "uuid2"}),
},
Region: types.StringValue("eu01"),
},
true,
},
@ -79,7 +80,7 @@ func TestMapFields(t *testing.T) {
ServerId: tt.expected.ServerId,
}
ctx := context.TODO()
err := mapFields(ctx, tt.input, state)
err := mapFields(ctx, tt.input, state, "eu01")
if !tt.isValid && err == nil {
t.Fatalf("Should have failed")
}

View file

@ -14,6 +14,7 @@ import (
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core"
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/features"
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/utils"
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/validate"
"github.com/stackitcloud/stackit-sdk-go/core/config"
@ -38,7 +39,8 @@ func NewScheduleDataSource() datasource.DataSource {
// scheduleDataSource is the data source implementation.
type scheduleDataSource struct {
client *serverbackup.APIClient
client *serverbackup.APIClient
providerData core.ProviderData
}
// Metadata returns the data source type name.
@ -53,14 +55,15 @@ func (r *scheduleDataSource) Configure(ctx context.Context, req datasource.Confi
return
}
providerData, ok := req.ProviderData.(core.ProviderData)
var ok bool
r.providerData, ok = req.ProviderData.(core.ProviderData)
if !ok {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Expected configure type stackit.ProviderData, got %T", req.ProviderData))
return
}
if !scheduleDataSourceBetaCheckDone {
features.CheckBetaResourcesEnabled(ctx, &providerData, &resp.Diagnostics, "stackit_server_backup_schedule", "data source")
features.CheckBetaResourcesEnabled(ctx, &r.providerData, &resp.Diagnostics, "stackit_server_backup_schedule", "data source")
if resp.Diagnostics.HasError() {
return
}
@ -69,16 +72,15 @@ func (r *scheduleDataSource) Configure(ctx context.Context, req datasource.Confi
var apiClient *serverbackup.APIClient
var err error
if providerData.ServerBackupCustomEndpoint != "" {
ctx = tflog.SetField(ctx, "server_backup_custom_endpoint", providerData.ServerBackupCustomEndpoint)
if r.providerData.ServerBackupCustomEndpoint != "" {
ctx = tflog.SetField(ctx, "server_backup_custom_endpoint", r.providerData.ServerBackupCustomEndpoint)
apiClient, err = serverbackup.NewAPIClient(
config.WithCustomAuth(providerData.RoundTripper),
config.WithEndpoint(providerData.ServerBackupCustomEndpoint),
config.WithCustomAuth(r.providerData.RoundTripper),
config.WithEndpoint(r.providerData.ServerBackupCustomEndpoint),
)
} else {
apiClient, err = serverbackup.NewAPIClient(
config.WithCustomAuth(providerData.RoundTripper),
config.WithRegion(providerData.GetRegion()),
config.WithCustomAuth(r.providerData.RoundTripper),
)
}
@ -149,6 +151,11 @@ func (r *scheduleDataSource) Schema(_ context.Context, _ datasource.SchemaReques
},
},
},
"region": schema.StringAttribute{
// the region cannot be found, so it has to be passed
Optional: true,
Description: "The resource region. If not defined, the provider region is used.",
},
},
}
}
@ -164,11 +171,19 @@ func (r *scheduleDataSource) Read(ctx context.Context, req datasource.ReadReques
projectId := model.ProjectId.ValueString()
serverId := model.ServerId.ValueString()
backupScheduleId := model.BackupScheduleId.ValueInt64()
var region string
if utils.IsUndefined(model.Region) {
region = r.providerData.GetRegion()
} else {
region = model.Region.ValueString()
}
ctx = tflog.SetField(ctx, "project_id", projectId)
ctx = tflog.SetField(ctx, "server_id", serverId)
ctx = tflog.SetField(ctx, "backup_schedule_id", backupScheduleId)
ctx = tflog.SetField(ctx, "region", region)
scheduleResp, err := r.client.GetBackupSchedule(ctx, projectId, serverId, strconv.FormatInt(backupScheduleId, 10)).Execute()
scheduleResp, err := r.client.GetBackupSchedule(ctx, projectId, serverId, region, strconv.FormatInt(backupScheduleId, 10)).Execute()
if err != nil {
oapiErr, ok := err.(*oapierror.GenericOpenAPIError) //nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped
if ok && oapiErr.StatusCode == http.StatusNotFound {
@ -179,7 +194,7 @@ func (r *scheduleDataSource) Read(ctx context.Context, req datasource.ReadReques
}
// Map response body to schema
err = mapFields(ctx, scheduleResp, &model)
err = mapFields(ctx, scheduleResp, &model, region)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading server backup schedule", fmt.Sprintf("Processing API payload: %v", err))
return

View file

@ -12,6 +12,7 @@ import (
"github.com/hashicorp/terraform-plugin-log/tflog"
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core"
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/features"
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/utils"
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/validate"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
@ -37,7 +38,8 @@ func NewSchedulesDataSource() datasource.DataSource {
// schedulesDataSource is the data source implementation.
type schedulesDataSource struct {
client *serverbackup.APIClient
client *serverbackup.APIClient
providerData core.ProviderData
}
// Metadata returns the data source type name.
@ -52,14 +54,15 @@ func (r *schedulesDataSource) Configure(ctx context.Context, req datasource.Conf
return
}
providerData, ok := req.ProviderData.(core.ProviderData)
var ok bool
r.providerData, ok = req.ProviderData.(core.ProviderData)
if !ok {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Expected configure type stackit.ProviderData, got %T", req.ProviderData))
return
}
if !schedulesDataSourceBetaCheckDone {
features.CheckBetaResourcesEnabled(ctx, &providerData, &resp.Diagnostics, "stackit_server_backup_schedules", "data source")
features.CheckBetaResourcesEnabled(ctx, &r.providerData, &resp.Diagnostics, "stackit_server_backup_schedules", "data source")
if resp.Diagnostics.HasError() {
return
}
@ -68,16 +71,15 @@ func (r *schedulesDataSource) Configure(ctx context.Context, req datasource.Conf
var apiClient *serverbackup.APIClient
var err error
if providerData.ServerBackupCustomEndpoint != "" {
ctx = tflog.SetField(ctx, "server_backup_custom_endpoint", providerData.ServerBackupCustomEndpoint)
if r.providerData.ServerBackupCustomEndpoint != "" {
ctx = tflog.SetField(ctx, "server_backup_custom_endpoint", r.providerData.ServerBackupCustomEndpoint)
apiClient, err = serverbackup.NewAPIClient(
config.WithCustomAuth(providerData.RoundTripper),
config.WithEndpoint(providerData.ServerBackupCustomEndpoint),
config.WithCustomAuth(r.providerData.RoundTripper),
config.WithEndpoint(r.providerData.ServerBackupCustomEndpoint),
)
} else {
apiClient, err = serverbackup.NewAPIClient(
config.WithCustomAuth(providerData.RoundTripper),
config.WithRegion(providerData.GetRegion()),
config.WithCustomAuth(r.providerData.RoundTripper),
)
}
@ -154,6 +156,11 @@ func (r *schedulesDataSource) Schema(_ context.Context, _ datasource.SchemaReque
},
},
},
"region": schema.StringAttribute{
// the region cannot be found, so it has to be passed
Optional: true,
Description: "The resource region. If not defined, the provider region is used.",
},
},
}
}
@ -164,6 +171,7 @@ type schedulesDataSourceModel struct {
ProjectId types.String `tfsdk:"project_id"`
ServerId types.String `tfsdk:"server_id"`
Items []schedulesDatasourceItemModel `tfsdk:"items"`
Region types.String `tfsdk:"region"`
}
// schedulesDatasourceItemModel maps schedule schema data.
@ -185,10 +193,17 @@ func (r *schedulesDataSource) Read(ctx context.Context, req datasource.ReadReque
}
projectId := model.ProjectId.ValueString()
serverId := model.ServerId.ValueString()
var region string
if utils.IsUndefined(model.Region) {
region = r.providerData.GetRegion()
} else {
region = model.Region.ValueString()
}
ctx = tflog.SetField(ctx, "project_id", projectId)
ctx = tflog.SetField(ctx, "server_id", serverId)
ctx = tflog.SetField(ctx, "region", region)
schedules, err := r.client.ListBackupSchedules(ctx, projectId, serverId).Execute()
schedules, err := r.client.ListBackupSchedules(ctx, projectId, serverId, region).Execute()
if err != nil {
oapiErr, ok := err.(*oapierror.GenericOpenAPIError) //nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped
if ok && oapiErr.StatusCode == http.StatusNotFound {
@ -199,7 +214,7 @@ func (r *schedulesDataSource) Read(ctx context.Context, req datasource.ReadReque
}
// Map response body to schema
err = mapSchedulesDatasourceFields(ctx, schedules, &model)
err = mapSchedulesDatasourceFields(ctx, schedules, &model, region)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading server backup schedules", fmt.Sprintf("Processing API payload: %v", err))
return
@ -214,7 +229,7 @@ func (r *schedulesDataSource) Read(ctx context.Context, req datasource.ReadReque
tflog.Info(ctx, "Server backup schedules read")
}
func mapSchedulesDatasourceFields(ctx context.Context, schedules *serverbackup.GetBackupSchedulesResponse, model *schedulesDataSourceModel) error {
func mapSchedulesDatasourceFields(ctx context.Context, schedules *serverbackup.GetBackupSchedulesResponse, model *schedulesDataSourceModel, region string) error {
if schedules == nil {
return fmt.Errorf("response input is nil")
}
@ -226,10 +241,11 @@ func mapSchedulesDatasourceFields(ctx context.Context, schedules *serverbackup.G
projectId := model.ProjectId.ValueString()
serverId := model.ServerId.ValueString()
idParts := []string{projectId, serverId}
idParts := []string{projectId, region, serverId}
model.ID = types.StringValue(
strings.Join(idParts, core.Separator),
)
model.Region = types.StringValue(region)
for _, schedule := range *schedules.Items {
scheduleState := schedulesDatasourceItemModel{

View file

@ -29,10 +29,11 @@ func TestMapSchedulesDataSourceFields(t *testing.T) {
Items: &[]sdk.BackupSchedule{},
},
schedulesDataSourceModel{
ID: types.StringValue("project_uid,server_uid"),
ID: types.StringValue("project_uid,eu01,server_uid"),
ProjectId: types.StringValue("project_uid"),
ServerId: types.StringValue("server_uid"),
Items: nil,
Region: types.StringValue("eu01"),
},
true,
},
@ -54,7 +55,7 @@ func TestMapSchedulesDataSourceFields(t *testing.T) {
},
},
schedulesDataSourceModel{
ID: types.StringValue("project_uid,server_uid"),
ID: types.StringValue("project_uid,eu01,server_uid"),
ServerId: types.StringValue("server_uid"),
ProjectId: types.StringValue("project_uid"),
Items: []schedulesDatasourceItemModel{
@ -70,6 +71,7 @@ func TestMapSchedulesDataSourceFields(t *testing.T) {
},
},
},
Region: types.StringValue("eu01"),
},
true,
},
@ -87,7 +89,7 @@ func TestMapSchedulesDataSourceFields(t *testing.T) {
ServerId: tt.expected.ServerId,
}
ctx := context.TODO()
err := mapSchedulesDatasourceFields(ctx, tt.input, state)
err := mapSchedulesDatasourceFields(ctx, tt.input, state, "eu01")
if !tt.isValid && err == nil {
t.Fatalf("Should have failed")
}

View file

@ -22,7 +22,7 @@ var serverBackupScheduleResource = map[string]string{
"project_id": testutil.ProjectId,
"server_id": testutil.ServerId,
"backup_schedule_name": testutil.ResourceNameWithDateTime("server-backup-schedule"),
"rrule": "DTSTART;TZID=Europe/Sofia:20200803T023000 RRULE:FREQ=DAILY;INTERVAL=1",
"rrule": "DTSTART;TZID=Europe/Berlin:20250325T080000 RRULE:FREQ=DAILY;INTERVAL=1;COUNT=3",
"backup_name": testutil.ResourceNameWithDateTime("server-backup-schedule-backup"),
}
@ -118,12 +118,14 @@ func TestAccServerBackupScheduleResource(t *testing.T) {
data "stackit_server_backup_schedules" "schedules_data_test" {
project_id = stackit_server_backup_schedule.test_schedule.project_id
server_id = stackit_server_backup_schedule.test_schedule.server_id
region = stackit_server_backup_schedule.test_schedule.region
}
data "stackit_server_backup_schedule" "schedule_data_test" {
project_id = stackit_server_backup_schedule.test_schedule.project_id
server_id = stackit_server_backup_schedule.test_schedule.server_id
backup_schedule_id = stackit_server_backup_schedule.test_schedule.backup_schedule_id
region = stackit_server_backup_schedule.test_schedule.region
}`,
resourceConfig(validRetentionPeriod),
),
@ -156,7 +158,7 @@ func TestAccServerBackupScheduleResource(t *testing.T) {
if !ok {
return "", fmt.Errorf("couldn't find attribute backup_schedule_id")
}
return fmt.Sprintf("%s,%s,%s", testutil.ProjectId, testutil.ServerId, scheduleId), nil
return fmt.Sprintf("%s,%s,%s,%s", testutil.ProjectId, testutil.Region, testutil.ServerId, scheduleId), nil
},
ImportState: true,
ImportStateVerify: true,
@ -187,9 +189,7 @@ func testAccCheckServerBackupScheduleDestroy(s *terraform.State) error {
var client *serverbackup.APIClient
var err error
if testutil.ServerBackupCustomEndpoint == "" {
client, err = serverbackup.NewAPIClient(
config.WithRegion("eu01"),
)
client, err = serverbackup.NewAPIClient()
} else {
client, err = serverbackup.NewAPIClient(
config.WithEndpoint(testutil.ServerBackupCustomEndpoint),
@ -205,11 +205,11 @@ func testAccCheckServerBackupScheduleDestroy(s *terraform.State) error {
continue
}
		// server backup schedule terraform ID: "[project_id],[region],[server_id],[backup_schedule_id]"
scheduleId := strings.Split(rs.Primary.ID, core.Separator)[2]
scheduleId := strings.Split(rs.Primary.ID, core.Separator)[3]
schedulesToDestroy = append(schedulesToDestroy, scheduleId)
}
schedulesResp, err := client.ListBackupSchedules(ctx, testutil.ProjectId, testutil.ServerId).Execute()
schedulesResp, err := client.ListBackupSchedules(ctx, testutil.ProjectId, testutil.ServerId, testutil.Region).Execute()
if err != nil {
return fmt.Errorf("getting schedulesResp: %w", err)
}
@ -221,7 +221,7 @@ func testAccCheckServerBackupScheduleDestroy(s *terraform.State) error {
}
scheduleId := strconv.FormatInt(*schedules[i].Id, 10)
if utils.Contains(schedulesToDestroy, scheduleId) {
err := client.DeleteBackupScheduleExecute(ctx, testutil.ProjectId, testutil.ServerId, scheduleId)
err := client.DeleteBackupScheduleExecute(ctx, testutil.ProjectId, testutil.ServerId, scheduleId, testutil.Region)
if err != nil {
return fmt.Errorf("destroying server backup schedule %s during CheckDestroy: %w", scheduleId, err)
}