ref 624723: server backup schedules (#416)

Signed-off-by: Adrian Nackov <adrian.nackov@mail.schwarz>
This commit is contained in:
a_nackov 2024-06-26 13:51:06 +03:00 committed by GitHub
parent 7fbb13c0b6
commit b5eb8bd379
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
21 changed files with 1852 additions and 0 deletions

View file

@ -27,6 +27,9 @@ generate-docs:
build:
@go build -o bin/terraform-provider-stackit
fmt:
@gofmt -s -w .
# Run the provider's unit tests
test:
@echo "Running tests for the terraform provider"

View file

@ -0,0 +1,47 @@
---
# generated by https://github.com/hashicorp/terraform-plugin-docs
page_title: "stackit_server_backup_schedule Data Source - stackit"
subcategory: ""
description: |-
Server backup schedule data source schema. Must have a region specified in the provider configuration.
---
# stackit_server_backup_schedule (Data Source)
Server backup schedule data source schema. Must have a `region` specified in the provider configuration.
## Example Usage
```terraform
data "stackit_server_backup_schedule" "example" {
project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
server_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
backup_schedule_id = xxxxx
}
```
<!-- schema generated by tfplugindocs -->
## Schema
### Required
- `backup_schedule_id` (Number) Backup schedule ID.
- `project_id` (String) STACKIT Project ID to which the server is associated.
- `server_id` (String) Server ID for the backup schedule.
### Read-Only
- `backup_properties` (Attributes) Backup schedule details for the backups. (see [below for nested schema](#nestedatt--backup_properties))
- `enabled` (Boolean) Is the backup schedule enabled or disabled.
- `id` (String) Terraform's internal resource identifier. It is structured as "`project_id`,`server_id`,`backup_schedule_id`".
- `name` (String) The schedule name.
- `rrule` (String) Backup schedule described in `rrule` (recurrence rule) format.
<a id="nestedatt--backup_properties"></a>
### Nested Schema for `backup_properties`
Read-Only:
- `name` (String)
- `retention_period` (Number)
- `volume_ids` (List of String)

View file

@ -0,0 +1,53 @@
---
# generated by https://github.com/hashicorp/terraform-plugin-docs
page_title: "stackit_server_backup_schedules Data Source - stackit"
subcategory: ""
description: |-
Server backup schedule data source schema.
---
# stackit_server_backup_schedules (Data Source)
Server backup schedule data source schema.
## Example Usage
```terraform
data "stackit_server_backup_schedules" "example" {
project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
server_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
}
```
<!-- schema generated by tfplugindocs -->
## Schema
### Required
- `project_id` (String) STACKIT Project ID (UUID) to which the server is associated.
- `server_id` (String) Server ID (UUID) to which the backup schedule is associated.
### Read-Only
- `id` (String) Terraform's internal data source identifier. It is structured as "`project_id`,`server_id`".
- `items` (Attributes List) (see [below for nested schema](#nestedatt--items))
<a id="nestedatt--items"></a>
### Nested Schema for `items`
Read-Only:
- `backup_properties` (Attributes) Backup schedule details for the backups. (see [below for nested schema](#nestedatt--items--backup_properties))
- `backup_schedule_id` (Number)
- `enabled` (Boolean) Is the backup schedule enabled or disabled.
- `name` (String) The backup schedule name.
- `rrule` (String) Backup schedule described in `rrule` (recurrence rule) format.
<a id="nestedatt--items--backup_properties"></a>
### Nested Schema for `items.backup_properties`
Read-Only:
- `name` (String)
- `retention_period` (Number)
- `volume_ids` (List of String)

View file

@ -161,6 +161,7 @@ Note: AWS specific checks must be skipped as they do not work on STACKIT. For de
- `region` (String) Region will be used as the default location for regional services. Not all services require a region, some are global
- `resourcemanager_custom_endpoint` (String) Custom endpoint for the Resource Manager service
- `secretsmanager_custom_endpoint` (String) Custom endpoint for the Secrets Manager service
- `server_backup_custom_endpoint` (String) Custom endpoint for the Server Backup service
- `service_account_email` (String) Service account email. It can also be set using the environment variable STACKIT_SERVICE_ACCOUNT_EMAIL. It is required if you want to use the resource manager project resource.
- `service_account_key` (String) Service account key used for authentication. If set, the key flow will be used to authenticate all operations.
- `service_account_key_path` (String) Path for the service account key used for authentication. If set, the key flow will be used to authenticate all operations.

View file

@ -0,0 +1,60 @@
---
# generated by https://github.com/hashicorp/terraform-plugin-docs
page_title: "stackit_server_backup_schedule Resource - stackit"
subcategory: ""
description: |-
Server backup schedule resource schema. Must have a region specified in the provider configuration.
---
# stackit_server_backup_schedule (Resource)
Server backup schedule resource schema. Must have a `region` specified in the provider configuration.
## Example Usage
```terraform
resource "stackit_server_backup_schedule" "example" {
project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
server_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
name = "example_backup_schedule_name"
rrule = "DTSTART;TZID=Europe/Sofia:20200803T023000 RRULE:FREQ=DAILY;INTERVAL=1"
enabled = true
backup_properties = {
name = "example_backup_name"
retention_period = 14
volume_ids = null
}
}
```
<!-- schema generated by tfplugindocs -->
## Schema
### Required
- `enabled` (Boolean) Is the backup schedule enabled or disabled.
- `name` (String) The schedule name.
- `project_id` (String) STACKIT Project ID to which the server is associated.
- `rrule` (String) Backup schedule described in `rrule` (recurrence rule) format.
- `server_id` (String) Server ID for the backup schedule.
### Optional
- `backup_properties` (Attributes) Backup schedule details for the backups. (see [below for nested schema](#nestedatt--backup_properties))
### Read-Only
- `backup_schedule_id` (Number) Backup schedule ID.
- `id` (String) Terraform's internal resource identifier. It is structured as "`project_id`,`server_id`,`backup_schedule_id`".
<a id="nestedatt--backup_properties"></a>
### Nested Schema for `backup_properties`
Required:
- `name` (String)
- `retention_period` (Number)
Optional:
- `volume_ids` (List of String)

View file

@ -0,0 +1,5 @@
data "stackit_server_backup_schedule" "example" {
project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
server_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
backup_schedule_id = xxxxx
}

View file

@ -0,0 +1,4 @@
data "stackit_server_backup_schedules" "example" {
project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
server_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
}

View file

@ -0,0 +1,12 @@
resource "stackit_server_backup_schedule" "example" {
project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
server_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
name = "example_backup_schedule_name"
rrule = "DTSTART;TZID=Europe/Sofia:20200803T023000 RRULE:FREQ=DAILY;INTERVAL=1"
enabled = true
backup_properties = {
name = "example_backup_name"
retention_period = 14
volume_ids = null
}
}

2
go.mod
View file

@ -27,8 +27,10 @@ require (
github.com/stackitcloud/stackit-sdk-go/services/redis v0.15.0
github.com/stackitcloud/stackit-sdk-go/services/resourcemanager v0.9.0
github.com/stackitcloud/stackit-sdk-go/services/secretsmanager v0.8.0
github.com/stackitcloud/stackit-sdk-go/services/serverbackup v0.1.0
github.com/stackitcloud/stackit-sdk-go/services/ske v0.16.0
github.com/stackitcloud/stackit-sdk-go/services/sqlserverflex v0.2.0
github.com/teambition/rrule-go v1.8.2
golang.org/x/mod v0.18.0
)

4
go.sum
View file

@ -176,6 +176,8 @@ github.com/stackitcloud/stackit-sdk-go/services/resourcemanager v0.9.0 h1:qCbvGq
github.com/stackitcloud/stackit-sdk-go/services/resourcemanager v0.9.0/go.mod h1:p16qz/pAW8b1gEhqMpIgJfutRPeDPqQLlbVGyCo3f8o=
github.com/stackitcloud/stackit-sdk-go/services/secretsmanager v0.8.0 h1:pJBG455kmtbQFpCxcBfBK8wOuEnmsMv3h90LFcdj3q0=
github.com/stackitcloud/stackit-sdk-go/services/secretsmanager v0.8.0/go.mod h1:LX0Mcyr7/QP77zf7e05fHCJO38RMuTxr7nEDUDZ3oPQ=
github.com/stackitcloud/stackit-sdk-go/services/serverbackup v0.1.0 h1:fYCBNvh4tqE+DXYDfbJEjC3n/I78zTZajdcPTPB/yig=
github.com/stackitcloud/stackit-sdk-go/services/serverbackup v0.1.0/go.mod h1:ZYI3wj/NnhhWi25ugbdcniwnY/7mF6zN582c5HPe00o=
github.com/stackitcloud/stackit-sdk-go/services/ske v0.16.0 h1:trrJuRMzgXu6fiiMZiUx6+A1FNKEFhA1vGq5cr5Qn3U=
github.com/stackitcloud/stackit-sdk-go/services/ske v0.16.0/go.mod h1:0fFs4R7kg+gU7FNAIzzFvlCZJz6gyZ8CFhbK3eSrAwQ=
github.com/stackitcloud/stackit-sdk-go/services/sqlserverflex v0.2.0 h1:aIXxXx6u4+6C02MPb+hdItigeKeen7m+hEEG+Ej9sNs=
@ -184,6 +186,8 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/teambition/rrule-go v1.8.2 h1:lIjpjvWTj9fFUZCmuoVDrKVOtdiyzbzc93qTmRVe/J8=
github.com/teambition/rrule-go v1.8.2/go.mod h1:Ieq5AbrKGciP1V//Wq8ktsTXwSwJHDD5mD/wLBGl3p4=
github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI=
github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=

View file

@ -33,6 +33,7 @@ type ProviderData struct {
ResourceManagerCustomEndpoint string
SecretsManagerCustomEndpoint string
SQLServerFlexCustomEndpoint string
ServerBackupCustomEndpoint string
SKECustomEndpoint string
EnableBetaResources bool
}

View file

@ -0,0 +1,551 @@
package schedule
import (
"context"
"fmt"
"net/http"
"strconv"
"strings"
"github.com/hashicorp/terraform-plugin-framework-validators/int64validator"
"github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
"github.com/hashicorp/terraform-plugin-framework/path"
"github.com/hashicorp/terraform-plugin-framework/resource"
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
"github.com/hashicorp/terraform-plugin-framework/schema/validator"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/conversion"
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core"
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/utils"
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/validate"
"github.com/stackitcloud/stackit-sdk-go/core/config"
"github.com/stackitcloud/stackit-sdk-go/core/oapierror"
"github.com/stackitcloud/stackit-sdk-go/services/serverbackup"
)
// Ensure the implementation satisfies the expected interfaces.
// Compile-time checks: a missing method on scheduleResource becomes a build
// error here instead of a runtime failure inside the plugin framework.
var (
	_ resource.Resource                = &scheduleResource{}
	_ resource.ResourceWithConfigure   = &scheduleResource{}
	_ resource.ResourceWithImportState = &scheduleResource{}
)
// Model maps the stackit_server_backup_schedule resource schema data to
// Terraform state. ID is a composite identifier structured as
// "project_id,server_id,backup_schedule_id" (see mapFields).
type Model struct {
	ID               types.String                   `tfsdk:"id"`
	ProjectId        types.String                   `tfsdk:"project_id"`
	ServerId         types.String                   `tfsdk:"server_id"`
	BackupScheduleId types.Int64                    `tfsdk:"backup_schedule_id"`
	Name             types.String                   `tfsdk:"name"`
	Rrule            types.String                   `tfsdk:"rrule"`
	Enabled          types.Bool                     `tfsdk:"enabled"`
	BackupProperties *scheduleBackupPropertiesModel `tfsdk:"backup_properties"` // nil when the API returns no backup properties
}

// scheduleBackupPropertiesModel maps schedule backup_properties data
// (backup name, retention period in days per the schema docs, and the
// optional list of volume IDs to back up).
type scheduleBackupPropertiesModel struct {
	BackupName      types.String `tfsdk:"name"`
	RetentionPeriod types.Int64  `tfsdk:"retention_period"`
	VolumeIds       types.List   `tfsdk:"volume_ids"`
}
// NewScheduleResource is a helper function to simplify the provider
// implementation. It returns an unconfigured server backup schedule
// resource; the API client is injected later via Configure.
func NewScheduleResource() resource.Resource {
	var r scheduleResource
	return &r
}
// scheduleResource is the resource implementation.
type scheduleResource struct {
	// client talks to the STACKIT Server Backup API; set in Configure.
	client *serverbackup.APIClient
}
// Metadata returns the resource type name, built from the provider type
// name plus the resource-specific suffix.
func (r *scheduleResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
	const typeNameSuffix = "_server_backup_schedule"
	resp.TypeName = req.ProviderTypeName + typeNameSuffix
}
// Configure adds the provider configured client to the resource.
// It builds a serverbackup API client from the provider data, preferring a
// custom endpoint (when set) over the region-derived default endpoint.
func (r *scheduleResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
	// Prevent panic if the provider has not been configured.
	if req.ProviderData == nil {
		return
	}
	providerData, ok := req.ProviderData.(core.ProviderData)
	if !ok {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Expected configure type stackit.ProviderData, got %T", req.ProviderData))
		return
	}
	var apiClient *serverbackup.APIClient
	var err error
	// A custom endpoint takes precedence over the provider region.
	if providerData.ServerBackupCustomEndpoint != "" {
		ctx = tflog.SetField(ctx, "server_backup_custom_endpoint", providerData.ServerBackupCustomEndpoint)
		apiClient, err = serverbackup.NewAPIClient(
			config.WithCustomAuth(providerData.RoundTripper),
			config.WithEndpoint(providerData.ServerBackupCustomEndpoint),
		)
	} else {
		apiClient, err = serverbackup.NewAPIClient(
			config.WithCustomAuth(providerData.RoundTripper),
			config.WithRegion(providerData.Region),
		)
	}
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Configuring client: %v. This is an error related to the provider configuration, not to the resource configuration", err))
		return
	}
	r.client = apiClient
	tflog.Info(ctx, "Server backup client configured.")
}
// Schema defines the schema for the resource.
// project_id, server_id, name and rrule all force replacement on change;
// only `enabled` and `backup_properties` are updatable in place.
func (r *scheduleResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Description: "Server backup schedule resource schema. Must have a `region` specified in the provider configuration.",
		Attributes: map[string]schema.Attribute{
			// Composite Terraform identifier, assembled in mapFields.
			"id": schema.StringAttribute{
				Description: "Terraform's internal resource identifier. It is structured as \"`project_id`,`server_id`,`backup_schedule_id`\".",
				Computed:    true,
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.UseStateForUnknown(),
				},
			},
			"name": schema.StringAttribute{
				Description: "The schedule name.",
				Required:    true,
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.RequiresReplace(),
					stringplanmodifier.UseStateForUnknown(),
				},
				Validators: []validator.String{
					stringvalidator.LengthBetween(1, 255),
				},
			},
			// Assigned by the API on creation; never user-settable.
			"backup_schedule_id": schema.Int64Attribute{
				Description: "Backup schedule ID.",
				Computed:    true,
				PlanModifiers: []planmodifier.Int64{
					int64planmodifier.UseStateForUnknown(),
				},
				Validators: []validator.Int64{
					int64validator.AtLeast(1),
				},
			},
			"project_id": schema.StringAttribute{
				Description: "STACKIT Project ID to which the server is associated.",
				Required:    true,
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.RequiresReplace(),
					stringplanmodifier.UseStateForUnknown(),
				},
				Validators: []validator.String{
					validate.UUID(),
					// NoSeparator: the value is embedded in the comma-separated composite id.
					validate.NoSeparator(),
				},
			},
			"server_id": schema.StringAttribute{
				Description: "Server ID for the backup schedule.",
				Required:    true,
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.RequiresReplace(),
					stringplanmodifier.UseStateForUnknown(),
				},
				Validators: []validator.String{
					validate.UUID(),
					validate.NoSeparator(),
				},
			},
			"rrule": schema.StringAttribute{
				Description: "Backup schedule described in `rrule` (recurrence rule) format.",
				Required:    true,
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.RequiresReplace(),
					stringplanmodifier.UseStateForUnknown(),
				},
				Validators: []validator.String{
					validate.Rrule(),
					validate.NoSeparator(),
				},
			},
			"enabled": schema.BoolAttribute{
				Description: "Is the backup schedule enabled or disabled.",
				Required:    true,
			},
			"backup_properties": schema.SingleNestedAttribute{
				Description: "Backup schedule details for the backups.",
				Optional:    true,
				Attributes: map[string]schema.Attribute{
					// Optional; a null/empty list is sent to the API as null (see toCreatePayload).
					"volume_ids": schema.ListAttribute{
						ElementType: types.StringType,
						Optional:    true,
					},
					"name": schema.StringAttribute{
						Required: true,
					},
					"retention_period": schema.Int64Attribute{
						Required: true,
						Validators: []validator.Int64{
							int64validator.AtLeast(1),
						},
					},
				},
			},
		},
	}
}
// Create creates the resource and sets the initial Terraform state.
// It first enables the server backup service for the server (a no-op if
// already enabled), then creates the schedule and maps the API response
// back into state.
func (r *scheduleResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform
	var model Model
	diags := req.Plan.Get(ctx, &model)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
	projectId := model.ProjectId.ValueString()
	serverId := model.ServerId.ValueString()
	ctx = tflog.SetField(ctx, "project_id", projectId)
	ctx = tflog.SetField(ctx, "server_id", serverId)
	// Enable backups if not already enabled
	err := enableBackupsService(ctx, &model, r.client)
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating server backup schedule", fmt.Sprintf("Enabling server backup project before creation: %v", err))
		return
	}
	// Create new schedule
	payload, err := toCreatePayload(&model)
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating server backup schedule", fmt.Sprintf("Creating API payload: %v", err))
		return
	}
	scheduleResp, err := r.client.CreateBackupSchedule(ctx, projectId, serverId).CreateBackupSchedulePayload(*payload).Execute()
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating server backup schedule", fmt.Sprintf("Calling API: %v", err))
		return
	}
	// Guard before dereferencing the ID for logging: previously a response
	// with a nil Id would panic here instead of surfacing a diagnostic
	// (mapFields performs the same check, but only after this deref).
	if scheduleResp == nil || scheduleResp.Id == nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating server backup schedule", "API response did not contain a backup schedule id")
		return
	}
	ctx = tflog.SetField(ctx, "backup_schedule_id", *scheduleResp.Id)
	// Map response body to schema
	err = mapFields(ctx, scheduleResp, &model)
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating server backup schedule", fmt.Sprintf("Processing API payload: %v", err))
		return
	}
	diags = resp.State.Set(ctx, model)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
	tflog.Info(ctx, "Server backup schedule created.")
}
// Read refreshes the Terraform state with the latest data.
// A 404 from the API removes the resource from state (it was deleted
// outside of Terraform) rather than raising an error.
func (r *scheduleResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform
	var model Model
	diags := req.State.Get(ctx, &model)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
	projectId := model.ProjectId.ValueString()
	serverId := model.ServerId.ValueString()
	backupScheduleId := model.BackupScheduleId.ValueInt64()
	ctx = tflog.SetField(ctx, "project_id", projectId)
	ctx = tflog.SetField(ctx, "server_id", serverId)
	ctx = tflog.SetField(ctx, "backup_schedule_id", backupScheduleId)
	scheduleResp, err := r.client.GetBackupSchedule(ctx, projectId, serverId, strconv.FormatInt(backupScheduleId, 10)).Execute()
	if err != nil {
		oapiErr, ok := err.(*oapierror.GenericOpenAPIError) //nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped
		if ok && oapiErr.StatusCode == http.StatusNotFound {
			// Schedule no longer exists remotely; drop it from state.
			resp.State.RemoveResource(ctx)
			return
		}
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading backup schedule", fmt.Sprintf("Calling API: %v", err))
		return
	}
	// Map response body to schema
	err = mapFields(ctx, scheduleResp, &model)
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading backup schedule", fmt.Sprintf("Processing API payload: %v", err))
		return
	}
	// Set refreshed state
	diags = resp.State.Set(ctx, model)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
	tflog.Info(ctx, "Server backup schedule read.")
}
// Update updates the resource and sets the updated Terraform state on success.
// Only in-place-updatable attributes reach this path; name/rrule/project_id/
// server_id changes trigger replacement via their RequiresReplace plan modifiers.
func (r *scheduleResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform
	var model Model
	diags := req.Plan.Get(ctx, &model)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
	projectId := model.ProjectId.ValueString()
	serverId := model.ServerId.ValueString()
	backupScheduleId := model.BackupScheduleId.ValueInt64()
	ctx = tflog.SetField(ctx, "project_id", projectId)
	ctx = tflog.SetField(ctx, "server_id", serverId)
	ctx = tflog.SetField(ctx, "backup_schedule_id", backupScheduleId)
	// Update schedule
	payload, err := toUpdatePayload(&model)
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating server backup schedule", fmt.Sprintf("Creating API payload: %v", err))
		return
	}
	scheduleResp, err := r.client.UpdateBackupSchedule(ctx, projectId, serverId, strconv.FormatInt(backupScheduleId, 10)).UpdateBackupSchedulePayload(*payload).Execute()
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating server backup schedule", fmt.Sprintf("Calling API: %v", err))
		return
	}
	// Map response body to schema
	err = mapFields(ctx, scheduleResp, &model)
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating server backup schedule", fmt.Sprintf("Processing API payload: %v", err))
		return
	}
	diags = resp.State.Set(ctx, model)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
	tflog.Info(ctx, "Server backup schedule updated.")
}
// Delete deletes the resource and removes the Terraform state on success.
// After deleting the schedule it opportunistically disables the server
// backup service when no schedules and no backups remain on the server.
func (r *scheduleResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform
	var model Model
	diags := req.State.Get(ctx, &model)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
	projectId := model.ProjectId.ValueString()
	serverId := model.ServerId.ValueString()
	backupScheduleId := model.BackupScheduleId.ValueInt64()
	ctx = tflog.SetField(ctx, "project_id", projectId)
	ctx = tflog.SetField(ctx, "server_id", serverId)
	ctx = tflog.SetField(ctx, "backup_schedule_id", backupScheduleId)
	err := r.client.DeleteBackupSchedule(ctx, projectId, serverId, strconv.FormatInt(backupScheduleId, 10)).Execute()
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting server backup schedule", fmt.Sprintf("Calling API: %v", err))
		return
	}
	tflog.Info(ctx, "Server backup schedule deleted.")
	// Disable backups service in case there are no backups and no backup schedules.
	err = disableBackupsService(ctx, &model, r.client)
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting server backup schedule", fmt.Sprintf("Disabling server backup service after deleting schedule: %v", err))
		return
	}
}
// ImportState imports a resource into the Terraform state on success.
// The expected format of the resource import identifier is: // project_id,server_id,schedule_id
func (r *scheduleResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
	parts := strings.Split(req.ID, core.Separator)
	// The identifier must split into exactly three non-empty segments.
	valid := len(parts) == 3
	if valid {
		for _, part := range parts {
			if part == "" {
				valid = false
				break
			}
		}
	}
	if !valid {
		core.LogAndAddError(ctx, &resp.Diagnostics,
			"Error importing server backup schedule",
			fmt.Sprintf("Expected import identifier with format [project_id],[server_id],[backup_schedule_id], got %q", req.ID),
		)
		return
	}
	// The third segment is the numeric backup schedule ID.
	scheduleId, err := strconv.ParseInt(parts[2], 10, 64)
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics,
			"Error importing server backup schedule",
			fmt.Sprintf("Expected backup_schedule_id to be int64, got %q", parts[2]),
		)
		return
	}
	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), parts[0])...)
	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("server_id"), parts[1])...)
	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("backup_schedule_id"), scheduleId)...)
	tflog.Info(ctx, "Server backup schedule state imported.")
}
// mapFields copies the API response into the Terraform model and builds the
// composite resource ID ("project_id,server_id,backup_schedule_id").
// model.ProjectId and model.ServerId must already be populated by the caller.
// Returns an error when either input is nil or the response lacks an ID.
func mapFields(ctx context.Context, schedule *serverbackup.BackupSchedule, model *Model) error {
	if schedule == nil {
		return fmt.Errorf("response input is nil")
	}
	if model == nil {
		return fmt.Errorf("model input is nil")
	}
	if schedule.Id == nil {
		return fmt.Errorf("response id is nil")
	}
	model.BackupScheduleId = types.Int64PointerValue(schedule.Id)
	idParts := []string{
		model.ProjectId.ValueString(),
		model.ServerId.ValueString(),
		strconv.FormatInt(model.BackupScheduleId.ValueInt64(), 10),
	}
	model.ID = types.StringValue(
		strings.Join(idParts, core.Separator),
	)
	model.Name = types.StringPointerValue(schedule.Name)
	model.Rrule = types.StringPointerValue(schedule.Rrule)
	model.Enabled = types.BoolPointerValue(schedule.Enabled)
	if schedule.BackupProperties == nil {
		model.BackupProperties = nil
		return nil
	}
	ids, diags := types.ListValueFrom(ctx, types.StringType, schedule.BackupProperties.VolumeIds)
	if diags.HasError() {
		// Fixed misleading message: this function maps volume_ids, not hosts.
		return fmt.Errorf("failed to map volume_ids: %w", core.DiagsToError(diags))
	}
	// Use the pointer-safe constructors (as done for Name/Rrule/Enabled above)
	// so a nil name or retention_period from the API becomes a null value
	// instead of panicking on dereference.
	model.BackupProperties = &scheduleBackupPropertiesModel{
		BackupName:      types.StringPointerValue(schedule.BackupProperties.Name),
		RetentionPeriod: types.Int64PointerValue(schedule.BackupProperties.RetentionPeriod),
		VolumeIds:       ids,
	}
	return nil
}
// enableBackupsService enables the server backup service for the server in
// the model. If the service is already active, it logs and continues
// without error (the API's "already active" error is swallowed by design).
func enableBackupsService(ctx context.Context, model *Model, client *serverbackup.APIClient) error {
	projectId := model.ProjectId.ValueString()
	serverId := model.ServerId.ValueString()
	enableServicePayload := serverbackup.EnableServicePayload{}
	tflog.Debug(ctx, "Enabling server backup service")
	err := client.EnableService(ctx, projectId, serverId).EnableServicePayload(enableServicePayload).Execute()
	if err != nil {
		// Best-effort match on the API error text; there is no dedicated
		// error code for "already enabled".
		if strings.Contains(err.Error(), "Tried to activate already active service") {
			tflog.Debug(ctx, "Service for server backup already enabled")
			return nil
		}
		// Lowercased per Go error-string convention and to match the
		// sibling disableBackupsService wrapping style.
		return fmt.Errorf("enable server backup service: %w", err)
	}
	tflog.Info(ctx, "Enabled server backup service")
	return nil
}
// disableBackupsService disables the server backup service, but only when
// neither backup schedules nor backups remain for the server. Used as
// cleanup after deleting a schedule.
func disableBackupsService(ctx context.Context, model *Model, client *serverbackup.APIClient) error {
	tflog.Debug(ctx, "Disabling server backup service (in case there are no backups and no backup schedules)")
	projectId := model.ProjectId.ValueString()
	serverId := model.ServerId.ValueString()
	tflog.Debug(ctx, "Checking for existing backup schedules")
	schedules, err := client.ListBackupSchedules(ctx, projectId, serverId).Execute()
	if err != nil {
		return fmt.Errorf("list existing backup schedules: %w", err)
	}
	// Check the pointer itself before dereferencing: the previous code
	// evaluated `*schedules.Items != nil`, which panics when Items is nil
	// (e.g. the API omits the field).
	if schedules.Items != nil && len(*schedules.Items) > 0 {
		tflog.Debug(ctx, "Backup schedules found - will not disable server backup service")
		return nil
	}
	tflog.Debug(ctx, "Checking for existing backups")
	backups, err := client.ListBackups(ctx, projectId, serverId).Execute()
	if err != nil {
		return fmt.Errorf("list backups: %w", err)
	}
	// Same nil-pointer guard as for schedules above.
	if backups.Items != nil && len(*backups.Items) > 0 {
		tflog.Debug(ctx, "Backups found - will not disable server backup service")
		return nil
	}
	err = client.DisableService(ctx, projectId, serverId).Execute()
	if err != nil {
		return fmt.Errorf("disable server backup service: %w", err)
	}
	tflog.Info(ctx, "Disabled server backup service")
	return nil
}
// toCreatePayload converts the Terraform model into the API create payload.
// A null/unknown/empty volume_ids list is sent to the API as nil, since the
// API rejects an empty list. Returns an error on a nil model.
func toCreatePayload(model *Model) (*serverbackup.CreateBackupSchedulePayload, error) {
	if model == nil {
		return nil, fmt.Errorf("nil model")
	}
	backupProperties := serverbackup.BackupProperties{}
	if model.BackupProperties != nil {
		ids := []string{}
		var err error
		if !(model.BackupProperties.VolumeIds.IsNull() || model.BackupProperties.VolumeIds.IsUnknown()) {
			ids, err = utils.ListValuetoStringSlice(model.BackupProperties.VolumeIds)
			if err != nil {
				// Lowercased and rephrased per Go error-string convention
				// (was "Error by converting volume id").
				return nil, fmt.Errorf("converting volume ids: %w", err)
			}
		}
		// we should provide null to the API in case no volumeIds were chosen, else it errors
		if len(ids) == 0 {
			ids = nil
		}
		backupProperties = serverbackup.BackupProperties{
			Name:            conversion.StringValueToPointer(model.BackupProperties.BackupName),
			RetentionPeriod: conversion.Int64ValueToPointer(model.BackupProperties.RetentionPeriod),
			VolumeIds:       &ids,
		}
	}
	return &serverbackup.CreateBackupSchedulePayload{
		Enabled:          conversion.BoolValueToPointer(model.Enabled),
		Name:             conversion.StringValueToPointer(model.Name),
		Rrule:            conversion.StringValueToPointer(model.Rrule),
		BackupProperties: &backupProperties,
	}, nil
}
// toUpdatePayload converts the Terraform model into the API update payload.
// Mirrors toCreatePayload: a null/unknown/empty volume_ids list is sent to
// the API as nil, since the API rejects an empty list.
func toUpdatePayload(model *Model) (*serverbackup.UpdateBackupSchedulePayload, error) {
	if model == nil {
		return nil, fmt.Errorf("nil model")
	}
	backupProperties := serverbackup.BackupProperties{}
	if model.BackupProperties != nil {
		ids := []string{}
		var err error
		if !(model.BackupProperties.VolumeIds.IsNull() || model.BackupProperties.VolumeIds.IsUnknown()) {
			ids, err = utils.ListValuetoStringSlice(model.BackupProperties.VolumeIds)
			if err != nil {
				// Lowercased and rephrased per Go error-string convention
				// (was "Error by converting volume id").
				return nil, fmt.Errorf("converting volume ids: %w", err)
			}
		}
		// we should provide null to the API in case no volumeIds were chosen, else it errors
		if len(ids) == 0 {
			ids = nil
		}
		backupProperties = serverbackup.BackupProperties{
			Name:            conversion.StringValueToPointer(model.BackupProperties.BackupName),
			RetentionPeriod: conversion.Int64ValueToPointer(model.BackupProperties.RetentionPeriod),
			VolumeIds:       &ids,
		}
	}
	return &serverbackup.UpdateBackupSchedulePayload{
		Enabled:          conversion.BoolValueToPointer(model.Enabled),
		Name:             conversion.StringValueToPointer(model.Name),
		Rrule:            conversion.StringValueToPointer(model.Rrule),
		BackupProperties: &backupProperties,
	}, nil
}

View file

@ -0,0 +1,237 @@
package schedule
import (
"context"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/stackitcloud/stackit-sdk-go/core/utils"
sdk "github.com/stackitcloud/stackit-sdk-go/services/serverbackup"
)
// TestMapFields verifies that mapFields maps the SDK BackupSchedule response
// into the Terraform Model, including the composite ID, and errors on a nil
// response or a response without an ID.
func TestMapFields(t *testing.T) {
	tests := []struct {
		description string
		input       *sdk.BackupSchedule
		expected    Model
		isValid     bool
	}{
		{
			"default_values",
			&sdk.BackupSchedule{
				Id: utils.Ptr(int64(5)),
			},
			Model{
				ID:               types.StringValue("project_uid,server_uid,5"),
				ProjectId:        types.StringValue("project_uid"),
				ServerId:         types.StringValue("server_uid"),
				BackupScheduleId: types.Int64Value(5),
			},
			true,
		},
		{
			"simple_values",
			&sdk.BackupSchedule{
				Id:      utils.Ptr(int64(5)),
				Enabled: utils.Ptr(true),
				Name:    utils.Ptr("backup_schedule_name_1"),
				Rrule:   utils.Ptr("DTSTART;TZID=Europe/Sofia:20200803T023000 RRULE:FREQ=DAILY;INTERVAL=1"),
				BackupProperties: &sdk.BackupProperties{
					Name:            utils.Ptr("backup_name_1"),
					RetentionPeriod: utils.Ptr(int64(3)),
					VolumeIds:       &[]string{"uuid1", "uuid2"},
				},
			},
			Model{
				ServerId:         types.StringValue("server_uid"),
				ProjectId:        types.StringValue("project_uid"),
				BackupScheduleId: types.Int64Value(5),
				ID:               types.StringValue("project_uid,server_uid,5"),
				Name:             types.StringValue("backup_schedule_name_1"),
				Rrule:            types.StringValue("DTSTART;TZID=Europe/Sofia:20200803T023000 RRULE:FREQ=DAILY;INTERVAL=1"),
				Enabled:          types.BoolValue(true),
				BackupProperties: &scheduleBackupPropertiesModel{
					BackupName:      types.StringValue("backup_name_1"),
					RetentionPeriod: types.Int64Value(3),
					// listValueFrom is a test helper defined elsewhere in this
					// package (not shown here); presumably it builds a
					// types.List of strings — verify against its definition.
					VolumeIds: listValueFrom([]string{"uuid1", "uuid2"}),
				},
			},
			true,
		},
		{
			"nil_response",
			nil,
			Model{},
			false,
		},
		{
			"no_resource_id",
			&sdk.BackupSchedule{},
			Model{},
			false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.description, func(t *testing.T) {
			// mapFields requires ProjectId/ServerId pre-populated; seed them
			// from the expected model so the composite ID can be built.
			state := &Model{
				ProjectId: tt.expected.ProjectId,
				ServerId:  tt.expected.ServerId,
			}
			ctx := context.TODO()
			err := mapFields(ctx, tt.input, state)
			if !tt.isValid && err == nil {
				t.Fatalf("Should have failed")
			}
			if tt.isValid && err != nil {
				t.Fatalf("Should not have failed: %v", err)
			}
			if tt.isValid {
				diff := cmp.Diff(state, &tt.expected)
				if diff != "" {
					t.Fatalf("Data does not match: %s", diff)
				}
			}
		})
	}
}
// TestToCreatePayload verifies the conversion of the Terraform model into the
// API create payload; a nil model must yield an error.
func TestToCreatePayload(t *testing.T) {
	tests := []struct {
		description string
		input       *Model
		expected    *sdk.CreateBackupSchedulePayload
		isValid     bool
	}{
		{
			// Empty model: payload carries only an empty BackupProperties object.
			"default_values",
			&Model{},
			&sdk.CreateBackupSchedulePayload{
				BackupProperties: &sdk.BackupProperties{},
			},
			true,
		},
		{
			// Top-level fields set; nested backup properties absent in the model.
			"simple_values",
			&Model{
				Name:             types.StringValue("name"),
				Enabled:          types.BoolValue(true),
				Rrule:            types.StringValue("DTSTART;TZID=Europe/Sofia:20200803T023000 RRULE:FREQ=DAILY;INTERVAL=1"),
				BackupProperties: nil,
			},
			&sdk.CreateBackupSchedulePayload{
				Name:             utils.Ptr("name"),
				Enabled:          utils.Ptr(true),
				Rrule:            utils.Ptr("DTSTART;TZID=Europe/Sofia:20200803T023000 RRULE:FREQ=DAILY;INTERVAL=1"),
				BackupProperties: &sdk.BackupProperties{},
			},
			true,
		},
		{
			// Explicit empty strings are forwarded as empty-string pointers.
			"null_fields_and_int_conversions",
			&Model{
				Name:  types.StringValue(""),
				Rrule: types.StringValue(""),
			},
			&sdk.CreateBackupSchedulePayload{
				BackupProperties: &sdk.BackupProperties{},
				Name:             utils.Ptr(""),
				Rrule:            utils.Ptr(""),
			},
			true,
		},
		{
			// A nil model must be rejected.
			"nil_model",
			nil,
			nil,
			false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.description, func(t *testing.T) {
			output, err := toCreatePayload(tt.input)
			if !tt.isValid && err == nil {
				t.Fatalf("Should have failed")
			}
			if tt.isValid && err != nil {
				t.Fatalf("Should not have failed: %v", err)
			}
			if tt.isValid {
				diff := cmp.Diff(output, tt.expected)
				if diff != "" {
					t.Fatalf("Data does not match: %s", diff)
				}
			}
		})
	}
}
// TestToUpdatePayload verifies the conversion of the Terraform model into the
// API update payload; a nil model must yield an error. The cases mirror
// TestToCreatePayload since both payloads share the same fields.
func TestToUpdatePayload(t *testing.T) {
	tests := []struct {
		description string
		input       *Model
		expected    *sdk.UpdateBackupSchedulePayload
		isValid     bool
	}{
		{
			// Empty model: payload carries only an empty BackupProperties object.
			"default_values",
			&Model{},
			&sdk.UpdateBackupSchedulePayload{
				BackupProperties: &sdk.BackupProperties{},
			},
			true,
		},
		{
			// Top-level fields set; nested backup properties absent in the model.
			"simple_values",
			&Model{
				Name:             types.StringValue("name"),
				Enabled:          types.BoolValue(true),
				Rrule:            types.StringValue("DTSTART;TZID=Europe/Sofia:20200803T023000 RRULE:FREQ=DAILY;INTERVAL=1"),
				BackupProperties: nil,
			},
			&sdk.UpdateBackupSchedulePayload{
				Name:             utils.Ptr("name"),
				Enabled:          utils.Ptr(true),
				Rrule:            utils.Ptr("DTSTART;TZID=Europe/Sofia:20200803T023000 RRULE:FREQ=DAILY;INTERVAL=1"),
				BackupProperties: &sdk.BackupProperties{},
			},
			true,
		},
		{
			// Explicit empty strings are forwarded as empty-string pointers.
			"null_fields_and_int_conversions",
			&Model{
				Name:  types.StringValue(""),
				Rrule: types.StringValue(""),
			},
			&sdk.UpdateBackupSchedulePayload{
				BackupProperties: &sdk.BackupProperties{},
				Name:             utils.Ptr(""),
				Rrule:            utils.Ptr(""),
			},
			true,
		},
		{
			// A nil model must be rejected.
			"nil_model",
			nil,
			nil,
			false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.description, func(t *testing.T) {
			output, err := toUpdatePayload(tt.input)
			if !tt.isValid && err == nil {
				t.Fatalf("Should have failed")
			}
			if tt.isValid && err != nil {
				t.Fatalf("Should not have failed: %v", err)
			}
			if tt.isValid {
				diff := cmp.Diff(output, tt.expected)
				if diff != "" {
					t.Fatalf("Data does not match: %s", diff)
				}
			}
		})
	}
}

View file

@ -0,0 +1,180 @@
package schedule
import (
"context"
"fmt"
"net/http"
"strconv"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/schema/validator"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core"
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/validate"
"github.com/stackitcloud/stackit-sdk-go/core/config"
"github.com/stackitcloud/stackit-sdk-go/core/oapierror"
"github.com/stackitcloud/stackit-sdk-go/services/serverbackup"
)
// Compile-time proof that scheduleDataSource satisfies datasource.DataSource.
var _ datasource.DataSource = (*scheduleDataSource)(nil)
// NewScheduleDataSource is a helper function to simplify the provider implementation.
func NewScheduleDataSource() datasource.DataSource {
	var ds scheduleDataSource
	return &ds
}
// scheduleDataSource is the data source implementation.
type scheduleDataSource struct {
	// client is the server backup API client; it is set in Configure.
	client *serverbackup.APIClient
}
// Metadata returns the data source type name.
func (r *scheduleDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	const typeSuffix = "_server_backup_schedule"
	resp.TypeName = req.ProviderTypeName + typeSuffix
}
// Configure adds the provider configured client to the data source.
func (r *scheduleDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	// Prevent panic if the provider has not been configured.
	if req.ProviderData == nil {
		return
	}
	providerData, ok := req.ProviderData.(core.ProviderData)
	if !ok {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Expected configure type stackit.ProviderData, got %T", req.ProviderData))
		return
	}

	// A custom endpoint takes precedence over the region-based default.
	options := []config.ConfigurationOption{config.WithCustomAuth(providerData.RoundTripper)}
	if providerData.ServerBackupCustomEndpoint != "" {
		ctx = tflog.SetField(ctx, "server_backup_custom_endpoint", providerData.ServerBackupCustomEndpoint)
		options = append(options, config.WithEndpoint(providerData.ServerBackupCustomEndpoint))
	} else {
		options = append(options, config.WithRegion(providerData.Region))
	}

	apiClient, err := serverbackup.NewAPIClient(options...)
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Configuring client: %v. This is an error related to the provider configuration, not to the data source configuration", err))
		return
	}

	r.client = apiClient
	tflog.Info(ctx, "Server backup client configured")
}
// Schema defines the schema for the data source.
func (r *scheduleDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Description: "Server backup schedule resource schema. Must have a `region` specified in the provider configuration.",
		Attributes: map[string]schema.Attribute{
			// Synthetic Terraform identifier; not part of the API payload.
			"id": schema.StringAttribute{
				Description: "Terraform's internal resource identifier. It is structured as \"`project_id`,`server_id`,`backup_schedule_id`\".",
				Computed:    true,
			},
			"name": schema.StringAttribute{
				Description: "The schedule name.",
				Computed:    true,
			},
			// The three attributes below together identify the schedule to look up.
			"backup_schedule_id": schema.Int64Attribute{
				Description: "Backup schedule ID.",
				Required:    true,
			},
			"project_id": schema.StringAttribute{
				Description: "STACKIT Project ID to which the server is associated.",
				Required:    true,
				Validators: []validator.String{
					validate.UUID(),
					validate.NoSeparator(),
				},
			},
			"server_id": schema.StringAttribute{
				Description: "Server ID for the backup schedule.",
				Required:    true,
				Validators: []validator.String{
					validate.UUID(),
					validate.NoSeparator(),
				},
			},
			"rrule": schema.StringAttribute{
				Description: "Backup schedule described in `rrule` (recurrence rule) format.",
				Computed:    true,
			},
			"enabled": schema.BoolAttribute{
				Description: "Is the backup schedule enabled or disabled.",
				Computed:    true,
			},
			// Nested read-only details of the backups produced by the schedule.
			"backup_properties": schema.SingleNestedAttribute{
				Description: "Backup schedule details for the backups.",
				Computed:    true,
				Attributes: map[string]schema.Attribute{
					"volume_ids": schema.ListAttribute{
						ElementType: types.StringType,
						Computed:    true,
					},
					"name": schema.StringAttribute{
						Computed: true,
					},
					"retention_period": schema.Int64Attribute{
						Computed: true,
					},
				},
			},
		},
	}
}
// Read refreshes the Terraform state with the latest data.
func (r *scheduleDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform
	var model Model
	resp.Diagnostics.Append(req.Config.Get(ctx, &model)...)
	if resp.Diagnostics.HasError() {
		return
	}

	projectId := model.ProjectId.ValueString()
	serverId := model.ServerId.ValueString()
	backupScheduleId := model.BackupScheduleId.ValueInt64()
	ctx = tflog.SetField(ctx, "project_id", projectId)
	ctx = tflog.SetField(ctx, "server_id", serverId)
	ctx = tflog.SetField(ctx, "backup_schedule_id", backupScheduleId)

	scheduleResp, err := r.client.GetBackupSchedule(ctx, projectId, serverId, strconv.FormatInt(backupScheduleId, 10)).Execute()
	if err != nil {
		// On a 404 the schedule no longer exists: drop it from state before reporting.
		oapiErr, ok := err.(*oapierror.GenericOpenAPIError) //nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped
		if ok && oapiErr.StatusCode == http.StatusNotFound {
			resp.State.RemoveResource(ctx)
		}
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading server backup schedule", fmt.Sprintf("Calling API: %v", err))
		return
	}

	// Map response body to schema
	if err := mapFields(ctx, scheduleResp, &model); err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading server backup schedule", fmt.Sprintf("Processing API payload: %v", err))
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, model)...)
	if resp.Diagnostics.HasError() {
		return
	}
	tflog.Info(ctx, "Server backup schedule read")
}

View file

@ -0,0 +1,238 @@
package schedule
import (
"context"
"fmt"
"net/http"
"strings"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/schema/validator"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core"
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/validate"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/stackitcloud/stackit-sdk-go/core/config"
"github.com/stackitcloud/stackit-sdk-go/core/oapierror"
"github.com/stackitcloud/stackit-sdk-go/services/serverbackup"
)
// Compile-time proof that schedulesDataSource satisfies datasource.DataSource.
var _ datasource.DataSource = (*schedulesDataSource)(nil)
// NewSchedulesDataSource is a helper function to simplify the provider implementation.
func NewSchedulesDataSource() datasource.DataSource {
	var ds schedulesDataSource
	return &ds
}
// schedulesDataSource is the data source implementation.
type schedulesDataSource struct {
	// client is the server backup API client; it is set in Configure.
	client *serverbackup.APIClient
}
// Metadata returns the data source type name.
func (r *schedulesDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	const typeSuffix = "_server_backup_schedules"
	resp.TypeName = req.ProviderTypeName + typeSuffix
}
// Configure adds the provider configured client to the data source.
func (r *schedulesDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
	// Prevent panic if the provider has not been configured.
	if req.ProviderData == nil {
		return
	}
	providerData, ok := req.ProviderData.(core.ProviderData)
	if !ok {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Expected configure type stackit.ProviderData, got %T", req.ProviderData))
		return
	}

	// A custom endpoint takes precedence over the region-based default.
	options := []config.ConfigurationOption{config.WithCustomAuth(providerData.RoundTripper)}
	if providerData.ServerBackupCustomEndpoint != "" {
		ctx = tflog.SetField(ctx, "server_backup_custom_endpoint", providerData.ServerBackupCustomEndpoint)
		options = append(options, config.WithEndpoint(providerData.ServerBackupCustomEndpoint))
	} else {
		options = append(options, config.WithRegion(providerData.Region))
	}

	apiClient, err := serverbackup.NewAPIClient(options...)
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Configuring client: %v. This is an error related to the provider configuration, not to the data source configuration", err))
		return
	}

	r.client = apiClient
	tflog.Info(ctx, "Server backup client configured")
}
// Schema defines the schema for the data source.
func (r *schedulesDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Description: "Server backup schedule data source schema.",
		Attributes: map[string]schema.Attribute{
			// Synthetic Terraform identifier; not part of the API payload.
			"id": schema.StringAttribute{
				Description: "Terraform's internal data source identifier. It is structured as \"`project_id`,`server_id`\".",
				Computed:    true,
			},
			"project_id": schema.StringAttribute{
				Description: "STACKIT Project ID (UUID) to which the server is associated.",
				Required:    true,
				Validators: []validator.String{
					validate.UUID(),
					validate.NoSeparator(),
				},
			},
			"server_id": schema.StringAttribute{
				Description: "Server ID (UUID) to which the backup schedule is associated.",
				Required:    true,
				Validators: []validator.String{
					validate.UUID(),
					validate.NoSeparator(),
				},
			},
			// One list entry per backup schedule found on the server.
			"items": schema.ListNestedAttribute{
				Computed: true,
				NestedObject: schema.NestedAttributeObject{
					Attributes: map[string]schema.Attribute{
						"backup_schedule_id": schema.Int64Attribute{
							Computed: true,
						},
						"name": schema.StringAttribute{
							Description: "The backup schedule name.",
							Computed:    true,
						},
						"rrule": schema.StringAttribute{
							Description: "Backup schedule described in `rrule` (recurrence rule) format.",
							Computed:    true,
						},
						"enabled": schema.BoolAttribute{
							Description: "Is the backup schedule enabled or disabled.",
							Computed:    true,
						},
						"backup_properties": schema.SingleNestedAttribute{
							Description: "Backup schedule details for the backups.",
							Computed:    true,
							Attributes: map[string]schema.Attribute{
								"volume_ids": schema.ListAttribute{
									ElementType: types.StringType,
									Computed:    true,
								},
								"name": schema.StringAttribute{
									Computed: true,
								},
								"retention_period": schema.Int64Attribute{
									Computed: true,
								},
							},
						},
					},
				},
			},
		},
	}
}
// schedulesDataSourceModel maps the data source schema data.
type schedulesDataSourceModel struct {
	// ID is the synthetic identifier "<project_id>,<server_id>".
	ID        types.String                   `tfsdk:"id"`
	ProjectId types.String                   `tfsdk:"project_id"`
	ServerId  types.String                   `tfsdk:"server_id"`
	// Items holds one entry per backup schedule returned by the API.
	Items []schedulesDatasourceItemModel `tfsdk:"items"`
}
// schedulesDatasourceItemModel maps schedule schema data.
type schedulesDatasourceItemModel struct {
	BackupScheduleId types.Int64  `tfsdk:"backup_schedule_id"`
	Name             types.String `tfsdk:"name"`
	// Rrule is the recurrence rule string as returned by the API.
	Rrule   types.String `tfsdk:"rrule"`
	Enabled types.Bool   `tfsdk:"enabled"`
	// BackupProperties is nil when the API response carries no backup details.
	BackupProperties *scheduleBackupPropertiesModel `tfsdk:"backup_properties"`
}
// Read refreshes the Terraform state with the latest data.
func (r *schedulesDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform
	var model schedulesDataSourceModel
	resp.Diagnostics.Append(req.Config.Get(ctx, &model)...)
	if resp.Diagnostics.HasError() {
		return
	}

	projectId := model.ProjectId.ValueString()
	serverId := model.ServerId.ValueString()
	ctx = tflog.SetField(ctx, "project_id", projectId)
	ctx = tflog.SetField(ctx, "server_id", serverId)

	schedules, err := r.client.ListBackupSchedules(ctx, projectId, serverId).Execute()
	if err != nil {
		// On a 404 the schedules are gone: drop the state before reporting.
		oapiErr, ok := err.(*oapierror.GenericOpenAPIError) //nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped
		if ok && oapiErr.StatusCode == http.StatusNotFound {
			resp.State.RemoveResource(ctx)
		}
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading server backup schedules", fmt.Sprintf("Calling API: %v", err))
		return
	}

	// Map response body to schema
	if err := mapSchedulesDatasourceFields(ctx, schedules, &model); err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading server backup schedules", fmt.Sprintf("Processing API payload: %v", err))
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, model)...)
	if resp.Diagnostics.HasError() {
		return
	}
	tflog.Info(ctx, "Server backup schedules read")
}
// mapSchedulesDatasourceFields maps the API list response into the data source
// model. The model's ProjectId and ServerId must already be populated; they are
// combined into the synthetic ID. Returns an error on nil inputs or when the
// volume ID list cannot be converted.
func mapSchedulesDatasourceFields(ctx context.Context, schedules *serverbackup.ListBackupSchedules200Response, model *schedulesDataSourceModel) error {
	if schedules == nil {
		return fmt.Errorf("response input is nil")
	}
	if model == nil {
		return fmt.Errorf("model input is nil")
	}
	tflog.Debug(ctx, "response", map[string]any{"schedules": schedules})
	projectId := model.ProjectId.ValueString()
	serverId := model.ServerId.ValueString()
	idParts := []string{projectId, serverId}
	model.ID = types.StringValue(
		strings.Join(idParts, core.Separator),
	)
	// A response without an item list means there is nothing to map.
	if schedules.Items == nil {
		return nil
	}
	for _, schedule := range *schedules.Items {
		// Pointer-aware conversions: a missing field becomes a null value
		// instead of panicking on a nil dereference.
		scheduleState := schedulesDatasourceItemModel{
			BackupScheduleId: types.Int64PointerValue(schedule.Id),
			Name:             types.StringPointerValue(schedule.Name),
			Rrule:            types.StringPointerValue(schedule.Rrule),
			Enabled:          types.BoolPointerValue(schedule.Enabled),
		}
		if schedule.BackupProperties != nil {
			ids, diags := types.ListValueFrom(ctx, types.StringType, schedule.BackupProperties.VolumeIds)
			if diags.HasError() {
				return fmt.Errorf("failed to map volume_ids: %w", core.DiagsToError(diags))
			}
			scheduleState.BackupProperties = &scheduleBackupPropertiesModel{
				BackupName:      types.StringPointerValue(schedule.BackupProperties.Name),
				RetentionPeriod: types.Int64PointerValue(schedule.BackupProperties.RetentionPeriod),
				VolumeIds:       ids,
			}
		}
		model.Items = append(model.Items, scheduleState)
	}
	return nil
}

View file

@ -0,0 +1,105 @@
package schedule
import (
"context"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
"github.com/stackitcloud/stackit-sdk-go/core/utils"
sdk "github.com/stackitcloud/stackit-sdk-go/services/serverbackup"
)
// listValueFrom builds a terraform string list value from the given items,
// discarding conversion diagnostics (test fixtures are always convertible).
func listValueFrom(items []string) basetypes.ListValue {
	ctx := context.TODO()
	value, _ := types.ListValueFrom(ctx, types.StringType, items)
	return value
}
// TestMapSchedulesDataSourceFields verifies that mapSchedulesDatasourceFields
// maps the API list response into the data source model and rejects a nil
// response.
func TestMapSchedulesDataSourceFields(t *testing.T) {
	tests := []struct {
		description string
		input       *sdk.ListBackupSchedules200Response
		expected    schedulesDataSourceModel
		isValid     bool
	}{
		{
			// An empty item list still produces the synthetic ID, with no items.
			"empty response",
			&sdk.ListBackupSchedules200Response{
				Items: &[]sdk.BackupSchedule{},
			},
			schedulesDataSourceModel{
				ID:        types.StringValue("project_uid,server_uid"),
				ProjectId: types.StringValue("project_uid"),
				ServerId:  types.StringValue("server_uid"),
				Items:     nil,
			},
			true,
		},
		{
			// A fully populated schedule, including nested backup properties.
			"simple_values",
			&sdk.ListBackupSchedules200Response{
				Items: &[]sdk.BackupSchedule{
					{
						Id:      utils.Ptr(int64(5)),
						Enabled: utils.Ptr(true),
						Name:    utils.Ptr("backup_schedule_name_1"),
						Rrule:   utils.Ptr("DTSTART;TZID=Europe/Sofia:20200803T023000 RRULE:FREQ=DAILY;INTERVAL=1"),
						BackupProperties: &sdk.BackupProperties{
							Name:            utils.Ptr("backup_name_1"),
							RetentionPeriod: utils.Ptr(int64(14)),
							VolumeIds:       &[]string{"uuid1", "uuid2"},
						},
					},
				},
			},
			schedulesDataSourceModel{
				ID:        types.StringValue("project_uid,server_uid"),
				ServerId:  types.StringValue("server_uid"),
				ProjectId: types.StringValue("project_uid"),
				Items: []schedulesDatasourceItemModel{
					{
						BackupScheduleId: types.Int64Value(5),
						Name:             types.StringValue("backup_schedule_name_1"),
						Rrule:            types.StringValue("DTSTART;TZID=Europe/Sofia:20200803T023000 RRULE:FREQ=DAILY;INTERVAL=1"),
						Enabled:          types.BoolValue(true),
						BackupProperties: &scheduleBackupPropertiesModel{
							BackupName:      types.StringValue("backup_name_1"),
							RetentionPeriod: types.Int64Value(14),
							VolumeIds:       listValueFrom([]string{"uuid1", "uuid2"}),
						},
					},
				},
			},
			true,
		},
		{
			// A nil API response must be rejected.
			"nil_response",
			nil,
			schedulesDataSourceModel{},
			false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.description, func(t *testing.T) {
			// Pre-populate state with the identifiers the mapper does not derive
			// from the response itself.
			state := &schedulesDataSourceModel{
				ProjectId: tt.expected.ProjectId,
				ServerId:  tt.expected.ServerId,
			}
			ctx := context.TODO()
			err := mapSchedulesDatasourceFields(ctx, tt.input, state)
			if !tt.isValid && err == nil {
				t.Fatalf("Should have failed")
			}
			if tt.isValid && err != nil {
				t.Fatalf("Should not have failed: %v", err)
			}
			if tt.isValid {
				diff := cmp.Diff(state, &tt.expected)
				if diff != "" {
					t.Fatalf("Data does not match: %s", diff)
				}
			}
		})
	}
}

View file

@ -0,0 +1,231 @@
package serverbackup_test
import (
"context"
"fmt"
"regexp"
"strconv"
"strings"
"testing"
"github.com/hashicorp/terraform-plugin-testing/helper/resource"
"github.com/hashicorp/terraform-plugin-testing/terraform"
"github.com/stackitcloud/stackit-sdk-go/core/config"
"github.com/stackitcloud/stackit-sdk-go/core/utils"
"github.com/stackitcloud/stackit-sdk-go/services/serverbackup"
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core"
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/testutil"
)
// Server backup schedule resource data
// Shared fixture values used by the acceptance test steps and checks below.
var serverBackupScheduleResource = map[string]string{
	"project_id":           testutil.ProjectId,
	"server_id":            testutil.ServerId,
	"backup_schedule_name": testutil.ResourceNameWithDateTime("server-backup-schedule"),
	// Space-separated rrule format, as accepted by the API (see validate.Rrule).
	"rrule":       "DTSTART;TZID=Europe/Sofia:20200803T023000 RRULE:FREQ=DAILY;INTERVAL=1",
	"backup_name": testutil.ResourceNameWithDateTime("server-backup-schedule-backup"),
}
// resourceConfig renders the Terraform configuration for the test backup
// schedule with the given retention period (in days); enabled is always true.
func resourceConfig(retentionPeriod int64) string {
	return fmt.Sprintf(`
%s

resource "stackit_server_backup_schedule" "test_schedule" {
project_id = "%s"
server_id  = "%s"
name  = "%s"
rrule = "%s"
enabled = true
backup_properties = {
name = "%s"
retention_period = %d
volume_ids = null
}
}
`,
		testutil.ServerBackupProviderConfig(),
		serverBackupScheduleResource["project_id"],
		serverBackupScheduleResource["server_id"],
		serverBackupScheduleResource["backup_schedule_name"],
		serverBackupScheduleResource["rrule"],
		serverBackupScheduleResource["backup_name"],
		retentionPeriod,
	)
}
// resourceConfigWithUpdate renders the updated configuration used by the
// update step: enabled is flipped to false and the retention period is
// hard-coded to 20 days.
// NOTE(review): this largely duplicates resourceConfig — consider folding both
// into one helper parameterized by enabled/retention.
func resourceConfigWithUpdate() string {
	return fmt.Sprintf(`
%s

resource "stackit_server_backup_schedule" "test_schedule" {
project_id = "%s"
server_id  = "%s"
name  = "%s"
rrule = "%s"
enabled = false
backup_properties = {
name = "%s"
retention_period = 20
volume_ids = null
}
}
`,
		testutil.ServerBackupProviderConfig(),
		serverBackupScheduleResource["project_id"],
		serverBackupScheduleResource["server_id"],
		serverBackupScheduleResource["backup_schedule_name"],
		serverBackupScheduleResource["rrule"],
		serverBackupScheduleResource["backup_name"],
	)
}
// TestAccServerBackupScheduleResource exercises the full lifecycle of the
// stackit_server_backup_schedule resource: failed creation (invalid retention
// period), creation, data source reads, import, and update. Deletion is
// performed implicitly by the framework and verified by
// testAccCheckServerBackupScheduleDestroy.
func TestAccServerBackupScheduleResource(t *testing.T) {
	if testutil.ServerId == "" {
		// Use t.Skip so the test run reports SKIP instead of silently passing.
		t.Skip("TF_ACC_SERVER_ID not set, skipping test")
	}
	var invalidRetentionPeriod int64 = 0
	var validRetentionPeriod int64 = 15
	resource.Test(t, resource.TestCase{
		ProtoV6ProviderFactories: testutil.TestAccProtoV6ProviderFactories,
		CheckDestroy:             testAccCheckServerBackupScheduleDestroy,
		Steps: []resource.TestStep{
			// Creation fail: retention_period below the allowed minimum of 1.
			{
				Config:      resourceConfig(invalidRetentionPeriod),
				ExpectError: regexp.MustCompile(`.*backup_properties.retention_period value must be at least 1*`),
			},
			// Creation
			{
				Config: resourceConfig(validRetentionPeriod),
				Check: resource.ComposeAggregateTestCheckFunc(
					// Backup schedule data
					resource.TestCheckResourceAttr("stackit_server_backup_schedule.test_schedule", "project_id", serverBackupScheduleResource["project_id"]),
					resource.TestCheckResourceAttr("stackit_server_backup_schedule.test_schedule", "server_id", serverBackupScheduleResource["server_id"]),
					resource.TestCheckResourceAttrSet("stackit_server_backup_schedule.test_schedule", "backup_schedule_id"),
					resource.TestCheckResourceAttrSet("stackit_server_backup_schedule.test_schedule", "id"),
					resource.TestCheckResourceAttr("stackit_server_backup_schedule.test_schedule", "name", serverBackupScheduleResource["backup_schedule_name"]),
					resource.TestCheckResourceAttr("stackit_server_backup_schedule.test_schedule", "rrule", serverBackupScheduleResource["rrule"]),
					resource.TestCheckResourceAttr("stackit_server_backup_schedule.test_schedule", "enabled", strconv.FormatBool(true)),
					resource.TestCheckResourceAttr("stackit_server_backup_schedule.test_schedule", "backup_properties.name", serverBackupScheduleResource["backup_name"]),
				),
			},
			// data source
			{
				Config: fmt.Sprintf(`
%s

data "stackit_server_backup_schedules" "schedules_data_test" {
project_id = stackit_server_backup_schedule.test_schedule.project_id
server_id  = stackit_server_backup_schedule.test_schedule.server_id
}
data "stackit_server_backup_schedule" "schedule_data_test" {
project_id = stackit_server_backup_schedule.test_schedule.project_id
server_id  = stackit_server_backup_schedule.test_schedule.server_id
backup_schedule_id = stackit_server_backup_schedule.test_schedule.backup_schedule_id
}`,
					resourceConfig(validRetentionPeriod),
				),
				Check: resource.ComposeAggregateTestCheckFunc(
					// Server backup schedule data
					resource.TestCheckResourceAttr("data.stackit_server_backup_schedule.schedule_data_test", "project_id", serverBackupScheduleResource["project_id"]),
					resource.TestCheckResourceAttr("data.stackit_server_backup_schedule.schedule_data_test", "server_id", serverBackupScheduleResource["server_id"]),
					resource.TestCheckResourceAttrSet("data.stackit_server_backup_schedule.schedule_data_test", "backup_schedule_id"),
					resource.TestCheckResourceAttrSet("data.stackit_server_backup_schedule.schedule_data_test", "id"),
					resource.TestCheckResourceAttr("data.stackit_server_backup_schedule.schedule_data_test", "name", serverBackupScheduleResource["backup_schedule_name"]),
					resource.TestCheckResourceAttr("data.stackit_server_backup_schedule.schedule_data_test", "rrule", serverBackupScheduleResource["rrule"]),
					resource.TestCheckResourceAttr("data.stackit_server_backup_schedule.schedule_data_test", "enabled", strconv.FormatBool(true)),
					resource.TestCheckResourceAttr("data.stackit_server_backup_schedule.schedule_data_test", "backup_properties.name", serverBackupScheduleResource["backup_name"]),
					// Server backup schedules data
					resource.TestCheckResourceAttr("data.stackit_server_backup_schedules.schedules_data_test", "project_id", serverBackupScheduleResource["project_id"]),
					resource.TestCheckResourceAttr("data.stackit_server_backup_schedules.schedules_data_test", "server_id", serverBackupScheduleResource["server_id"]),
					resource.TestCheckResourceAttrSet("data.stackit_server_backup_schedules.schedules_data_test", "id"),
				),
			},
			// Import
			{
				ResourceName: "stackit_server_backup_schedule.test_schedule",
				ImportStateIdFunc: func(s *terraform.State) (string, error) {
					// Rebuild the "[project_id],[server_id],[backup_schedule_id]" ID
					// from the created resource's state.
					r, ok := s.RootModule().Resources["stackit_server_backup_schedule.test_schedule"]
					if !ok {
						return "", fmt.Errorf("couldn't find resource stackit_server_backup_schedule.test_schedule")
					}
					scheduleId, ok := r.Primary.Attributes["backup_schedule_id"]
					if !ok {
						return "", fmt.Errorf("couldn't find attribute backup_schedule_id")
					}
					return fmt.Sprintf("%s,%s,%s", testutil.ProjectId, testutil.ServerId, scheduleId), nil
				},
				ImportState:       true,
				ImportStateVerify: true,
			},
			// Update: enabled flipped to false, retention period set to 20.
			{
				Config: resourceConfigWithUpdate(),
				Check: resource.ComposeAggregateTestCheckFunc(
					// Backup schedule data
					resource.TestCheckResourceAttr("stackit_server_backup_schedule.test_schedule", "project_id", serverBackupScheduleResource["project_id"]),
					resource.TestCheckResourceAttr("stackit_server_backup_schedule.test_schedule", "server_id", serverBackupScheduleResource["server_id"]),
					resource.TestCheckResourceAttrSet("stackit_server_backup_schedule.test_schedule", "backup_schedule_id"),
					resource.TestCheckResourceAttrSet("stackit_server_backup_schedule.test_schedule", "id"),
					resource.TestCheckResourceAttr("stackit_server_backup_schedule.test_schedule", "name", serverBackupScheduleResource["backup_schedule_name"]),
					resource.TestCheckResourceAttr("stackit_server_backup_schedule.test_schedule", "rrule", serverBackupScheduleResource["rrule"]),
					resource.TestCheckResourceAttr("stackit_server_backup_schedule.test_schedule", "enabled", strconv.FormatBool(false)),
					resource.TestCheckResourceAttr("stackit_server_backup_schedule.test_schedule", "backup_properties.retention_period", strconv.FormatInt(20, 10)),
					resource.TestCheckResourceAttr("stackit_server_backup_schedule.test_schedule", "backup_properties.name", serverBackupScheduleResource["backup_name"]),
				),
			},
			// Deletion is done by the framework implicitly
		},
	})
}
// testAccCheckServerBackupScheduleDestroy deletes any backup schedules created
// by the acceptance test that still exist on the server after the test run.
// Returns an error when a client cannot be built, a resource ID is malformed,
// or a leftover schedule cannot be deleted.
func testAccCheckServerBackupScheduleDestroy(s *terraform.State) error {
	ctx := context.Background()
	var client *serverbackup.APIClient
	var err error
	if testutil.ServerBackupCustomEndpoint == "" {
		client, err = serverbackup.NewAPIClient(
			config.WithRegion("eu01"),
		)
	} else {
		client, err = serverbackup.NewAPIClient(
			config.WithEndpoint(testutil.ServerBackupCustomEndpoint),
		)
	}
	if err != nil {
		return fmt.Errorf("creating client: %w", err)
	}
	schedulesToDestroy := []string{}
	for _, rs := range s.RootModule().Resources {
		if rs.Type != "stackit_server_backup_schedule" {
			continue
		}
		// server backup schedule terraform ID: "[project_id],[server_id],[backup_schedule_id]"
		// Validate the shape before indexing to avoid an out-of-range panic.
		idParts := strings.Split(rs.Primary.ID, core.Separator)
		if len(idParts) != 3 {
			return fmt.Errorf("unexpected resource ID %q, expected \"[project_id],[server_id],[backup_schedule_id]\"", rs.Primary.ID)
		}
		schedulesToDestroy = append(schedulesToDestroy, idParts[2])
	}
	schedulesResp, err := client.ListBackupSchedules(ctx, testutil.ProjectId, testutil.ServerId).Execute()
	if err != nil {
		return fmt.Errorf("getting schedulesResp: %w", err)
	}
	// A response without items means nothing is left to clean up.
	if schedulesResp.Items == nil {
		return nil
	}
	schedules := *schedulesResp.Items
	for i := range schedules {
		if schedules[i].Id == nil {
			continue
		}
		scheduleId := strconv.FormatInt(*schedules[i].Id, 10)
		if utils.Contains(schedulesToDestroy, scheduleId) {
			err := client.DeleteBackupScheduleExecute(ctx, testutil.ProjectId, testutil.ServerId, scheduleId)
			if err != nil {
				return fmt.Errorf("destroying server backup schedule %s during CheckDestroy: %w", scheduleId, err)
			}
		}
	}
	return nil
}

View file

@ -30,6 +30,8 @@ var (
// ProjectId is the id of project used for tests
ProjectId = os.Getenv("TF_ACC_PROJECT_ID")
// ServerId is the id of a server used for some tests
ServerId = getenv("TF_ACC_SERVER_ID", "")
// TestProjectParentContainerID is the container id of the parent resource under which projects are created as part of the resource-manager acceptance tests
TestProjectParentContainerID = os.Getenv("TF_ACC_TEST_PROJECT_PARENT_CONTAINER_ID")
// TestProjectParentContainerID is the uuid of the parent resource under which projects are created as part of the resource-manager acceptance tests
@ -53,6 +55,7 @@ var (
ResourceManagerCustomEndpoint = os.Getenv("TF_ACC_RESOURCEMANAGER_CUSTOM_ENDPOINT")
SecretsManagerCustomEndpoint = os.Getenv("TF_ACC_SECRETSMANAGER_CUSTOM_ENDPOINT")
SQLServerFlexCustomEndpoint = os.Getenv("TF_ACC_SQLSERVERFLEX_CUSTOM_ENDPOINT")
ServerBackupCustomEndpoint = os.Getenv("TF_ACC_SERVER_BACKUP_CUSTOM_ENDPOINT")
SKECustomEndpoint = os.Getenv("TF_ACC_SKE_CUSTOM_ENDPOINT")
// OpenStack user domain name
@ -310,6 +313,21 @@ func SQLServerFlexProviderConfig() string {
)
}
// ServerBackupProviderConfig returns the provider block used by server backup
// acceptance tests: region-based ("eu01") by default, or pointing at the
// custom endpoint when TF_ACC_SERVER_BACKUP_CUSTOM_ENDPOINT is set.
func ServerBackupProviderConfig() string {
	if ServerBackupCustomEndpoint == "" {
		return `
provider "stackit" {
region = "eu01"
}`
	}
	return fmt.Sprintf(`
provider "stackit" {
server_backup_custom_endpoint = "%s"
}`,
		ServerBackupCustomEndpoint,
	)
}
func SKEProviderConfig() string {
if SKECustomEndpoint == "" {
return `
@ -373,3 +391,11 @@ func readTestTokenFromCredentialsFile(path string) (string, error) {
}
return credentials.TF_ACC_TEST_PROJECT_SERVICE_ACCOUNT_TOKEN, nil
}
// getenv returns the value of the environment variable key, falling back to
// defaultValue when the variable is unset or empty.
func getenv(key, defaultValue string) string {
	if value := os.Getenv(key); value != "" {
		return value
	}
	return defaultValue
}

View file

@ -14,6 +14,7 @@ import (
"github.com/hashicorp/terraform-plugin-framework/schema/validator"
"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core"
"github.com/teambition/rrule-go"
)
const (
@ -225,3 +226,30 @@ func CIDR() *Validator {
},
}
}
// Rrule returns a validator that checks a string attribute is a parseable
// recurrence rule in the API's space-separated "DTSTART... RRULE:..." format.
func Rrule() *Validator {
	description := "value must be in a valid RRULE format"
	return &Validator{
		description: description,
		validate: func(ctx context.Context, req validator.StringRequest, resp *validator.StringResponse) {
			// The go library rrule-go expects \n before RRULE (to be a newline and not a space)
			// for example: "DTSTART;TZID=America/New_York:19970902T090000\nRRULE:FREQ=DAILY;COUNT=10"
			// whereas a valid rrule according to the API docs is:
			// for example: "DTSTART;TZID=America/New_York:19970902T090000 RRULE:FREQ=DAILY;COUNT=10"
			//
			// So we will accept a ' ' (which is valid per API docs),
			// but replace it with a '\n' for the rrule-go validations
			//
			// NOTE(review): this replaces EVERY space, not just the one before
			// "RRULE". Valid rrule strings contain no other spaces, but a value
			// that did would be mangled before parsing — confirm this is intended.
			value := req.ConfigValue.ValueString()
			value = strings.ReplaceAll(value, " ", "\n")
			if _, err := rrule.StrToRRuleSet(value); err != nil {
				// Report the original (un-substituted) value back to the user.
				resp.Diagnostics.Append(validatordiag.InvalidAttributeValueDiagnostic(
					req.Path,
					description,
					req.ConfigValue.ValueString(),
				))
			}
		},
	}
}

View file

@ -574,3 +574,57 @@ func TestCIDR(t *testing.T) {
})
}
}
// TestRrule exercises the Rrule validator with both valid and invalid
// recurrence-rule strings, covering the space-separated form accepted by the
// API as well as the newline-separated form expected by rrule-go.
func TestRrule(t *testing.T) {
	tests := []struct {
		description string
		input       string
		isValid     bool
	}{
		{
			"ok",
			"DTSTART;TZID=Europe/Sofia:20200803T023000 RRULE:FREQ=DAILY;INTERVAL=1",
			true,
		},
		{
			// Newline-separated variant. The property name must be "RRULE";
			// the previous input read "\nRULE:" (typo), which rrule-go rejects
			// as an unsupported property and so contradicted isValid == true.
			"ok-2",
			"DTSTART;TZID=Europe/Sofia:20200803T023000\nRRULE:FREQ=DAILY;INTERVAL=1",
			true,
		},
		{
			"Empty",
			"",
			false,
		},
		{
			"not ok",
			"afssfdfs",
			false,
		},
		{
			// No separator between DTSTART and RRULE at all.
			"not ok-missing-space-before-rrule",
			"DTSTART;TZID=Europe/Sofia:20200803T023000RRULE:FREQ=DAILY;INTERVAL=1",
			false,
		},
		{
			// INTERVAL has no value, which must fail numeric parsing.
			"not ok-missing-interval",
			"DTSTART;TZID=Europe/Sofia:20200803T023000 RRULE:FREQ=DAILY;INTERVAL=",
			false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.description, func(t *testing.T) {
			r := validator.StringResponse{}
			Rrule().ValidateString(context.Background(), validator.StringRequest{
				ConfigValue: types.StringValue(tt.input),
			}, &r)
			if !tt.isValid && !r.Diagnostics.HasError() {
				t.Fatalf("Should have failed")
			}
			if tt.isValid && r.Diagnostics.HasError() {
				t.Fatalf("Should not have failed: %v", r.Diagnostics.Errors())
			}
		})
	}
}

View file

@ -40,6 +40,7 @@ import (
resourceManagerProject "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/resourcemanager/project"
secretsManagerInstance "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/secretsmanager/instance"
secretsManagerUser "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/secretsmanager/user"
serverBackupSchedule "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/serverbackup/schedule"
skeCluster "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/ske/cluster"
skeKubeconfig "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/ske/kubeconfig"
skeProject "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/ske/project"
@ -101,6 +102,7 @@ type providerModel struct {
SecretsManagerCustomEndpoint types.String `tfsdk:"secretsmanager_custom_endpoint"`
SQLServerFlexCustomEndpoint types.String `tfsdk:"sqlserverflex_custom_endpoint"`
SKECustomEndpoint types.String `tfsdk:"ske_custom_endpoint"`
ServerBackupCustomEndpoint types.String `tfsdk:"server_backup_custom_endpoint"`
ResourceManagerCustomEndpoint types.String `tfsdk:"resourcemanager_custom_endpoint"`
TokenCustomEndpoint types.String `tfsdk:"token_custom_endpoint"`
JWKSCustomEndpoint types.String `tfsdk:"jwks_custom_endpoint"`
@ -131,6 +133,7 @@ func (p *Provider) Schema(_ context.Context, _ provider.SchemaRequest, resp *pro
"postgresql_custom_endpoint": "Custom endpoint for the PostgreSQL service",
"postgresflex_custom_endpoint": "Custom endpoint for the PostgresFlex service",
"redis_custom_endpoint": "Custom endpoint for the Redis service",
"server_backup_custom_endpoint": "Custom endpoint for the Server Backup service",
"resourcemanager_custom_endpoint": "Custom endpoint for the Resource Manager service",
"secretsmanager_custom_endpoint": "Custom endpoint for the Secrets Manager service",
"sqlserverflex_custom_endpoint": "Custom endpoint for the SQL Server Flex service",
@ -242,6 +245,10 @@ func (p *Provider) Schema(_ context.Context, _ provider.SchemaRequest, resp *pro
Optional: true,
Description: descriptions["ske_custom_endpoint"],
},
"server_backup_custom_endpoint": schema.StringAttribute{
Optional: true,
Description: descriptions["server_backup_custom_endpoint"],
},
"token_custom_endpoint": schema.StringAttribute{
Optional: true,
Description: descriptions["token_custom_endpoint"],
@ -400,6 +407,8 @@ func (p *Provider) DataSources(_ context.Context) []func() datasource.DataSource
secretsManagerUser.NewUserDataSource,
sqlServerFlexInstance.NewInstanceDataSource,
sqlServerFlexUser.NewUserDataSource,
serverBackupSchedule.NewScheduleDataSource,
serverBackupSchedule.NewSchedulesDataSource,
skeProject.NewProjectDataSource,
skeCluster.NewClusterDataSource,
}
@ -441,6 +450,7 @@ func (p *Provider) Resources(_ context.Context) []func() resource.Resource {
secretsManagerUser.NewUserResource,
sqlServerFlexInstance.NewInstanceResource,
sqlServerFlexUser.NewUserResource,
serverBackupSchedule.NewScheduleResource,
skeProject.NewProjectResource,
skeCluster.NewClusterResource,
skeKubeconfig.NewKubeconfigResource,