Object Storage: implement bucket (#45)
* Add object storage dependency * Add object storage * Add object storage * Implement bucket resource * Add map fields test * Fix typos * Implement data source * Add Object Storage bucket * Fix typo * Implement Object Storage acc tests * Go mod tidy * Reword description * Fix typos * Fix typo * Implement check destroy * Add region in check destroy * Add timeout in check destroy --------- Co-authored-by: Henrique Santos <henrique.santos@freiheit.com>
This commit is contained in:
parent
175ce93f85
commit
d926e2d559
9 changed files with 740 additions and 3 deletions
3
go.mod
3
go.mod
|
|
@ -15,7 +15,8 @@ require (
|
|||
github.com/stackitcloud/stackit-sdk-go/services/dns v0.2.0
|
||||
github.com/stackitcloud/stackit-sdk-go/services/logme v0.2.0
|
||||
github.com/stackitcloud/stackit-sdk-go/services/mariadb v0.2.0
|
||||
github.com/stackitcloud/stackit-sdk-go/services/opensearch v0.3.0
|
||||
github.com/stackitcloud/stackit-sdk-go/services/objectstorage v0.2.0
|
||||
github.com/stackitcloud/stackit-sdk-go/services/opensearch v0.2.0
|
||||
github.com/stackitcloud/stackit-sdk-go/services/postgresflex v0.1.0
|
||||
github.com/stackitcloud/stackit-sdk-go/services/postgresql v0.3.0
|
||||
github.com/stackitcloud/stackit-sdk-go/services/rabbitmq v0.2.0
|
||||
|
|
|
|||
6
go.sum
6
go.sum
|
|
@ -130,8 +130,10 @@ github.com/stackitcloud/stackit-sdk-go/services/logme v0.2.0 h1:DzG8h4irPUWUxa+s
|
|||
github.com/stackitcloud/stackit-sdk-go/services/logme v0.2.0/go.mod h1:KkWI0vWfBHRepIrH0VT2e1xwf1WaaNb/zPfSTRXAXjs=
|
||||
github.com/stackitcloud/stackit-sdk-go/services/mariadb v0.2.0 h1:lnqYVQ1kjMDJSVvKJjZW8eY1adWXlCiX5jIXyn2lOzg=
|
||||
github.com/stackitcloud/stackit-sdk-go/services/mariadb v0.2.0/go.mod h1:uTotoums0lfsmlTQVP5bYz4wWmfWlLuFx9i1ZzAyAkA=
|
||||
github.com/stackitcloud/stackit-sdk-go/services/opensearch v0.3.0 h1:32mjinxJ7xfdlTKShmyvfMU7BXI11DiV+W4SdkRL63o=
|
||||
github.com/stackitcloud/stackit-sdk-go/services/opensearch v0.3.0/go.mod h1:syCy6+8GsJu9lHyhN0Qeg66AgTv5LjlgtppbOVFIaPc=
|
||||
github.com/stackitcloud/stackit-sdk-go/services/objectstorage v0.2.0 h1:Nf7HFWSTrD/A1ZudSSu8LaaOYN+4607D1qWNRtZuo8Y=
|
||||
github.com/stackitcloud/stackit-sdk-go/services/objectstorage v0.2.0/go.mod h1:H0B0VBzyW90ksuG+Bu9iqOan80paw+J6Ik9AZuiz9M0=
|
||||
github.com/stackitcloud/stackit-sdk-go/services/opensearch v0.2.0 h1:pONEMH+p9xR1ACDEH6E1Jl5dnU1DixoJGD2iPWQOVuI=
|
||||
github.com/stackitcloud/stackit-sdk-go/services/opensearch v0.2.0/go.mod h1:pTTPSxx/BpcAm0ttcH+g4AiovC+oT7lXqca2C1XOxzY=
|
||||
github.com/stackitcloud/stackit-sdk-go/services/postgresflex v0.1.0 h1:vm3AgvTA6TaHd0WpmzKT6HY6fSLOJUNPslqeLByO5P4=
|
||||
github.com/stackitcloud/stackit-sdk-go/services/postgresflex v0.1.0/go.mod h1:sPEBUNRxaEsyhVOQKZrUdebexVX/z1RbvUFXxXOrICo=
|
||||
github.com/stackitcloud/stackit-sdk-go/services/postgresql v0.3.0 h1:svBOTZ4yOG3LFqBQbUoSkY1v4GWH9fG3VqyO5hQqGVQ=
|
||||
|
|
|
|||
|
|
@ -23,6 +23,7 @@ type ProviderData struct {
|
|||
LogMeCustomEndpoint string
|
||||
RabbitMQCustomEndpoint string
|
||||
MariaDBCustomEndpoint string
|
||||
ObjectStorageCustomEndpoint string
|
||||
OpenSearchCustomEndpoint string
|
||||
RedisCustomEndpoint string
|
||||
ArgusCustomEndpoint string
|
||||
|
|
|
|||
150
stackit/internal/services/objectstorage/bucket/datasource.go
Normal file
150
stackit/internal/services/objectstorage/bucket/datasource.go
Normal file
|
|
@ -0,0 +1,150 @@
|
|||
package objectstorage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource"
|
||||
"github.com/hashicorp/terraform-plugin-framework/schema/validator"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core"
|
||||
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/validate"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
|
||||
"github.com/stackitcloud/stackit-sdk-go/core/config"
|
||||
"github.com/stackitcloud/stackit-sdk-go/services/objectstorage"
|
||||
)
|
||||
|
||||
// Ensure the implementation satisfies the expected interfaces.
var (
	_ datasource.DataSource = &bucketDataSource{}
)

// NewBucketDataSource is a helper function to simplify the provider implementation.
func NewBucketDataSource() datasource.DataSource {
	return &bucketDataSource{}
}

// bucketDataSource is the data source implementation.
type bucketDataSource struct {
	// client is the ObjectStorage API client, set in Configure.
	client *objectstorage.APIClient
}
|
||||
|
||||
// Metadata returns the data source type name.
|
||||
func (r *bucketDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
|
||||
resp.TypeName = req.ProviderTypeName + "_objectstorage_bucket"
|
||||
}
|
||||
|
||||
// Configure adds the provider configured client to the data source.
|
||||
func (r *bucketDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
|
||||
// Prevent panic if the provider has not been configured.
|
||||
if req.ProviderData == nil {
|
||||
return
|
||||
}
|
||||
|
||||
providerData, ok := req.ProviderData.(core.ProviderData)
|
||||
if !ok {
|
||||
core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Expected configure type stackit.ProviderData, got %T", req.ProviderData))
|
||||
return
|
||||
}
|
||||
|
||||
var apiClient *objectstorage.APIClient
|
||||
var err error
|
||||
if providerData.ObjectStorageCustomEndpoint != "" {
|
||||
apiClient, err = objectstorage.NewAPIClient(
|
||||
config.WithCustomAuth(providerData.RoundTripper),
|
||||
config.WithEndpoint(providerData.ObjectStorageCustomEndpoint),
|
||||
)
|
||||
} else {
|
||||
apiClient, err = objectstorage.NewAPIClient(
|
||||
config.WithCustomAuth(providerData.RoundTripper),
|
||||
config.WithRegion(providerData.Region),
|
||||
)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Configuring client: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
r.client = apiClient
|
||||
tflog.Info(ctx, "ObjectStorage bucket client configured")
|
||||
}
|
||||
|
||||
// Schema defines the schema for the data source.
|
||||
func (r *bucketDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
|
||||
descriptions := map[string]string{
|
||||
"main": "ObjectStorage credentials data source schema.",
|
||||
"id": "Terraform's internal data source identifier. It is structured as \"`project_id`,`bucket_name`\".",
|
||||
"bucket_name": "The bucket name. It must be DNS conform.",
|
||||
"project_id": "STACKIT Project ID to which the bucket is associated.",
|
||||
"url_path_style": "URL in path style.",
|
||||
"url_virtual_hosted_style": "URL in virtual hosted style.",
|
||||
}
|
||||
|
||||
resp.Schema = schema.Schema{
|
||||
Description: descriptions["main"],
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"id": schema.StringAttribute{
|
||||
Description: descriptions["id"],
|
||||
Computed: true,
|
||||
},
|
||||
"bucket_name": schema.StringAttribute{
|
||||
Description: descriptions["bucket_name"],
|
||||
Required: true,
|
||||
Validators: []validator.String{
|
||||
validate.NoSeparator(),
|
||||
},
|
||||
},
|
||||
"project_id": schema.StringAttribute{
|
||||
Description: descriptions["project_id"],
|
||||
Required: true,
|
||||
Validators: []validator.String{
|
||||
validate.UUID(),
|
||||
validate.NoSeparator(),
|
||||
},
|
||||
},
|
||||
"url_path_style": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"url_virtual_hosted_style": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Read refreshes the Terraform state with the latest data.
|
||||
func (r *bucketDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform
|
||||
var model Model
|
||||
diags := req.Config.Get(ctx, &model)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
projectId := model.ProjectId.ValueString()
|
||||
bucketName := model.BucketName.ValueString()
|
||||
ctx = tflog.SetField(ctx, "project_id", projectId)
|
||||
ctx = tflog.SetField(ctx, "bucket_name", bucketName)
|
||||
|
||||
bucketResp, err := r.client.GetBucket(ctx, projectId, bucketName).Execute()
|
||||
if err != nil {
|
||||
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading bucket", fmt.Sprintf("Calling API: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
// Map response body to schema
|
||||
err = mapFields(bucketResp, &model)
|
||||
if err != nil {
|
||||
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading bucket", fmt.Sprintf("Processing API payload: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
// Set refreshed state
|
||||
diags = resp.State.Set(ctx, model)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "ObjectStorage bucket read")
|
||||
}
|
||||
295
stackit/internal/services/objectstorage/bucket/resource.go
Normal file
295
stackit/internal/services/objectstorage/bucket/resource.go
Normal file
|
|
@ -0,0 +1,295 @@
|
|||
package objectstorage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-framework/schema/validator"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core"
|
||||
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/validate"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-framework/path"
|
||||
"github.com/hashicorp/terraform-plugin-framework/resource"
|
||||
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
|
||||
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
|
||||
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/stackitcloud/stackit-sdk-go/core/config"
|
||||
"github.com/stackitcloud/stackit-sdk-go/services/objectstorage"
|
||||
)
|
||||
|
||||
// Ensure the implementation satisfies the expected interfaces.
var (
	_ resource.Resource                = &bucketResource{}
	_ resource.ResourceWithConfigure   = &bucketResource{}
	_ resource.ResourceWithImportState = &bucketResource{}
)

// Model is the Terraform schema model for an ObjectStorage bucket. It is
// shared by the bucket resource and the bucket data source.
type Model struct {
	Id                    types.String `tfsdk:"id"` // needed by TF
	BucketName            types.String `tfsdk:"bucket_name"`
	ProjectId             types.String `tfsdk:"project_id"`
	URLPathStyle          types.String `tfsdk:"url_path_style"`
	URLVirtualHostedStyle types.String `tfsdk:"url_virtual_hosted_style"`
}

// NewBucketResource is a helper function to simplify the provider implementation.
func NewBucketResource() resource.Resource {
	return &bucketResource{}
}

// bucketResource is the resource implementation.
type bucketResource struct {
	// client is the ObjectStorage API client, set in Configure.
	client *objectstorage.APIClient
}
|
||||
|
||||
// Metadata returns the resource type name.
|
||||
func (r *bucketResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
|
||||
resp.TypeName = req.ProviderTypeName + "_objectstorage_bucket"
|
||||
}
|
||||
|
||||
// Configure adds the provider configured client to the resource.
|
||||
func (r *bucketResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
|
||||
// Prevent panic if the provider has not been configured.
|
||||
if req.ProviderData == nil {
|
||||
return
|
||||
}
|
||||
|
||||
providerData, ok := req.ProviderData.(core.ProviderData)
|
||||
if !ok {
|
||||
core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Expected configure type stackit.ProviderData, got %T", req.ProviderData))
|
||||
return
|
||||
}
|
||||
|
||||
var apiClient *objectstorage.APIClient
|
||||
var err error
|
||||
if providerData.ObjectStorageCustomEndpoint != "" {
|
||||
apiClient, err = objectstorage.NewAPIClient(
|
||||
config.WithCustomAuth(providerData.RoundTripper),
|
||||
config.WithEndpoint(providerData.ObjectStorageCustomEndpoint),
|
||||
)
|
||||
} else {
|
||||
apiClient, err = objectstorage.NewAPIClient(
|
||||
config.WithCustomAuth(providerData.RoundTripper),
|
||||
config.WithRegion(providerData.Region),
|
||||
)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Configuring client: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
r.client = apiClient
|
||||
tflog.Info(ctx, "ObjectStorage bucket client configured")
|
||||
}
|
||||
|
||||
// Schema defines the schema for the resource.
|
||||
func (r *bucketResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
|
||||
descriptions := map[string]string{
|
||||
"main": "ObjectStorage bucket resource schema.",
|
||||
"id": "Terraform's internal resource identifier. It is structured as \"`project_id`,`bucket_name`\".",
|
||||
"bucket_name": "The bucket name. It must be DNS conform.",
|
||||
"project_id": "STACKIT Project ID to which the bucket is associated.",
|
||||
"url_path_style": "URL in path style.",
|
||||
"url_virtual_hosted_style": "URL in virtual hosted style.",
|
||||
}
|
||||
|
||||
resp.Schema = schema.Schema{
|
||||
Description: descriptions["main"],
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"id": schema.StringAttribute{
|
||||
Description: descriptions["id"],
|
||||
Computed: true,
|
||||
PlanModifiers: []planmodifier.String{
|
||||
stringplanmodifier.UseStateForUnknown(),
|
||||
},
|
||||
},
|
||||
"bucket_name": schema.StringAttribute{
|
||||
Description: descriptions["bucket_name"],
|
||||
Required: true,
|
||||
PlanModifiers: []planmodifier.String{
|
||||
stringplanmodifier.RequiresReplace(),
|
||||
stringplanmodifier.UseStateForUnknown(),
|
||||
},
|
||||
Validators: []validator.String{
|
||||
validate.NoSeparator(),
|
||||
},
|
||||
},
|
||||
"project_id": schema.StringAttribute{
|
||||
Description: descriptions["project_id"],
|
||||
Required: true,
|
||||
PlanModifiers: []planmodifier.String{
|
||||
stringplanmodifier.RequiresReplace(),
|
||||
stringplanmodifier.UseStateForUnknown(),
|
||||
},
|
||||
Validators: []validator.String{
|
||||
validate.UUID(),
|
||||
validate.NoSeparator(),
|
||||
},
|
||||
},
|
||||
"url_path_style": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
"url_virtual_hosted_style": schema.StringAttribute{
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Create creates the resource and sets the initial Terraform state.
func (r *bucketResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform
	var model Model
	diags := req.Plan.Get(ctx, &model)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
	projectId := model.ProjectId.ValueString()
	bucketName := model.BucketName.ValueString()
	ctx = tflog.SetField(ctx, "project_id", projectId)
	ctx = tflog.SetField(ctx, "bucket_name", bucketName)

	// Create new bucket
	_, err := r.client.CreateBucket(ctx, projectId, bucketName).Execute()
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating bucket", fmt.Sprintf("Calling API: %v", err))
		return
	}

	// Creation is asynchronous: wait (up to 1 minute) until the bucket is
	// actually available, then use the wait result to populate the state.
	wr, err := objectstorage.CreateBucketWaitHandler(ctx, r.client, projectId, bucketName).SetTimeout(1 * time.Minute).WaitWithContext(ctx)
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating bucket", fmt.Sprintf("Bucket creation waiting: %v", err))
		return
	}
	got, ok := wr.(*objectstorage.GetBucketResponse)
	if !ok {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating bucket", fmt.Sprintf("Wait result conversion, got %+v", wr))
		return
	}

	// Map response body to schema
	err = mapFields(got, &model)
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating bucket", fmt.Sprintf("Processing API payload: %v", err))
		return
	}
	diags = resp.State.Set(ctx, model)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
	tflog.Info(ctx, "ObjectStorage bucket created")
}
|
||||
|
||||
// Read refreshes the Terraform state with the latest data.
func (r *bucketResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform
	var model Model
	diags := req.State.Get(ctx, &model)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
	projectId := model.ProjectId.ValueString()
	bucketName := model.BucketName.ValueString()
	ctx = tflog.SetField(ctx, "project_id", projectId)
	ctx = tflog.SetField(ctx, "bucket_name", bucketName)

	// Fetch the current bucket state from the API.
	bucketResp, err := r.client.GetBucket(ctx, projectId, bucketName).Execute()
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading bucket", fmt.Sprintf("Calling API: %v", err))
		return
	}

	// Map response body to schema
	err = mapFields(bucketResp, &model)
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading bucket", fmt.Sprintf("Processing API payload: %v", err))
		return
	}

	// Set refreshed state
	diags = resp.State.Set(ctx, model)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
	tflog.Info(ctx, "ObjectStorage bucket read")
}
|
||||
|
||||
// Update updates the resource and sets the updated Terraform state on success.
// All mutable-looking attributes carry RequiresReplace plan modifiers, so
// Terraform should never invoke Update; reaching it indicates a provider bug.
func (r *bucketResource) Update(ctx context.Context, _ resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform
	// Update shouldn't be called
	core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating bucket", "Bucket can't be updated")
}
|
||||
|
||||
// Delete deletes the resource and removes the Terraform state on success.
|
||||
func (r *bucketResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform
|
||||
var model Model
|
||||
diags := req.State.Get(ctx, &model)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
projectId := model.ProjectId.ValueString()
|
||||
bucketName := model.BucketName.ValueString()
|
||||
ctx = tflog.SetField(ctx, "project_id", projectId)
|
||||
ctx = tflog.SetField(ctx, "bucket_name", bucketName)
|
||||
|
||||
// Delete existing bucket
|
||||
_, err := r.client.DeleteBucket(ctx, projectId, bucketName).Execute()
|
||||
if err != nil {
|
||||
core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting bucket", fmt.Sprintf("Calling API: %v", err))
|
||||
}
|
||||
_, err = objectstorage.DeleteBucketWaitHandler(ctx, r.client, projectId, bucketName).SetTimeout(1 * time.Minute).WaitWithContext(ctx)
|
||||
if err != nil {
|
||||
core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting bucket", fmt.Sprintf("Bucket deletion waiting: %v", err))
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "ObjectStorage bucket deleted")
|
||||
}
|
||||
|
||||
// ImportState imports a resource into the Terraform state on success.
|
||||
// The expected format of the resource import identifier is: project_id,bucket_name
|
||||
func (r *bucketResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
|
||||
idParts := strings.Split(req.ID, core.Separator)
|
||||
if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" {
|
||||
core.LogAndAddError(ctx, &resp.Diagnostics,
|
||||
"Error importing bucket",
|
||||
fmt.Sprintf("Expected import identifier with format [project_id],[bucket_name], got %q", req.ID),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...)
|
||||
resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("bucket_name"), idParts[1])...)
|
||||
tflog.Info(ctx, "ObjectStorage bucket state imported")
|
||||
}
|
||||
|
||||
func mapFields(bucketResp *objectstorage.GetBucketResponse, model *Model) error {
|
||||
if bucketResp == nil {
|
||||
return fmt.Errorf("response input is nil")
|
||||
}
|
||||
if bucketResp.Bucket == nil {
|
||||
return fmt.Errorf("response bucket is nil")
|
||||
}
|
||||
if model == nil {
|
||||
return fmt.Errorf("model input is nil")
|
||||
}
|
||||
bucket := bucketResp.Bucket
|
||||
|
||||
idParts := []string{
|
||||
model.ProjectId.ValueString(),
|
||||
model.BucketName.ValueString(),
|
||||
}
|
||||
model.Id = types.StringValue(
|
||||
strings.Join(idParts, core.Separator),
|
||||
)
|
||||
model.URLPathStyle = types.StringPointerValue(bucket.UrlPathStyle)
|
||||
model.URLVirtualHostedStyle = types.StringPointerValue(bucket.UrlVirtualHostedStyle)
|
||||
return nil
|
||||
}
|
||||
101
stackit/internal/services/objectstorage/bucket/resource_test.go
Normal file
101
stackit/internal/services/objectstorage/bucket/resource_test.go
Normal file
|
|
@ -0,0 +1,101 @@
|
|||
package objectstorage
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/stackitcloud/stackit-sdk-go/core/utils"
|
||||
"github.com/stackitcloud/stackit-sdk-go/services/objectstorage"
|
||||
)
|
||||
|
||||
// TestMapFields verifies that mapFields builds the Terraform model from a
// GetBucket API response: URL fields are copied over (nil pointers map to
// null, empty strings are preserved) and the ID is assembled as
// "<project_id>,<bucket_name>". Nil responses and responses without a bucket
// must be rejected.
func TestMapFields(t *testing.T) {
	tests := []struct {
		description string
		input       *objectstorage.GetBucketResponse
		expected    Model
		isValid     bool
	}{
		{
			// Empty bucket payload: URL fields stay null.
			"default_values",
			&objectstorage.GetBucketResponse{
				Bucket: &objectstorage.Bucket{},
			},
			Model{
				Id:                    types.StringValue("pid,bname"),
				BucketName:            types.StringValue("bname"),
				ProjectId:             types.StringValue("pid"),
				URLPathStyle:          types.StringNull(),
				URLVirtualHostedStyle: types.StringNull(),
			},
			true,
		},
		{
			"simple_values",
			&objectstorage.GetBucketResponse{
				Bucket: &objectstorage.Bucket{
					UrlPathStyle:          utils.Ptr("url/path/style"),
					UrlVirtualHostedStyle: utils.Ptr("url/virtual/hosted/style"),
				},
			},
			Model{
				Id:                    types.StringValue("pid,bname"),
				BucketName:            types.StringValue("bname"),
				ProjectId:             types.StringValue("pid"),
				URLPathStyle:          types.StringValue("url/path/style"),
				URLVirtualHostedStyle: types.StringValue("url/virtual/hosted/style"),
			},
			true,
		},
		{
			// Empty strings are valid values and must not become null.
			"empty_strings",
			&objectstorage.GetBucketResponse{
				Bucket: &objectstorage.Bucket{
					UrlPathStyle:          utils.Ptr(""),
					UrlVirtualHostedStyle: utils.Ptr(""),
				},
			},
			Model{
				Id:                    types.StringValue("pid,bname"),
				BucketName:            types.StringValue("bname"),
				ProjectId:             types.StringValue("pid"),
				URLPathStyle:          types.StringValue(""),
				URLVirtualHostedStyle: types.StringValue(""),
			},
			true,
		},
		{
			"nil_response",
			nil,
			Model{},
			false,
		},
		{
			"no_bucket",
			&objectstorage.GetBucketResponse{},
			Model{},
			false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.description, func(t *testing.T) {
			// mapFields reads project_id and bucket_name from the model to
			// build the composite ID, so pre-populate them.
			model := &Model{
				ProjectId:  tt.expected.ProjectId,
				BucketName: tt.expected.BucketName,
			}
			err := mapFields(tt.input, model)
			if !tt.isValid && err == nil {
				t.Fatalf("Should have failed")
			}
			if tt.isValid && err != nil {
				t.Fatalf("Should not have failed: %v", err)
			}
			if tt.isValid {
				diff := cmp.Diff(model, &tt.expected)
				if diff != "" {
					t.Fatalf("Data does not match: %s", diff)
				}
			}
		})
	}
}
|
||||
|
|
@ -0,0 +1,159 @@
|
|||
package objectstorage_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-testing/helper/acctest"
|
||||
"github.com/hashicorp/terraform-plugin-testing/helper/resource"
|
||||
"github.com/hashicorp/terraform-plugin-testing/terraform"
|
||||
"github.com/stackitcloud/stackit-sdk-go/core/config"
|
||||
"github.com/stackitcloud/stackit-sdk-go/core/utils"
|
||||
"github.com/stackitcloud/stackit-sdk-go/services/objectstorage"
|
||||
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core"
|
||||
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/testutil"
|
||||
)
|
||||
|
||||
// Bucket resource data. The bucket name is randomized per test run so that
// parallel/repeated runs do not collide on the (globally unique) bucket name.
var bucketResource = map[string]string{
	"project_id":  testutil.ProjectId,
	"bucket_name": fmt.Sprintf("acc-test-%s", acctest.RandStringFromCharSet(20, acctest.CharSetAlpha)),
}

// resourceConfig renders the Terraform configuration for the test bucket,
// including the provider block from testutil.
func resourceConfig() string {
	return fmt.Sprintf(`
%s

resource "stackit_objectstorage_bucket" "bucket" {
	project_id = "%s"
	bucket_name = "%s"
}
`,
		testutil.ObjectStorageProviderConfig(),
		bucketResource["project_id"],
		bucketResource["bucket_name"],
	)
}
|
||||
|
||||
// TestAccObjectStorageResource runs the bucket acceptance test end to end:
// create the bucket, read it back through the data source, and import it by
// its composite "<project_id>,<bucket_name>" identifier. Deletion is done
// implicitly by the framework and leftovers are cleaned up in
// testAccCheckObjectStorageDestroy.
func TestAccObjectStorageResource(t *testing.T) {
	resource.Test(t, resource.TestCase{
		ProtoV6ProviderFactories: testutil.TestAccProtoV6ProviderFactories,
		CheckDestroy:             testAccCheckObjectStorageDestroy,
		Steps: []resource.TestStep{

			// Creation
			{
				Config: resourceConfig(),
				Check: resource.ComposeAggregateTestCheckFunc(
					// Instance data
					resource.TestCheckResourceAttr("stackit_objectstorage_bucket.bucket", "project_id", bucketResource["project_id"]),
					resource.TestCheckResourceAttr("stackit_objectstorage_bucket.bucket", "bucket_name", bucketResource["bucket_name"]),
					resource.TestCheckResourceAttrSet("stackit_objectstorage_bucket.bucket", "url_path_style"),
					resource.TestCheckResourceAttrSet("stackit_objectstorage_bucket.bucket", "url_virtual_hosted_style"),
				),
			},
			// Data source
			{
				Config: fmt.Sprintf(`
%s

data "stackit_objectstorage_bucket" "bucket" {
	project_id = stackit_objectstorage_bucket.bucket.project_id
	bucket_name = stackit_objectstorage_bucket.bucket.bucket_name
}`,
					resourceConfig(),
				),
				Check: resource.ComposeAggregateTestCheckFunc(
					// Instance data: data source values must match the resource.
					resource.TestCheckResourceAttr("data.stackit_objectstorage_bucket.bucket", "project_id", bucketResource["project_id"]),
					resource.TestCheckResourceAttrPair(
						"stackit_objectstorage_bucket.bucket", "bucket_name",
						"data.stackit_objectstorage_bucket.bucket", "bucket_name",
					),
					resource.TestCheckResourceAttrPair(
						"stackit_objectstorage_bucket.bucket", "url_path_style",
						"data.stackit_objectstorage_bucket.bucket", "url_path_style",
					),
					resource.TestCheckResourceAttrPair(
						"stackit_objectstorage_bucket.bucket", "url_virtual_hosted_style",
						"data.stackit_objectstorage_bucket.bucket", "url_virtual_hosted_style",
					),
				),
			},
			// Import
			{
				ResourceName: "stackit_objectstorage_bucket.bucket",
				// Build the import ID "<project_id>,<bucket_name>" from the
				// created resource's attributes.
				ImportStateIdFunc: func(s *terraform.State) (string, error) {
					r, ok := s.RootModule().Resources["stackit_objectstorage_bucket.bucket"]
					if !ok {
						return "", fmt.Errorf("couldn't find resource stackit_objectstorage_bucket.bucket")
					}
					bucketName, ok := r.Primary.Attributes["bucket_name"]
					if !ok {
						return "", fmt.Errorf("couldn't find attribute bucket_name")
					}

					return fmt.Sprintf("%s,%s", testutil.ProjectId, bucketName), nil
				},
				ImportState:       true,
				ImportStateVerify: true,
			},
			// Deletion is done by the framework implicitly
		},
	})
}
|
||||
|
||||
func testAccCheckObjectStorageDestroy(s *terraform.State) error {
|
||||
ctx := context.Background()
|
||||
var client *objectstorage.APIClient
|
||||
var err error
|
||||
if testutil.ObjectStorageCustomEndpoint == "" {
|
||||
client, err = objectstorage.NewAPIClient(
|
||||
config.WithRegion("eu01"),
|
||||
)
|
||||
} else {
|
||||
client, err = objectstorage.NewAPIClient(
|
||||
config.WithEndpoint(testutil.ObjectStorageCustomEndpoint),
|
||||
)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating client: %w", err)
|
||||
}
|
||||
|
||||
bucketsToDestroy := []string{}
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "stackit_objectstorage_bucket" {
|
||||
continue
|
||||
}
|
||||
// bucket terraform ID: "[project_id],[bucket_name]"
|
||||
bucketName := strings.Split(rs.Primary.ID, core.Separator)[1]
|
||||
bucketsToDestroy = append(bucketsToDestroy, bucketName)
|
||||
}
|
||||
|
||||
bucketsResp, err := client.GetBuckets(ctx, testutil.ProjectId).Execute()
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting bucketsResp: %w", err)
|
||||
}
|
||||
|
||||
buckets := *bucketsResp.Buckets
|
||||
for _, bucket := range buckets {
|
||||
if bucket.Name == nil {
|
||||
continue
|
||||
}
|
||||
bucketName := *bucket.Name
|
||||
if utils.Contains(bucketsToDestroy, *bucket.Name) {
|
||||
_, err := client.DeleteBucketExecute(ctx, testutil.ProjectId, bucketName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("destroying bucket %s during CheckDestroy: %w", bucketName, err)
|
||||
}
|
||||
_, err = objectstorage.DeleteBucketWaitHandler(ctx, client, testutil.ProjectId, bucketName).SetTimeout(1 * time.Minute).WaitWithContext(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("destroying instance %s during CheckDestroy: waiting for deletion %w", bucketName, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -40,6 +40,7 @@ var (
|
|||
LogMeCustomEndpoint = os.Getenv("TF_ACC_LOGME_CUSTOM_ENDPOINT")
|
||||
MariaDBCustomEndpoint = os.Getenv("TF_ACC_MARIADB_CUSTOM_ENDPOINT")
|
||||
OpenSearchCustomEndpoint = os.Getenv("TF_ACC_OPENSEARCH_CUSTOM_ENDPOINT")
|
||||
ObjectStorageCustomEndpoint = os.Getenv("TF_ACC_OBJECTSTORAGE_CUSTOM_ENDPOINT")
|
||||
PostgreSQLCustomEndpoint = os.Getenv("TF_ACC_POSTGRESQL_CUSTOM_ENDPOINT")
|
||||
PostgresFlexCustomEndpoint = os.Getenv("TF_ACC_POSTGRESFLEX_CUSTOM_ENDPOINT")
|
||||
RabbitMQCustomEndpoint = os.Getenv("TF_ACC_RABBITMQ_CUSTOM_ENDPOINT")
|
||||
|
|
@ -104,6 +105,21 @@ func MariaDBProviderConfig() string {
|
|||
)
|
||||
}
|
||||
|
||||
func ObjectStorageProviderConfig() string {
|
||||
if ObjectStorageCustomEndpoint == "" {
|
||||
return `
|
||||
provider "stackit" {
|
||||
region = "eu01"
|
||||
}`
|
||||
}
|
||||
return fmt.Sprintf(`
|
||||
provider "stackit" {
|
||||
objectstorage_custom_endpoint = "%s"
|
||||
}`,
|
||||
ObjectStorageCustomEndpoint,
|
||||
)
|
||||
}
|
||||
|
||||
func OpenSearchProviderConfig() string {
|
||||
if OpenSearchCustomEndpoint == "" {
|
||||
return `
|
||||
|
|
|
|||
|
|
@ -18,6 +18,7 @@ import (
|
|||
logMeInstance "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/logme/instance"
|
||||
mariaDBCredentials "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/mariadb/credentials"
|
||||
mariaDBInstance "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/mariadb/instance"
|
||||
objectStorageBucket "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/objectstorage/bucket"
|
||||
openSearchCredentials "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/opensearch/credentials"
|
||||
openSearchInstance "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/opensearch/instance"
|
||||
postgresFlexInstance "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/postgresflex/instance"
|
||||
|
|
@ -73,6 +74,7 @@ type providerModel struct {
|
|||
LogMeCustomEndpoint types.String `tfsdk:"logme_custom_endpoint"`
|
||||
RabbitMQCustomEndpoint types.String `tfsdk:"rabbitmq_custom_endpoint"`
|
||||
MariaDBCustomEndpoint types.String `tfsdk:"mariadb_custom_endpoint"`
|
||||
ObjectStorageCustomEndpoint types.String `tfsdk:"objectstorage_custom_endpoint"`
|
||||
OpenSearchCustomEndpoint types.String `tfsdk:"opensearch_custom_endpoint"`
|
||||
RedisCustomEndpoint types.String `tfsdk:"redis_custom_endpoint"`
|
||||
ArgusCustomEndpoint types.String `tfsdk:"argus_custom_endpoint"`
|
||||
|
|
@ -93,6 +95,7 @@ func (p *Provider) Schema(_ context.Context, _ provider.SchemaRequest, resp *pro
|
|||
"logme_custom_endpoint": "Custom endpoint for the LogMe service",
|
||||
"rabbitmq_custom_endpoint": "Custom endpoint for the RabbitMQ service",
|
||||
"mariadb_custom_endpoint": "Custom endpoint for the MariaDB service",
|
||||
"objectstorage_custom_endpoint": "Custom endpoint for the Object Storage service",
|
||||
"opensearch_custom_endpoint": "Custom endpoint for the OpenSearch service",
|
||||
"argus_custom_endpoint": "Custom endpoint for the Argus service",
|
||||
"ske_custom_endpoint": "Custom endpoint for the Kubernetes Engine (SKE) service",
|
||||
|
|
@ -141,6 +144,10 @@ func (p *Provider) Schema(_ context.Context, _ provider.SchemaRequest, resp *pro
|
|||
Optional: true,
|
||||
Description: descriptions["mariadb_custom_endpoint"],
|
||||
},
|
||||
"objectstorage_custom_endpoint": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: descriptions["objectstorage_custom_endpoint"],
|
||||
},
|
||||
"opensearch_custom_endpoint": schema.StringAttribute{
|
||||
Optional: true,
|
||||
Description: descriptions["opensearch_custom_endpoint"],
|
||||
|
|
@ -209,6 +216,9 @@ func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest,
|
|||
if !(providerConfig.MariaDBCustomEndpoint.IsUnknown() || providerConfig.MariaDBCustomEndpoint.IsNull()) {
|
||||
providerData.MariaDBCustomEndpoint = providerConfig.MariaDBCustomEndpoint.ValueString()
|
||||
}
|
||||
if !(providerConfig.ObjectStorageCustomEndpoint.IsUnknown() || providerConfig.ObjectStorageCustomEndpoint.IsNull()) {
|
||||
providerData.ObjectStorageCustomEndpoint = providerConfig.ObjectStorageCustomEndpoint.ValueString()
|
||||
}
|
||||
if !(providerConfig.OpenSearchCustomEndpoint.IsUnknown() || providerConfig.OpenSearchCustomEndpoint.IsNull()) {
|
||||
providerData.OpenSearchCustomEndpoint = providerConfig.OpenSearchCustomEndpoint.ValueString()
|
||||
}
|
||||
|
|
@ -248,6 +258,7 @@ func (p *Provider) DataSources(_ context.Context) []func() datasource.DataSource
|
|||
logMeCredentials.NewCredentialsDataSource,
|
||||
mariaDBInstance.NewInstanceDataSource,
|
||||
mariaDBCredentials.NewCredentialsDataSource,
|
||||
objectStorageBucket.NewBucketDataSource,
|
||||
openSearchInstance.NewInstanceDataSource,
|
||||
openSearchCredentials.NewCredentialsDataSource,
|
||||
rabbitMQInstance.NewInstanceDataSource,
|
||||
|
|
@ -275,6 +286,7 @@ func (p *Provider) Resources(_ context.Context) []func() resource.Resource {
|
|||
logMeCredentials.NewCredentialsResource,
|
||||
mariaDBInstance.NewInstanceResource,
|
||||
mariaDBCredentials.NewCredentialsResource,
|
||||
objectStorageBucket.NewBucketResource,
|
||||
openSearchInstance.NewInstanceResource,
|
||||
openSearchCredentials.NewCredentialsResource,
|
||||
rabbitMQInstance.NewInstanceResource,
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue