Ft/region adjustment service enablement (#718)

* feat(serviceenablement): Region adjustment

Signed-off-by: Alexander Dahmen <alexander.dahmen@inovex.de>

* chore(ske): Remove deprecated ske project

This resource was removed on October 10th 2024.

Signed-off-by: Alexander Dahmen <alexander.dahmen@inovex.de>

---------

Signed-off-by: Alexander Dahmen <alexander.dahmen@inovex.de>
This commit is contained in:
Alexander Dahmen 2025-03-24 14:37:43 +01:00 committed by GitHub
parent 646c15d7f8
commit 3dc4fedba1
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
15 changed files with 163 additions and 514 deletions

View file

@ -14,6 +14,7 @@ import (
"github.com/stackitcloud/stackit-sdk-go/core/oapierror"
"github.com/stackitcloud/stackit-sdk-go/services/ske"
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core"
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/utils"
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/validate"
)
@ -29,7 +30,8 @@ func NewClusterDataSource() datasource.DataSource {
// clusterDataSource is the data source implementation.
type clusterDataSource struct {
client *ske.APIClient
client *ske.APIClient
providerData core.ProviderData
}
// Metadata returns the data source type name.
@ -44,7 +46,8 @@ func (r *clusterDataSource) Configure(ctx context.Context, req datasource.Config
return
}
providerData, ok := req.ProviderData.(core.ProviderData)
var ok bool
r.providerData, ok = req.ProviderData.(core.ProviderData)
if !ok {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Expected configure type stackit.ProviderData, got %T", req.ProviderData))
return
@ -52,15 +55,15 @@ func (r *clusterDataSource) Configure(ctx context.Context, req datasource.Config
var apiClient *ske.APIClient
var err error
if providerData.SKECustomEndpoint != "" {
if r.providerData.SKECustomEndpoint != "" {
apiClient, err = ske.NewAPIClient(
config.WithCustomAuth(providerData.RoundTripper),
config.WithEndpoint(providerData.SKECustomEndpoint),
config.WithCustomAuth(r.providerData.RoundTripper),
config.WithEndpoint(r.providerData.SKECustomEndpoint),
)
} else {
apiClient, err = ske.NewAPIClient(
config.WithCustomAuth(providerData.RoundTripper),
config.WithRegion(providerData.GetRegion()),
config.WithCustomAuth(r.providerData.RoundTripper),
config.WithRegion(r.providerData.GetRegion()),
)
}
@ -319,6 +322,11 @@ func (r *clusterDataSource) Schema(_ context.Context, _ datasource.SchemaRequest
},
},
},
"region": schema.StringAttribute{
// the region cannot be found, so it has to be passed
Optional: true,
Description: "The resource region. If not defined, the provider region is used.",
},
},
}
}
@ -334,8 +342,15 @@ func (r *clusterDataSource) Read(ctx context.Context, req datasource.ReadRequest
projectId := state.ProjectId.ValueString()
name := state.Name.ValueString()
var region string
if utils.IsUndefined(state.Region) {
region = r.providerData.GetRegion()
} else {
region = state.Region.ValueString()
}
ctx = tflog.SetField(ctx, "project_id", projectId)
ctx = tflog.SetField(ctx, "name", name)
ctx = tflog.SetField(ctx, "region", region)
clusterResp, err := r.client.GetCluster(ctx, projectId, name).Execute()
if err != nil {
oapiErr, ok := err.(*oapierror.GenericOpenAPIError) //nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped
@ -346,7 +361,7 @@ func (r *clusterDataSource) Read(ctx context.Context, req datasource.ReadRequest
return
}
err = mapFields(ctx, clusterResp, &state)
err = mapFields(ctx, clusterResp, &state, region)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading cluster", fmt.Sprintf("Processing API payload: %v", err))
return

View file

@ -59,6 +59,7 @@ var (
_ resource.Resource = &clusterResource{}
_ resource.ResourceWithConfigure = &clusterResource{}
_ resource.ResourceWithImportState = &clusterResource{}
_ resource.ResourceWithModifyPlan = &clusterResource{}
)
type skeClient interface {
@ -79,6 +80,7 @@ type Model struct {
Hibernations types.List `tfsdk:"hibernations"`
Extensions types.Object `tfsdk:"extensions"`
EgressAddressRanges types.List `tfsdk:"egress_address_ranges"`
Region types.String `tfsdk:"region"`
}
// Struct corresponding to Model.NodePools[i]
@ -236,6 +238,37 @@ func NewClusterResource() resource.Resource {
type clusterResource struct {
skeClient *ske.APIClient
enablementClient *serviceenablement.APIClient
providerData core.ProviderData
}
// ModifyPlan implements resource.ResourceWithModifyPlan.
// Use the modifier to set the effective region in the current plan:
// the region from the user's configuration takes precedence, and the
// provider-level default region is used as the fallback. The resolved
// value is written back into the plan so it is persisted in state.
func (r *clusterResource) ModifyPlan(ctx context.Context, req resource.ModifyPlanRequest, resp *resource.ModifyPlanResponse) { // nolint:gocritic // function signature required by Terraform
var configModel Model
// skip initial empty configuration to avoid follow-up errors
if req.Config.Raw.IsNull() {
return
}
resp.Diagnostics.Append(req.Config.Get(ctx, &configModel)...)
if resp.Diagnostics.HasError() {
return
}
// Read the proposed plan as well; the effective region is written into it below.
var planModel Model
resp.Diagnostics.Append(req.Plan.Get(ctx, &planModel)...)
if resp.Diagnostics.HasError() {
return
}
// Resolve the effective region into planModel.Region. NOTE(review): presumably
// AdaptRegion keeps the config value when set and falls back to the provider
// default (r.providerData.GetRegion()) otherwise — confirm against utils.AdaptRegion.
utils.AdaptRegion(ctx, configModel.Region, &planModel.Region, r.providerData.GetRegion(), resp)
if resp.Diagnostics.HasError() {
return
}
// Persist the adjusted model back into the plan.
resp.Diagnostics.Append(resp.Plan.Set(ctx, planModel)...)
if resp.Diagnostics.HasError() {
return
}
}
// Metadata returns the resource type name.
@ -250,7 +283,8 @@ func (r *clusterResource) Configure(ctx context.Context, req resource.ConfigureR
return
}
providerData, ok := req.ProviderData.(core.ProviderData)
var ok bool
r.providerData, ok = req.ProviderData.(core.ProviderData)
if !ok {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Expected configure type stackit.ProviderData, got %T", req.ProviderData))
return
@ -259,15 +293,15 @@ func (r *clusterResource) Configure(ctx context.Context, req resource.ConfigureR
var skeClient *ske.APIClient
var enablementClient *serviceenablement.APIClient
var err error
if providerData.SKECustomEndpoint != "" {
if r.providerData.SKECustomEndpoint != "" {
skeClient, err = ske.NewAPIClient(
config.WithCustomAuth(providerData.RoundTripper),
config.WithEndpoint(providerData.SKECustomEndpoint),
config.WithCustomAuth(r.providerData.RoundTripper),
config.WithEndpoint(r.providerData.SKECustomEndpoint),
)
} else {
skeClient, err = ske.NewAPIClient(
config.WithCustomAuth(providerData.RoundTripper),
config.WithRegion(providerData.GetRegion()),
config.WithCustomAuth(r.providerData.RoundTripper),
config.WithRegion(r.providerData.GetRegion()),
)
}
@ -276,15 +310,15 @@ func (r *clusterResource) Configure(ctx context.Context, req resource.ConfigureR
return
}
if providerData.ServiceEnablementCustomEndpoint != "" {
if r.providerData.ServiceEnablementCustomEndpoint != "" {
enablementClient, err = serviceenablement.NewAPIClient(
config.WithCustomAuth(providerData.RoundTripper),
config.WithEndpoint(providerData.ServiceEnablementCustomEndpoint),
config.WithCustomAuth(r.providerData.RoundTripper),
config.WithEndpoint(r.providerData.ServiceEnablementCustomEndpoint),
)
} else {
enablementClient, err = serviceenablement.NewAPIClient(
config.WithCustomAuth(providerData.RoundTripper),
config.WithRegion(providerData.GetRegion()),
config.WithCustomAuth(r.providerData.RoundTripper),
config.WithRegion(r.providerData.GetRegion()),
)
}
@ -307,6 +341,7 @@ func (r *clusterResource) Schema(_ context.Context, _ resource.SchemaRequest, re
"max_surge": "Maximum number of additional VMs that are created during an update.",
"max_unavailable": "Maximum number of VMs that that can be unavailable during an update.",
"nodepool_validators": "If set (larger than 0), then it must be at least the amount of zones configured for the nodepool. The `max_surge` and `max_unavailable` fields cannot both be unset at the same time.",
"region": "The resource region. If not defined, the provider region is used.",
}
resp.Schema = schema.Schema{
@ -315,7 +350,7 @@ func (r *clusterResource) Schema(_ context.Context, _ resource.SchemaRequest, re
MarkdownDescription: fmt.Sprintf("%s\n\n-> %s", descriptions["main"], descriptions["node_pools_plan_note"]),
Attributes: map[string]schema.Attribute{
"id": schema.StringAttribute{
Description: "Terraform's internal resource ID. It is structured as \"`project_id`,`name`\".",
Description: "Terraform's internal resource ID. It is structured as \"`project_id`,`region`,`name`\".",
Computed: true,
PlanModifiers: []planmodifier.String{
stringplanmodifier.UseStateForUnknown(),
@ -643,6 +678,15 @@ func (r *clusterResource) Schema(_ context.Context, _ resource.SchemaRequest, re
},
},
},
"region": schema.StringAttribute{
Optional: true,
// must be computed to allow for storing the override value from the provider
Computed: true,
Description: descriptions["region"],
PlanModifiers: []planmodifier.String{
stringplanmodifier.RequiresReplace(),
},
},
},
}
}
@ -707,18 +751,20 @@ func (r *clusterResource) Create(ctx context.Context, req resource.CreateRequest
}
projectId := model.ProjectId.ValueString()
region := model.Region.ValueString()
clusterName := model.Name.ValueString()
ctx = tflog.SetField(ctx, "project_id", projectId)
ctx = tflog.SetField(ctx, "name", clusterName)
ctx = tflog.SetField(ctx, "region", region)
// If SKE functionality is not enabled, enable it
err := r.enablementClient.EnableService(ctx, projectId, utils.SKEServiceId).Execute()
err := r.enablementClient.EnableServiceRegional(ctx, region, projectId, utils.SKEServiceId).Execute()
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating cluster", fmt.Sprintf("Calling API to enable SKE: %v", err))
return
}
_, err = enablementWait.EnableServiceWaitHandler(ctx, r.enablementClient, projectId, utils.SKEServiceId).WaitWithContext(ctx)
_, err = enablementWait.EnableServiceWaitHandler(ctx, r.enablementClient, region, projectId, utils.SKEServiceId).WaitWithContext(ctx)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating cluster", fmt.Sprintf("Wait for SKE enablement: %v", err))
return
@ -821,6 +867,7 @@ func (r *clusterResource) createOrUpdateCluster(ctx context.Context, diags *diag
// cluster vars
projectId := model.ProjectId.ValueString()
name := model.Name.ValueString()
region := model.Region.ValueString()
kubernetes, hasDeprecatedVersion, err := toKubernetesPayload(model, availableKubernetesVersions, currentKubernetesVersion, diags)
if err != nil {
core.LogAndAddError(ctx, diags, "Error creating/updating cluster", fmt.Sprintf("Creating cluster config API payload: %v", err))
@ -881,7 +928,7 @@ func (r *clusterResource) createOrUpdateCluster(ctx context.Context, diags *diag
core.LogAndAddWarning(ctx, diags, "Warning during creating/updating cluster", fmt.Sprintf("Cluster is in Impaired state due to an invalid argus instance id, the cluster is usable but metrics won't be forwarded: %s", *waitResp.Status.Error.Message))
}
err = mapFields(ctx, waitResp, model)
err = mapFields(ctx, waitResp, model, region)
if err != nil {
core.LogAndAddError(ctx, diags, "Error creating/updating cluster", fmt.Sprintf("Processing API payload: %v", err))
return
@ -1324,7 +1371,7 @@ func toNetworkPayload(ctx context.Context, m *Model) (*ske.Network, error) {
}, nil
}
func mapFields(ctx context.Context, cl *ske.Cluster, m *Model) error {
func mapFields(ctx context.Context, cl *ske.Cluster, m *Model, region string) error {
if cl == nil {
return fmt.Errorf("response input is nil")
}
@ -1343,11 +1390,13 @@ func mapFields(ctx context.Context, cl *ske.Cluster, m *Model) error {
m.Name = types.StringValue(name)
idParts := []string{
m.ProjectId.ValueString(),
region,
name,
}
m.Id = types.StringValue(
strings.Join(idParts, core.Separator),
)
m.Region = types.StringValue(region)
if cl.Kubernetes != nil {
m.KubernetesVersionUsed = types.StringPointerValue(cl.Kubernetes.Version)
@ -2034,8 +2083,13 @@ func (r *clusterResource) Read(ctx context.Context, req resource.ReadRequest, re
}
projectId := state.ProjectId.ValueString()
name := state.Name.ValueString()
region := state.Region.ValueString()
if region == "" {
region = r.providerData.GetRegion()
}
ctx = tflog.SetField(ctx, "project_id", projectId)
ctx = tflog.SetField(ctx, "name", name)
ctx = tflog.SetField(ctx, "region", region)
clResp, err := r.skeClient.GetCluster(ctx, projectId, name).Execute()
if err != nil {
@ -2048,7 +2102,7 @@ func (r *clusterResource) Read(ctx context.Context, req resource.ReadRequest, re
return
}
err = mapFields(ctx, clResp, &state)
err = mapFields(ctx, clResp, &state, region)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading cluster", fmt.Sprintf("Processing API payload: %v", err))
return
@ -2084,8 +2138,10 @@ func (r *clusterResource) Update(ctx context.Context, req resource.UpdateRequest
projectId := model.ProjectId.ValueString()
clName := model.Name.ValueString()
region := model.Region.ValueString()
ctx = tflog.SetField(ctx, "project_id", projectId)
ctx = tflog.SetField(ctx, "name", clName)
ctx = tflog.SetField(ctx, "region", region)
availableKubernetesVersions, availableMachines, err := r.loadAvailableVersions(ctx)
if err != nil {
@ -2116,8 +2172,10 @@ func (r *clusterResource) Delete(ctx context.Context, req resource.DeleteRequest
}
projectId := model.ProjectId.ValueString()
name := model.Name.ValueString()
region := model.Region.ValueString()
ctx = tflog.SetField(ctx, "project_id", projectId)
ctx = tflog.SetField(ctx, "name", name)
ctx = tflog.SetField(ctx, "region", region)
c := r.skeClient
_, err := c.DeleteCluster(ctx, projectId, name).Execute()
@ -2138,15 +2196,16 @@ func (r *clusterResource) Delete(ctx context.Context, req resource.DeleteRequest
func (r *clusterResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
idParts := strings.Split(req.ID, core.Separator)
if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" {
if len(idParts) != 3 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" {
core.LogAndAddError(ctx, &resp.Diagnostics,
"Error importing cluster",
fmt.Sprintf("Expected import identifier with format: [project_id],[name] Got: %q", req.ID),
fmt.Sprintf("Expected import identifier with format: [project_id],[region],[name] Got: %q", req.ID),
)
return
}
resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...)
resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("name"), idParts[1])...)
resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("region"), idParts[1])...)
resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("name"), idParts[2])...)
tflog.Info(ctx, "SKE cluster state imported")
}

View file

@ -33,11 +33,13 @@ func (c *skeClientMocked) GetClusterExecute(_ context.Context, _, _ string) (*sk
func TestMapFields(t *testing.T) {
cs := ske.ClusterStatusState("OK")
const testRegion = "region"
tests := []struct {
description string
stateExtensions types.Object
stateNodePools types.List
input *ske.Cluster
region string
expected Model
isValid bool
}{
@ -48,8 +50,9 @@ func TestMapFields(t *testing.T) {
&ske.Cluster{
Name: utils.Ptr("name"),
},
testRegion,
Model{
Id: types.StringValue("pid,name"),
Id: types.StringValue("pid,region,name"),
ProjectId: types.StringValue("pid"),
Name: types.StringValue("name"),
KubernetesVersion: types.StringNull(),
@ -60,6 +63,7 @@ func TestMapFields(t *testing.T) {
Hibernations: types.ListNull(types.ObjectType{AttrTypes: hibernationTypes}),
Extensions: types.ObjectNull(extensionsTypes),
EgressAddressRanges: types.ListNull(types.StringType),
Region: types.StringValue(testRegion),
},
true,
},
@ -149,8 +153,9 @@ func TestMapFields(t *testing.T) {
EgressAddressRanges: &[]string{"0.0.0.0/32", "1.1.1.1/32"},
},
},
testRegion,
Model{
Id: types.StringValue("pid,name"),
Id: types.StringValue("pid,region,name"),
ProjectId: types.StringValue("pid"),
Name: types.StringValue("name"),
KubernetesVersion: types.StringNull(),
@ -253,6 +258,7 @@ func TestMapFields(t *testing.T) {
}),
}),
}),
Region: types.StringValue(testRegion),
},
true,
},
@ -264,8 +270,9 @@ func TestMapFields(t *testing.T) {
Name: utils.Ptr("name"),
Network: &ske.Network{},
},
testRegion,
Model{
Id: types.StringValue("pid,name"),
Id: types.StringValue("pid,region,name"),
ProjectId: types.StringValue("pid"),
Name: types.StringValue("name"),
KubernetesVersion: types.StringNull(),
@ -276,6 +283,7 @@ func TestMapFields(t *testing.T) {
Hibernations: types.ListNull(types.ObjectType{AttrTypes: hibernationTypes}),
Extensions: types.ObjectNull(extensionsTypes),
EgressAddressRanges: types.ListNull(types.StringType),
Region: types.StringValue(testRegion),
},
true,
},
@ -300,8 +308,9 @@ func TestMapFields(t *testing.T) {
},
Name: utils.Ptr("name"),
},
testRegion,
Model{
Id: types.StringValue("pid,name"),
Id: types.StringValue("pid,region,name"),
ProjectId: types.StringValue("pid"),
Name: types.StringValue("name"),
KubernetesVersion: types.StringNull(),
@ -324,6 +333,7 @@ func TestMapFields(t *testing.T) {
"zones": types.ListNull(types.StringType),
}),
}),
Region: types.StringValue(testRegion),
},
true,
},
@ -348,8 +358,9 @@ func TestMapFields(t *testing.T) {
Extensions: &ske.Extension{},
Name: utils.Ptr("name"),
},
testRegion,
Model{
Id: types.StringValue("pid,name"),
Id: types.StringValue("pid,region,name"),
ProjectId: types.StringValue("pid"),
Name: types.StringValue("name"),
KubernetesVersion: types.StringNull(),
@ -372,6 +383,7 @@ func TestMapFields(t *testing.T) {
"zones": types.ListNull(types.StringType),
}),
}),
Region: types.StringValue(testRegion),
},
true,
},
@ -407,8 +419,9 @@ func TestMapFields(t *testing.T) {
},
Name: utils.Ptr("name"),
},
testRegion,
Model{
Id: types.StringValue("pid,name"),
Id: types.StringValue("pid,region,name"),
ProjectId: types.StringValue("pid"),
Name: types.StringValue("name"),
KubernetesVersion: types.StringNull(),
@ -433,6 +446,7 @@ func TestMapFields(t *testing.T) {
"zones": types.ListNull(types.StringType),
}),
}),
Region: types.StringValue(testRegion),
},
true,
},
@ -444,8 +458,9 @@ func TestMapFields(t *testing.T) {
Extensions: &ske.Extension{},
Name: utils.Ptr("name"),
},
testRegion,
Model{
Id: types.StringValue("pid,name"),
Id: types.StringValue("pid,region,name"),
ProjectId: types.StringValue("pid"),
Name: types.StringValue("name"),
KubernetesVersion: types.StringNull(),
@ -455,6 +470,7 @@ func TestMapFields(t *testing.T) {
Hibernations: types.ListNull(types.ObjectType{AttrTypes: hibernationTypes}),
Extensions: types.ObjectNull(extensionsTypes),
EgressAddressRanges: types.ListNull(types.StringType),
Region: types.StringValue(testRegion),
},
true,
},
@ -573,8 +589,9 @@ func TestMapFields(t *testing.T) {
Hibernated: nil,
},
},
testRegion,
Model{
Id: types.StringValue("pid,name"),
Id: types.StringValue("pid,region,name"),
ProjectId: types.StringValue("pid"),
Name: types.StringValue("name"),
KubernetesVersion: types.StringNull(),
@ -659,6 +676,7 @@ func TestMapFields(t *testing.T) {
}),
}),
}),
Region: types.StringValue(testRegion),
},
true,
},
@ -667,6 +685,7 @@ func TestMapFields(t *testing.T) {
types.ObjectNull(extensionsTypes),
types.ListNull(types.ObjectType{AttrTypes: nodePoolTypes}),
nil,
testRegion,
Model{},
false,
},
@ -675,6 +694,7 @@ func TestMapFields(t *testing.T) {
types.ObjectNull(extensionsTypes),
types.ListNull(types.ObjectType{AttrTypes: nodePoolTypes}),
&ske.Cluster{},
testRegion,
Model{},
false,
},
@ -686,7 +706,7 @@ func TestMapFields(t *testing.T) {
Extensions: tt.stateExtensions,
NodePools: tt.stateNodePools,
}
err := mapFields(context.Background(), tt.input, state)
err := mapFields(context.Background(), tt.input, state, tt.region)
if !tt.isValid && err == nil {
t.Fatalf("Should have failed")
}