Ft/region adjustment service enablement (#718)

* feat(serviceenablement): Region adjustment

Signed-off-by: Alexander Dahmen <alexander.dahmen@inovex.de>

* chore(ske): Remove deprecated ske project

This resource was removed on October 10th 2024.

Signed-off-by: Alexander Dahmen <alexander.dahmen@inovex.de>

---------

Signed-off-by: Alexander Dahmen <alexander.dahmen@inovex.de>
This commit is contained in:
Alexander Dahmen 2025-03-24 14:37:43 +01:00 committed by GitHub
parent 646c15d7f8
commit 3dc4fedba1
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
15 changed files with 163 additions and 514 deletions

View file

@ -27,6 +27,10 @@ data "stackit_ske_cluster" "example" {
- `name` (String) The cluster name.
- `project_id` (String) STACKIT project ID to which the cluster is associated.
### Optional
- `region` (String) The resource region. If not defined, the provider region is used.
### Read-Only
- `allow_privileged_containers` (Boolean, Deprecated) DEPRECATED as of Kubernetes 1.25+

View file

@ -1,30 +0,0 @@
---
# generated by https://github.com/hashicorp/terraform-plugin-docs
page_title: "stackit_ske_project Data Source - stackit"
subcategory: ""
description: |-
SKE project data source schema. Must have a region specified in the provider configuration. Warning: SKE project resource is no longer in use and will be removed with the next release. SKE service enablement is done automatically when a new cluster is created.
---
# stackit_ske_project (Data Source)
SKE project data source schema. Must have a `region` specified in the provider configuration. Warning: SKE project resource is no longer in use and will be removed with the next release. SKE service enablement is done automatically when a new cluster is created.
## Example Usage
```terraform
data "stackit_ske_project" "example" {
project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
}
```
<!-- schema generated by tfplugindocs -->
## Schema
### Required
- `project_id` (String) STACKIT Project ID in which the kubernetes project is enabled.
### Read-Only
- `id` (String) Terraform's internal data source ID. It is structured as "`project_id`".

View file

@ -59,11 +59,12 @@ Deprecated as of Kubernetes 1.25 and later
- `kubernetes_version_min` (String) The minimum Kubernetes version. This field will be used to set the minimum kubernetes version on creation/update of the cluster. If unset, the latest supported Kubernetes version will be used. SKE automatically updates the cluster Kubernetes version if you have set `maintenance.enable_kubernetes_version_updates` to true or if there is a mandatory update, as described in [Updates for Kubernetes versions and Operating System versions in SKE](https://docs.stackit.cloud/stackit/en/version-updates-in-ske-10125631.html). To get the current kubernetes version being used for your cluster, use the read-only `kubernetes_version_used` field.
- `maintenance` (Attributes) A single maintenance block as defined below. (see [below for nested schema](#nestedatt--maintenance))
- `network` (Attributes) Network block as defined below. (see [below for nested schema](#nestedatt--network))
- `region` (String) The resource region. If not defined, the provider region is used.
### Read-Only
- `egress_address_ranges` (List of String) The outgoing network ranges (in CIDR notation) of traffic originating from workload on the cluster.
- `id` (String) Terraform's internal resource ID. It is structured as "`project_id`,`name`".
- `id` (String) Terraform's internal resource ID. It is structured as "`project_id`,`region`,`name`".
- `kubernetes_version_used` (String) Full Kubernetes version used. For example, if 1.22 was set in `kubernetes_version_min`, this value may result to 1.22.15. SKE automatically updates the cluster Kubernetes version if you have set `maintenance.enable_kubernetes_version_updates` to true or if there is a mandatory update, as described in [Updates for Kubernetes versions and Operating System versions in SKE](https://docs.stackit.cloud/stackit/en/version-updates-in-ske-10125631.html).
<a id="nestedatt--node_pools"></a>

View file

@ -1,30 +0,0 @@
---
# generated by https://github.com/hashicorp/terraform-plugin-docs
page_title: "stackit_ske_project Resource - stackit"
subcategory: ""
description: |-
  SKE project resource schema. Must have a region specified in the provider configuration. This resource allows you to enable the SKE service and you can only have one per project. Before deleting this resource, all SKE clusters associated to the project must be deleted. Otherwise, an error would occur due to the existing clusters. In such a case, it is highly recommended to remove the SKE project from the state directly, using "terraform state rm". Warning: SKE project resource is no longer in use and will be removed after October 10th 2024. SKE service enablement is done automatically when a new cluster is created.
---
# stackit_ske_project (Resource)
SKE project resource schema. Must have a `region` specified in the provider configuration. This resource allows you to enable the SKE service and you can only have one per project. Before deleting this resource, all SKE clusters associated to the project must be deleted. Otherwise, an error would occur due to the existing clusters. In such a case, it is highly recommended to remove the SKE project from the state directly, using "`terraform state rm`". Warning: SKE project resource is no longer in use and will be removed after October 10th 2024. SKE service enablement is done automatically when a new cluster is created.
## Example Usage
```terraform
resource "stackit_ske_project" "example" {
project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
}
```
<!-- schema generated by tfplugindocs -->
## Schema
### Required
- `project_id` (String) STACKIT Project ID in which the kubernetes project is enabled.
### Read-Only
- `id` (String) Terraform's internal resource ID. It is structured as "`project_id`".

View file

@ -1,3 +0,0 @@
data "stackit_ske_project" "example" {
project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
}

View file

@ -1,3 +0,0 @@
resource "stackit_ske_project" "example" {
project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
}

2
go.mod
View file

@ -30,7 +30,7 @@ require (
github.com/stackitcloud/stackit-sdk-go/services/serverbackup v0.6.0
github.com/stackitcloud/stackit-sdk-go/services/serverupdate v0.5.0
github.com/stackitcloud/stackit-sdk-go/services/serviceaccount v0.6.0
github.com/stackitcloud/stackit-sdk-go/services/serviceenablement v0.5.0
github.com/stackitcloud/stackit-sdk-go/services/serviceenablement v1.0.0
github.com/stackitcloud/stackit-sdk-go/services/ske v0.22.0
github.com/stackitcloud/stackit-sdk-go/services/sqlserverflex v1.0.0
github.com/teambition/rrule-go v1.8.2

4
go.sum
View file

@ -191,8 +191,8 @@ github.com/stackitcloud/stackit-sdk-go/services/serverupdate v0.5.0 h1:TMUxDh8XG
github.com/stackitcloud/stackit-sdk-go/services/serverupdate v0.5.0/go.mod h1:giHnHz3kHeLY8Av9MZLsyJlaTXYz+BuGqdP/SKB5Vo0=
github.com/stackitcloud/stackit-sdk-go/services/serviceaccount v0.6.0 h1:y+XzJcntHJ7M+IWWvAUkiVFA8op+jZxwHs3ktW2aLoA=
github.com/stackitcloud/stackit-sdk-go/services/serviceaccount v0.6.0/go.mod h1:J/Wa67cbDI1wAyxib9PiEbNqGfIoFUH+DSLueVazQx8=
github.com/stackitcloud/stackit-sdk-go/services/serviceenablement v0.5.0 h1:QG+rGBHsyXOlJ3ZIeOgExGqu9PoTlGY1rltW/VpG6lw=
github.com/stackitcloud/stackit-sdk-go/services/serviceenablement v0.5.0/go.mod h1:16dOVT052cMuHhUJ3NIcPuY7TrpCr9QlxmvvfjLZubA=
github.com/stackitcloud/stackit-sdk-go/services/serviceenablement v1.0.0 h1:Xxd5KUSWRt7FytnNWClLEa0n9GM6e5xAKo835ODSpAM=
github.com/stackitcloud/stackit-sdk-go/services/serviceenablement v1.0.0/go.mod h1:EMqjiq/72WKXSwnJGLpumUJS4Uwlyhg5vtNg7qWoGtc=
github.com/stackitcloud/stackit-sdk-go/services/ske v0.22.0 h1:3KUVls8zXsbT2tOYRSHyp3/l0Kpjl4f3INmQKYTe65Y=
github.com/stackitcloud/stackit-sdk-go/services/ske v0.22.0/go.mod h1:63IvXpBJTIVONAnGPSDo0sRJ+6n6tzO918OLqfYBxto=
github.com/stackitcloud/stackit-sdk-go/services/sqlserverflex v1.0.0 h1:RYJO0rZea9+sxVfaJDWRo2zgfKNgiUcA5c0nbvZURiU=

View file

@ -14,6 +14,7 @@ import (
"github.com/stackitcloud/stackit-sdk-go/core/oapierror"
"github.com/stackitcloud/stackit-sdk-go/services/ske"
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core"
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/utils"
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/validate"
)
@ -30,6 +31,7 @@ func NewClusterDataSource() datasource.DataSource {
// clusterDataSource is the data source implementation.
type clusterDataSource struct {
client *ske.APIClient
providerData core.ProviderData
}
// Metadata returns the data source type name.
@ -44,7 +46,8 @@ func (r *clusterDataSource) Configure(ctx context.Context, req datasource.Config
return
}
providerData, ok := req.ProviderData.(core.ProviderData)
var ok bool
r.providerData, ok = req.ProviderData.(core.ProviderData)
if !ok {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Expected configure type stackit.ProviderData, got %T", req.ProviderData))
return
@ -52,15 +55,15 @@ func (r *clusterDataSource) Configure(ctx context.Context, req datasource.Config
var apiClient *ske.APIClient
var err error
if providerData.SKECustomEndpoint != "" {
if r.providerData.SKECustomEndpoint != "" {
apiClient, err = ske.NewAPIClient(
config.WithCustomAuth(providerData.RoundTripper),
config.WithEndpoint(providerData.SKECustomEndpoint),
config.WithCustomAuth(r.providerData.RoundTripper),
config.WithEndpoint(r.providerData.SKECustomEndpoint),
)
} else {
apiClient, err = ske.NewAPIClient(
config.WithCustomAuth(providerData.RoundTripper),
config.WithRegion(providerData.GetRegion()),
config.WithCustomAuth(r.providerData.RoundTripper),
config.WithRegion(r.providerData.GetRegion()),
)
}
@ -319,6 +322,11 @@ func (r *clusterDataSource) Schema(_ context.Context, _ datasource.SchemaRequest
},
},
},
"region": schema.StringAttribute{
// the region cannot be found, so it has to be passed
Optional: true,
Description: "The resource region. If not defined, the provider region is used.",
},
},
}
}
@ -334,8 +342,15 @@ func (r *clusterDataSource) Read(ctx context.Context, req datasource.ReadRequest
projectId := state.ProjectId.ValueString()
name := state.Name.ValueString()
var region string
if utils.IsUndefined(state.Region) {
region = r.providerData.GetRegion()
} else {
region = state.Region.ValueString()
}
ctx = tflog.SetField(ctx, "project_id", projectId)
ctx = tflog.SetField(ctx, "name", name)
ctx = tflog.SetField(ctx, "region", region)
clusterResp, err := r.client.GetCluster(ctx, projectId, name).Execute()
if err != nil {
oapiErr, ok := err.(*oapierror.GenericOpenAPIError) //nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped
@ -346,7 +361,7 @@ func (r *clusterDataSource) Read(ctx context.Context, req datasource.ReadRequest
return
}
err = mapFields(ctx, clusterResp, &state)
err = mapFields(ctx, clusterResp, &state, region)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading cluster", fmt.Sprintf("Processing API payload: %v", err))
return

View file

@ -59,6 +59,7 @@ var (
_ resource.Resource = &clusterResource{}
_ resource.ResourceWithConfigure = &clusterResource{}
_ resource.ResourceWithImportState = &clusterResource{}
_ resource.ResourceWithModifyPlan = &clusterResource{}
)
type skeClient interface {
@ -79,6 +80,7 @@ type Model struct {
Hibernations types.List `tfsdk:"hibernations"`
Extensions types.Object `tfsdk:"extensions"`
EgressAddressRanges types.List `tfsdk:"egress_address_ranges"`
Region types.String `tfsdk:"region"`
}
// Struct corresponding to Model.NodePools[i]
@ -236,6 +238,37 @@ func NewClusterResource() resource.Resource {
type clusterResource struct {
skeClient *ske.APIClient
enablementClient *serviceenablement.APIClient
providerData core.ProviderData
}
// ModifyPlan implements resource.ResourceWithModifyPlan.
// Use the modifier to set the effective region in the current plan.
func (r *clusterResource) ModifyPlan(ctx context.Context, req resource.ModifyPlanRequest, resp *resource.ModifyPlanResponse) { // nolint:gocritic // function signature required by Terraform
var configModel Model
// skip initial empty configuration to avoid follow-up errors
if req.Config.Raw.IsNull() {
return
}
resp.Diagnostics.Append(req.Config.Get(ctx, &configModel)...)
if resp.Diagnostics.HasError() {
return
}
var planModel Model
resp.Diagnostics.Append(req.Plan.Get(ctx, &planModel)...)
if resp.Diagnostics.HasError() {
return
}
utils.AdaptRegion(ctx, configModel.Region, &planModel.Region, r.providerData.GetRegion(), resp)
if resp.Diagnostics.HasError() {
return
}
resp.Diagnostics.Append(resp.Plan.Set(ctx, planModel)...)
if resp.Diagnostics.HasError() {
return
}
}
// Metadata returns the resource type name.
@ -250,7 +283,8 @@ func (r *clusterResource) Configure(ctx context.Context, req resource.ConfigureR
return
}
providerData, ok := req.ProviderData.(core.ProviderData)
var ok bool
r.providerData, ok = req.ProviderData.(core.ProviderData)
if !ok {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Expected configure type stackit.ProviderData, got %T", req.ProviderData))
return
@ -259,15 +293,15 @@ func (r *clusterResource) Configure(ctx context.Context, req resource.ConfigureR
var skeClient *ske.APIClient
var enablementClient *serviceenablement.APIClient
var err error
if providerData.SKECustomEndpoint != "" {
if r.providerData.SKECustomEndpoint != "" {
skeClient, err = ske.NewAPIClient(
config.WithCustomAuth(providerData.RoundTripper),
config.WithEndpoint(providerData.SKECustomEndpoint),
config.WithCustomAuth(r.providerData.RoundTripper),
config.WithEndpoint(r.providerData.SKECustomEndpoint),
)
} else {
skeClient, err = ske.NewAPIClient(
config.WithCustomAuth(providerData.RoundTripper),
config.WithRegion(providerData.GetRegion()),
config.WithCustomAuth(r.providerData.RoundTripper),
config.WithRegion(r.providerData.GetRegion()),
)
}
@ -276,15 +310,15 @@ func (r *clusterResource) Configure(ctx context.Context, req resource.ConfigureR
return
}
if providerData.ServiceEnablementCustomEndpoint != "" {
if r.providerData.ServiceEnablementCustomEndpoint != "" {
enablementClient, err = serviceenablement.NewAPIClient(
config.WithCustomAuth(providerData.RoundTripper),
config.WithEndpoint(providerData.ServiceEnablementCustomEndpoint),
config.WithCustomAuth(r.providerData.RoundTripper),
config.WithEndpoint(r.providerData.ServiceEnablementCustomEndpoint),
)
} else {
enablementClient, err = serviceenablement.NewAPIClient(
config.WithCustomAuth(providerData.RoundTripper),
config.WithRegion(providerData.GetRegion()),
config.WithCustomAuth(r.providerData.RoundTripper),
config.WithRegion(r.providerData.GetRegion()),
)
}
@ -307,6 +341,7 @@ func (r *clusterResource) Schema(_ context.Context, _ resource.SchemaRequest, re
"max_surge": "Maximum number of additional VMs that are created during an update.",
"max_unavailable": "Maximum number of VMs that that can be unavailable during an update.",
"nodepool_validators": "If set (larger than 0), then it must be at least the amount of zones configured for the nodepool. The `max_surge` and `max_unavailable` fields cannot both be unset at the same time.",
"region": "The resource region. If not defined, the provider region is used.",
}
resp.Schema = schema.Schema{
@ -315,7 +350,7 @@ func (r *clusterResource) Schema(_ context.Context, _ resource.SchemaRequest, re
MarkdownDescription: fmt.Sprintf("%s\n\n-> %s", descriptions["main"], descriptions["node_pools_plan_note"]),
Attributes: map[string]schema.Attribute{
"id": schema.StringAttribute{
Description: "Terraform's internal resource ID. It is structured as \"`project_id`,`name`\".",
Description: "Terraform's internal resource ID. It is structured as \"`project_id`,`region`,`name`\".",
Computed: true,
PlanModifiers: []planmodifier.String{
stringplanmodifier.UseStateForUnknown(),
@ -643,6 +678,15 @@ func (r *clusterResource) Schema(_ context.Context, _ resource.SchemaRequest, re
},
},
},
"region": schema.StringAttribute{
Optional: true,
// must be computed to allow for storing the override value from the provider
Computed: true,
Description: descriptions["region"],
PlanModifiers: []planmodifier.String{
stringplanmodifier.RequiresReplace(),
},
},
},
}
}
@ -707,18 +751,20 @@ func (r *clusterResource) Create(ctx context.Context, req resource.CreateRequest
}
projectId := model.ProjectId.ValueString()
region := model.Region.ValueString()
clusterName := model.Name.ValueString()
ctx = tflog.SetField(ctx, "project_id", projectId)
ctx = tflog.SetField(ctx, "name", clusterName)
ctx = tflog.SetField(ctx, "region", region)
// If SKE functionality is not enabled, enable it
err := r.enablementClient.EnableService(ctx, projectId, utils.SKEServiceId).Execute()
err := r.enablementClient.EnableServiceRegional(ctx, region, projectId, utils.SKEServiceId).Execute()
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating cluster", fmt.Sprintf("Calling API to enable SKE: %v", err))
return
}
_, err = enablementWait.EnableServiceWaitHandler(ctx, r.enablementClient, projectId, utils.SKEServiceId).WaitWithContext(ctx)
_, err = enablementWait.EnableServiceWaitHandler(ctx, r.enablementClient, region, projectId, utils.SKEServiceId).WaitWithContext(ctx)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating cluster", fmt.Sprintf("Wait for SKE enablement: %v", err))
return
@ -821,6 +867,7 @@ func (r *clusterResource) createOrUpdateCluster(ctx context.Context, diags *diag
// cluster vars
projectId := model.ProjectId.ValueString()
name := model.Name.ValueString()
region := model.Region.ValueString()
kubernetes, hasDeprecatedVersion, err := toKubernetesPayload(model, availableKubernetesVersions, currentKubernetesVersion, diags)
if err != nil {
core.LogAndAddError(ctx, diags, "Error creating/updating cluster", fmt.Sprintf("Creating cluster config API payload: %v", err))
@ -881,7 +928,7 @@ func (r *clusterResource) createOrUpdateCluster(ctx context.Context, diags *diag
core.LogAndAddWarning(ctx, diags, "Warning during creating/updating cluster", fmt.Sprintf("Cluster is in Impaired state due to an invalid argus instance id, the cluster is usable but metrics won't be forwarded: %s", *waitResp.Status.Error.Message))
}
err = mapFields(ctx, waitResp, model)
err = mapFields(ctx, waitResp, model, region)
if err != nil {
core.LogAndAddError(ctx, diags, "Error creating/updating cluster", fmt.Sprintf("Processing API payload: %v", err))
return
@ -1324,7 +1371,7 @@ func toNetworkPayload(ctx context.Context, m *Model) (*ske.Network, error) {
}, nil
}
func mapFields(ctx context.Context, cl *ske.Cluster, m *Model) error {
func mapFields(ctx context.Context, cl *ske.Cluster, m *Model, region string) error {
if cl == nil {
return fmt.Errorf("response input is nil")
}
@ -1343,11 +1390,13 @@ func mapFields(ctx context.Context, cl *ske.Cluster, m *Model) error {
m.Name = types.StringValue(name)
idParts := []string{
m.ProjectId.ValueString(),
region,
name,
}
m.Id = types.StringValue(
strings.Join(idParts, core.Separator),
)
m.Region = types.StringValue(region)
if cl.Kubernetes != nil {
m.KubernetesVersionUsed = types.StringPointerValue(cl.Kubernetes.Version)
@ -2034,8 +2083,13 @@ func (r *clusterResource) Read(ctx context.Context, req resource.ReadRequest, re
}
projectId := state.ProjectId.ValueString()
name := state.Name.ValueString()
region := state.Region.ValueString()
if region == "" {
region = r.providerData.GetRegion()
}
ctx = tflog.SetField(ctx, "project_id", projectId)
ctx = tflog.SetField(ctx, "name", name)
ctx = tflog.SetField(ctx, "region", region)
clResp, err := r.skeClient.GetCluster(ctx, projectId, name).Execute()
if err != nil {
@ -2048,7 +2102,7 @@ func (r *clusterResource) Read(ctx context.Context, req resource.ReadRequest, re
return
}
err = mapFields(ctx, clResp, &state)
err = mapFields(ctx, clResp, &state, region)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading cluster", fmt.Sprintf("Processing API payload: %v", err))
return
@ -2084,8 +2138,10 @@ func (r *clusterResource) Update(ctx context.Context, req resource.UpdateRequest
projectId := model.ProjectId.ValueString()
clName := model.Name.ValueString()
region := model.Region.ValueString()
ctx = tflog.SetField(ctx, "project_id", projectId)
ctx = tflog.SetField(ctx, "name", clName)
ctx = tflog.SetField(ctx, "region", region)
availableKubernetesVersions, availableMachines, err := r.loadAvailableVersions(ctx)
if err != nil {
@ -2116,8 +2172,10 @@ func (r *clusterResource) Delete(ctx context.Context, req resource.DeleteRequest
}
projectId := model.ProjectId.ValueString()
name := model.Name.ValueString()
region := model.Region.ValueString()
ctx = tflog.SetField(ctx, "project_id", projectId)
ctx = tflog.SetField(ctx, "name", name)
ctx = tflog.SetField(ctx, "region", region)
c := r.skeClient
_, err := c.DeleteCluster(ctx, projectId, name).Execute()
@ -2138,15 +2196,16 @@ func (r *clusterResource) Delete(ctx context.Context, req resource.DeleteRequest
func (r *clusterResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
idParts := strings.Split(req.ID, core.Separator)
if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" {
if len(idParts) != 3 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" {
core.LogAndAddError(ctx, &resp.Diagnostics,
"Error importing cluster",
fmt.Sprintf("Expected import identifier with format: [project_id],[name] Got: %q", req.ID),
fmt.Sprintf("Expected import identifier with format: [project_id],[region],[name] Got: %q", req.ID),
)
return
}
resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...)
resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("name"), idParts[1])...)
resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("region"), idParts[1])...)
resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("name"), idParts[2])...)
tflog.Info(ctx, "SKE cluster state imported")
}

View file

@ -33,11 +33,13 @@ func (c *skeClientMocked) GetClusterExecute(_ context.Context, _, _ string) (*sk
func TestMapFields(t *testing.T) {
cs := ske.ClusterStatusState("OK")
const testRegion = "region"
tests := []struct {
description string
stateExtensions types.Object
stateNodePools types.List
input *ske.Cluster
region string
expected Model
isValid bool
}{
@ -48,8 +50,9 @@ func TestMapFields(t *testing.T) {
&ske.Cluster{
Name: utils.Ptr("name"),
},
testRegion,
Model{
Id: types.StringValue("pid,name"),
Id: types.StringValue("pid,region,name"),
ProjectId: types.StringValue("pid"),
Name: types.StringValue("name"),
KubernetesVersion: types.StringNull(),
@ -60,6 +63,7 @@ func TestMapFields(t *testing.T) {
Hibernations: types.ListNull(types.ObjectType{AttrTypes: hibernationTypes}),
Extensions: types.ObjectNull(extensionsTypes),
EgressAddressRanges: types.ListNull(types.StringType),
Region: types.StringValue(testRegion),
},
true,
},
@ -149,8 +153,9 @@ func TestMapFields(t *testing.T) {
EgressAddressRanges: &[]string{"0.0.0.0/32", "1.1.1.1/32"},
},
},
testRegion,
Model{
Id: types.StringValue("pid,name"),
Id: types.StringValue("pid,region,name"),
ProjectId: types.StringValue("pid"),
Name: types.StringValue("name"),
KubernetesVersion: types.StringNull(),
@ -253,6 +258,7 @@ func TestMapFields(t *testing.T) {
}),
}),
}),
Region: types.StringValue(testRegion),
},
true,
},
@ -264,8 +270,9 @@ func TestMapFields(t *testing.T) {
Name: utils.Ptr("name"),
Network: &ske.Network{},
},
testRegion,
Model{
Id: types.StringValue("pid,name"),
Id: types.StringValue("pid,region,name"),
ProjectId: types.StringValue("pid"),
Name: types.StringValue("name"),
KubernetesVersion: types.StringNull(),
@ -276,6 +283,7 @@ func TestMapFields(t *testing.T) {
Hibernations: types.ListNull(types.ObjectType{AttrTypes: hibernationTypes}),
Extensions: types.ObjectNull(extensionsTypes),
EgressAddressRanges: types.ListNull(types.StringType),
Region: types.StringValue(testRegion),
},
true,
},
@ -300,8 +308,9 @@ func TestMapFields(t *testing.T) {
},
Name: utils.Ptr("name"),
},
testRegion,
Model{
Id: types.StringValue("pid,name"),
Id: types.StringValue("pid,region,name"),
ProjectId: types.StringValue("pid"),
Name: types.StringValue("name"),
KubernetesVersion: types.StringNull(),
@ -324,6 +333,7 @@ func TestMapFields(t *testing.T) {
"zones": types.ListNull(types.StringType),
}),
}),
Region: types.StringValue(testRegion),
},
true,
},
@ -348,8 +358,9 @@ func TestMapFields(t *testing.T) {
Extensions: &ske.Extension{},
Name: utils.Ptr("name"),
},
testRegion,
Model{
Id: types.StringValue("pid,name"),
Id: types.StringValue("pid,region,name"),
ProjectId: types.StringValue("pid"),
Name: types.StringValue("name"),
KubernetesVersion: types.StringNull(),
@ -372,6 +383,7 @@ func TestMapFields(t *testing.T) {
"zones": types.ListNull(types.StringType),
}),
}),
Region: types.StringValue(testRegion),
},
true,
},
@ -407,8 +419,9 @@ func TestMapFields(t *testing.T) {
},
Name: utils.Ptr("name"),
},
testRegion,
Model{
Id: types.StringValue("pid,name"),
Id: types.StringValue("pid,region,name"),
ProjectId: types.StringValue("pid"),
Name: types.StringValue("name"),
KubernetesVersion: types.StringNull(),
@ -433,6 +446,7 @@ func TestMapFields(t *testing.T) {
"zones": types.ListNull(types.StringType),
}),
}),
Region: types.StringValue(testRegion),
},
true,
},
@ -444,8 +458,9 @@ func TestMapFields(t *testing.T) {
Extensions: &ske.Extension{},
Name: utils.Ptr("name"),
},
testRegion,
Model{
Id: types.StringValue("pid,name"),
Id: types.StringValue("pid,region,name"),
ProjectId: types.StringValue("pid"),
Name: types.StringValue("name"),
KubernetesVersion: types.StringNull(),
@ -455,6 +470,7 @@ func TestMapFields(t *testing.T) {
Hibernations: types.ListNull(types.ObjectType{AttrTypes: hibernationTypes}),
Extensions: types.ObjectNull(extensionsTypes),
EgressAddressRanges: types.ListNull(types.StringType),
Region: types.StringValue(testRegion),
},
true,
},
@ -573,8 +589,9 @@ func TestMapFields(t *testing.T) {
Hibernated: nil,
},
},
testRegion,
Model{
Id: types.StringValue("pid,name"),
Id: types.StringValue("pid,region,name"),
ProjectId: types.StringValue("pid"),
Name: types.StringValue("name"),
KubernetesVersion: types.StringNull(),
@ -659,6 +676,7 @@ func TestMapFields(t *testing.T) {
}),
}),
}),
Region: types.StringValue(testRegion),
},
true,
},
@ -667,6 +685,7 @@ func TestMapFields(t *testing.T) {
types.ObjectNull(extensionsTypes),
types.ListNull(types.ObjectType{AttrTypes: nodePoolTypes}),
nil,
testRegion,
Model{},
false,
},
@ -675,6 +694,7 @@ func TestMapFields(t *testing.T) {
types.ObjectNull(extensionsTypes),
types.ListNull(types.ObjectType{AttrTypes: nodePoolTypes}),
&ske.Cluster{},
testRegion,
Model{},
false,
},
@ -686,7 +706,7 @@ func TestMapFields(t *testing.T) {
Extensions: tt.stateExtensions,
NodePools: tt.stateNodePools,
}
err := mapFields(context.Background(), tt.input, state)
err := mapFields(context.Background(), tt.input, state, tt.region)
if !tt.isValid && err == nil {
t.Fatalf("Should have failed")
}

View file

@ -1,142 +0,0 @@
package ske
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/schema/validator"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
"github.com/stackitcloud/stackit-sdk-go/core/config"
"github.com/stackitcloud/stackit-sdk-go/services/serviceenablement"
"github.com/stackitcloud/stackit-sdk-go/services/ske"
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core"
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/utils"
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/validate"
)
// Ensure the implementation satisfies the expected interfaces.
var (
_ datasource.DataSource = &projectDataSource{}
)
// NewProjectDataSource is a helper function to simplify the provider implementation.
func NewProjectDataSource() datasource.DataSource {
return &projectDataSource{}
}
// projectDataSource is the data source implementation.
type projectDataSource struct {
skeClient *ske.APIClient
enablementClient *serviceenablement.APIClient
}
// Metadata returns the data source type name.
func (r *projectDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_ske_project"
}
// Configure adds the provider configured client to the data source.
func (r *projectDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
// Prevent panic if the provider has not been configured.
if req.ProviderData == nil {
return
}
providerData, ok := req.ProviderData.(core.ProviderData)
if !ok {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Expected configure type stackit.ProviderData, got %T", req.ProviderData))
return
}
var apiClient *ske.APIClient
var enablementClient *serviceenablement.APIClient
var err error
if providerData.SKECustomEndpoint != "" {
apiClient, err = ske.NewAPIClient(
config.WithCustomAuth(providerData.RoundTripper),
config.WithEndpoint(providerData.SKECustomEndpoint),
)
} else {
apiClient, err = ske.NewAPIClient(
config.WithCustomAuth(providerData.RoundTripper),
config.WithRegion(providerData.GetRegion()),
)
}
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Configuring client: %v. This is an error related to the provider configuration, not to the data source configuration", err))
return
}
if providerData.ServiceEnablementCustomEndpoint != "" {
enablementClient, err = serviceenablement.NewAPIClient(
config.WithCustomAuth(providerData.RoundTripper),
config.WithEndpoint(providerData.ServiceEnablementCustomEndpoint),
)
} else {
enablementClient, err = serviceenablement.NewAPIClient(
config.WithCustomAuth(providerData.RoundTripper),
config.WithRegion(providerData.GetRegion()),
)
}
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring Service Enablement API client", fmt.Sprintf("Configuring client: %v. This is an error related to the provider configuration, not to the resource configuration", err))
return
}
r.skeClient = apiClient
r.enablementClient = enablementClient
tflog.Info(ctx, "SKE client configured")
}
// Schema defines the schema for the data source.
func (r *projectDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
resp.Schema = schema.Schema{
Description: "SKE project data source schema. Must have a `region` specified in the provider configuration. Warning: SKE project resource is no longer in use and will be removed with the next release. SKE service enablement is done automatically when a new cluster is created.",
DeprecationMessage: "SKE project datasource is no longer in use and will be removed after October 10th 2024. SKE service enablement is done automatically when a new cluster is created.",
Attributes: map[string]schema.Attribute{
"id": schema.StringAttribute{
				Description: "Terraform's internal data source ID. It is structured as \"`project_id`\".",
Computed: true,
},
"project_id": schema.StringAttribute{
Description: "STACKIT Project ID in which the kubernetes project is enabled.",
Required: true,
Validators: []validator.String{
validate.UUID(),
validate.NoSeparator(),
},
},
},
}
}
// Read refreshes the Terraform state with the latest data.
func (r *projectDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform
	var model Model
	resp.Diagnostics.Append(req.Config.Get(ctx, &model)...)
	if resp.Diagnostics.HasError() {
		return
	}
	projectId := model.ProjectId.ValueString()
	ctx = tflog.SetField(ctx, "project_id", projectId)
	// Querying the enablement status doubles as an existence check for the SKE project.
	if _, err := r.enablementClient.GetServiceStatus(ctx, projectId, utils.SKEServiceId).Execute(); err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading project", fmt.Sprintf("Calling API: %v", err))
		return
	}
	model.Id = types.StringValue(projectId)
	model.ProjectId = types.StringValue(projectId)
	resp.Diagnostics.Append(resp.State.Set(ctx, model)...)
	if resp.Diagnostics.HasError() {
		return
	}
	tflog.Info(ctx, "SKE project read")
}

View file

@ -1,249 +0,0 @@
package ske
import (
"context"
"fmt"
"strings"
"github.com/hashicorp/terraform-plugin-framework/path"
"github.com/hashicorp/terraform-plugin-framework/resource"
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
"github.com/hashicorp/terraform-plugin-framework/schema/validator"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
"github.com/stackitcloud/stackit-sdk-go/core/config"
"github.com/stackitcloud/stackit-sdk-go/services/serviceenablement"
enablementWait "github.com/stackitcloud/stackit-sdk-go/services/serviceenablement/wait"
"github.com/stackitcloud/stackit-sdk-go/services/ske"
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core"
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/utils"
"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/validate"
)
// Ensure the implementation satisfies the expected interfaces.
var (
	_ resource.Resource                = &projectResource{}
	_ resource.ResourceWithConfigure   = &projectResource{}
	_ resource.ResourceWithImportState = &projectResource{}
)

// Model maps the SKE project schema data for both the resource and the data source.
type Model struct {
	Id        types.String `tfsdk:"id"`         // Terraform-internal ID; equal to project_id.
	ProjectId types.String `tfsdk:"project_id"` // STACKIT project in which SKE is enabled.
}
// NewProjectResource is a helper function to simplify the provider implementation.
func NewProjectResource() resource.Resource {
	r := &projectResource{}
	return r
}
// projectResource is the resource implementation.
type projectResource struct {
	skeClient        *ske.APIClient               // SKE API client; used to list clusters before deletion
	enablementClient *serviceenablement.APIClient // service enablement API client; enables/disables SKE
}
// Metadata returns the resource type name.
func (r *projectResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
	typeName := req.ProviderTypeName + "_ske_project"
	resp.TypeName = typeName
}
// Configure adds the provider configured client to the resource.
func (r *projectResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
	// Prevent panic if the provider has not been configured.
	if req.ProviderData == nil {
		return
	}

	providerData, ok := req.ProviderData.(core.ProviderData)
	if !ok {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Expected configure type stackit.ProviderData, got %T", req.ProviderData))
		return
	}

	// Build the SKE client, honoring a custom endpoint when one is configured.
	var skeAPI *ske.APIClient
	var err error
	if providerData.SKECustomEndpoint == "" {
		skeAPI, err = ske.NewAPIClient(
			config.WithCustomAuth(providerData.RoundTripper),
			config.WithRegion(providerData.GetRegion()),
		)
	} else {
		skeAPI, err = ske.NewAPIClient(
			config.WithCustomAuth(providerData.RoundTripper),
			config.WithEndpoint(providerData.SKECustomEndpoint),
		)
	}
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring SKE API client", fmt.Sprintf("Configuring client: %v. This is an error related to the provider configuration, not to the resource configuration", err))
		return
	}

	// Build the service enablement client the same way.
	var enablementAPI *serviceenablement.APIClient
	if providerData.ServiceEnablementCustomEndpoint == "" {
		enablementAPI, err = serviceenablement.NewAPIClient(
			config.WithCustomAuth(providerData.RoundTripper),
			config.WithRegion(providerData.GetRegion()),
		)
	} else {
		enablementAPI, err = serviceenablement.NewAPIClient(
			config.WithCustomAuth(providerData.RoundTripper),
			config.WithEndpoint(providerData.ServiceEnablementCustomEndpoint),
		)
	}
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring Service Enablement API client", fmt.Sprintf("Configuring client: %v. This is an error related to the provider configuration, not to the resource configuration", err))
		return
	}

	r.skeClient = skeAPI
	r.enablementClient = enablementAPI
	tflog.Info(ctx, "SKE project client configured")
}
// Schema returns the Terraform schema structure
func (r *projectResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
	// Attributes are assembled up-front to keep the schema literal readable.
	attributes := map[string]schema.Attribute{
		"id": schema.StringAttribute{
			Description: "Terraform's internal resource ID. It is structured as \"`project_id`\".",
			Computed:    true,
			PlanModifiers: []planmodifier.String{
				stringplanmodifier.UseStateForUnknown(),
			},
		},
		"project_id": schema.StringAttribute{
			Description: "STACKIT Project ID in which the kubernetes project is enabled.",
			Required:    true,
			Validators: []validator.String{
				validate.UUID(),
				validate.NoSeparator(),
			},
			PlanModifiers: []planmodifier.String{
				// Changing the project forces a replacement of the resource.
				stringplanmodifier.RequiresReplace(),
			},
		},
	}
	resp.Schema = schema.Schema{
		Description: "SKE project resource schema. Must have a `region` specified in the provider configuration. This resource allows you to enable the SKE service and you can only have one per project. " +
			"Before deleting this resource, all SKE clusters associated to the project must be deleted. Otherwise, error would occur due to the existing clusters. In such case, it is highly recommended to remove the SKE project from the state, directly using the \"`terraform state rm`\". " +
			"Warning: SKE project resource is no longer in use and will be removed after October 10th 2024. SKE service enablement is done automatically when a new cluster is created.",
		DeprecationMessage: "SKE project resource is no longer in use and will be removed after October 10th 2024. SKE service enablement is done automatically when a new cluster is created. " +
			"For deleting the SKE project resource, it is highly recommended to remove the SKE project from the state, directly using the \"`terraform state rm`\".",
		Attributes: attributes,
	}
}
// Create creates the resource and sets the initial Terraform state.
// It enables the SKE service for the project and blocks until the
// asynchronous enablement has completed.
func (r *projectResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform
	var model Model
	resp.Diagnostics.Append(req.Plan.Get(ctx, &model)...)
	if resp.Diagnostics.HasError() {
		return
	}
	projectId := model.ProjectId.ValueString()
	// Attach the project ID for log correlation, consistent with Read and Delete.
	ctx = tflog.SetField(ctx, "project_id", projectId)

	// If SKE functionality is not enabled, enable it
	err := r.enablementClient.EnableService(ctx, projectId, utils.SKEServiceId).Execute()
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating project", fmt.Sprintf("Calling API to enable SKE: %v", err))
		return
	}
	// Enablement is asynchronous; wait until the service reports enabled.
	_, err = enablementWait.EnableServiceWaitHandler(ctx, r.enablementClient, projectId, utils.SKEServiceId).WaitWithContext(ctx)
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating project", fmt.Sprintf("Wait for SKE enablement: %v", err))
		return
	}
	// Populate the computed id, mirroring Read; otherwise it would stay unknown in state.
	model.Id = types.StringValue(projectId)
	diags := resp.State.Set(ctx, model)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
	tflog.Info(ctx, "SKE project created")
}
// Read refreshes the Terraform state with the latest data.
// The enablement status call doubles as an existence check for the SKE project.
func (r *projectResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform
	var model Model
	diags := req.State.Get(ctx, &model)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
	projectId := model.ProjectId.ValueString()
	// Attach the project ID for log correlation, consistent with the data source Read and Delete.
	ctx = tflog.SetField(ctx, "project_id", projectId)
	_, err := r.enablementClient.GetServiceStatus(ctx, projectId, utils.SKEServiceId).Execute()
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading project", fmt.Sprintf("Calling API: %v", err))
		return
	}
	model.Id = types.StringValue(projectId)
	model.ProjectId = types.StringValue(projectId)
	diags = resp.State.Set(ctx, model)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
	tflog.Info(ctx, "SKE project read")
}
// Update updates the resource and sets the updated Terraform state on success.
// Every attribute of this resource forces replacement, so the framework should
// never plan an in-place update; reaching this method indicates a provider bug.
func (r *projectResource) Update(ctx context.Context, _ resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform
	// Update shouldn't be called
	core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating project", "Project can't be updated")
}
// Delete deletes the resource and removes the Terraform state on success.
// It refuses to proceed while the project still contains SKE clusters, then
// disables the SKE service and waits for the disablement to complete.
func (r *projectResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform
	var model Model
	resp.Diagnostics.Append(req.State.Get(ctx, &model)...)
	if resp.Diagnostics.HasError() {
		return
	}
	projectId := model.ProjectId.ValueString()
	ctx = tflog.SetField(ctx, "project_id", projectId)

	c := r.skeClient
	clusters, err := c.ListClusters(ctx, projectId).Execute()
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting project", fmt.Sprintf("Calling API to get the list of clusters: %v", err))
		return
	}
	// Items is a pointer field in the generated SDK model; check it for nil
	// before dereferencing — a response without Items would otherwise panic.
	if clusters != nil && clusters.Items != nil && len(*clusters.Items) > 0 {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting project", fmt.Sprintln("You still have clusters in the project. Please delete them before deleting the project."))
		return
	}
	err = r.enablementClient.DisableService(ctx, projectId, utils.SKEServiceId).Execute()
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting project", fmt.Sprintf("Calling API to disable SKE: %v", err))
		return
	}
	// Disablement is asynchronous; wait until the service reports disabled.
	_, err = enablementWait.DisableServiceWaitHandler(ctx, r.enablementClient, projectId, utils.SKEServiceId).WaitWithContext(ctx)
	if err != nil {
		core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting project", fmt.Sprintf("Wait for SKE disabling: %v", err))
		return
	}
	tflog.Info(ctx, "SKE project deleted")
}
// ImportState imports a resource into the Terraform state on success.
// The expected format of the resource import identifier is: project_id
func (r *projectResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { // nolint:gocritic // function signature required by Terraform
	idParts := strings.Split(req.ID, core.Separator)
	// A valid identifier is exactly one non-empty part (the project ID).
	valid := len(idParts) == 1 && idParts[0] != ""
	if !valid {
		core.LogAndAddError(ctx, &resp.Diagnostics,
			"Error importing project",
			fmt.Sprintf("Expected import identifier with format: [project_id] Got: %q", req.ID),
		)
		return
	}
	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...)
	tflog.Info(ctx, "SKE project state imported")
}

View file

@ -22,16 +22,16 @@ var clusterResource = map[string]string{
"name": fmt.Sprintf("cl-%s", acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum)),
"name_min": fmt.Sprintf("cl-min-%s", acctest.RandStringFromCharSet(3, acctest.CharSetAlphaNum)),
"kubernetes_version_min": "1.30",
"kubernetes_version_used": "1.30.7",
"kubernetes_version_used": "1.30.10",
"kubernetes_version_min_new": "1.31",
"kubernetes_version_used_new": "1.31.4",
"nodepool_name": "np-acc-test",
"nodepool_name_min": "np-acc-min-test",
"nodepool_machine_type": "b1.2",
"nodepool_os_version_min": "4081.2.0",
"nodepool_os_version_used": "4081.2.0",
"nodepool_os_version_min_new": "4081.2.1",
"nodepool_os_version_used_new": "4081.2.1",
"nodepool_os_version_min": "4081.2.1",
"nodepool_os_version_used": "4081.2.1",
"nodepool_os_version_min_new": "4152.2.1",
"nodepool_os_version_used_new": "4152.2.1",
"nodepool_os_name": "flatcar",
"nodepool_minimum": "2",
"nodepool_maximum": "3",
@ -86,11 +86,15 @@ func getDnsConfig() string {
)
}
func getConfig(kubernetesVersion, nodePoolMachineOSVersion string, maintenanceEnd *string) string {
func getConfig(kubernetesVersion, nodePoolMachineOSVersion string, maintenanceEnd, region *string) string {
maintenanceEndTF := clusterResource["maintenance_end"]
if maintenanceEnd != nil {
maintenanceEndTF = *maintenanceEnd
}
var regionConfig string
if region != nil {
regionConfig = fmt.Sprintf(`region = %q`, *region)
}
return fmt.Sprintf(`
%s
@ -148,6 +152,7 @@ func getConfig(kubernetesVersion, nodePoolMachineOSVersion string, maintenanceEn
start = "%s"
end = "%s"
}
%s
}
resource "stackit_ske_kubeconfig" "kubeconfig" {
@ -193,6 +198,7 @@ func getConfig(kubernetesVersion, nodePoolMachineOSVersion string, maintenanceEn
clusterResource["maintenance_enable_machine_image_version_updates"],
clusterResource["maintenance_start"],
maintenanceEndTF,
regionConfig,
// Kubeconfig
clusterResource["kubeconfig_expiration"],
@ -200,6 +206,7 @@ func getConfig(kubernetesVersion, nodePoolMachineOSVersion string, maintenanceEn
}
func TestAccSKE(t *testing.T) {
testRegion := utils.Ptr("eu01")
resource.Test(t, resource.TestCase{
ProtoV6ProviderFactories: testutil.TestAccProtoV6ProviderFactories,
CheckDestroy: testAccCheckSKEDestroy,
@ -208,7 +215,7 @@ func TestAccSKE(t *testing.T) {
// 1) Creation
{
Config: getConfig(clusterResource["kubernetes_version_min"], clusterResource["nodepool_os_version_min"], nil),
Config: getConfig(clusterResource["kubernetes_version_min"], clusterResource["nodepool_os_version_min"], nil, testRegion),
Check: resource.ComposeAggregateTestCheckFunc(
// cluster data
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "name", clusterResource["name"]),
@ -250,6 +257,7 @@ func TestAccSKE(t *testing.T) {
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.enable_machine_image_version_updates", clusterResource["maintenance_enable_machine_image_version_updates"]),
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.start", clusterResource["maintenance_start"]),
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.end", clusterResource["maintenance_end"]),
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "region", *testRegion),
// Kubeconfig
@ -277,15 +285,16 @@ func TestAccSKE(t *testing.T) {
}
`,
getConfig(clusterResource["kubernetes_version_min"], clusterResource["nodepool_os_version_min"], nil),
getConfig(clusterResource["kubernetes_version_min"], clusterResource["nodepool_os_version_min"], nil, testRegion),
clusterResource["project_id"],
clusterResource["name"],
),
Check: resource.ComposeAggregateTestCheckFunc(
// cluster data
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "id", fmt.Sprintf("%s,%s",
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "id", fmt.Sprintf("%s,%s,%s",
clusterResource["project_id"],
*testRegion,
clusterResource["name"],
)),
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "project_id", clusterResource["project_id"]),
@ -339,7 +348,7 @@ func TestAccSKE(t *testing.T) {
if !ok {
return "", fmt.Errorf("couldn't find attribute name")
}
return fmt.Sprintf("%s,%s", testutil.ProjectId, name), nil
return fmt.Sprintf("%s,%s,%s", testutil.ProjectId, testutil.Region, name), nil
},
ImportState: true,
ImportStateVerify: true,
@ -348,7 +357,7 @@ func TestAccSKE(t *testing.T) {
},
// 4) Update kubernetes version, OS version and maintenance end
{
Config: getConfig(clusterResource["kubernetes_version_min_new"], clusterResource["nodepool_os_version_min_new"], utils.Ptr(clusterResource["maintenance_end_new"])),
Config: getConfig(clusterResource["kubernetes_version_min_new"], clusterResource["nodepool_os_version_min_new"], utils.Ptr(clusterResource["maintenance_end_new"]), testRegion),
Check: resource.ComposeAggregateTestCheckFunc(
// cluster data
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "project_id", clusterResource["project_id"]),
@ -390,11 +399,12 @@ func TestAccSKE(t *testing.T) {
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.enable_machine_image_version_updates", clusterResource["maintenance_enable_machine_image_version_updates"]),
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.start", clusterResource["maintenance_start"]),
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.end", clusterResource["maintenance_end_new"]),
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "region", *testRegion),
),
},
// 5) Downgrade kubernetes and nodepool machine OS version
{
Config: getConfig(clusterResource["kubernetes_version_min"], clusterResource["nodepool_os_version_min"], utils.Ptr(clusterResource["maintenance_end_new"])),
Config: getConfig(clusterResource["kubernetes_version_min"], clusterResource["nodepool_os_version_min"], utils.Ptr(clusterResource["maintenance_end_new"]), testRegion),
Check: resource.ComposeAggregateTestCheckFunc(
// cluster data
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "project_id", clusterResource["project_id"]),
@ -432,7 +442,7 @@ func testAccCheckSKEDestroy(s *terraform.State) error {
if rs.Type != "stackit_ske_cluster" {
continue
}
// cluster terraform ID: = "[project_id],[cluster_name]"
// cluster terraform ID: = "[project_id],[region],[cluster_name]"
clusterName := strings.Split(rs.Primary.ID, core.Separator)[1]
clustersToDestroy = append(clustersToDestroy, clusterName)
}

View file

@ -70,7 +70,6 @@ import (
serviceAccountToken "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/serviceaccount/token"
skeCluster "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/ske/cluster"
skeKubeconfig "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/ske/kubeconfig"
skeProject "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/ske/project"
sqlServerFlexInstance "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/sqlserverflex/instance"
sqlServerFlexUser "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/sqlserverflex/user"
@ -505,7 +504,6 @@ func (p *Provider) DataSources(_ context.Context) []func() datasource.DataSource
serverUpdateSchedule.NewScheduleDataSource,
serverUpdateSchedule.NewSchedulesDataSource,
serviceAccount.NewServiceAccountDataSource,
skeProject.NewProjectDataSource,
skeCluster.NewClusterDataSource,
}
}
@ -568,7 +566,6 @@ func (p *Provider) Resources(_ context.Context) []func() resource.Resource {
serviceAccount.NewServiceAccountResource,
serviceAccountToken.NewServiceAccountTokenResource,
serviceAccountKey.NewServiceAccountKeyResource,
skeProject.NewProjectResource,
skeCluster.NewClusterResource,
skeKubeconfig.NewKubeconfigResource,
}