Initial commit
This commit is contained in:
commit
e4c8a6fbf4
186 changed files with 29501 additions and 0 deletions
319
stackit/services/ske/cluster/datasource.go
Normal file
319
stackit/services/ske/cluster/datasource.go
Normal file
|
|
@ -0,0 +1,319 @@
|
|||
package ske
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource"
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
|
||||
"github.com/hashicorp/terraform-plugin-framework/diag"
|
||||
"github.com/hashicorp/terraform-plugin-framework/schema/validator"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
"github.com/stackitcloud/stackit-sdk-go/core/config"
|
||||
"github.com/stackitcloud/stackit-sdk-go/services/ske"
|
||||
"github.com/stackitcloud/terraform-provider-stackit/stackit/core"
|
||||
"github.com/stackitcloud/terraform-provider-stackit/stackit/validate"
|
||||
)
|
||||
|
||||
// Ensure the implementation satisfies the expected interfaces.
|
||||
var (
|
||||
_ datasource.DataSource = &clusterDataSource{}
|
||||
)
|
||||
|
||||
// NewClusterDataSource is a helper function to simplify the provider implementation.
|
||||
func NewClusterDataSource() datasource.DataSource {
|
||||
return &clusterDataSource{}
|
||||
}
|
||||
|
||||
// clusterDataSource is the data source implementation.
type clusterDataSource struct {
	// client is the SKE API client; it is populated in Configure and used by
	// Read and getCredential to talk to the SKE service.
	client *ske.APIClient
}
|
||||
|
||||
// Metadata returns the resource type name.
|
||||
func (r *clusterDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
|
||||
resp.TypeName = req.ProviderTypeName + "_ske_cluster"
|
||||
}
|
||||
|
||||
// Configure adds the provider configured client to the resource.
|
||||
func (r *clusterDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
|
||||
// Prevent panic if the provider has not been configured.
|
||||
if req.ProviderData == nil {
|
||||
return
|
||||
}
|
||||
|
||||
providerData, ok := req.ProviderData.(core.ProviderData)
|
||||
if !ok {
|
||||
resp.Diagnostics.AddError("Unexpected Data Source Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. Please report this issue to the provider developers.", req.ProviderData))
|
||||
return
|
||||
}
|
||||
|
||||
var apiClient *ske.APIClient
|
||||
var err error
|
||||
if providerData.SKECustomEndpoint != "" {
|
||||
apiClient, err = ske.NewAPIClient(
|
||||
config.WithCustomAuth(providerData.RoundTripper),
|
||||
config.WithEndpoint(providerData.SKECustomEndpoint),
|
||||
)
|
||||
} else {
|
||||
apiClient, err = ske.NewAPIClient(
|
||||
config.WithCustomAuth(providerData.RoundTripper),
|
||||
config.WithRegion(providerData.Region),
|
||||
)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
resp.Diagnostics.AddError("Could not Configure API Client", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "SKE client configured")
|
||||
r.client = apiClient
|
||||
}
|
||||
// Schema defines the schema for the data source.
func (r *clusterDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Description: "SKE Cluster data source schema.",
		Attributes: map[string]schema.Attribute{
			"id": schema.StringAttribute{
				Description: "Terraform's internal resource ID.",
				Computed:    true,
			},
			// project_id and name are the lookup keys; everything else is
			// computed from the API response.
			"project_id": schema.StringAttribute{
				Description: "STACKIT project ID to which the cluster is associated.",
				Required:    true,
				Validators: []validator.String{
					validate.UUID(),
					validate.NoSeparator(),
				},
			},
			"name": schema.StringAttribute{
				Description: "The cluster name.",
				Required:    true,
			},
			"kubernetes_version": schema.StringAttribute{
				Description: "Kubernetes version.",
				Computed:    true,
			},
			"kubernetes_version_used": schema.StringAttribute{
				Description: "Full Kubernetes version used. For example, if `1.22` was selected, this value may result to `1.22.15`",
				Computed:    true,
			},
			"allow_privileged_containers": schema.BoolAttribute{
				Description:        "DEPRECATED as of Kubernetes 1.25+\n Flag to specify if privileged mode for containers is enabled or not.\nThis should be used with care since it also disables a couple of other features like the use of some volume type (e.g. PVCs).",
				DeprecationMessage: "Please remove this flag from your configuration when using Kubernetes version 1.25+.",
				Computed:           true,
			},

			"node_pools": schema.ListNestedAttribute{
				Description: "One or more `node_pool` block as defined below.",
				Computed:    true,
				NestedObject: schema.NestedAttributeObject{
					Attributes: map[string]schema.Attribute{
						"name": schema.StringAttribute{
							Description: "Specifies the name of the node pool.",
							Computed:    true,
						},
						"machine_type": schema.StringAttribute{
							Description: "The machine type.",
							Computed:    true,
						},
						"os_name": schema.StringAttribute{
							Description: "The name of the OS image.",
							Computed:    true,
						},
						"os_version": schema.StringAttribute{
							Description: "The OS image version.",
							Computed:    true,
						},
						"minimum": schema.Int64Attribute{
							Description: "Minimum number of nodes in the pool.",
							Computed:    true,
						},

						"maximum": schema.Int64Attribute{
							Description: "Maximum number of nodes in the pool.",
							Computed:    true,
						},

						"max_surge": schema.Int64Attribute{
							Description: "The maximum number of nodes upgraded simultaneously.",
							Computed:    true,
						},
						"max_unavailable": schema.Int64Attribute{
							Description: "The maximum number of nodes unavailable during upgraded.",
							Computed:    true,
						},
						"volume_type": schema.StringAttribute{
							Description: "Specifies the volume type.",
							Computed:    true,
						},
						"volume_size": schema.Int64Attribute{
							Description: "The volume size in GB.",
							Computed:    true,
						},
						"labels": schema.MapAttribute{
							Description: "Labels to add to each node.",
							Computed:    true,
							ElementType: types.StringType,
						},
						"taints": schema.ListNestedAttribute{
							Description: "Specifies a taint list as defined below.",
							Computed:    true,
							NestedObject: schema.NestedAttributeObject{
								Attributes: map[string]schema.Attribute{
									"effect": schema.StringAttribute{
										Description: "The taint effect.",
										Computed:    true,
									},
									"key": schema.StringAttribute{
										Description: "Taint key to be applied to a node.",
										Computed:    true,
									},
									"value": schema.StringAttribute{
										Description: "Taint value corresponding to the taint key.",
										Computed:    true,
									},
								},
							},
						},
						"cri": schema.StringAttribute{
							Description: "Specifies the container runtime.",
							Computed:    true,
						},
						"availability_zones": schema.ListAttribute{
							Description: "Specify a list of availability zones.",
							ElementType: types.StringType,
							Computed:    true,
						},
					},
				},
			},
			"maintenance": schema.SingleNestedAttribute{
				Description: "A single maintenance block as defined below",
				Computed:    true,
				Attributes: map[string]schema.Attribute{
					"enable_kubernetes_version_updates": schema.BoolAttribute{
						Description: "Flag to enable/disable auto-updates of the Kubernetes version.",
						Computed:    true,
					},
					"enable_machine_image_version_updates": schema.BoolAttribute{
						Description: "Flag to enable/disable auto-updates of the OS image version.",
						Computed:    true,
					},
					"start": schema.StringAttribute{
						Description: "Date time for maintenance window start.",
						Computed:    true,
					},
					"end": schema.StringAttribute{
						Description: "Date time for maintenance window end.",
						Computed:    true,
					},
				},
			},

			"hibernations": schema.ListNestedAttribute{
				Description: "One or more hibernation block as defined below.",
				Computed:    true,
				NestedObject: schema.NestedAttributeObject{
					Attributes: map[string]schema.Attribute{
						"start": schema.StringAttribute{
							Description: "Start time of cluster hibernation in crontab syntax.",
							Computed:    true,
						},
						"end": schema.StringAttribute{
							Description: "End time of hibernation, in crontab syntax.",
							Computed:    true,
						},
						"timezone": schema.StringAttribute{
							Description: "Timezone name corresponding to a file in the IANA Time Zone database.",
							Computed:    true,
						},
					},
				},
			},

			"extensions": schema.SingleNestedAttribute{
				Description: "A single extensions block as defined below",
				Computed:    true,
				Attributes: map[string]schema.Attribute{
					"argus": schema.SingleNestedAttribute{
						Description: "A single argus block as defined below",
						Computed:    true,
						Attributes: map[string]schema.Attribute{
							"enabled": schema.BoolAttribute{
								Description: "Flag to enable/disable argus extensions.",
								Computed:    true,
							},
							"argus_instance_id": schema.StringAttribute{
								Description: "Instance ID of argus",
								Computed:    true,
							},
						},
					},
					"acl": schema.SingleNestedAttribute{
						Description: "Cluster access control configuration",
						Computed:    true,
						Attributes: map[string]schema.Attribute{
							"enabled": schema.BoolAttribute{
								Description: "Is ACL enabled?",
								Computed:    true,
							},
							"allowed_cidrs": schema.ListAttribute{
								Description: "Specify a list of CIDRs to whitelist",
								Computed:    true,
								ElementType: types.StringType,
							},
						},
					},
				},
			},
			// kube_config is fetched via a separate credentials call (see
			// getCredential) and is marked sensitive.
			"kube_config": schema.StringAttribute{
				Description: "Kube config file used for connecting to the cluster",
				Sensitive:   true,
				Computed:    true,
			},
		},
	}
}
|
||||
|
||||
// Read refreshes the Terraform state with the latest data.
|
||||
func (r *clusterDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform
|
||||
var state Cluster
|
||||
diags := req.Config.Get(ctx, &state)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
|
||||
projectId := state.ProjectId.ValueString()
|
||||
name := state.Name.ValueString()
|
||||
ctx = tflog.SetField(ctx, "project_id", projectId)
|
||||
ctx = tflog.SetField(ctx, "name", name)
|
||||
clusterResp, err := r.client.GetCluster(ctx, projectId, name).Execute()
|
||||
if err != nil {
|
||||
core.LogAndAddError(ctx, &resp.Diagnostics, fmt.Sprintf("Unable to read cluster, project_id = %s, name = %s", projectId, name), err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
err = mapFields(ctx, clusterResp, &state)
|
||||
if err != nil {
|
||||
core.LogAndAddError(ctx, &resp.Diagnostics, "Mapping fields", err.Error())
|
||||
return
|
||||
}
|
||||
r.getCredential(ctx, &diags, &state)
|
||||
// Set refreshed state
|
||||
diags = resp.State.Set(ctx, state)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
tflog.Info(ctx, "SKE cluster read")
|
||||
}
|
||||
|
||||
func (r *clusterDataSource) getCredential(ctx context.Context, diags *diag.Diagnostics, model *Cluster) {
|
||||
c := r.client
|
||||
res, err := c.GetCredentials(ctx, model.ProjectId.ValueString(), model.Name.ValueString()).Execute()
|
||||
if err != nil {
|
||||
diags.AddError("failed fetching cluster credentials for data source", err.Error())
|
||||
return
|
||||
}
|
||||
model.KubeConfig = types.StringPointerValue(res.Kubeconfig)
|
||||
}
|
||||
1170
stackit/services/ske/cluster/resource.go
Normal file
1170
stackit/services/ske/cluster/resource.go
Normal file
File diff suppressed because it is too large
Load diff
635
stackit/services/ske/cluster/resource_test.go
Normal file
635
stackit/services/ske/cluster/resource_test.go
Normal file
|
|
@ -0,0 +1,635 @@
|
|||
package ske
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/hashicorp/terraform-plugin-framework/attr"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/stackitcloud/stackit-sdk-go/core/utils"
|
||||
"github.com/stackitcloud/stackit-sdk-go/services/ske"
|
||||
"github.com/stackitcloud/terraform-provider-stackit/stackit/core"
|
||||
)
|
||||
|
||||
// TestMapFields verifies that mapFields converts an SKE API ClusterResponse
// into the Terraform Cluster model, covering default/zero values, a fully
// populated response, and the error cases (nil response, missing name).
func TestMapFields(t *testing.T) {
	cs := ske.ClusterStatusState("OK")
	tests := []struct {
		description string
		input       *ske.ClusterResponse
		expected    Cluster
		isValid     bool
	}{
		{
			// Minimal response: only the name is set; all other model fields
			// must end up as their null/empty equivalents.
			"default_values",
			&ske.ClusterResponse{
				Name: utils.Ptr("name"),
			},
			Cluster{
				Id:                        types.StringValue("pid,name"),
				ProjectId:                 types.StringValue("pid"),
				Name:                      types.StringValue("name"),
				KubernetesVersion:         types.StringNull(),
				AllowPrivilegedContainers: types.BoolNull(),
				NodePools:                 []NodePool{},
				Maintenance:               types.ObjectNull(map[string]attr.Type{}),
				Hibernations:              nil,
				Extensions:                nil,
				KubeConfig:                types.StringNull(),
			},
			true,
		},
		{
			// Fully populated response: every nested structure is mapped.
			"simple_values",
			&ske.ClusterResponse{
				Extensions: &ske.Extension{
					Acl: &ske.ACL{
						AllowedCidrs: &[]string{"cidr1"},
						Enabled:      utils.Ptr(true),
					},
					Argus: &ske.Argus{
						ArgusInstanceId: utils.Ptr("aid"),
						Enabled:         utils.Ptr(true),
					},
				},
				Hibernation: &ske.Hibernation{
					Schedules: &[]ske.HibernationSchedule{
						{
							End:      utils.Ptr("2"),
							Start:    utils.Ptr("1"),
							Timezone: utils.Ptr("CET"),
						},
					},
				},
				Kubernetes: &ske.Kubernetes{
					AllowPrivilegedContainers: utils.Ptr(true),
					Version:                   utils.Ptr("1.2.3"),
				},
				Maintenance: &ske.Maintenance{
					AutoUpdate: &ske.MaintenanceAutoUpdate{
						KubernetesVersion:   utils.Ptr(true),
						MachineImageVersion: utils.Ptr(true),
					},
					TimeWindow: &ske.TimeWindow{
						Start: utils.Ptr("0000-01-02T03:04:05+06:00"),
						End:   utils.Ptr("0010-11-12T13:14:15Z"),
					},
				},
				Name: utils.Ptr("name"),
				Nodepools: &[]ske.Nodepool{
					{
						AvailabilityZones: &[]string{"z1", "z2"},
						Cri: &ske.CRI{
							Name: utils.Ptr("cri"),
						},
						Labels: &map[string]string{"k": "v"},
						Machine: &ske.Machine{
							Image: &ske.Image{
								Name:    utils.Ptr("os"),
								Version: utils.Ptr("os-ver"),
							},
							Type: utils.Ptr("B"),
						},
						MaxSurge:       utils.Ptr(int32(3)),
						MaxUnavailable: nil,
						Maximum:        utils.Ptr(int32(5)),
						Minimum:        utils.Ptr(int32(1)),
						Name:           utils.Ptr("node"),
						Taints: &[]ske.Taint{
							{
								Effect: utils.Ptr("effect"),
								Key:    utils.Ptr("key"),
								Value:  utils.Ptr("value"),
							},
						},
						Volume: &ske.Volume{
							Size: utils.Ptr(int32(3)),
							Type: utils.Ptr("type"),
						},
					},
				},
				Status: &ske.ClusterStatus{
					Aggregated: &cs,
					Error:      nil,
					Hibernated: nil,
				},
			},
			Cluster{
				Id:        types.StringValue("pid,name"),
				ProjectId: types.StringValue("pid"),
				Name:      types.StringValue("name"),
				// The configured version keeps only major.minor; the full
				// patch version is exposed separately.
				KubernetesVersion:         types.StringValue("1.2"),
				KubernetesVersionUsed:     types.StringValue("1.2.3"),
				AllowPrivilegedContainers: types.BoolValue(true),

				NodePools: []NodePool{
					{
						Name:              types.StringValue("node"),
						MachineType:       types.StringValue("B"),
						OSName:            types.StringValue("os"),
						OSVersion:         types.StringValue("os-ver"),
						Minimum:           types.Int64Value(1),
						Maximum:           types.Int64Value(5),
						MaxSurge:          types.Int64Value(3),
						MaxUnavailable:    types.Int64Null(),
						VolumeType:        types.StringValue("type"),
						VolumeSize:        types.Int64Value(3),
						Labels:            types.MapValueMust(types.StringType, map[string]attr.Value{"k": types.StringValue("v")}),
						Taints: []Taint{
							{
								Effect: types.StringValue("effect"),
								Key:    types.StringValue("key"),
								Value:  types.StringValue("value"),
							},
						},
						CRI:               types.StringValue("cri"),
						AvailabilityZones: types.ListValueMust(types.StringType, []attr.Value{types.StringValue("z1"), types.StringValue("z2")}),
					},
				},
				// Maintenance times are reduced to time-of-day with offset.
				Maintenance: types.ObjectValueMust(maintenanceTypes, map[string]attr.Value{
					"enable_kubernetes_version_updates":    types.BoolValue(true),
					"enable_machine_image_version_updates": types.BoolValue(true),
					"start":                                types.StringValue("03:04:05+06:00"),
					"end":                                  types.StringValue("13:14:15Z"),
				}),
				Hibernations: []Hibernation{
					{
						Start:    types.StringValue("1"),
						End:      types.StringValue("2"),
						Timezone: types.StringValue("CET"),
					},
				},
				Extensions: &Extensions{
					Argus: &ArgusExtension{
						Enabled:         types.BoolValue(true),
						ArgusInstanceId: types.StringValue("aid"),
					},
					ACL: &ACL{
						Enabled: types.BoolValue(true),
						AllowedCIDRs: types.ListValueMust(types.StringType, []attr.Value{
							types.StringValue("cidr1"),
						}),
					},
				},
				KubeConfig: types.StringNull(),
			},
			true,
		},
		{
			// A nil response must be rejected.
			"nil_response",
			nil,
			Cluster{},
			false,
		},
		{
			// A response without a name (the resource identifier) must be rejected.
			"no_resource_id",
			&ske.ClusterResponse{},
			Cluster{},
			false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.description, func(t *testing.T) {
			// Pre-seed the state with the project ID, as mapFields expects it
			// to be present (it is not part of the API response).
			state := &Cluster{
				ProjectId: tt.expected.ProjectId,
			}
			err := mapFields(context.Background(), tt.input, state)
			if !tt.isValid && err == nil {
				t.Fatalf("Should have failed")
			}
			if tt.isValid && err != nil {
				t.Fatalf("Should not have failed: %v", err)
			}
			if tt.isValid {
				diff := cmp.Diff(state, &tt.expected)
				if diff != "" {
					t.Fatalf("Data does not match: %s", diff)
				}
			}
		})
	}
}
|
||||
|
||||
// TestLatestMatchingVersion verifies that latestMatchingVersion selects the
// highest available patch version for a provided major.minor version, flags
// deprecated selections, and fails when no version matches or the input is
// empty/nil.
func TestLatestMatchingVersion(t *testing.T) {
	tests := []struct {
		description                  string
		availableVersions            []ske.KubernetesVersion
		providedVersion              *string
		expectedVersionUsed          *string
		expectedHasDeprecatedVersion bool
		isValid                      bool
	}{
		{
			// Several 1.20.x patches available: the highest one wins.
			"available_version",
			[]ske.KubernetesVersion{
				{
					Version: utils.Ptr("1.20.0"),
					State:   utils.Ptr(VersionStateSupported),
				},
				{
					Version: utils.Ptr("1.20.1"),
					State:   utils.Ptr(VersionStateSupported),
				},
				{
					Version: utils.Ptr("1.20.2"),
					State:   utils.Ptr(VersionStateSupported),
				},
				{
					Version: utils.Ptr("1.19.0"),
					State:   utils.Ptr(VersionStateSupported),
				},
			},
			utils.Ptr("1.20"),
			utils.Ptr("1.20.2"),
			false,
			true,
		},
		{
			"available_version_no_patch",
			[]ske.KubernetesVersion{
				{
					Version: utils.Ptr("1.20.0"),
					State:   utils.Ptr(VersionStateSupported),
				},
				{
					Version: utils.Ptr("1.19.0"),
					State:   utils.Ptr(VersionStateSupported),
				},
			},
			utils.Ptr("1.20"),
			utils.Ptr("1.20.0"),
			false,
			true,
		},
		{
			// Selecting a deprecated version succeeds but sets the flag.
			"deprecated_version",
			[]ske.KubernetesVersion{
				{
					Version: utils.Ptr("1.20.0"),
					State:   utils.Ptr(VersionStateSupported),
				},
				{
					Version: utils.Ptr("1.19.0"),
					State:   utils.Ptr(VersionStateDeprecated),
				},
			},
			utils.Ptr("1.19"),
			utils.Ptr("1.19.0"),
			true,
			true,
		},
		{
			// A deprecated version exists but is not the one selected.
			"deprecated_version_not_selected",
			[]ske.KubernetesVersion{
				{
					Version: utils.Ptr("1.20.0"),
					State:   utils.Ptr(VersionStateSupported),
				},
				{
					Version: utils.Ptr("1.19.0"),
					State:   utils.Ptr(VersionStateDeprecated),
				},
			},
			utils.Ptr("1.20"),
			utils.Ptr("1.20.0"),
			false,
			true,
		},
		{
			// Preview versions are selectable and not flagged as deprecated.
			"preview_version",
			[]ske.KubernetesVersion{
				{
					Version: utils.Ptr("1.20.0"),
					State:   utils.Ptr(VersionStatePreview),
				},
				{
					Version: utils.Ptr("1.19.0"),
					State:   utils.Ptr(VersionStateSupported),
				},
			},
			utils.Ptr("1.20"),
			utils.Ptr("1.20.0"),
			false,
			true,
		},
		{
			"no_matching_available_versions",
			[]ske.KubernetesVersion{
				{
					Version: utils.Ptr("1.20.0"),
					State:   utils.Ptr(VersionStateSupported),
				},
				{
					Version: utils.Ptr("1.19.0"),
					State:   utils.Ptr(VersionStateSupported),
				},
			},
			utils.Ptr("1.21"),
			nil,
			false,
			false,
		},
		{
			"no_available_version",
			[]ske.KubernetesVersion{},
			utils.Ptr("1.20"),
			nil,
			false,
			false,
		},
		{
			"nil_available_version",
			nil,
			utils.Ptr("1.20"),
			nil,
			false,
			false,
		},
		{
			"empty_provided_version",
			[]ske.KubernetesVersion{
				{
					Version: utils.Ptr("1.20.0"),
					State:   utils.Ptr(VersionStateSupported),
				},
				{
					Version: utils.Ptr("1.19.0"),
					State:   utils.Ptr(VersionStateSupported),
				},
			},
			utils.Ptr(""),
			nil,
			false,
			false,
		},
		{
			"nil_provided_version",
			[]ske.KubernetesVersion{
				{
					Version: utils.Ptr("1.20.0"),
					State:   utils.Ptr(VersionStateSupported),
				},
				{
					Version: utils.Ptr("1.19.0"),
					State:   utils.Ptr(VersionStateSupported),
				},
			},
			nil,
			nil,
			false,
			false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.description, func(t *testing.T) {
			versionUsed, hasDeprecatedVersion, err := latestMatchingVersion(tt.availableVersions, tt.providedVersion)
			if !tt.isValid && err == nil {
				t.Fatalf("Should have failed")
			}
			if tt.isValid && err != nil {
				t.Fatalf("Should not have failed: %v", err)
			}
			if tt.isValid {
				if *versionUsed != *tt.expectedVersionUsed {
					t.Fatalf("Used version does not match: expecting %s, got %s", *tt.expectedVersionUsed, *versionUsed)
				}
				if tt.expectedHasDeprecatedVersion != hasDeprecatedVersion {
					t.Fatalf("hasDeprecatedVersion flag is wrong: expecting %t, got %t", tt.expectedHasDeprecatedVersion, hasDeprecatedVersion)
				}
			}
		})
	}
}
|
||||
func TestGetMaintenanceTimes(t *testing.T) {
|
||||
tests := []struct {
|
||||
description string
|
||||
startAPI string
|
||||
startTF *string
|
||||
endAPI string
|
||||
endTF *string
|
||||
isValid bool
|
||||
startExpected string
|
||||
endExpected string
|
||||
}{
|
||||
{
|
||||
description: "base",
|
||||
startAPI: "0001-02-03T04:05:06+07:08",
|
||||
endAPI: "0011-12-13T14:15:16+17:18",
|
||||
isValid: true,
|
||||
startExpected: "04:05:06+07:08",
|
||||
endExpected: "14:15:16+17:18",
|
||||
},
|
||||
{
|
||||
description: "base_utc",
|
||||
startAPI: "0001-02-03T04:05:06Z",
|
||||
endAPI: "0011-12-13T14:15:16Z",
|
||||
isValid: true,
|
||||
startExpected: "04:05:06Z",
|
||||
endExpected: "14:15:16Z",
|
||||
},
|
||||
{
|
||||
description: "api_wrong_format_1",
|
||||
startAPI: "T04:05:06+07:08",
|
||||
endAPI: "0011-12-13T14:15:16+17:18",
|
||||
isValid: false,
|
||||
},
|
||||
{
|
||||
description: "api_wrong_format_2",
|
||||
startAPI: "0001-02-03T04:05:06+07:08",
|
||||
endAPI: "14:15:16+17:18",
|
||||
isValid: false,
|
||||
},
|
||||
{
|
||||
description: "tf_state_filled_in_1",
|
||||
startAPI: "0001-02-03T04:05:06+07:08",
|
||||
startTF: utils.Ptr("04:05:06+07:08"),
|
||||
endAPI: "0011-12-13T14:15:16+17:18",
|
||||
endTF: utils.Ptr("14:15:16+17:18"),
|
||||
isValid: true,
|
||||
startExpected: "04:05:06+07:08",
|
||||
endExpected: "14:15:16+17:18",
|
||||
},
|
||||
{
|
||||
description: "tf_state_filled_in_2",
|
||||
startAPI: "0001-02-03T04:05:06Z",
|
||||
startTF: utils.Ptr("04:05:06+00:00"),
|
||||
endAPI: "0011-12-13T14:15:16Z",
|
||||
endTF: utils.Ptr("14:15:16+00:00"),
|
||||
isValid: true,
|
||||
startExpected: "04:05:06+00:00",
|
||||
endExpected: "14:15:16+00:00",
|
||||
},
|
||||
{
|
||||
description: "tf_state_filled_in_3",
|
||||
startAPI: "0001-02-03T04:05:06+00:00",
|
||||
startTF: utils.Ptr("04:05:06Z"),
|
||||
endAPI: "0011-12-13T14:15:16+00:00",
|
||||
endTF: utils.Ptr("14:15:16Z"),
|
||||
isValid: true,
|
||||
startExpected: "04:05:06Z",
|
||||
endExpected: "14:15:16Z",
|
||||
},
|
||||
{
|
||||
description: "tf_state_doesnt_match_1",
|
||||
startAPI: "0001-02-03T04:05:06+07:08",
|
||||
startTF: utils.Ptr("00:00:00+07:08"),
|
||||
endAPI: "0011-12-13T14:15:16+17:18",
|
||||
endTF: utils.Ptr("14:15:16+17:18"),
|
||||
isValid: false,
|
||||
},
|
||||
{
|
||||
description: "tf_state_doesnt_match_2",
|
||||
startAPI: "0001-02-03T04:05:06+07:08",
|
||||
startTF: utils.Ptr("04:05:06+07:08"),
|
||||
endAPI: "0011-12-13T14:15:16+17:18",
|
||||
endTF: utils.Ptr("00:00:00+17:18"),
|
||||
isValid: false,
|
||||
},
|
||||
{
|
||||
description: "tf_state_doesnt_match_3",
|
||||
startAPI: "0001-02-03T04:05:06+07:08",
|
||||
startTF: utils.Ptr("04:05:06Z"),
|
||||
endAPI: "0011-12-13T14:15:16+17:18",
|
||||
endTF: utils.Ptr("14:15:16+17:18"),
|
||||
isValid: false,
|
||||
},
|
||||
{
|
||||
description: "tf_state_doesnt_match_4",
|
||||
startAPI: "0001-02-03T04:05:06+07:08",
|
||||
startTF: utils.Ptr("04:05:06+07:08"),
|
||||
endAPI: "0011-12-13T14:15:16+17:18",
|
||||
endTF: utils.Ptr("14:15:16Z"),
|
||||
isValid: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.description, func(t *testing.T) {
|
||||
apiResponse := &ske.ClusterResponse{
|
||||
Maintenance: &ske.Maintenance{
|
||||
TimeWindow: &ske.TimeWindow{
|
||||
Start: utils.Ptr(tt.startAPI),
|
||||
End: utils.Ptr(tt.endAPI),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
maintenanceValues := map[string]attr.Value{
|
||||
"enable_kubernetes_version_updates": types.BoolNull(),
|
||||
"enable_machine_image_version_updates": types.BoolNull(),
|
||||
"start": types.StringPointerValue(tt.startTF),
|
||||
"end": types.StringPointerValue(tt.endTF),
|
||||
}
|
||||
maintenanceObject, diags := types.ObjectValue(maintenanceTypes, maintenanceValues)
|
||||
if diags.HasError() {
|
||||
t.Fatalf("failed to create flavor: %v", core.DiagsToError(diags))
|
||||
}
|
||||
tfState := &Cluster{
|
||||
Maintenance: maintenanceObject,
|
||||
}
|
||||
|
||||
start, end, err := getMaintenanceTimes(context.Background(), apiResponse, tfState)
|
||||
|
||||
if err != nil {
|
||||
if tt.isValid {
|
||||
t.Errorf("getMaintenanceTimes failed on valid input: %v", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
if !tt.isValid {
|
||||
t.Fatalf("getMaintenanceTimes didn't fail on invalid input")
|
||||
}
|
||||
if tt.startExpected != start {
|
||||
t.Errorf("extected start '%s', got '%s'", tt.startExpected, start)
|
||||
}
|
||||
if tt.endExpected != end {
|
||||
t.Errorf("extected end '%s', got '%s'", tt.endExpected, end)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckAllowPrivilegedContainers(t *testing.T) {
|
||||
tests := []struct {
|
||||
description string
|
||||
kubernetesVersion *string
|
||||
allowPrivilegeContainers *bool
|
||||
isValid bool
|
||||
}{
|
||||
{
|
||||
description: "null_version_1",
|
||||
kubernetesVersion: nil,
|
||||
allowPrivilegeContainers: nil,
|
||||
isValid: false,
|
||||
},
|
||||
{
|
||||
description: "null_version_2",
|
||||
kubernetesVersion: nil,
|
||||
allowPrivilegeContainers: utils.Ptr(false),
|
||||
isValid: false,
|
||||
},
|
||||
{
|
||||
description: "flag_required_1",
|
||||
kubernetesVersion: utils.Ptr("0.999.999"),
|
||||
allowPrivilegeContainers: nil,
|
||||
isValid: false,
|
||||
},
|
||||
{
|
||||
description: "flag_required_2",
|
||||
kubernetesVersion: utils.Ptr("0.999.999"),
|
||||
allowPrivilegeContainers: utils.Ptr(false),
|
||||
isValid: true,
|
||||
},
|
||||
{
|
||||
description: "flag_required_3",
|
||||
kubernetesVersion: utils.Ptr("1.24.999"),
|
||||
allowPrivilegeContainers: nil,
|
||||
isValid: false,
|
||||
},
|
||||
{
|
||||
description: "flag_required_4",
|
||||
kubernetesVersion: utils.Ptr("1.24.999"),
|
||||
allowPrivilegeContainers: utils.Ptr(false),
|
||||
isValid: true,
|
||||
},
|
||||
{
|
||||
description: "flag_deprecated_1",
|
||||
kubernetesVersion: utils.Ptr("1.25"),
|
||||
allowPrivilegeContainers: nil,
|
||||
isValid: true,
|
||||
},
|
||||
{
|
||||
description: "flag_deprecated_2",
|
||||
kubernetesVersion: utils.Ptr("1.25"),
|
||||
allowPrivilegeContainers: utils.Ptr(false),
|
||||
isValid: false,
|
||||
},
|
||||
{
|
||||
description: "flag_deprecated_3",
|
||||
kubernetesVersion: utils.Ptr("2.0.0"),
|
||||
allowPrivilegeContainers: nil,
|
||||
isValid: true,
|
||||
},
|
||||
{
|
||||
description: "flag_deprecated_4",
|
||||
kubernetesVersion: utils.Ptr("2.0.0"),
|
||||
allowPrivilegeContainers: utils.Ptr(false),
|
||||
isValid: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.description, func(t *testing.T) {
|
||||
diags := checkAllowPrivilegedContainers(
|
||||
types.BoolPointerValue(tt.allowPrivilegeContainers),
|
||||
types.StringPointerValue(tt.kubernetesVersion),
|
||||
)
|
||||
|
||||
if tt.isValid && diags.HasError() {
|
||||
t.Errorf("checkAllowPrivilegedContainers failed on valid input: %v", core.DiagsToError(diags))
|
||||
}
|
||||
if !tt.isValid && !diags.HasError() {
|
||||
t.Errorf("checkAllowPrivilegedContainers didn't fail on valid input")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
115
stackit/services/ske/project/datasource.go
Normal file
115
stackit/services/ske/project/datasource.go
Normal file
|
|
@ -0,0 +1,115 @@
|
|||
package ske
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource"
|
||||
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
|
||||
"github.com/hashicorp/terraform-plugin-framework/schema/validator"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
"github.com/stackitcloud/stackit-sdk-go/core/config"
|
||||
"github.com/stackitcloud/stackit-sdk-go/services/ske"
|
||||
"github.com/stackitcloud/terraform-provider-stackit/stackit/core"
|
||||
"github.com/stackitcloud/terraform-provider-stackit/stackit/validate"
|
||||
)
|
||||
|
||||
// Ensure the implementation satisfies the expected interfaces.
|
||||
var (
|
||||
_ datasource.DataSource = &projectDataSource{}
|
||||
)
|
||||
|
||||
// NewProjectDataSource is a helper function to simplify the provider implementation.
|
||||
func NewProjectDataSource() datasource.DataSource {
|
||||
return &projectDataSource{}
|
||||
}
|
||||
|
||||
// projectDataSource is the data source implementation.
type projectDataSource struct {
	// client is the SKE API client; it is populated in Configure.
	client *ske.APIClient
}
|
||||
|
||||
// Metadata returns the resource type name.
|
||||
func (r *projectDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
|
||||
resp.TypeName = req.ProviderTypeName + "_ske_project"
|
||||
}
|
||||
|
||||
// Configure adds the provider configured client to the resource.
|
||||
func (r *projectDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
|
||||
// Prevent panic if the provider has not been configured.
|
||||
if req.ProviderData == nil {
|
||||
return
|
||||
}
|
||||
|
||||
providerData, ok := req.ProviderData.(core.ProviderData)
|
||||
if !ok {
|
||||
resp.Diagnostics.AddError("Unexpected Data Source Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. Please report this issue to the provider developers.", req.ProviderData))
|
||||
return
|
||||
}
|
||||
|
||||
var apiClient *ske.APIClient
|
||||
var err error
|
||||
if providerData.SKECustomEndpoint != "" {
|
||||
apiClient, err = ske.NewAPIClient(
|
||||
config.WithCustomAuth(providerData.RoundTripper),
|
||||
config.WithEndpoint(providerData.SKECustomEndpoint),
|
||||
)
|
||||
} else {
|
||||
apiClient, err = ske.NewAPIClient(
|
||||
config.WithCustomAuth(providerData.RoundTripper),
|
||||
config.WithRegion(providerData.Region),
|
||||
)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
resp.Diagnostics.AddError("Could not Configure API Client", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "SKE client configured")
|
||||
r.client = apiClient
|
||||
}
|
||||
|
||||
// Schema defines the schema for the resource.
|
||||
func (r *projectDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
|
||||
resp.Schema = schema.Schema{
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"id": schema.StringAttribute{
|
||||
Description: "Terraform's internal resource ID.",
|
||||
Computed: true,
|
||||
},
|
||||
"project_id": schema.StringAttribute{
|
||||
Description: "STACKIT Project ID in which the kubernetes project is enabled.",
|
||||
Required: true,
|
||||
Validators: []validator.String{
|
||||
validate.UUID(),
|
||||
validate.NoSeparator(),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Read refreshes the Terraform state with the latest data.
|
||||
func (r *projectDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform
|
||||
var state Model
|
||||
diags := req.Config.Get(ctx, &state)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
|
||||
projectId := state.ProjectId.ValueString()
|
||||
ctx = tflog.SetField(ctx, "project_id", projectId)
|
||||
_, err := r.client.GetProject(ctx, projectId).Execute()
|
||||
if err != nil {
|
||||
core.LogAndAddError(ctx, &resp.Diagnostics, "Unable to read project", err.Error())
|
||||
return
|
||||
}
|
||||
state.Id = types.StringValue(projectId)
|
||||
state.ProjectId = types.StringValue(projectId)
|
||||
diags = resp.State.Set(ctx, state)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
tflog.Info(ctx, "SKE project read")
|
||||
}
|
||||
210
stackit/services/ske/project/resource.go
Normal file
210
stackit/services/ske/project/resource.go
Normal file
|
|
@ -0,0 +1,210 @@
|
|||
package ske
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-framework/path"
|
||||
"github.com/hashicorp/terraform-plugin-framework/resource"
|
||||
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
|
||||
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
|
||||
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
|
||||
"github.com/hashicorp/terraform-plugin-framework/schema/validator"
|
||||
"github.com/hashicorp/terraform-plugin-framework/types"
|
||||
"github.com/hashicorp/terraform-plugin-log/tflog"
|
||||
"github.com/stackitcloud/stackit-sdk-go/core/config"
|
||||
"github.com/stackitcloud/stackit-sdk-go/services/ske"
|
||||
"github.com/stackitcloud/terraform-provider-stackit/stackit/core"
|
||||
"github.com/stackitcloud/terraform-provider-stackit/stackit/validate"
|
||||
)
|
||||
|
||||
// Ensure the implementation satisfies the expected interfaces.
|
||||
var (
|
||||
_ resource.Resource = &projectResource{}
|
||||
_ resource.ResourceWithConfigure = &projectResource{}
|
||||
_ resource.ResourceWithImportState = &projectResource{}
|
||||
)
|
||||
|
||||
// Model maps the stackit_ske_project resource and data source schema data.
type Model struct {
	// Id is Terraform's internal resource ID; Read/Create set it to the project ID.
	Id types.String `tfsdk:"id"`
	// ProjectId is the STACKIT project in which the kubernetes project is enabled.
	ProjectId types.String `tfsdk:"project_id"`
}
|
||||
|
||||
// NewProjectResource is a helper function to simplify the provider implementation.
|
||||
func NewProjectResource() resource.Resource {
|
||||
return &projectResource{}
|
||||
}
|
||||
|
||||
// projectResource is the resource implementation.
type projectResource struct {
	// client is the SKE API client; it is set in Configure.
	client *ske.APIClient
}
|
||||
|
||||
// Metadata returns the resource type name.
|
||||
func (r *projectResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
|
||||
resp.TypeName = req.ProviderTypeName + "_ske_project"
|
||||
}
|
||||
|
||||
// Configure adds the provider configured client to the resource.
|
||||
func (r *projectResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
|
||||
// Prevent panic if the provider has not been configured.
|
||||
if req.ProviderData == nil {
|
||||
return
|
||||
}
|
||||
|
||||
providerData, ok := req.ProviderData.(core.ProviderData)
|
||||
if !ok {
|
||||
resp.Diagnostics.AddError("Unexpected Resource Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. Please report this issue to the provider developers.", req.ProviderData))
|
||||
return
|
||||
}
|
||||
|
||||
var apiClient *ske.APIClient
|
||||
var err error
|
||||
if providerData.SKECustomEndpoint != "" {
|
||||
apiClient, err = ske.NewAPIClient(
|
||||
config.WithCustomAuth(providerData.RoundTripper),
|
||||
config.WithEndpoint(providerData.SKECustomEndpoint),
|
||||
)
|
||||
} else {
|
||||
apiClient, err = ske.NewAPIClient(
|
||||
config.WithCustomAuth(providerData.RoundTripper),
|
||||
config.WithRegion(providerData.Region),
|
||||
)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
resp.Diagnostics.AddError("Could not Configure API Client", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
tflog.Info(ctx, "SKE project client configured")
|
||||
r.client = apiClient
|
||||
}
|
||||
|
||||
// Schema returns the Terraform schema structure
|
||||
func (r *projectResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
|
||||
resp.Schema = schema.Schema{
|
||||
Attributes: map[string]schema.Attribute{
|
||||
"id": schema.StringAttribute{
|
||||
Description: "Terraform's internal resource ID.",
|
||||
Computed: true,
|
||||
PlanModifiers: []planmodifier.String{
|
||||
stringplanmodifier.UseStateForUnknown(),
|
||||
},
|
||||
},
|
||||
"project_id": schema.StringAttribute{
|
||||
Description: "STACKIT Project ID in which the kubernetes project is enabled.",
|
||||
Required: true,
|
||||
Validators: []validator.String{
|
||||
validate.UUID(),
|
||||
validate.NoSeparator(),
|
||||
},
|
||||
PlanModifiers: []planmodifier.String{
|
||||
stringplanmodifier.RequiresReplace(),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Create creates the resource and sets the initial Terraform state.
|
||||
func (r *projectResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform
|
||||
var model Model
|
||||
resp.Diagnostics.Append(req.Plan.Get(ctx, &model)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
projectId := model.ProjectId.ValueString()
|
||||
_, err := r.client.CreateProject(ctx, projectId).Execute()
|
||||
if err != nil {
|
||||
resp.Diagnostics.AddError("failed during SKE project creation", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
model.Id = types.StringValue(projectId)
|
||||
wr, err := ske.CreateProjectWaitHandler(ctx, r.client, projectId).SetTimeout(5 * time.Minute).WaitWithContext(ctx)
|
||||
if err != nil {
|
||||
resp.Diagnostics.AddError("Error creating cluster", fmt.Sprintf("Project creation waiting: %v", err))
|
||||
return
|
||||
}
|
||||
got, ok := wr.(*ske.ProjectResponse)
|
||||
if !ok {
|
||||
resp.Diagnostics.AddError("Error creating cluster", fmt.Sprintf("Wait result conversion, got %+v", got))
|
||||
return
|
||||
}
|
||||
diags := resp.State.Set(ctx, model)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "SKE project created or updated")
|
||||
}
|
||||
|
||||
// Read refreshes the Terraform state with the latest data.
|
||||
func (r *projectResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform
|
||||
var model Model
|
||||
diags := req.State.Get(ctx, &model)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
projectId := model.ProjectId.ValueString()
|
||||
// read
|
||||
_, err := r.client.GetProject(ctx, projectId).Execute()
|
||||
if err != nil {
|
||||
resp.Diagnostics.AddError("failed during SKE project read", err.Error())
|
||||
return
|
||||
}
|
||||
model.Id = types.StringValue(projectId)
|
||||
model.ProjectId = types.StringValue(projectId)
|
||||
diags = resp.State.Set(ctx, model)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
tflog.Info(ctx, "SKE project read")
|
||||
}
|
||||
|
||||
// Update updates the resource and sets the updated Terraform state on success.
|
||||
func (r *projectResource) Update(_ context.Context, _ resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform
|
||||
// Update shouldn't be called
|
||||
resp.Diagnostics.AddError("Error updating ", "project can't be updated")
|
||||
}
|
||||
|
||||
// Delete deletes the resource and removes the Terraform state on success.
|
||||
func (r *projectResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform
|
||||
var model Model
|
||||
resp.Diagnostics.Append(req.State.Get(ctx, &model)...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
projectId := model.ProjectId.ValueString()
|
||||
ctx = tflog.SetField(ctx, "project_id", projectId)
|
||||
|
||||
c := r.client
|
||||
_, err := c.DeleteProject(ctx, projectId).Execute()
|
||||
if err != nil {
|
||||
resp.Diagnostics.AddError("failed deleting project", err.Error())
|
||||
return
|
||||
}
|
||||
_, err = ske.DeleteProjectWaitHandler(ctx, r.client, projectId).SetTimeout(10 * time.Minute).WaitWithContext(ctx)
|
||||
if err != nil {
|
||||
core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting project", fmt.Sprintf("Project deletion waiting: %v", err))
|
||||
return
|
||||
}
|
||||
tflog.Info(ctx, "SKE project deleted")
|
||||
}
|
||||
|
||||
// ImportState imports a resource into the Terraform state on success.
|
||||
// The expected format of the resource import identifier is: project_id
|
||||
func (r *projectResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { // nolint:gocritic // function signature required by Terraform
|
||||
idParts := strings.Split(req.ID, core.Separator)
|
||||
if len(idParts) != 1 || idParts[0] == "" {
|
||||
resp.Diagnostics.AddError(
|
||||
"Unexpected Import Identifier",
|
||||
fmt.Sprintf("Expected import identifier with format: [project_id] Got: %q", req.ID),
|
||||
)
|
||||
return
|
||||
}
|
||||
resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...)
|
||||
tflog.Info(ctx, "SKE project state imported")
|
||||
}
|
||||
541
stackit/services/ske/ske_acc_test.go
Normal file
541
stackit/services/ske/ske_acc_test.go
Normal file
|
|
@ -0,0 +1,541 @@
|
|||
package ske_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-testing/helper/acctest"
|
||||
"github.com/hashicorp/terraform-plugin-testing/helper/resource"
|
||||
"github.com/hashicorp/terraform-plugin-testing/terraform"
|
||||
"github.com/stackitcloud/stackit-sdk-go/core/config"
|
||||
"github.com/stackitcloud/stackit-sdk-go/core/utils"
|
||||
"github.com/stackitcloud/stackit-sdk-go/services/ske"
|
||||
"github.com/stackitcloud/terraform-provider-stackit/stackit/testutil"
|
||||
)
|
||||
|
||||
// projectResource holds the attribute values used to render the
// stackit_ske_project configuration in the acceptance tests.
var projectResource = map[string]string{
	"project_id": testutil.ProjectId,
}
|
||||
|
||||
// clusterResource holds the attribute values used to render the two
// stackit_ske_cluster configurations (full and minimal) in the acceptance
// tests. Keys suffixed "_min" belong to the minimal cluster; "_new"/"_used"
// variants cover the Kubernetes version upgrade step.
var clusterResource = map[string]string{
	"project_id":                  testutil.ProjectId,
	"name":                        fmt.Sprintf("cl-%s", acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum)),
	"name_min":                    fmt.Sprintf("cl-min-%s", acctest.RandStringFromCharSet(3, acctest.CharSetAlphaNum)),
	"kubernetes_version":          "1.24",
	"kubernetes_version_used":     "1.24.16",
	"kubernetes_version_new":      "1.25",
	"kubernetes_version_used_new": "1.25.12",
	"allowPrivilegedContainers":   "true",
	"nodepool_name":               "np-acc-test",
	"nodepool_name_min":           "np-acc-min-test",
	"nodepool_machine_type":       "b1.2",
	"nodepool_os_version":         "3510.2.5",
	"nodepool_os_version_min":     "3510.2.5",
	"nodepool_os_name":            "flatcar",
	"nodepool_minimum":            "2",
	"nodepool_maximum":            "3",
	"nodepool_max_surge":          "1",
	"nodepool_max_unavailable":    "1",
	"nodepool_volume_size":        "20",
	"nodepool_volume_type":        "storage_premium_perf0",
	"nodepool_zone":               "eu01-3",
	"nodepool_cri":                "containerd",
	"nodepool_label_key":          "key",
	"nodepool_label_value":        "value",
	"nodepool_taints_effect":      "PreferNoSchedule",
	"nodepool_taints_key":         "tkey",
	"nodepool_taints_value":       "tvalue",
	"extensions_acl_enabled":      "true",
	"extensions_acl_cidrs":        "192.168.0.0/24",
	"extensions_argus_enabled":    "false",
	"extensions_argus_instance_id": "aaaaaaaa-1111-2222-3333-444444444444", // A not-existing Argus ID let the creation time-out.
	"hibernations_start":          "0 16 * * *",
	"hibernations_end":            "0 18 * * *",
	"hibernations_timezone":       "Europe/Berlin",
	"maintenance_enable_kubernetes_version_updates":    "true",
	"maintenance_enable_machine_image_version_updates": "true",
	"maintenance_start": "01:23:45Z",
	"maintenance_end":   "05:00:00+02:00",
}
|
||||
|
||||
// getConfig renders the Terraform configuration used by the acceptance tests:
// one stackit_ske_project plus two stackit_ske_cluster resources (a full
// "cluster" and a minimal "cluster_min").
//
// version is the kubernetes_version for the full cluster. apc, when non-nil,
// adds an explicit allow_privileged_containers line; nil omits the attribute.
// maintenanceEnd, when non-nil, overrides clusterResource["maintenance_end"].
func getConfig(version string, apc *bool, maintenanceEnd *string) string {
	// Render the optional allow_privileged_containers attribute.
	apcConfig := ""
	if apc != nil {
		if *apc {
			apcConfig = "allow_privileged_containers = true"
		} else {
			apcConfig = "allow_privileged_containers = false"
		}
	}
	// Default maintenance end unless the caller overrides it.
	maintenanceEndTF := clusterResource["maintenance_end"]
	if maintenanceEnd != nil {
		maintenanceEndTF = *maintenanceEnd
	}
	return fmt.Sprintf(`
%s

resource "stackit_ske_project" "project" {
	project_id = "%s"
}

resource "stackit_ske_cluster" "cluster" {
	project_id = stackit_ske_project.project.project_id
	name = "%s"
	kubernetes_version = "%s"
	%s
	node_pools = [{
		name = "%s"
		machine_type = "%s"
		minimum = "%s"
		maximum = "%s"
		max_surge = "%s"
		max_unavailable = "%s"
		os_name = "%s"
		os_version = "%s"
		volume_size = "%s"
		volume_type = "%s"
		cri = "%s"
		availability_zones = ["%s"]
		labels = {
			%s = "%s"
		}
		taints = [{
			effect = "%s"
			key = "%s"
			value = "%s"
		}]
	}]
	extensions = {
		acl = {
			enabled = %s
			allowed_cidrs = ["%s"]
		}
		argus = {
			enabled = %s
			argus_instance_id = "%s"
		}
	}
	hibernations = [{
		start = "%s"
		end = "%s"
		timezone = "%s"
	}]
	maintenance = {
		enable_kubernetes_version_updates = %s
		enable_machine_image_version_updates = %s
		start = "%s"
		end = "%s"
	}
}

resource "stackit_ske_cluster" "cluster_min" {
	project_id = stackit_ske_project.project.project_id
	name = "%s"
	kubernetes_version = "%s"
	node_pools = [{
		name = "%s"
		machine_type = "%s"
		os_version = "%s"
		minimum = "%s"
		maximum = "%s"
		availability_zones = ["%s"]
	}]
}
`,
		testutil.SKEProviderConfig(),
		projectResource["project_id"],
		clusterResource["name"],
		version,
		apcConfig,
		clusterResource["nodepool_name"],
		clusterResource["nodepool_machine_type"],
		clusterResource["nodepool_minimum"],
		clusterResource["nodepool_maximum"],
		clusterResource["nodepool_max_surge"],
		clusterResource["nodepool_max_unavailable"],
		clusterResource["nodepool_os_name"],
		clusterResource["nodepool_os_version"],
		clusterResource["nodepool_volume_size"],
		clusterResource["nodepool_volume_type"],
		clusterResource["nodepool_cri"],
		clusterResource["nodepool_zone"],
		clusterResource["nodepool_label_key"],
		clusterResource["nodepool_label_value"],
		clusterResource["nodepool_taints_effect"],
		clusterResource["nodepool_taints_key"],
		clusterResource["nodepool_taints_value"],
		clusterResource["extensions_acl_enabled"],
		clusterResource["extensions_acl_cidrs"],
		clusterResource["extensions_argus_enabled"],
		clusterResource["extensions_argus_instance_id"],
		clusterResource["hibernations_start"],
		clusterResource["hibernations_end"],
		clusterResource["hibernations_timezone"],
		clusterResource["maintenance_enable_kubernetes_version_updates"],
		clusterResource["maintenance_enable_machine_image_version_updates"],
		clusterResource["maintenance_start"],
		maintenanceEndTF,

		// Minimal cluster arguments.
		clusterResource["name_min"],
		clusterResource["kubernetes_version_new"],
		clusterResource["nodepool_name_min"],
		clusterResource["nodepool_machine_type"],
		clusterResource["nodepool_os_version_min"],
		clusterResource["nodepool_minimum"],
		clusterResource["nodepool_maximum"],
		clusterResource["nodepool_zone"],
	)
}
|
||||
|
||||
func TestAccSKE(t *testing.T) {
|
||||
resource.ParallelTest(t, resource.TestCase{
|
||||
ProtoV6ProviderFactories: testutil.TestAccProtoV6ProviderFactories,
|
||||
CheckDestroy: testAccCheckSKEDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
|
||||
// 1) Creation
|
||||
{
|
||||
Config: getConfig(clusterResource["kubernetes_version"], utils.Ptr(true), nil),
|
||||
Check: resource.ComposeAggregateTestCheckFunc(
|
||||
// project data
|
||||
resource.TestCheckResourceAttr("stackit_ske_project.project", "project_id", projectResource["project_id"]),
|
||||
// cluster data
|
||||
resource.TestCheckResourceAttrPair(
|
||||
"stackit_ske_project.project", "project_id",
|
||||
"stackit_ske_cluster.cluster", "project_id",
|
||||
),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "name", clusterResource["name"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "kubernetes_version", clusterResource["kubernetes_version"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "kubernetes_version_used", clusterResource["kubernetes_version_used"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "allow_privileged_containers", clusterResource["allowPrivilegedContainers"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.name", clusterResource["nodepool_name"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.availability_zones.#", "1"),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.availability_zones.0", clusterResource["nodepool_zone"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.os_name", clusterResource["nodepool_os_name"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.os_version", clusterResource["nodepool_os_version"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.machine_type", clusterResource["nodepool_machine_type"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.minimum", clusterResource["nodepool_minimum"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.maximum", clusterResource["nodepool_maximum"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.max_surge", clusterResource["nodepool_max_surge"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.max_unavailable", clusterResource["nodepool_max_unavailable"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.volume_type", clusterResource["nodepool_volume_type"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.volume_size", clusterResource["nodepool_volume_size"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", fmt.Sprintf("node_pools.0.labels.%s", clusterResource["nodepool_label_key"]), clusterResource["nodepool_label_value"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.taints.#", "1"),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.taints.0.effect", clusterResource["nodepool_taints_effect"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.taints.0.key", clusterResource["nodepool_taints_key"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.taints.0.value", clusterResource["nodepool_taints_value"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.cri", clusterResource["nodepool_cri"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "extensions.acl.enabled", clusterResource["extensions_acl_enabled"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "extensions.acl.allowed_cidrs.#", "1"),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "extensions.acl.allowed_cidrs.0", clusterResource["extensions_acl_cidrs"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "extensions.argus.enabled", clusterResource["extensions_argus_enabled"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "extensions.argus.argus_instance_id", clusterResource["extensions_argus_instance_id"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "hibernations.#", "1"),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "hibernations.0.start", clusterResource["hibernations_start"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "hibernations.0.end", clusterResource["hibernations_end"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "hibernations.0.timezone", clusterResource["hibernations_timezone"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.enable_kubernetes_version_updates", clusterResource["maintenance_enable_kubernetes_version_updates"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.enable_machine_image_version_updates", clusterResource["maintenance_enable_machine_image_version_updates"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.start", clusterResource["maintenance_start"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.end", clusterResource["maintenance_end"]),
|
||||
|
||||
resource.TestCheckResourceAttrSet("stackit_ske_cluster.cluster", "kube_config"),
|
||||
|
||||
// Minimal cluster
|
||||
resource.TestCheckResourceAttrPair(
|
||||
"stackit_ske_project.project", "project_id",
|
||||
"stackit_ske_cluster.cluster_min", "project_id",
|
||||
),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster_min", "name", clusterResource["name_min"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster_min", "kubernetes_version", clusterResource["kubernetes_version_new"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster_min", "kubernetes_version_used", clusterResource["kubernetes_version_used_new"]),
|
||||
resource.TestCheckNoResourceAttr("stackit_ske_cluster.cluster_min", "allow_privileged_containers"),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster_min", "node_pools.0.name", clusterResource["nodepool_name_min"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster_min", "node_pools.0.availability_zones.#", "1"),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster_min", "node_pools.0.availability_zones.0", clusterResource["nodepool_zone"]),
|
||||
resource.TestCheckResourceAttrSet("stackit_ske_cluster.cluster_min", "node_pools.0.os_name"),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster_min", "node_pools.0.os_version", clusterResource["nodepool_os_version_min"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster_min", "node_pools.0.machine_type", clusterResource["nodepool_machine_type"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster_min", "node_pools.0.minimum", clusterResource["nodepool_minimum"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster_min", "node_pools.0.maximum", clusterResource["nodepool_maximum"]),
|
||||
resource.TestCheckResourceAttrSet("stackit_ske_cluster.cluster_min", "node_pools.0.max_surge"),
|
||||
resource.TestCheckResourceAttrSet("stackit_ske_cluster.cluster_min", "node_pools.0.max_unavailable"),
|
||||
resource.TestCheckResourceAttrSet("stackit_ske_cluster.cluster_min", "node_pools.0.volume_type"),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.volume_size", clusterResource["nodepool_volume_size"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster_min", "node_pools.0.labels.%", "0"),
|
||||
resource.TestCheckNoResourceAttr("stackit_ske_cluster.cluster_min", "node_pools.0.taints"),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster_min", "node_pools.0.cri", clusterResource["nodepool_cri"]),
|
||||
resource.TestCheckNoResourceAttr("stackit_ske_cluster.cluster_min", "extensions"),
|
||||
resource.TestCheckNoResourceAttr("stackit_ske_cluster.cluster_min", "hibernations"),
|
||||
resource.TestCheckResourceAttrSet("stackit_ske_cluster.cluster_min", "maintenance.enable_kubernetes_version_updates"),
|
||||
resource.TestCheckResourceAttrSet("stackit_ske_cluster.cluster_min", "maintenance.enable_machine_image_version_updates"),
|
||||
resource.TestCheckResourceAttrSet("stackit_ske_cluster.cluster_min", "maintenance.start"),
|
||||
resource.TestCheckResourceAttrSet("stackit_ske_cluster.cluster_min", "maintenance.end"),
|
||||
resource.TestCheckResourceAttrSet("stackit_ske_cluster.cluster_min", "kube_config"),
|
||||
),
|
||||
},
|
||||
// 2) Data source
|
||||
{
|
||||
Config: fmt.Sprintf(`
|
||||
%s
|
||||
|
||||
data "stackit_ske_project" "project" {
|
||||
project_id = "%s"
|
||||
depends_on = [stackit_ske_project.project]
|
||||
}
|
||||
|
||||
data "stackit_ske_cluster" "cluster" {
|
||||
project_id = "%s"
|
||||
name = "%s"
|
||||
depends_on = [stackit_ske_cluster.cluster]
|
||||
}
|
||||
|
||||
data "stackit_ske_cluster" "cluster_min" {
|
||||
project_id = "%s"
|
||||
name = "%s"
|
||||
depends_on = [stackit_ske_cluster.cluster_min]
|
||||
}
|
||||
|
||||
`,
|
||||
getConfig(clusterResource["kubernetes_version"], utils.Ptr(true), nil),
|
||||
projectResource["project_id"],
|
||||
clusterResource["project_id"],
|
||||
clusterResource["name"],
|
||||
clusterResource["project_id"],
|
||||
clusterResource["name_min"],
|
||||
),
|
||||
Check: resource.ComposeAggregateTestCheckFunc(
|
||||
// project data
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_project.project", "id", projectResource["project_id"]),
|
||||
|
||||
// cluster data
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "id", fmt.Sprintf("%s,%s",
|
||||
clusterResource["project_id"],
|
||||
clusterResource["name"],
|
||||
)),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "project_id", clusterResource["project_id"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "name", clusterResource["name"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "kubernetes_version", clusterResource["kubernetes_version"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "kubernetes_version_used", clusterResource["kubernetes_version_used"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "allow_privileged_containers", clusterResource["allowPrivilegedContainers"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.name", clusterResource["nodepool_name"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.availability_zones.#", "1"),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.availability_zones.0", clusterResource["nodepool_zone"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.os_name", clusterResource["nodepool_os_name"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.os_version", clusterResource["nodepool_os_version"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.machine_type", clusterResource["nodepool_machine_type"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.minimum", clusterResource["nodepool_minimum"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.maximum", clusterResource["nodepool_maximum"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.max_surge", clusterResource["nodepool_max_surge"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.max_unavailable", clusterResource["nodepool_max_unavailable"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.volume_type", clusterResource["nodepool_volume_type"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.volume_size", clusterResource["nodepool_volume_size"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", fmt.Sprintf("node_pools.0.labels.%s", clusterResource["nodepool_label_key"]), clusterResource["nodepool_label_value"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.taints.#", "1"),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.taints.0.effect", clusterResource["nodepool_taints_effect"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.taints.0.key", clusterResource["nodepool_taints_key"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.taints.0.value", clusterResource["nodepool_taints_value"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "node_pools.0.cri", clusterResource["nodepool_cri"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "extensions.acl.enabled", clusterResource["extensions_acl_enabled"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "extensions.acl.allowed_cidrs.#", "1"),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "extensions.acl.allowed_cidrs.0", clusterResource["extensions_acl_cidrs"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "hibernations.#", "1"),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "hibernations.0.start", clusterResource["hibernations_start"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "hibernations.0.end", clusterResource["hibernations_end"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "hibernations.0.timezone", clusterResource["hibernations_timezone"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "hibernations.0.end", clusterResource["hibernations_end"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "maintenance.enable_kubernetes_version_updates", clusterResource["maintenance_enable_kubernetes_version_updates"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "maintenance.enable_machine_image_version_updates", clusterResource["maintenance_enable_machine_image_version_updates"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "maintenance.start", clusterResource["maintenance_start"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "maintenance.end", clusterResource["maintenance_end"]),
|
||||
|
||||
resource.TestCheckResourceAttrSet("data.stackit_ske_cluster.cluster", "kube_config"),
|
||||
|
||||
// Minimal cluster
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster_min", "name", clusterResource["name_min"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster_min", "kubernetes_version", clusterResource["kubernetes_version_new"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster_min", "kubernetes_version_used", clusterResource["kubernetes_version_used_new"]),
|
||||
resource.TestCheckNoResourceAttr("data.stackit_ske_cluster.cluster_min", "allow_privileged_containers"),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster_min", "node_pools.0.name", clusterResource["nodepool_name_min"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster_min", "node_pools.0.availability_zones.#", "1"),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster_min", "node_pools.0.availability_zones.0", clusterResource["nodepool_zone"]),
|
||||
resource.TestCheckResourceAttrSet("data.stackit_ske_cluster.cluster_min", "node_pools.0.os_name"),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster_min", "node_pools.0.os_version", clusterResource["nodepool_os_version_min"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster_min", "node_pools.0.machine_type", clusterResource["nodepool_machine_type"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster_min", "node_pools.0.minimum", clusterResource["nodepool_minimum"]),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster_min", "node_pools.0.maximum", clusterResource["nodepool_maximum"]),
|
||||
resource.TestCheckResourceAttrSet("data.stackit_ske_cluster.cluster_min", "node_pools.0.max_surge"),
|
||||
resource.TestCheckResourceAttrSet("data.stackit_ske_cluster.cluster_min", "node_pools.0.max_unavailable"),
|
||||
resource.TestCheckResourceAttrSet("data.stackit_ske_cluster.cluster_min", "node_pools.0.volume_type"),
|
||||
// Bug fix: this check sits in the minimal-cluster section but targeted
// "data.stackit_ske_cluster.cluster"; it must target "cluster_min",
// otherwise the minimal cluster's volume_size is never verified.
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster_min", "node_pools.0.volume_size", clusterResource["nodepool_volume_size"]),
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster_min", "node_pools.0.labels.%", "0"),
|
||||
resource.TestCheckNoResourceAttr("data.stackit_ske_cluster.cluster_min", "node_pools.0.taints"),
|
||||
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster_min", "node_pools.0.cri", clusterResource["nodepool_cri"]),
|
||||
resource.TestCheckNoResourceAttr("data.stackit_ske_cluster.cluster_min", "extensions"),
|
||||
resource.TestCheckNoResourceAttr("data.stackit_ske_cluster.cluster_min", "hibernations"),
|
||||
resource.TestCheckResourceAttrSet("data.stackit_ske_cluster.cluster_min", "maintenance.enable_kubernetes_version_updates"),
|
||||
resource.TestCheckResourceAttrSet("data.stackit_ske_cluster.cluster_min", "maintenance.enable_machine_image_version_updates"),
|
||||
resource.TestCheckResourceAttrSet("data.stackit_ske_cluster.cluster_min", "maintenance.start"),
|
||||
resource.TestCheckResourceAttrSet("data.stackit_ske_cluster.cluster_min", "maintenance.end"),
|
||||
resource.TestCheckResourceAttrSet("data.stackit_ske_cluster.cluster_min", "kube_config"),
|
||||
),
|
||||
},
|
||||
// 3) Import project
|
||||
{
	// Import the SKE project by its project ID and verify the resulting state.
	ResourceName: "stackit_ske_project.project",
	ImportStateIdFunc: func(s *terraform.State) (string, error) {
		// The resource must exist in state before an import ID can be derived.
		if _, ok := s.RootModule().Resources["stackit_ske_project.project"]; !ok {
			return "", fmt.Errorf("couldn't find resource stackit_ske_project.project")
		}
		return testutil.ProjectId, nil
	},
	ImportState:       true,
	ImportStateVerify: true,
},
|
||||
// 4) Import cluster
|
||||
{
	// Import the full cluster; the import ID has the form "<project_id>,<name>".
	ResourceName: "stackit_ske_cluster.cluster",
	ImportStateIdFunc: func(s *terraform.State) (string, error) {
		r, ok := s.RootModule().Resources["stackit_ske_cluster.cluster"]
		if !ok {
			return "", fmt.Errorf("couldn't find resource stackit_ske_cluster.cluster")
		}
		// Both attributes must be present in state to build the import ID.
		if _, ok := r.Primary.Attributes["project_id"]; !ok {
			return "", fmt.Errorf("couldn't find attribute project_id")
		}
		name, ok := r.Primary.Attributes["name"]
		if !ok {
			return "", fmt.Errorf("couldn't find attribute name")
		}
		return fmt.Sprintf("%s,%s", testutil.ProjectId, name), nil
	},
	ImportState:       true,
	ImportStateVerify: true,
	// The fields are not provided in the SKE API when disabled, although set actively.
	ImportStateVerifyIgnore: []string{"kube_config", "extensions.argus.%", "extensions.argus.argus_instance_id", "extensions.argus.enabled", "extensions.acl.enabled", "extensions.acl.allowed_cidrs", "extensions.acl.allowed_cidrs.#", "extensions.acl.%"},
},
|
||||
// 5) Import minimal cluster
|
||||
{
	// Import the minimal cluster; the import ID has the form "<project_id>,<name>".
	ResourceName: "stackit_ske_cluster.cluster_min",
	ImportStateIdFunc: func(s *terraform.State) (string, error) {
		r, ok := s.RootModule().Resources["stackit_ske_cluster.cluster_min"]
		if !ok {
			return "", fmt.Errorf("couldn't find resource stackit_ske_cluster.cluster_min")
		}
		// Both attributes must be present in state to build the import ID.
		if _, ok := r.Primary.Attributes["project_id"]; !ok {
			return "", fmt.Errorf("couldn't find attribute project_id")
		}
		name, ok := r.Primary.Attributes["name"]
		if !ok {
			return "", fmt.Errorf("couldn't find attribute name")
		}
		return fmt.Sprintf("%s,%s", testutil.ProjectId, name), nil
	},
	ImportState:       true,
	ImportStateVerify: true,
	// kube_config is never returned by the API, so it cannot be verified on import.
	ImportStateVerifyIgnore: []string{"kube_config"},
},
|
||||
// 6) Update kubernetes version and maximum
|
||||
{
|
||||
Config: getConfig("1.25.12", nil, utils.Ptr("03:03:03+00:00")),
|
||||
Check: resource.ComposeAggregateTestCheckFunc(
|
||||
// cluster data
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "project_id", clusterResource["project_id"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "name", clusterResource["name"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "kubernetes_version", "1.25"),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "kubernetes_version_used", "1.25.12"),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.name", clusterResource["nodepool_name"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.availability_zones.#", "1"),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.availability_zones.0", clusterResource["nodepool_zone"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.os_name", clusterResource["nodepool_os_name"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.os_version", clusterResource["nodepool_os_version"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.machine_type", clusterResource["nodepool_machine_type"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.minimum", clusterResource["nodepool_minimum"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.maximum", clusterResource["nodepool_maximum"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.max_surge", clusterResource["nodepool_max_surge"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.max_unavailable", clusterResource["nodepool_max_unavailable"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.volume_type", clusterResource["nodepool_volume_type"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.volume_size", clusterResource["nodepool_volume_size"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", fmt.Sprintf("node_pools.0.labels.%s", clusterResource["nodepool_label_key"]), clusterResource["nodepool_label_value"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.taints.#", "1"),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.taints.0.effect", clusterResource["nodepool_taints_effect"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.taints.0.key", clusterResource["nodepool_taints_key"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.taints.0.value", clusterResource["nodepool_taints_value"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.cri", clusterResource["nodepool_cri"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "extensions.acl.enabled", clusterResource["extensions_acl_enabled"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "extensions.acl.allowed_cidrs.#", "1"),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "extensions.acl.allowed_cidrs.0", clusterResource["extensions_acl_cidrs"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "extensions.argus.enabled", clusterResource["extensions_argus_enabled"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "extensions.argus.argus_instance_id", clusterResource["extensions_argus_instance_id"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "hibernations.#", "1"),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "hibernations.0.start", clusterResource["hibernations_start"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "hibernations.0.end", clusterResource["hibernations_end"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "hibernations.0.timezone", clusterResource["hibernations_timezone"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.enable_kubernetes_version_updates", clusterResource["maintenance_enable_kubernetes_version_updates"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.enable_machine_image_version_updates", clusterResource["maintenance_enable_machine_image_version_updates"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.start", clusterResource["maintenance_start"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.end", "03:03:03+00:00"),
|
||||
|
||||
resource.TestCheckResourceAttrSet("stackit_ske_cluster.cluster", "kube_config"),
|
||||
),
|
||||
},
|
||||
// Deletion is done by the framework implicitly
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckSKEDestroy(s *terraform.State) error {
|
||||
ctx := context.Background()
|
||||
var client *ske.APIClient
|
||||
var err error
|
||||
if testutil.SKECustomEndpoint == "" {
|
||||
client, err = ske.NewAPIClient()
|
||||
} else {
|
||||
client, err = ske.NewAPIClient(
|
||||
config.WithEndpoint(testutil.SKECustomEndpoint),
|
||||
)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating client: %w", err)
|
||||
}
|
||||
|
||||
projectsToDestroy := []string{}
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "stackit_ske_project" {
|
||||
continue
|
||||
}
|
||||
projectsToDestroy = append(projectsToDestroy, rs.Primary.ID)
|
||||
}
|
||||
for _, projectId := range projectsToDestroy {
|
||||
_, err := client.GetProject(ctx, projectId).Execute()
|
||||
if err != nil {
|
||||
oapiErr, ok := err.(*ske.GenericOpenAPIError) //nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped
|
||||
if !ok {
|
||||
return fmt.Errorf("could not convert error to GenericOpenApiError in acc test destruction, %w", err)
|
||||
}
|
||||
if oapiErr.StatusCode() == http.StatusNotFound || oapiErr.StatusCode() == http.StatusForbidden {
|
||||
// Already gone
|
||||
continue
|
||||
}
|
||||
return fmt.Errorf("getting project: %w", err)
|
||||
}
|
||||
|
||||
_, err = client.DeleteProjectExecute(ctx, projectId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("destroying project %s during CheckDestroy: %w", projectId, err)
|
||||
}
|
||||
_, err = ske.DeleteProjectWaitHandler(ctx, client, projectId).SetTimeout(15 * time.Minute).WaitWithContext(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("destroying project %s during CheckDestroy: waiting for deletion %w", projectId, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue