Acceptance tests fixes (#120)
* Update Kubernetes version
* Update nodepool OS
* Revert Kubernetes version to 1.24
* Add maintenance field to cluster-min
* Remove hardcoded kubernetes version
* Remove hardcoded setting
* Revert minor version of Kubernetes upgrade
* Fix post test check destroy
* Fix missing pointer
* Fix labels using wrong delimiters
* Fix typo in provider config
* Lint fix

---------

Co-authored-by: Henrique Santos <henrique.santos@freiheit.com>
This commit is contained in:
parent
b6100ec8d5
commit
71bf63cbc9
4 changed files with 42 additions and 22 deletions
|
|
@@ -229,7 +229,7 @@ func (r *projectResource) Create(ctx context.Context, req resource.CreateRequest
|
|||
// Read refreshes the Terraform state with the latest data.
|
||||
func (r *projectResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform
|
||||
var model Model
|
||||
diags := req.State.Get(ctx, model)
|
||||
diags := req.State.Get(ctx, &model)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
|
|
|
|||
|
|
@@ -23,7 +23,11 @@ var projectResource = map[string]string{
|
|||
"new_label": "a-label",
|
||||
}
|
||||
|
||||
func resourceConfig(name, label string) string {
|
||||
func resourceConfig(name string, label *string) string {
|
||||
labelConfig := ""
|
||||
if label != nil {
|
||||
labelConfig = fmt.Sprintf("new_label = %q", *label)
|
||||
}
|
||||
return fmt.Sprintf(`
|
||||
%s
|
||||
|
||||
|
|
@@ -41,7 +45,7 @@ func resourceConfig(name, label string) string {
|
|||
projectResource["parent_container_id"],
|
||||
name,
|
||||
projectResource["billing_reference"],
|
||||
label,
|
||||
labelConfig,
|
||||
testutil.TestProjectServiceAccountEmail,
|
||||
)
|
||||
}
|
||||
|
|
@@ -53,7 +57,7 @@ func TestAccResourceManagerResource(t *testing.T) {
|
|||
Steps: []resource.TestStep{
|
||||
// Creation
|
||||
{
|
||||
Config: resourceConfig(projectResource["name"], ""),
|
||||
Config: resourceConfig(projectResource["name"], nil),
|
||||
Check: resource.ComposeAggregateTestCheckFunc(
|
||||
// Project data
|
||||
resource.TestCheckResourceAttrSet("stackit_resourcemanager_project.project", "container_id"),
|
||||
|
|
@@ -71,7 +75,7 @@ func TestAccResourceManagerResource(t *testing.T) {
|
|||
data "stackit_resourcemanager_project" "project" {
|
||||
container_id = stackit_resourcemanager_project.project.container_id
|
||||
}`,
|
||||
resourceConfig(projectResource["name"], ""),
|
||||
resourceConfig(projectResource["name"], nil),
|
||||
),
|
||||
Check: resource.ComposeAggregateTestCheckFunc(
|
||||
// Project data
|
||||
|
|
@@ -108,7 +112,7 @@ func TestAccResourceManagerResource(t *testing.T) {
|
|||
},
|
||||
// Update
|
||||
{
|
||||
Config: resourceConfig(fmt.Sprintf("%s-new", projectResource["name"]), "new_label='a-label'"),
|
||||
Config: resourceConfig(fmt.Sprintf("%s-new", projectResource["name"]), utils.Ptr("a-label")),
|
||||
Check: resource.ComposeAggregateTestCheckFunc(
|
||||
// Project data
|
||||
resource.TestCheckResourceAttrSet("stackit_resourcemanager_project.project", "container_id"),
|
||||
|
|
@@ -156,15 +160,20 @@ func testAccCheckResourceManagerDestroy(s *terraform.State) error {
|
|||
|
||||
items := *projectsResp.Items
|
||||
for i := range items {
|
||||
if utils.Contains(projectsToDestroy, *items[i].ContainerId) {
|
||||
err := client.DeleteProjectExecute(ctx, *items[i].ContainerId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("destroying project %s during CheckDestroy: %w", *items[i].ContainerId, err)
|
||||
}
|
||||
_, err = wait.DeleteProjectWaitHandler(ctx, client, *items[i].ContainerId).WaitWithContext(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("destroying project %s during CheckDestroy: waiting for deletion %w", *items[i].ContainerId, err)
|
||||
}
|
||||
if *items[i].LifecycleState == resourcemanager.LIFECYCLESTATE_DELETING {
|
||||
continue
|
||||
}
|
||||
if !utils.Contains(projectsToDestroy, *items[i].ContainerId) {
|
||||
continue
|
||||
}
|
||||
|
||||
err := client.DeleteProjectExecute(ctx, *items[i].ContainerId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("destroying project %s during CheckDestroy: %w", *items[i].ContainerId, err)
|
||||
}
|
||||
_, err = wait.DeleteProjectWaitHandler(ctx, client, *items[i].ContainerId).WaitWithContext(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("destroying project %s during CheckDestroy: waiting for deletion %w", *items[i].ContainerId, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
|
|
|||
|
|
@@ -33,8 +33,8 @@ var clusterResource = map[string]string{
|
|||
"nodepool_name": "np-acc-test",
|
||||
"nodepool_name_min": "np-acc-min-test",
|
||||
"nodepool_machine_type": "b1.2",
|
||||
"nodepool_os_version": "3510.2.5",
|
||||
"nodepool_os_version_min": "3510.2.5",
|
||||
"nodepool_os_version": "3602.2.1",
|
||||
"nodepool_os_version_min": "3602.2.1",
|
||||
"nodepool_os_name": "flatcar",
|
||||
"nodepool_minimum": "2",
|
||||
"nodepool_maximum": "3",
|
||||
|
|
@@ -60,6 +60,7 @@ var clusterResource = map[string]string{
|
|||
"maintenance_enable_machine_image_version_updates": "true",
|
||||
"maintenance_start": "01:23:45Z",
|
||||
"maintenance_end": "05:00:00+02:00",
|
||||
"maintenance_end_new": "03:03:03+00:00",
|
||||
}
|
||||
|
||||
func getConfig(version string, apc *bool, maintenanceEnd *string) string {
|
||||
|
|
@@ -145,6 +146,12 @@ func getConfig(version string, apc *bool, maintenanceEnd *string) string {
|
|||
max_surge = "%s"
|
||||
availability_zones = ["%s"]
|
||||
}]
|
||||
maintenance = {
|
||||
enable_kubernetes_version_updates = %s
|
||||
enable_machine_image_version_updates = %s
|
||||
start = "%s"
|
||||
end = "%s"
|
||||
}
|
||||
}
|
||||
`,
|
||||
testutil.SKEProviderConfig(),
|
||||
|
|
@@ -191,6 +198,10 @@ func getConfig(version string, apc *bool, maintenanceEnd *string) string {
|
|||
clusterResource["nodepool_maximum"],
|
||||
clusterResource["nodepool_max_surge"],
|
||||
clusterResource["nodepool_zone"],
|
||||
clusterResource["maintenance_enable_kubernetes_version_updates"],
|
||||
clusterResource["maintenance_enable_machine_image_version_updates"],
|
||||
clusterResource["maintenance_start"],
|
||||
maintenanceEndTF,
|
||||
)
|
||||
}
|
||||
|
||||
|
|
@@ -448,13 +459,13 @@ func TestAccSKE(t *testing.T) {
|
|||
},
|
||||
// 6) Update kubernetes version and maximum
|
||||
{
|
||||
Config: getConfig("1.25.12", nil, utils.Ptr("03:03:03+00:00")),
|
||||
Config: getConfig(clusterResource["kubernetes_version_new"], nil, utils.Ptr(clusterResource["maintenance_end_new"])),
|
||||
Check: resource.ComposeAggregateTestCheckFunc(
|
||||
// cluster data
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "project_id", clusterResource["project_id"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "name", clusterResource["name"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "kubernetes_version", "1.25"),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "kubernetes_version_used", "1.25.12"),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "kubernetes_version", clusterResource["kubernetes_version_new"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "kubernetes_version_used", clusterResource["kubernetes_version_used_new"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.name", clusterResource["nodepool_name"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.availability_zones.#", "1"),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "node_pools.0.availability_zones.0", clusterResource["nodepool_zone"]),
|
||||
|
|
@@ -485,7 +496,7 @@ func TestAccSKE(t *testing.T) {
|
|||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.enable_kubernetes_version_updates", clusterResource["maintenance_enable_kubernetes_version_updates"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.enable_machine_image_version_updates", clusterResource["maintenance_enable_machine_image_version_updates"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.start", clusterResource["maintenance_start"]),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.end", "03:03:03+00:00"),
|
||||
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.end", clusterResource["maintenance_end_new"]),
|
||||
|
||||
resource.TestCheckResourceAttrSet("stackit_ske_cluster.cluster", "kube_config"),
|
||||
),
|
||||
|
|
|
|||
|
|
@@ -86,7 +86,7 @@ func DnsProviderConfig() string {
|
|||
}
|
||||
|
||||
func LoadBalancerProviderConfig() string {
|
||||
if LogMeCustomEndpoint == "" {
|
||||
if LoadBalancerCustomEndpoint == "" {
|
||||
return `
|
||||
provider "stackit" {
|
||||
region = "eu01"
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue