fix(acceptance-tests): Postgresql use force delete (#771)

- use TF_ACC_REGION in acceptance test

Signed-off-by: Alexander Dahmen <alexander.dahmen@inovex.de>
This commit is contained in:
Alexander Dahmen 2025-04-09 09:34:43 +02:00 committed by GitHub
parent bce05fb420
commit 67778eb1d5
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
5 changed files with 21 additions and 29 deletions

View file

@@ -17,7 +17,7 @@ jobs:
run: make project-tools
- name: Run tests
run: |
make test-acceptance-tf TF_ACC_PROJECT_ID=$${{ secrets.TF_ACC_PROJECT_ID }} TF_ACC_ORGANIZATION_ID=$${{ secrets.TF_ACC_ORGANIZATION_ID }}
make test-acceptance-tf TF_ACC_PROJECT_ID=$${{ secrets.TF_ACC_PROJECT_ID }} TF_ACC_ORGANIZATION_ID=$${{ secrets.TF_ACC_ORGANIZATION_ID }} TF_ACC_REGION="eu01"
env:
STACKIT_SERVICE_ACCOUNT_TOKEN: ${{ secrets.TF_ACC_SERVICE_ACCOUNT_TOKEN }}
TF_ACC_TEST_PROJECT_SERVICE_ACCOUNT_EMAIL: ${{ secrets.TF_ACC_TEST_PROJECT_SERVICE_ACCOUNT_EMAIL }}

View file

@@ -106,14 +106,13 @@ func configResources(backupSchedule string, region *string) string {
}
func TestAccPostgresFlexFlexResource(t *testing.T) {
testRegion := utils.Ptr("eu01")
resource.Test(t, resource.TestCase{
ProtoV6ProviderFactories: testutil.TestAccProtoV6ProviderFactories,
CheckDestroy: testAccCheckPostgresFlexDestroy,
Steps: []resource.TestStep{
// Creation
{
Config: configResources(instanceResource["backup_schedule"], testRegion),
Config: configResources(instanceResource["backup_schedule"], &testutil.Region),
Check: resource.ComposeAggregateTestCheckFunc(
// Instance
resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "project_id", instanceResource["project_id"]),
@@ -130,7 +129,7 @@ func TestAccPostgresFlexFlexResource(t *testing.T) {
resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "storage.class", instanceResource["storage_class"]),
resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "storage.size", instanceResource["storage_size"]),
resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "version", instanceResource["version"]),
resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "region", *testRegion),
resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "region", testutil.Region),
// User
resource.TestCheckResourceAttrPair(
@@ -169,7 +168,7 @@ func TestAccPostgresFlexFlexResource(t *testing.T) {
project_id = stackit_postgresflex_instance.instance.project_id
instance_id = stackit_postgresflex_instance.instance.instance_id
}
data "stackit_postgresflex_user" "user" {
project_id = stackit_postgresflex_instance.instance.project_id
instance_id = stackit_postgresflex_instance.instance.instance_id
@@ -341,7 +340,7 @@ func testAccCheckPostgresFlexDestroy(s *terraform.State) error {
continue
}
// instance terraform ID: = "[project_id],[region],[instance_id]"
instanceId := strings.Split(rs.Primary.ID, core.Separator)[1]
instanceId := strings.Split(rs.Primary.ID, core.Separator)[2]
instancesToDestroy = append(instancesToDestroy, instanceId)
}
@@ -356,7 +355,7 @@ func testAccCheckPostgresFlexDestroy(s *terraform.State) error {
continue
}
if utils.Contains(instancesToDestroy, *items[i].Id) {
err := client.DeleteInstanceExecute(ctx, testutil.ProjectId, testutil.Region, *items[i].Id)
err := client.ForceDeleteInstanceExecute(ctx, testutil.ProjectId, testutil.Region, *items[i].Id)
if err != nil {
return fmt.Errorf("deleting instance %s during CheckDestroy: %w", *items[i].Id, err)
}
@@ -364,10 +363,6 @@ func testAccCheckPostgresFlexDestroy(s *terraform.State) error {
if err != nil {
return fmt.Errorf("deleting instance %s during CheckDestroy: waiting for deletion %w", *items[i].Id, err)
}
err = client.ForceDeleteInstanceExecute(ctx, testutil.ProjectId, testutil.Region, *items[i].Id)
if err != nil {
return fmt.Errorf("force deleting instance %s during CheckDestroy: %w", *items[i].Id, err)
}
}
}
return nil

View file

@@ -59,7 +59,6 @@ func TestAccServerUpdateScheduleResource(t *testing.T) {
fmt.Println("TF_ACC_SERVER_ID not set, skipping test")
return
}
testRegion := utils.Ptr("eu01")
var invalidMaintenanceWindow int64 = 0
var validMaintenanceWindow int64 = 15
var updatedMaintenanceWindow int64 = 8
@@ -69,12 +68,12 @@ func TestAccServerUpdateScheduleResource(t *testing.T) {
Steps: []resource.TestStep{
// Creation fail
{
Config: resourceConfig(invalidMaintenanceWindow, testRegion),
Config: resourceConfig(invalidMaintenanceWindow, &testutil.Region),
ExpectError: regexp.MustCompile(`.*maintenance_window value must be at least 1*`),
},
// Creation
{
Config: resourceConfig(validMaintenanceWindow, testRegion),
Config: resourceConfig(validMaintenanceWindow, &testutil.Region),
Check: resource.ComposeAggregateTestCheckFunc(
// Update schedule data
resource.TestCheckResourceAttr("stackit_server_update_schedule.test_schedule", "project_id", serverUpdateScheduleResource["project_id"]),
@@ -84,7 +83,7 @@ func TestAccServerUpdateScheduleResource(t *testing.T) {
resource.TestCheckResourceAttr("stackit_server_update_schedule.test_schedule", "name", serverUpdateScheduleResource["name"]),
resource.TestCheckResourceAttr("stackit_server_update_schedule.test_schedule", "rrule", serverUpdateScheduleResource["rrule"]),
resource.TestCheckResourceAttr("stackit_server_update_schedule.test_schedule", "enabled", strconv.FormatBool(true)),
resource.TestCheckResourceAttr("stackit_server_update_schedule.test_schedule", "region", *testRegion),
resource.TestCheckResourceAttr("stackit_server_update_schedule.test_schedule", "region", testutil.Region),
),
},
// data source
@@ -102,7 +101,7 @@ func TestAccServerUpdateScheduleResource(t *testing.T) {
server_id = stackit_server_update_schedule.test_schedule.server_id
update_schedule_id = stackit_server_update_schedule.test_schedule.update_schedule_id
}`,
resourceConfig(validMaintenanceWindow, testRegion),
resourceConfig(validMaintenanceWindow, &testutil.Region),
),
Check: resource.ComposeAggregateTestCheckFunc(
// Server update schedule data

View file

@@ -206,7 +206,6 @@ func getConfig(kubernetesVersion, nodePoolMachineOSVersion string, maintenanceEn
}
func TestAccSKE(t *testing.T) {
testRegion := utils.Ptr("eu01")
resource.Test(t, resource.TestCase{
ProtoV6ProviderFactories: testutil.TestAccProtoV6ProviderFactories,
CheckDestroy: testAccCheckSKEDestroy,
@@ -215,7 +214,7 @@ func TestAccSKE(t *testing.T) {
// 1) Creation
{
Config: getConfig(clusterResource["kubernetes_version_min"], clusterResource["nodepool_os_version_min"], nil, testRegion),
Config: getConfig(clusterResource["kubernetes_version_min"], clusterResource["nodepool_os_version_min"], nil, &testutil.Region),
Check: resource.ComposeAggregateTestCheckFunc(
// cluster data
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "name", clusterResource["name"]),
@@ -257,7 +256,7 @@ func TestAccSKE(t *testing.T) {
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.enable_machine_image_version_updates", clusterResource["maintenance_enable_machine_image_version_updates"]),
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.start", clusterResource["maintenance_start"]),
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.end", clusterResource["maintenance_end"]),
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "region", *testRegion),
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "region", testutil.Region),
// Kubeconfig
@@ -285,7 +284,7 @@ func TestAccSKE(t *testing.T) {
}
`,
getConfig(clusterResource["kubernetes_version_min"], clusterResource["nodepool_os_version_min"], nil, testRegion),
getConfig(clusterResource["kubernetes_version_min"], clusterResource["nodepool_os_version_min"], nil, &testutil.Region),
clusterResource["project_id"],
clusterResource["name"],
),
@@ -294,7 +293,7 @@ func TestAccSKE(t *testing.T) {
// cluster data
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "id", fmt.Sprintf("%s,%s,%s",
clusterResource["project_id"],
*testRegion,
testutil.Region,
clusterResource["name"],
)),
resource.TestCheckResourceAttr("data.stackit_ske_cluster.cluster", "project_id", clusterResource["project_id"]),
@@ -357,7 +356,7 @@ func TestAccSKE(t *testing.T) {
},
// 4) Update kubernetes version, OS version and maintenance end
{
Config: getConfig(clusterResource["kubernetes_version_min_new"], clusterResource["nodepool_os_version_min_new"], utils.Ptr(clusterResource["maintenance_end_new"]), testRegion),
Config: getConfig(clusterResource["kubernetes_version_min_new"], clusterResource["nodepool_os_version_min_new"], utils.Ptr(clusterResource["maintenance_end_new"]), &testutil.Region),
Check: resource.ComposeAggregateTestCheckFunc(
// cluster data
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "project_id", clusterResource["project_id"]),
@@ -399,12 +398,12 @@ func TestAccSKE(t *testing.T) {
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.enable_machine_image_version_updates", clusterResource["maintenance_enable_machine_image_version_updates"]),
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.start", clusterResource["maintenance_start"]),
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "maintenance.end", clusterResource["maintenance_end_new"]),
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "region", *testRegion),
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "region", testutil.Region),
),
},
// 5) Downgrade kubernetes and nodepool machine OS version
{
Config: getConfig(clusterResource["kubernetes_version_min"], clusterResource["nodepool_os_version_min"], utils.Ptr(clusterResource["maintenance_end_new"]), testRegion),
Config: getConfig(clusterResource["kubernetes_version_min"], clusterResource["nodepool_os_version_min"], utils.Ptr(clusterResource["maintenance_end_new"]), &testutil.Region),
Check: resource.ComposeAggregateTestCheckFunc(
// cluster data
resource.TestCheckResourceAttr("stackit_ske_cluster.cluster", "project_id", clusterResource["project_id"]),
@@ -443,7 +442,7 @@ func testAccCheckSKEDestroy(s *terraform.State) error {
continue
}
// cluster terraform ID: = "[project_id],[region],[cluster_name]"
clusterName := strings.Split(rs.Primary.ID, core.Separator)[1]
clusterName := strings.Split(rs.Primary.ID, core.Separator)[2]
clustersToDestroy = append(clustersToDestroy, clusterName)
}

View file

@@ -97,14 +97,13 @@ func configResources(backupSchedule string, region *string) string {
}
func TestAccSQLServerFlexResource(t *testing.T) {
testRegion := utils.Ptr("eu01")
resource.Test(t, resource.TestCase{
ProtoV6ProviderFactories: testutil.TestAccProtoV6ProviderFactories,
CheckDestroy: testAccChecksqlserverflexDestroy,
Steps: []resource.TestStep{
// Creation
{
Config: configResources(instanceResource["backup_schedule"], testRegion),
Config: configResources(instanceResource["backup_schedule"], &testutil.Region),
Check: resource.ComposeAggregateTestCheckFunc(
// Instance
resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "project_id", instanceResource["project_id"]),
@@ -122,7 +121,7 @@ func TestAccSQLServerFlexResource(t *testing.T) {
resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "version", instanceResource["version"]),
resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "options.retention_days", instanceResource["options_retention_days"]),
resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "backup_schedule", instanceResource["backup_schedule"]),
resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "region", *testRegion),
resource.TestCheckResourceAttr("stackit_sqlserverflex_instance.instance", "region", testutil.Region),
// User
resource.TestCheckResourceAttrPair(
"stackit_sqlserverflex_user.user", "project_id",
@@ -323,7 +322,7 @@ func testAccChecksqlserverflexDestroy(s *terraform.State) error {
continue
}
// instance terraform ID: = "[project_id],[region],[instance_id]"
instanceId := strings.Split(rs.Primary.ID, core.Separator)[1]
instanceId := strings.Split(rs.Primary.ID, core.Separator)[2]
instancesToDestroy = append(instancesToDestroy, instanceId)
}