fix: adjust new SDK builder (verbose/debug flags, legacy OAS dir, destroy check)
Some checks failed
CI Workflow / Check GoReleaser config (pull_request) Successful in 6s
CI Workflow / CI run build and linting (pull_request) Failing after 53s
CI Workflow / Code coverage report (pull_request) Has been skipped
CI Workflow / CI run tests (pull_request) Failing after 15m56s
CI Workflow / Test readiness for publishing provider (pull_request) Successful in 23m35s

This commit is contained in:
Marcel S. Henselin 2026-03-02 08:50:16 +01:00
parent cc08fca97a
commit 469ed9e056
6 changed files with 223 additions and 93 deletions

View file

@ -41,6 +41,8 @@ type Builder struct {
SkipClone bool SkipClone bool
SkipCleanup bool SkipCleanup bool
PackagesOnly bool PackagesOnly bool
Verbose bool
Debug bool
} }
func (b *Builder) Build() error { func (b *Builder) Build() error {
@ -56,13 +58,17 @@ func (b *Builder) Build() error {
if root == nil || *root == "" { if root == nil || *root == "" {
return fmt.Errorf("unable to determine root directory from git") return fmt.Errorf("unable to determine root directory from git")
} }
slog.Info(" ... using root directory", "dir", *root) if b.Verbose {
slog.Info(" ... using root directory", "dir", *root)
}
if !b.PackagesOnly { if !b.PackagesOnly {
slog.Info(" ... Checking needed commands available") if b.Verbose {
err := checkCommands([]string{}) slog.Info(" ... Checking needed commands available")
if err != nil { }
return err chkErr := checkCommands([]string{})
if chkErr != nil {
return chkErr
} }
} }
@ -92,14 +98,13 @@ func (b *Builder) Build() error {
} }
slog.Info("Creating oas repo dir", "dir", fmt.Sprintf("%s/%s", *root, OAS_REPO_NAME)) slog.Info("Creating oas repo dir", "dir", fmt.Sprintf("%s/%s", *root, OAS_REPO_NAME))
repoDir, err := createRepoDir(genDir, OAS_REPO, OAS_REPO_NAME, b.SkipClone) repoDir, err := b.createRepoDir(genDir, OAS_REPO, OAS_REPO_NAME, b.SkipClone)
if err != nil { if err != nil {
return fmt.Errorf("%s", err.Error()) return fmt.Errorf("%s", err.Error())
} }
slog.Info("Retrieving versions from subdirs")
// TODO - major // TODO - major
verMap, err := getVersions(repoDir) verMap, err := b.getVersions(repoDir)
if err != nil { if err != nil {
return fmt.Errorf("%s", err.Error()) return fmt.Errorf("%s", err.Error())
} }
@ -111,7 +116,7 @@ func (b *Builder) Build() error {
} }
slog.Info("Creating OAS dir") slog.Info("Creating OAS dir")
err = os.MkdirAll(path.Join(genDir, "oas"), 0o755) //nolint:gosec // this dir is not sensitive, so we can use 0755 err = os.MkdirAll(path.Join(genDir, "oas", "legacy"), 0o755) //nolint:gosec // this dir is not sensitive, so we can use 0755
if err != nil { if err != nil {
return err return err
} }
@ -131,7 +136,7 @@ func (b *Builder) Build() error {
itemVersion, itemVersion,
fmt.Sprintf("%s.json", baseService), fmt.Sprintf("%s.json", baseService),
) )
dstFile := path.Join(genDir, "oas", fmt.Sprintf("%s.json", service)) dstFile := path.Join(genDir, "oas", "legacy", fmt.Sprintf("%s.json", service))
_, err = copyFile(srcFile, dstFile) _, err = copyFile(srcFile, dstFile)
if err != nil { if err != nil {
return fmt.Errorf("%s", err.Error()) return fmt.Errorf("%s", err.Error())
@ -534,6 +539,7 @@ func generateServiceFiles(rootDir, generatorDir string) error {
oasFile := path.Join( oasFile := path.Join(
generatorDir, generatorDir,
"oas", "oas",
"legacy",
fmt.Sprintf("%s%s.json", service.Name(), svcVersion.Name()), fmt.Sprintf("%s%s.json", service.Name(), svcVersion.Name()),
) )
if _, oasErr := os.Stat(oasFile); os.IsNotExist(oasErr) { if _, oasErr := os.Stat(oasFile); os.IsNotExist(oasErr) {
@ -919,22 +925,32 @@ func getOnlyLatest(m map[string]version) (map[string]version, error) {
return tmpMap, nil return tmpMap, nil
} }
func getVersions(dir string) (map[string]version, error) { func (b *Builder) getVersions(dir string) (map[string]version, error) {
slog.Info("Retrieving versions from subdirs", "func", "getVersions")
res := make(map[string]version) res := make(map[string]version)
children, err := os.ReadDir(path.Join(dir, "services")) children, err := os.ReadDir(path.Join(dir, "services"))
if err != nil { if err != nil {
return nil, err return nil, err
} }
if len(children) < 1 {
slog.Error("found no children", "dir", path.Join(dir, "services"))
}
for _, entry := range children { for _, entry := range children {
if !entry.IsDir() { if !entry.IsDir() {
slog.Info("entry is no dir", "entry", entry.Name())
continue continue
} }
if b.Verbose {
slog.Info("getting versions", "svc", entry.Name())
}
versions, err := os.ReadDir(path.Join(dir, "services", entry.Name())) versions, err := os.ReadDir(path.Join(dir, "services", entry.Name()))
if err != nil { if err != nil {
return nil, err return nil, err
} }
m, err2 := extractVersions(entry.Name(), versions)
m, err2 := b.extractVersions(entry.Name(), versions)
if err2 != nil { if err2 != nil {
return m, err2 return m, err2
} }
@ -945,8 +961,12 @@ func getVersions(dir string) (map[string]version, error) {
return res, nil return res, nil
} }
func extractVersions(service string, versionDirs []os.DirEntry) (map[string]version, error) { func (b *Builder) extractVersions(service string, versionDirs []os.DirEntry) (map[string]version, error) {
res := make(map[string]version) res := make(map[string]version)
if len(versionDirs) < 1 {
slog.Error("list of version directories is empty")
return nil, nil
}
for _, vDir := range versionDirs { for _, vDir := range versionDirs {
if !vDir.IsDir() { if !vDir.IsDir() {
continue continue
@ -954,6 +974,9 @@ func extractVersions(service string, versionDirs []os.DirEntry) (map[string]vers
r := regexp.MustCompile(`v(\d+)([a-z]+)(\d*)`) r := regexp.MustCompile(`v(\d+)([a-z]+)(\d*)`)
matches := r.FindAllStringSubmatch(vDir.Name(), -1) matches := r.FindAllStringSubmatch(vDir.Name(), -1)
if matches == nil { if matches == nil {
if b.Debug {
slog.Warn("item did not fulfill regex", "item", vDir.Name())
}
continue continue
} }
svc, ver, err := handleVersion(service, matches[0]) svc, ver, err := handleVersion(service, matches[0])
@ -992,20 +1015,23 @@ func handleVersion(service string, match []string) (*string, *version, error) {
return &resStr, &version{verString: verString, major: majVer, minor: minVer}, nil return &resStr, &version{verString: verString, major: majVer, minor: minVer}, nil
} }
func createRepoDir(root, repoUrl, repoName string, skipClone bool) (string, error) { func (b *Builder) createRepoDir(root, repoUrl, repoName string, skipClone bool) (string, error) {
targetDir := path.Join(root, repoName) targetDir := path.Join(root, repoName)
if !skipClone { if !skipClone {
if fileExists(targetDir) { if fileExists(targetDir) {
slog.Warn("target dir exists - skipping", "targetDir", targetDir) slog.Warn("target dir exists - skipping", "targetDir", targetDir)
return targetDir, nil return targetDir, nil
} }
_, err := git.Clone( out, err := git.Clone(
clone.Repository(repoUrl), clone.Repository(repoUrl),
clone.Directory(targetDir), clone.Directory(targetDir),
) )
if err != nil { if err != nil {
return "", err return "", err
} }
if b.Verbose {
slog.Info("git clone result", "output", out)
}
} }
return targetDir, nil return targetDir, nil
} }

View file

@ -10,6 +10,8 @@ var (
skipCleanup bool skipCleanup bool
skipClone bool skipClone bool
packagesOnly bool packagesOnly bool
verbose bool
debug bool
) )
var buildCmd = &cobra.Command{ var buildCmd = &cobra.Command{
@ -21,6 +23,8 @@ var buildCmd = &cobra.Command{
SkipClone: skipClone, SkipClone: skipClone,
SkipCleanup: skipCleanup, SkipCleanup: skipCleanup,
PackagesOnly: packagesOnly, PackagesOnly: packagesOnly,
Verbose: verbose,
Debug: debug,
} }
return b.Build() return b.Build()
}, },
@ -32,6 +36,8 @@ func NewBuildCmd() *cobra.Command {
func init() { //nolint:gochecknoinits // This is the standard way to set up Cobra commands func init() { //nolint:gochecknoinits // This is the standard way to set up Cobra commands
buildCmd.Flags().BoolVarP(&skipCleanup, "skip-clean", "c", false, "Skip cleanup steps") buildCmd.Flags().BoolVarP(&skipCleanup, "skip-clean", "c", false, "Skip cleanup steps")
buildCmd.Flags().BoolVarP(&debug, "debug", "d", false, "Enable debug output")
buildCmd.Flags().BoolVarP(&skipClone, "skip-clone", "g", false, "Skip cloning from git") buildCmd.Flags().BoolVarP(&skipClone, "skip-clone", "g", false, "Skip cloning from git")
buildCmd.Flags().BoolVarP(&packagesOnly, "packages-only", "p", false, "Only generate packages") buildCmd.Flags().BoolVarP(&packagesOnly, "packages-only", "p", false, "Only generate packages")
buildCmd.Flags().BoolVarP(&verbose, "verbose", "v", false, "verbose - show more logs")
} }

View file

@ -28,8 +28,8 @@ func InstanceDataSourceSchema(ctx context.Context) schema.Schema {
}, },
"backup_schedule": schema.StringAttribute{ "backup_schedule": schema.StringAttribute{
Computed: true, Computed: true,
Description: "The schedule for on what time and how often the database backup will be created. The schedule is written as a cron schedule.", Description: "The schedule for when the database backup will be created. Currently, ONLY daily schedules are supported (every 24 hours). The schedule is written as a cron schedule.",
MarkdownDescription: "The schedule for on what time and how often the database backup will be created. The schedule is written as a cron schedule.", MarkdownDescription: "The schedule for when the database backup will be created. Currently, ONLY daily schedules are supported (every 24 hours). The schedule is written as a cron schedule.",
}, },
"connection_info": schema.SingleNestedAttribute{ "connection_info": schema.SingleNestedAttribute{
Attributes: map[string]schema.Attribute{ Attributes: map[string]schema.Attribute{

View file

@ -30,8 +30,8 @@ func InstanceResourceSchema(ctx context.Context) schema.Schema {
}, },
"backup_schedule": schema.StringAttribute{ "backup_schedule": schema.StringAttribute{
Required: true, Required: true,
Description: "The schedule for on what time and how often the database backup will be created. The schedule is written as a cron schedule.", Description: "The schedule for when the database backup will be created. Currently, ONLY daily schedules are supported (every 24 hours). The schedule is written as a cron schedule.",
MarkdownDescription: "The schedule for on what time and how often the database backup will be created. The schedule is written as a cron schedule.", MarkdownDescription: "The schedule for when the database backup will be created. Currently, ONLY daily schedules are supported (every 24 hours). The schedule is written as a cron schedule.",
}, },
"connection_info": schema.SingleNestedAttribute{ "connection_info": schema.SingleNestedAttribute{
Attributes: map[string]schema.Attribute{ Attributes: map[string]schema.Attribute{

View file

@ -9,13 +9,18 @@ import (
"strconv" "strconv"
"strings" "strings"
"testing" "testing"
"time"
"github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/acctest"
"github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/helper/resource"
"github.com/hashicorp/terraform-plugin-testing/terraform"
"github.com/stackitcloud/stackit-sdk-go/core/config" "github.com/stackitcloud/stackit-sdk-go/core/config"
"github.com/stackitcloud/stackit-sdk-go/core/utils"
postgresflexalphaPkgGen "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/postgresflexalpha" postgresflexalphaPkgGen "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/postgresflexalpha"
"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core"
postgresflexalphaInstance "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/postgresflexalpha/instance" postgresflexalphaInstance "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/postgresflexalpha/instance"
"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/wait/postgresflexalpha"
"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/internal/testutils" "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/internal/testutils"
// The fwresource import alias is so there is no collision // The fwresource import alias is so there is no collision
@ -26,52 +31,6 @@ import (
const pfx = "stackitprivatepreview_postgresflexalpha" const pfx = "stackitprivatepreview_postgresflexalpha"
var testInstances []string
func init() {
sweeperName := fmt.Sprintf("%s_%s", pfx, "sweeper")
resource.AddTestSweepers(
sweeperName, &resource.Sweeper{
Name: sweeperName,
F: func(_ string) error { // region is passed by the testing framework
ctx := context.Background()
apiClientConfigOptions := []config.ConfigurationOption{}
apiClient, err := postgresflexalphaPkgGen.NewAPIClient(apiClientConfigOptions...)
if err != nil {
log.Fatalln(err)
}
instances, err := apiClient.ListInstancesRequest(ctx, testutils.ProjectId, testutils.Region).
Size(100).
Execute()
if err != nil {
log.Fatalln(err)
}
for _, inst := range instances.GetInstances() {
if strings.HasPrefix(inst.GetName(), "tf-acc-") {
for _, item := range testInstances {
if inst.GetName() == item {
delErr := apiClient.DeleteInstanceRequestExecute(
ctx,
testutils.ProjectId,
testutils.Region,
inst.GetId(),
)
if delErr != nil {
// TODO: maybe just warn?
log.Fatalln(delErr)
}
}
}
}
}
return nil
},
},
)
}
func TestInstanceResourceSchema(t *testing.T) { func TestInstanceResourceSchema(t *testing.T) {
t.Parallel() t.Parallel()
@ -115,27 +74,6 @@ func testAccPreCheck(t *testing.T) {
} }
} }
// func TestAccResourceExample_parallel(t *testing.T) {
// t.Parallel()
//
// exData := resData{
// Region: "eu01",
// ServiceAccountFilePath: sa_file,
// ProjectID: project_id,
// Name: acctest.RandomWithPrefix("tf-acc"),
// }
//
// resource.Test(t, resource.TestCase{
// ProtoV6ProviderFactories: testutils.TestAccProtoV6ProviderFactories,
// Steps: []resource.TestStep{
// {
// Config: testAccResourceEncryptionExampleConfig(exData),
// Check: resource.TestCheckResourceAttrSet("example_resource.test", "id"),
// },
// },
// })
//}
type resData struct { type resData struct {
ServiceAccountFilePath string ServiceAccountFilePath string
ProjectId string ProjectId string
@ -202,28 +140,53 @@ func TestAccInstance(t *testing.T) {
updSizeData := exData updSizeData := exData
updSizeData.Size = 25 updSizeData.Size = 25
updBackupSched := exData
updBackupSched.BackupSchedule = "0 0 1 * *"
/*
{
"backupSchedule": "6 6 * * *",
"flavorId": "1.2",
"name": "postgres-instance",
"network": {
"acl": [
"198.51.100.0/24"
]
},
"replicas": 1,
"retentionDays": 35,
"storage": {
"size": 10
},
"version": "string"
}
*/
var testItemID string
resource.ParallelTest( resource.ParallelTest(
t, resource.TestCase{ t, resource.TestCase{
PreCheck: func() { PreCheck: func() {
testAccPreCheck(t) testAccPreCheck(t)
t.Logf(" ... working on instance %s", exData.TfName) t.Logf(" ... working on instance %s", exData.TfName)
testInstances = append(testInstances, exData.TfName)
}, },
CheckDestroy: testAccCheckPostgresFlexDestroy,
ProtoV6ProviderFactories: testutils.TestAccProtoV6ProviderFactories, ProtoV6ProviderFactories: testutils.TestAccProtoV6ProviderFactories,
Steps: []resource.TestStep{ Steps: []resource.TestStep{
// Create and verify // Create and verify
{ {
PreConfig: func() {
testItemID = testutils.ResStr(pfx, "instance", exData.TfName)
},
Config: testutils.StringFromTemplateMust( Config: testutils.StringFromTemplateMust(
"testdata/instance_template.gompl", "testdata/instance_template.gompl",
exData, exData,
), ),
Check: resource.ComposeAggregateTestCheckFunc( Check: resource.ComposeAggregateTestCheckFunc(
resource.TestCheckResourceAttr( // check params are set
testutils.ResStr(pfx, "instance", exData.TfName), resource.TestCheckResourceAttrSet(testItemID, "id"),
"name",
exData.Name, // check param values
), resource.TestCheckResourceAttr(testItemID, "name", exData.Name),
resource.TestCheckResourceAttrSet(testutils.ResStr(pfx, "instance", exData.TfName), "id"),
), ),
}, },
// Update name and verify // Update name and verify
@ -254,6 +217,20 @@ func TestAccInstance(t *testing.T) {
), ),
), ),
}, },
// Update backup schedule
{
Config: testutils.StringFromTemplateMust(
"testdata/instance_template.gompl",
updBackupSched,
),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr(
testutils.ResStr(pfx, "instance", exData.TfName),
"backup_schedule",
updBackupSched.BackupSchedule,
),
),
},
//// Import test //// Import test
//{ //{
// ResourceName: "example_resource.test", // ResourceName: "example_resource.test",
@ -284,6 +261,7 @@ func TestAccInstanceWithUsers(t *testing.T) {
t.Logf(" ... working on instance %s", data.TfName) t.Logf(" ... working on instance %s", data.TfName)
testInstances = append(testInstances, data.TfName) testInstances = append(testInstances, data.TfName)
}, },
CheckDestroy: testAccCheckPostgresFlexDestroy,
ProtoV6ProviderFactories: testutils.TestAccProtoV6ProviderFactories, ProtoV6ProviderFactories: testutils.TestAccProtoV6ProviderFactories,
Steps: []resource.TestStep{ Steps: []resource.TestStep{
// Create and verify // Create and verify
@ -336,6 +314,7 @@ func TestAccInstanceWithDatabases(t *testing.T) {
t.Logf(" ... working on instance %s", data.TfName) t.Logf(" ... working on instance %s", data.TfName)
testInstances = append(testInstances, data.TfName) testInstances = append(testInstances, data.TfName)
}, },
CheckDestroy: testAccCheckPostgresFlexDestroy,
ProtoV6ProviderFactories: testutils.TestAccProtoV6ProviderFactories, ProtoV6ProviderFactories: testutils.TestAccProtoV6ProviderFactories,
Steps: []resource.TestStep{ Steps: []resource.TestStep{
// Create and verify // Create and verify
@ -1015,3 +994,80 @@ func TestAccInstanceWithDatabases(t *testing.T) {
// } // }
// return nil // return nil
//} //}
// testAccCheckPostgresFlexDestroy is the CheckDestroy hook for the postgres
// flex acceptance tests. It collects the instance IDs of all postgres flex
// resources left in the terraform state, lists the instances still present in
// the API, and deletes (then waits for the deletion of) any that match.
// It returns a non-nil error when cleanup cannot be performed.
func testAccCheckPostgresFlexDestroy(s *terraform.State) error {
	ctx := context.Background()

	region := testutils.Region
	if region == "" {
		// Fall back to the default region when none is configured.
		region = "eu01"
	}

	projectId := testutils.ProjectId
	if projectId == "" {
		return fmt.Errorf("projectID could not be determined in destroy function")
	}

	apiClientConfigOptions := []config.ConfigurationOption{
		config.WithServiceAccountKeyPath(os.Getenv("TF_ACC_SERVICE_ACCOUNT_FILE")),
		config.WithRegion(region),
	}
	if testutils.PostgresFlexCustomEndpoint != "" {
		apiClientConfigOptions = append(
			apiClientConfigOptions,
			config.WithEndpoint(testutils.PostgresFlexCustomEndpoint),
		)
	}
	client, err := postgresflexalphaPkgGen.NewAPIClient(apiClientConfigOptions...)
	if err != nil {
		// Return the error instead of log.Fatalln so the test framework can
		// report the failure and run its own cleanup instead of the whole
		// process being terminated.
		return fmt.Errorf("creating API client in destroy function: %w", err)
	}

	instancesToDestroy := []string{}
	for _, rs := range s.RootModule().Resources {
		if rs.Type != "stackitprivatepreview_postgresflexalpha_instance" &&
			rs.Type != "stackitprivatepreview_postgresflexbeta_instance" {
			continue
		}
		// instance terraform ID: "[project_id],[region],[instance_id]"
		idParts := strings.Split(rs.Primary.ID, core.Separator)
		if len(idParts) < 3 {
			// Guard against a malformed ID instead of panicking on the index.
			return fmt.Errorf("unexpected terraform ID format %q for %s", rs.Primary.ID, rs.Type)
		}
		instancesToDestroy = append(instancesToDestroy, idParts[2])
	}

	instancesResp, err := client.ListInstancesRequest(ctx, projectId, region).
		Size(100).
		Execute()
	if err != nil {
		return fmt.Errorf("getting instancesResp: %w", err)
	}

	items := instancesResp.GetInstances()
	for i := range items {
		if items[i].Id == nil {
			continue
		}
		if !utils.Contains(instancesToDestroy, *items[i].Id) {
			continue
		}
		// Use the locally resolved projectId/region consistently so the
		// delete call and the wait handler target the same scope as the list
		// call above (the original mixed testutils.ProjectId/testutils.Region
		// with the local fallback values).
		delErr := client.DeleteInstanceRequestExecute(ctx, projectId, region, *items[i].Id)
		if delErr != nil {
			return fmt.Errorf("deleting instance %s during CheckDestroy: %w", *items[i].Id, delErr)
		}
		waitErr := postgresflexalpha.DeleteInstanceWaitHandler(
			ctx,
			client,
			projectId,
			region,
			*items[i].Id,
			15*time.Minute,
			10*time.Second,
		)
		if waitErr != nil {
			return fmt.Errorf("deleting instance %s during CheckDestroy: waiting for deletion: %w", *items[i].Id, waitErr)
		}
	}
	return nil
}

View file

@ -25,6 +25,7 @@ const (
InstanceStateTerminating = "TERMINATING" InstanceStateTerminating = "TERMINATING"
InstanceStateUnknown = "UNKNOWN" InstanceStateUnknown = "UNKNOWN"
InstanceStatePending = "PENDING" InstanceStatePending = "PENDING"
InstanceStateDeleted = "DELETED"
) )
// APIClientInstanceInterface Interface needed for tests // APIClientInstanceInterface Interface needed for tests
@ -296,3 +297,44 @@ func GetDatabaseByIdWaitHandler(
) )
return handler return handler
} }
// DeleteInstanceWaitHandler polls the instance with the given id until its
// deletion has completed. Deletion is considered finished when the API either
// answers with HTTP 404 for the instance or reports status DELETED. A status
// of FAILURE or any unrecognized status terminates the wait with an error.
//
// timeout bounds the total waiting time; sleepBeforeWait is the initial delay
// before polling starts (as configured via wait.SetSleepBeforeWait).
func DeleteInstanceWaitHandler(
	ctx context.Context,
	a APIClientInstanceInterface,
	projectId,
	region,
	instanceId string,
	timeout, sleepBeforeWait time.Duration,
) error {
	handler := wait.New(
		func() (waitFinished bool, response *postgresflex.GetInstanceResponse, err error) {
			s, err := a.GetInstanceRequestExecute(ctx, projectId, region, instanceId)
			if err != nil {
				oapiErr, ok := err.(*oapierror.GenericOpenAPIError) // nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped
				if !ok {
					return false, nil, fmt.Errorf("received error is no oapierror: %w", err)
				}
				// A 404 means the instance no longer exists - deletion done.
				if oapiErr.StatusCode == 404 {
					return true, nil, nil
				}
				return false, nil, fmt.Errorf("api returned error: %w", err)
			}
			if s == nil || s.Status == nil {
				// Guard against a missing status pointer: keep polling rather
				// than panicking on the dereference below.
				return false, nil, nil
			}
			switch *s.Status {
			case InstanceStateDeleted:
				return true, nil, nil
			case InstanceStateEmpty, InstanceStatePending, InstanceStateUnknown, InstanceStateProgressing, InstanceStateSuccess:
				// Still in progress - continue waiting.
				return false, nil, nil
			case InstanceStateFailed:
				return true, nil, fmt.Errorf("wait handler got status FAILURE for instance: %s", instanceId)
			default:
				return true, s, fmt.Errorf("instance with id %s has unexpected status %s", instanceId, *s.Status)
			}
		},
	).SetTimeout(timeout).SetSleepBeforeWait(sleepBeforeWait)
	_, err := handler.WaitWithContext(ctx)
	if err != nil {
		return err
	}
	return nil
}