diff --git a/.gitignore b/.gitignore
index 8b2a63bb..c588a0f1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -45,3 +45,4 @@ dist
.secrets
pkg_gen
+/release/
diff --git a/cmd/cmd/build/build.go b/cmd/cmd/build/build.go
index e822afaf..df038609 100644
--- a/cmd/cmd/build/build.go
+++ b/cmd/cmd/build/build.go
@@ -35,10 +35,15 @@ type version struct {
minor int
}
-func Build() error {
+type Builder struct {
+ SkipClone bool
+ SkipCleanup bool
+}
+
+func (b *Builder) Build() error {
slog.Info("Starting Builder")
- slog.Info("Checking needed commands available")
+ slog.Info(" ... Checking needed commands available")
err := checkCommands([]string{"tfplugingen-framework", "tfplugingen-openapi"})
if err != nil {
return err
@@ -51,28 +56,33 @@ func Build() error {
if root == nil || *root == "" {
return fmt.Errorf("unable to determine root directory from git")
}
- slog.Info("Using root directory", "dir", *root)
+ slog.Info(" ... Using root directory", "dir", *root)
- slog.Info("Cleaning up old generator directory")
- err = os.RemoveAll(path.Join(*root, GEN_REPO_NAME))
- if err != nil {
- return err
- }
+ if !b.SkipCleanup {
+ slog.Info("Cleaning up old generator directory")
+ err = os.RemoveAll(path.Join(*root, GEN_REPO_NAME))
+ if err != nil {
+ return err
+ }
- slog.Info("Cleaning up old packages directory")
- err = os.RemoveAll(path.Join(*root, "pkg_gen"))
- if err != nil {
- return err
+ slog.Info("Cleaning up old packages directory")
+ err = os.RemoveAll(path.Join(*root, "pkg_gen"))
+ if err != nil {
+ return err
+ }
}
slog.Info("Creating generator dir", "dir", fmt.Sprintf("%s/%s", *root, GEN_REPO_NAME))
- genDir, err := createGeneratorDir(*root, GEN_REPO, GEN_REPO_NAME)
- if err != nil {
- return err
+ genDir := path.Join(*root, GEN_REPO_NAME)
+ if !b.SkipClone {
+ err = createGeneratorDir(GEN_REPO, genDir, b.SkipClone)
+ if err != nil {
+ return err
+ }
}
- slog.Info("Creating oas dir", "dir", fmt.Sprintf("%s/%s", *root, OAS_REPO_NAME))
- repoDir, err := createRepoDir(genDir, OAS_REPO, OAS_REPO_NAME)
+ slog.Info("Creating oas repo dir", "dir", fmt.Sprintf("%s/%s", *root, OAS_REPO_NAME))
+ repoDir, err := createRepoDir(genDir, OAS_REPO, OAS_REPO_NAME, b.SkipClone)
if err != nil {
return fmt.Errorf("%s", err.Error())
}
@@ -118,12 +128,6 @@ func Build() error {
}
}
- slog.Info("Cleaning up", "dir", repoDir)
- err = os.RemoveAll(filepath.Dir(repoDir))
- if err != nil {
- return fmt.Errorf("%s", err.Error())
- }
-
slog.Info("Changing dir", "dir", genDir)
err = os.Chdir(genDir)
if err != nil {
@@ -191,30 +195,16 @@ func Build() error {
if item.IsDir() {
slog.Info(" -> package", "name", item.Name())
tgtDir := path.Join(*root, "pkg_gen", item.Name())
- // no backup needed as we generate new
- //bakName := fmt.Sprintf("%s.%s", item.Name(), time.Now().Format("20060102-150405"))
- //if _, err = os.Stat(tgtDir); !os.IsNotExist(err) {
- // err = os.Rename(
- // tgtDir,
- // path.Join(*root, "pkg", bakName),
- // )
- // if err != nil {
- // return err
- // }
- //}
+ if fileExists(tgtDir) {
+ delErr := os.RemoveAll(tgtDir)
+ if delErr != nil {
+ return delErr
+ }
+ }
err = os.Rename(path.Join(srcDir, item.Name()), tgtDir)
if err != nil {
return err
}
-
- // wait is placed outside now
- //if _, err = os.Stat(path.Join(*root, "pkg", bakName, "wait")); !os.IsNotExist(err) {
- // slog.Info(" Copying wait subfolder")
- // err = os.Rename(path.Join(*root, "pkg", bakName, "wait"), path.Join(tgtDir, "wait"))
- // if err != nil {
- // return err
- // }
- //}
}
}
@@ -238,17 +228,25 @@ func Build() error {
return err
}
- slog.Info("Finally removing temporary files and directories")
- //err = os.RemoveAll(path.Join(*root, "generated"))
- //if err != nil {
- // slog.Error("RemoveAll", "dir", path.Join(*root, "generated"), "err", err)
- // return err
- //}
+ if !b.SkipCleanup {
+ slog.Info("Finally removing temporary files and directories")
+ err = os.RemoveAll(path.Join(*root, "generated"))
+ if err != nil {
+ slog.Error("RemoveAll", "dir", path.Join(*root, "generated"), "err", err)
+ return err
+ }
- err = os.RemoveAll(path.Join(*root, GEN_REPO_NAME))
- if err != nil {
- slog.Error("RemoveAll", "dir", path.Join(*root, GEN_REPO_NAME), "err", err)
- return err
+ err = os.RemoveAll(path.Join(*root, GEN_REPO_NAME))
+ if err != nil {
+ slog.Error("RemoveAll", "dir", path.Join(*root, GEN_REPO_NAME), "err", err)
+ return err
+ }
+
+ slog.Info("Cleaning up", "dir", repoDir)
+ err = os.RemoveAll(filepath.Dir(repoDir))
+ if err != nil {
+ return fmt.Errorf("%s", err.Error())
+ }
}
slog.Info("Done")
@@ -421,6 +419,7 @@ func generateServiceFiles(rootDir, generatorDir string) error {
continue
}
+ // TODO: use const of supported versions
if svcVersion.Name() != "alpha" && svcVersion.Name() != "beta" {
continue
}
@@ -442,7 +441,7 @@ func generateServiceFiles(rootDir, generatorDir string) error {
fileName := matches[0][0]
resource := matches[0][1]
slog.Info(
- "Found service spec",
+ " found service spec",
"name",
specFile.Name(),
"service",
@@ -451,136 +450,147 @@ func generateServiceFiles(rootDir, generatorDir string) error {
resource,
)
- //for _, part := range []string{"alpha", "beta"} {
- oasFile := path.Join(generatorDir, "oas", fmt.Sprintf("%s%s.json", service.Name(), svcVersion))
- if _, err = os.Stat(oasFile); !os.IsNotExist(err) {
- slog.Info("found matching oas", "svc", service.Name(), "version", svcVersion.Name())
- scName := fmt.Sprintf("%s%s", service.Name(), svcVersion.Name())
- scName = strings.ReplaceAll(scName, "-", "")
- err = os.MkdirAll(path.Join(rootDir, "generated", "internal", "services", scName, resource), 0755)
- if err != nil {
- return err
- }
+ oasFile := path.Join(generatorDir, "oas", fmt.Sprintf("%s%s.json", service.Name(), svcVersion.Name()))
+ if _, oasErr := os.Stat(oasFile); os.IsNotExist(oasErr) {
+ slog.Warn(" could not find matching oas", "svc", service.Name(), "version", svcVersion.Name())
+ continue
+ }
- // slog.Info("Generating openapi spec json")
- specJsonFile := path.Join(rootDir, "generated", "specs", fmt.Sprintf("%s_%s_spec.json", scName, resource))
+ scName := fmt.Sprintf("%s%s", service.Name(), svcVersion.Name())
+ scName = strings.ReplaceAll(scName, "-", "")
+ err = os.MkdirAll(path.Join(rootDir, "generated", "internal", "services", scName, resource), 0755)
+ if err != nil {
+ return err
+ }
- var stdOut, stdErr bytes.Buffer
+ // slog.Info("Generating openapi spec json")
+ specJsonFile := path.Join(rootDir, "generated", "specs", fmt.Sprintf("%s_%s_spec.json", scName, resource))
- // noqa:gosec
- cmd := exec.Command(
- "tfplugingen-openapi",
- "generate",
- "--config",
- path.Join(rootDir, "service_specs", fileName),
- "--output",
- specJsonFile,
- oasFile,
+ var stdOut, stdErr bytes.Buffer
+
+ // noqa:gosec
+ cmd := exec.Command(
+ "tfplugingen-openapi",
+ "generate",
+ "--config",
+ path.Join(rootDir, "service_specs", service.Name(), svcVersion.Name(), fileName),
+ "--output",
+ specJsonFile,
+ oasFile,
+ )
+ cmd.Stdout = &stdOut
+ cmd.Stderr = &stdErr
+
+ if err = cmd.Start(); err != nil {
+ slog.Error(
+ "tfplugingen-openapi generate",
+ "error",
+ err,
+ "stdOut",
+ stdOut.String(),
+ "stdErr",
+ stdErr.String(),
)
- cmd.Stdout = &stdOut
- cmd.Stderr = &stdErr
+ return err
+ }
- if err = cmd.Start(); err != nil {
- slog.Error("tfplugingen-openapi generate", "error", err)
- return err
+ if err = cmd.Wait(); err != nil {
+ var exitErr *exec.ExitError
+ if errors.As(err, &exitErr) {
+ slog.Error("tfplugingen-openapi generate", "code", exitErr.ExitCode(), "error", err, "stdout", stdOut.String(), "stderr", stdErr.String())
+ return fmt.Errorf("%s", stdErr.String())
}
-
- if err = cmd.Wait(); err != nil {
- var exitErr *exec.ExitError
- if errors.As(err, &exitErr) {
- slog.Error("tfplugingen-openapi generate", "code", exitErr.ExitCode(), "error", err, "stdout", stdOut.String(), "stderr", stdErr.String())
- return fmt.Errorf("%s", stdErr.String())
- }
- if err != nil {
- slog.Error("tfplugingen-openapi generate", "err", err, "stdout", stdOut.String(), "stderr", stdErr.String())
- return err
- }
- }
-
- // slog.Info("Creating terraform svc resource files folder")
- tgtFolder := path.Join(rootDir, "generated", "internal", "services", scName, resource, "resources_gen")
- err = os.MkdirAll(tgtFolder, 0755)
if err != nil {
+ slog.Error("tfplugingen-openapi generate", "err", err, "stdout", stdOut.String(), "stderr", stdErr.String())
+ return err
+ }
+ }
+ if stdOut.Len() > 0 {
+ slog.Warn(" command output", "stdout", stdOut.String(), "stderr", stdErr.String())
+ }
+
+ // slog.Info("Creating terraform svc resource files folder")
+ tgtFolder := path.Join(rootDir, "generated", "internal", "services", scName, resource, "resources_gen")
+ err = os.MkdirAll(tgtFolder, 0755)
+ if err != nil {
+ return err
+ }
+
+ // slog.Info("Generating terraform svc resource files")
+
+ // noqa:gosec
+ cmd2 := exec.Command(
+ "tfplugingen-framework",
+ "generate",
+ "resources",
+ "--input",
+ specJsonFile,
+ "--output",
+ tgtFolder,
+ "--package",
+ scName,
+ )
+
+ cmd2.Stdout = &stdOut
+ cmd2.Stderr = &stdErr
+ if err = cmd2.Start(); err != nil {
+ slog.Error("tfplugingen-framework generate resources", "error", err)
+ return err
+ }
+
+ if err = cmd2.Wait(); err != nil {
+ var exitErr *exec.ExitError
+ if errors.As(err, &exitErr) {
+ slog.Error("tfplugingen-framework generate resources", "code", exitErr.ExitCode(), "error", err, "stdout", stdOut.String(), "stderr", stdErr.String())
+ return fmt.Errorf("%s", stdErr.String())
+ }
+ if err != nil {
+ slog.Error("tfplugingen-framework generate resources", "err", err, "stdout", stdOut.String(), "stderr", stdErr.String())
+ return err
+ }
+ }
+
+ // slog.Info("Creating terraform svc datasource files folder")
+ tgtFolder = path.Join(rootDir, "generated", "internal", "services", scName, resource, "datasources_gen")
+ err = os.MkdirAll(tgtFolder, 0755)
+ if err != nil {
+ return err
+ }
+
+ // slog.Info("Generating terraform svc resource files")
+
+ // noqa:gosec
+ cmd3 := exec.Command(
+ "tfplugingen-framework",
+ "generate",
+ "data-sources",
+ "--input",
+ specJsonFile,
+ "--output",
+ tgtFolder,
+ "--package",
+ scName,
+ )
+ var stdOut3, stdErr3 bytes.Buffer
+ cmd3.Stdout = &stdOut3
+ cmd3.Stderr = &stdErr3
+
+ if err = cmd3.Start(); err != nil {
+ slog.Error("tfplugingen-framework generate data-sources", "error", err)
+ return err
+ }
+
+ if err = cmd3.Wait(); err != nil {
+ var exitErr *exec.ExitError
+ if errors.As(err, &exitErr) {
+ slog.Error("tfplugingen-framework generate data-sources", "code", exitErr.ExitCode(), "error", err, "stdout", stdOut.String(), "stderr", stdErr.String())
+ return fmt.Errorf("%s", stdErr.String())
+ }
+ if err != nil {
+ slog.Error("tfplugingen-framework generate data-sources", "err", err, "stdout", stdOut.String(), "stderr", stdErr.String())
return err
}
-
- // slog.Info("Generating terraform svc resource files")
-
- // noqa:gosec
- cmd2 := exec.Command(
- "tfplugingen-framework",
- "generate",
- "resources",
- "--input",
- specJsonFile,
- "--output",
- tgtFolder,
- "--package",
- scName,
- )
-
- cmd2.Stdout = &stdOut
- cmd2.Stderr = &stdErr
- if err = cmd2.Start(); err != nil {
- slog.Error("tfplugingen-framework generate resources", "error", err)
- return err
- }
-
- if err = cmd2.Wait(); err != nil {
- var exitErr *exec.ExitError
- if errors.As(err, &exitErr) {
- slog.Error("tfplugingen-framework generate resources", "code", exitErr.ExitCode(), "error", err, "stdout", stdOut.String(), "stderr", stdErr.String())
- return fmt.Errorf("%s", stdErr.String())
- }
- if err != nil {
- slog.Error("tfplugingen-framework generate resources", "err", err, "stdout", stdOut.String(), "stderr", stdErr.String())
- return err
- }
- }
-
- // slog.Info("Creating terraform svc datasource files folder")
- tgtFolder = path.Join(rootDir, "generated", "internal", "services", scName, resource, "datasources_gen")
- err = os.MkdirAll(tgtFolder, 0755)
- if err != nil {
- return err
- }
-
- // slog.Info("Generating terraform svc resource files")
-
- // noqa:gosec
- cmd3 := exec.Command(
- "tfplugingen-framework",
- "generate",
- "data-sources",
- "--input",
- specJsonFile,
- "--output",
- tgtFolder,
- "--package",
- scName,
- )
- var stdOut3, stdErr3 bytes.Buffer
- cmd3.Stdout = &stdOut3
- cmd3.Stderr = &stdErr3
-
- if err = cmd3.Start(); err != nil {
- slog.Error("tfplugingen-framework generate data-sources", "error", err)
- return err
- }
-
- if err = cmd3.Wait(); err != nil {
- var exitErr *exec.ExitError
- if errors.As(err, &exitErr) {
- slog.Error("tfplugingen-framework generate data-sources", "code", exitErr.ExitCode(), "error", err, "stdout", stdOut.String(), "stderr", stdErr.String())
- return fmt.Errorf("%s", stdErr.String())
- }
- if err != nil {
- slog.Error("tfplugingen-framework generate data-sources", "err", err, "stdout", stdOut.String(), "stderr", stdErr.String())
- return err
- }
- }
}
- //}
}
}
}
@@ -593,7 +603,7 @@ func checkCommands(commands []string) error {
if !commandExists(commandName) {
return fmt.Errorf("missing command %s", commandName)
}
- slog.Info("found", "command", commandName)
+ slog.Info(" found", "command", commandName)
}
return nil
}
@@ -726,32 +736,41 @@ func handleVersion(service string, match []string) (*string, *version, error) {
return &resStr, &version{verString: verString, major: majVer, minor: minVer}, nil
}
-func createRepoDir(root, repoUrl, repoName string) (string, error) {
- oasTmpDir, err := os.MkdirTemp(root, "oas-tmp")
- if err != nil {
- return "", err
- }
- targetDir := path.Join(oasTmpDir, repoName)
- _, err = git.Clone(
- clone.Repository(repoUrl),
- clone.Directory(targetDir),
- )
- if err != nil {
- return "", err
+func createRepoDir(root, repoUrl, repoName string, skipClone bool) (string, error) {
+ targetDir := path.Join(root, repoName)
+ if !skipClone {
+ if fileExists(targetDir) {
+ slog.Warn("target dir exists - skipping", "targetDir", targetDir)
+ return targetDir, nil
+ }
+ _, err := git.Clone(
+ clone.Repository(repoUrl),
+ clone.Directory(targetDir),
+ )
+ if err != nil {
+ return "", err
+ }
}
return targetDir, nil
}
-func createGeneratorDir(root, repoUrl, repoName string) (string, error) {
- targetDir := path.Join(root, repoName)
- _, err := git.Clone(
- clone.Repository(repoUrl),
- clone.Directory(targetDir),
- )
- if err != nil {
- return "", err
+func createGeneratorDir(repoUrl, targetDir string, skipClone bool) error {
+ if !skipClone {
+ if fileExists(targetDir) {
+ remErr := os.RemoveAll(targetDir)
+ if remErr != nil {
+ return remErr
+ }
+ }
+ _, cloneErr := git.Clone(
+ clone.Repository(repoUrl),
+ clone.Directory(targetDir),
+ )
+ if cloneErr != nil {
+ return cloneErr
+ }
}
- return targetDir, nil
+ return nil
}
func getRoot() (*string, error) {
diff --git a/cmd/cmd/build/templates/resource_scaffold.gotmpl b/cmd/cmd/build/templates/resource_scaffold.gotmpl
index cdd38853..5c96fdae 100644
--- a/cmd/cmd/build/templates/resource_scaffold.gotmpl
+++ b/cmd/cmd/build/templates/resource_scaffold.gotmpl
@@ -3,6 +3,7 @@ package {{.PackageName}}
import (
"context"
+ "github.com/hashicorp/terraform-plugin-framework/resource/identityschema"
"github.com/hashicorp/terraform-plugin-framework/resource"
"github.com/hashicorp/terraform-plugin-framework/types"
@@ -17,6 +18,7 @@ var (
_ resource.ResourceWithConfigure = &{{.NameCamel}}Resource{}
_ resource.ResourceWithImportState = &{{.NameCamel}}Resource{}
_ resource.ResourceWithModifyPlan = &{{.NameCamel}}Resource{}
+ _ resource.ResourceWithIdentity = &{{.NameCamel}}Resource{}
)
func New{{.NamePascal}}Resource() resource.Resource {
@@ -28,6 +30,13 @@ type {{.NameCamel}}Resource struct{
providerData core.ProviderData
}
+type InstanceResourceIdentityModel struct {
+ ProjectID types.String `tfsdk:"project_id"`
+ Region types.String `tfsdk:"region"`
+ InstanceID types.String `tfsdk:"instance_id"`
+ // TODO: implement further needed parts
+}
+
func (r *{{.NameCamel}}Resource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_{{.PackageName}}_{{.NameSnake}}"
}
@@ -36,6 +45,23 @@ func (r *{{.NameCamel}}Resource) Schema(ctx context.Context, req resource.Schema
resp.Schema = {{.PackageName}}Gen.{{.NamePascal}}ResourceSchema(ctx)
}
+func (r *{{.NameCamel}}Resource) IdentitySchema(_ context.Context, _ resource.IdentitySchemaRequest, resp *resource.IdentitySchemaResponse) {
+ resp.IdentitySchema = identityschema.Schema{
+ Attributes: map[string]identityschema.Attribute{
+ "project_id": identityschema.StringAttribute{
+ RequiredForImport: true, // must be set during import by the practitioner
+ },
+ "region": identityschema.StringAttribute{
+ RequiredForImport: true, // can be defaulted by the provider configuration
+ },
+ "instance_id": identityschema.StringAttribute{
+ RequiredForImport: true, // must be set during import by the practitioner
+ },
+ },
+ }
+}
+
+
// Configure adds the provider configured client to the resource.
func (r *{{.NameCamel}}Resource) Configure(
ctx context.Context,
@@ -81,6 +107,19 @@ func (r *{{.NameCamel}}Resource) Create(ctx context.Context, req resource.Create
// Example data value setting
data.{{.NameCamel | ucfirst}}Id = types.StringValue("id-from-response")
+ // TODO: Set data returned by API in identity
+ identity := InstanceResourceIdentityModel{
+ ProjectID: types.StringValue(projectId),
+ Region: types.StringValue(region),
+ InstanceID: types.StringValue(instanceId),
+ }
+ resp.Diagnostics.Append(resp.Identity.Set(ctx, identity)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ // TODO: implement wait handler if needed
+
// Save data into Terraform state
resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
@@ -93,6 +132,13 @@ func (r *{{.NameCamel}}Resource) Read(ctx context.Context, req resource.ReadRequ
// Read Terraform prior state data into the model
resp.Diagnostics.Append(req.State.Get(ctx, &data)...)
+ // Read identity data
+ var identityData InstanceResourceIdentityModel
+ resp.Diagnostics.Append(req.Identity.Get(ctx, &identityData)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
if resp.Diagnostics.HasError() {
return
}
diff --git a/cmd/cmd/buildCmd.go b/cmd/cmd/buildCmd.go
index 683c3536..0a239215 100644
--- a/cmd/cmd/buildCmd.go
+++ b/cmd/cmd/buildCmd.go
@@ -5,13 +5,29 @@ import (
"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/cmd/cmd/build"
)
-func NewBuildCmd() *cobra.Command {
- return &cobra.Command{
- Use: "build",
- Short: "Build the necessary boilerplate",
- Long: `...`,
- RunE: func(cmd *cobra.Command, args []string) error {
- return build.Build()
- },
- }
+var (
+ skipCleanup bool
+ skipClone bool
+)
+
+var buildCmd = &cobra.Command{
+ Use: "build",
+ Short: "Build the necessary boilerplate",
+ Long: `...`,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ b := build.Builder{
+ SkipClone: skipClone,
+ SkipCleanup: skipCleanup,
+ }
+ return b.Build()
+ },
+}
+
+func NewBuildCmd() *cobra.Command {
+ return buildCmd
+}
+
+func init() { // nolint: gochecknoinits
+ buildCmd.Flags().BoolVarP(&skipCleanup, "skip-cleanup", "c", false, "Skip cleanup steps")
+ buildCmd.Flags().BoolVarP(&skipClone, "skip-clone", "g", false, "Skip cloning from git")
}
diff --git a/cmd/main.go b/cmd/main.go
index 7704aa1d..52753a18 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -2,17 +2,27 @@ package main
import (
"log"
+ "log/slog"
"os"
+ "github.com/MatusOllah/slogcolor"
+ cc "github.com/ivanpirog/coloredcobra"
"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/cmd/cmd"
)
func main() {
- rootCmd := cmd.NewRootCmd()
- //rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)")
- //rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "author name for copyright attribution")
- //rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "name of license for the project")
+ slog.SetDefault(slog.New(slogcolor.NewHandler(os.Stderr, slogcolor.DefaultOptions)))
+ rootCmd := cmd.NewRootCmd()
+
+ cc.Init(&cc.Config{
+ RootCmd: rootCmd,
+ Headings: cc.HiCyan + cc.Bold + cc.Underline,
+ Commands: cc.HiYellow + cc.Bold,
+ Example: cc.Italic,
+ ExecName: cc.Bold,
+ Flags: cc.Bold,
+ })
rootCmd.SetOut(os.Stdout)
rootCmd.AddCommand(
diff --git a/docs/data-sources/postgresflexalpha_instance.md b/docs/data-sources/postgresflexalpha_instance.md
index b254eb7d..54d887ea 100644
--- a/docs/data-sources/postgresflexalpha_instance.md
+++ b/docs/data-sources/postgresflexalpha_instance.md
@@ -30,11 +30,12 @@ data "stackitprivatepreview_postgresflexalpha_instance" "example" {
### Read-Only
+- `acl` (List of String) List of IPV4 cidr.
- `backup_schedule` (String) The schedule for on what time and how often the database backup will be created. The schedule is written as a cron schedule.
- `connection_info` (Attributes) The DNS name and port in the instance overview (see [below for nested schema](#nestedatt--connection_info))
- `encryption` (Attributes) The configuration for instance's volume and backup storage encryption.
-⚠️ **Note:** This feature is in private preview. Supplying this object is only permitted for enabled accounts. If your account does not have access, the request will be rejected. (see [below for nested schema](#nestedatt--encryption))
+⚠ **Note:** This feature is in private preview. Supplying this object is only permitted for enabled accounts. If your account does not have access, the request will be rejected. (see [below for nested schema](#nestedatt--encryption))
- `flavor_id` (String) The id of the instance flavor.
- `id` (String) The ID of the instance.
- `is_deletable` (Boolean) Whether the instance can be deleted or not.
diff --git a/docs/data-sources/sqlserverflexalpha_instance.md b/docs/data-sources/sqlserverflexalpha_instance.md
index 9627892a..134eb567 100644
--- a/docs/data-sources/sqlserverflexalpha_instance.md
+++ b/docs/data-sources/sqlserverflexalpha_instance.md
@@ -3,12 +3,12 @@
page_title: "stackitprivatepreview_sqlserverflexalpha_instance Data Source - stackitprivatepreview"
subcategory: ""
description: |-
- SQLServer Flex ALPHA instance resource schema. Must have a region specified in the provider configuration.
+
---
# stackitprivatepreview_sqlserverflexalpha_instance (Data Source)
-SQLServer Flex ALPHA instance resource schema. Must have a `region` specified in the provider configuration.
+
## Example Usage
@@ -24,61 +24,48 @@ data "stackitprivatepreview_sqlserverflexalpha_instance" "example" {
### Required
-- `instance_id` (String) ID of the SQLServer Flex instance.
-- `project_id` (String) STACKIT project ID to which the instance is associated.
-
-### Optional
-
-- `region` (String) The resource region. If not defined, the provider region is used.
+- `instance_id` (String) The ID of the instance.
+- `project_id` (String) The STACKIT project ID.
+- `region` (String) The region which should be addressed
### Read-Only
-- `backup_schedule` (String) The backup schedule. Should follow the cron scheduling system format (e.g. "0 0 * * *")
-- `edition` (String)
-- `encryption` (Attributes) The encryption block. (see [below for nested schema](#nestedatt--encryption))
-- `flavor` (Attributes) (see [below for nested schema](#nestedatt--flavor))
-- `id` (String) Terraform's internal resource ID. It is structured as "`project_id`,`region`,`instance_id`".
-- `is_deletable` (Boolean)
-- `name` (String) Instance name.
-- `network` (Attributes) The network block. (see [below for nested schema](#nestedatt--network))
-- `replicas` (Number)
-- `retention_days` (Number)
+- `backup_schedule` (String) The schedule for on what time and how often the database backup will be created. The schedule is written as a cron schedule.
+- `edition` (String) Edition of the MSSQL server instance
+- `encryption` (Attributes) this defines which key to use for storage encryption (see [below for nested schema](#nestedatt--encryption))
+- `flavor_id` (String) The id of the instance flavor.
+- `id` (String) The ID of the instance.
+- `is_deletable` (Boolean) Whether the instance can be deleted or not.
+- `name` (String) The name of the instance.
+- `network` (Attributes) The access configuration of the instance (see [below for nested schema](#nestedatt--network))
+- `replicas` (Number) How many replicas the instance should have.
+- `retention_days` (Number) The days for how long the backup files should be stored before cleaned up. 30 to 365
- `status` (String)
-- `storage` (Attributes) (see [below for nested schema](#nestedatt--storage))
-- `version` (String)
+- `storage` (Attributes) The object containing information about the storage size and class. (see [below for nested schema](#nestedatt--storage))
+- `version` (String) The sqlserver version used for the instance.
### Nested Schema for `encryption`
Read-Only:
-- `key_id` (String) STACKIT KMS - Key ID of the encryption key to use.
-- `key_version` (String) STACKIT KMS - Key version to use in the encryption key.
-- `keyring_id` (String) STACKIT KMS - KeyRing ID of the encryption key to use.
+- `kek_key_id` (String) The key identifier
+- `kek_key_ring_id` (String) The keyring identifier
+- `kek_key_version` (String) The key version
- `service_account` (String)
-
-### Nested Schema for `flavor`
-
-Read-Only:
-
-- `cpu` (Number)
-- `description` (String)
-- `id` (String)
-- `node_type` (String)
-- `ram` (Number)
-
-
### Nested Schema for `network`
Read-Only:
-- `access_scope` (String) The access scope of the instance. (e.g. SNA)
-- `acl` (List of String) The Access Control List (ACL) for the SQLServer Flex instance.
-- `instance_address` (String) The returned instance IP address of the SQLServer Flex instance.
-- `router_address` (String) The returned router IP address of the SQLServer Flex instance.
+- `access_scope` (String) The network access scope of the instance
+
+⚠️ **Note:** This feature is in private preview. Supplying this object is only permitted for enabled accounts. If your account does not have access, the request will be rejected.
+- `acl` (List of String) List of IPV4 cidr.
+- `instance_address` (String)
+- `router_address` (String)
@@ -86,5 +73,5 @@ Read-Only:
Read-Only:
-- `class` (String)
-- `size` (Number)
+- `class` (String) The storage class for the storage.
+- `size` (Number) The storage size in Gigabytes.
diff --git a/docs/resources/postgresflexalpha_instance.md b/docs/resources/postgresflexalpha_instance.md
index 3dc7ef51..9ec697ba 100644
--- a/docs/resources/postgresflexalpha_instance.md
+++ b/docs/resources/postgresflexalpha_instance.md
@@ -55,13 +55,14 @@ import {
- `encryption` (Attributes) The configuration for instance's volume and backup storage encryption.
-⚠️ **Note:** This feature is in private preview. Supplying this object is only permitted for enabled accounts. If your account does not have access, the request will be rejected. (see [below for nested schema](#nestedatt--encryption))
+⚠ **Note:** This feature is in private preview. Supplying this object is only permitted for enabled accounts. If your account does not have access, the request will be rejected. (see [below for nested schema](#nestedatt--encryption))
- `instance_id` (String) The ID of the instance.
- `project_id` (String) The STACKIT project ID.
- `region` (String) The region which should be addressed
### Read-Only
+- `acl` (List of String) List of IPV4 cidr.
- `connection_info` (Attributes) The DNS name and port in the instance overview (see [below for nested schema](#nestedatt--connection_info))
- `id` (String) The ID of the instance.
- `is_deletable` (Boolean) Whether the instance can be deleted or not.
@@ -77,6 +78,9 @@ Required:
Optional:
- `access_scope` (String) The access scope of the instance. It defines if the instance is public or airgapped.
+
+Read-Only:
+
- `instance_address` (String)
- `router_address` (String)
diff --git a/docs/resources/sqlserverflexalpha_instance.md b/docs/resources/sqlserverflexalpha_instance.md
index d5926387..95e33673 100644
--- a/docs/resources/sqlserverflexalpha_instance.md
+++ b/docs/resources/sqlserverflexalpha_instance.md
@@ -3,12 +3,12 @@
page_title: "stackitprivatepreview_sqlserverflexalpha_instance Resource - stackitprivatepreview"
subcategory: ""
description: |-
- SQLServer Flex ALPHA instance resource schema. Must have a region specified in the provider configuration.
+
---
# stackitprivatepreview_sqlserverflexalpha_instance (Resource)
-SQLServer Flex ALPHA instance resource schema. Must have a `region` specified in the provider configuration.
+
## Example Usage
@@ -41,41 +41,55 @@ import {
### Required
-- `flavor_id` (String)
-- `name` (String) Instance name.
-- `network` (Attributes) The network block. (see [below for nested schema](#nestedatt--network))
-- `project_id` (String) STACKIT project ID to which the instance is associated.
+- `backup_schedule` (String) The schedule for on what time and how often the database backup will be created. The schedule is written as a cron schedule.
+- `flavor_id` (String) The id of the instance flavor.
+- `name` (String) The name of the instance.
+- `network` (Attributes) the network configuration of the instance. (see [below for nested schema](#nestedatt--network))
+- `retention_days` (Number) The days for how long the backup files should be stored before cleaned up. 30 to 365
+- `storage` (Attributes) The object containing information about the storage size and class. (see [below for nested schema](#nestedatt--storage))
+- `version` (String) The sqlserver version used for the instance.
### Optional
-- `backup_schedule` (String) The backup schedule. Should follow the cron scheduling system format (e.g. "0 0 * * *")
-- `encryption` (Attributes) The encryption block. (see [below for nested schema](#nestedatt--encryption))
-- `is_deletable` (Boolean)
-- `region` (String) The resource region. If not defined, the provider region is used.
-- `retention_days` (Number)
-- `status` (String)
-- `storage` (Attributes) (see [below for nested schema](#nestedatt--storage))
-- `version` (String)
+- `encryption` (Attributes) this defines which key to use for storage encryption (see [below for nested schema](#nestedatt--encryption))
+- `instance_id` (String) The ID of the instance.
+- `project_id` (String) The STACKIT project ID.
+- `region` (String) The region which should be addressed
### Read-Only
-- `edition` (String)
-- `id` (String) Terraform's internal resource ID. It is structured as "`project_id`,`region`,`instance_id`".
-- `instance_id` (String) ID of the SQLServer Flex instance.
-- `replicas` (Number)
+- `edition` (String) Edition of the MSSQL server instance
+- `id` (String) The ID of the instance.
+- `is_deletable` (Boolean) Whether the instance can be deleted or not.
+- `replicas` (Number) How many replicas the instance should have.
+- `status` (String)
### Nested Schema for `network`
Required:
-- `access_scope` (String) The access scope of the instance. (SNA | PUBLIC)
-- `acl` (List of String) The Access Control List (ACL) for the SQLServer Flex instance.
+- `acl` (List of String) List of IPV4 cidr.
+
+Optional:
+
+- `access_scope` (String) The network access scope of the instance
+
+⚠️ **Note:** This feature is in private preview. Supplying this object is only permitted for enabled accounts. If your account does not have access, the request will be rejected.
Read-Only:
-- `instance_address` (String) The returned instance IP address of the SQLServer Flex instance.
-- `router_address` (String) The returned router IP address of the SQLServer Flex instance.
+- `instance_address` (String)
+- `router_address` (String)
+
+
+
+### Nested Schema for `storage`
+
+Required:
+
+- `class` (String) The storage class for the storage.
+- `size` (Number) The storage size in Gigabytes.
@@ -83,16 +97,7 @@ Read-Only:
Required:
-- `key_id` (String) STACKIT KMS - Key ID of the encryption key to use.
-- `key_version` (String) STACKIT KMS - Key version to use in the encryption key.
-- `keyring_id` (String) STACKIT KMS - KeyRing ID of the encryption key to use.
+- `kek_key_id` (String) The key identifier
+- `kek_key_ring_id` (String) The keyring identifier
+- `kek_key_version` (String) The key version
- `service_account` (String)
-
-
-
-### Nested Schema for `storage`
-
-Optional:
-
-- `class` (String)
-- `size` (Number)
diff --git a/go.mod b/go.mod
index d827c584..0815f3a3 100644
--- a/go.mod
+++ b/go.mod
@@ -3,6 +3,7 @@ module tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stac
go 1.25.6
require (
+ github.com/MatusOllah/slogcolor v1.7.0
github.com/google/go-cmp v0.7.0
github.com/google/uuid v1.6.0
github.com/hashicorp/terraform-plugin-framework v1.17.0
@@ -11,6 +12,7 @@ require (
github.com/hashicorp/terraform-plugin-log v0.10.0
github.com/hashicorp/terraform-plugin-testing v1.14.0
github.com/iancoleman/strcase v0.3.0
+ github.com/ivanpirog/coloredcobra v1.0.1
github.com/ldez/go-git-cmd-wrapper/v2 v2.9.1
github.com/spf13/cobra v1.10.2
github.com/stackitcloud/stackit-sdk-go/core v0.21.0
diff --git a/go.sum b/go.sum
index cd787442..a7b3189b 100644
--- a/go.sum
+++ b/go.sum
@@ -1,5 +1,7 @@
dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
+github.com/MatusOllah/slogcolor v1.7.0 h1:Nrd7yBPv2EBEEBEwl7WEPRmMd1ozZzw2jm8SLMYDbKs=
+github.com/MatusOllah/slogcolor v1.7.0/go.mod h1:5y1H50XuQIBvuYTJlmokWi+4FuPiJN5L7Z0jM4K4bYA=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw=
@@ -13,6 +15,7 @@ github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/
github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c=
github.com/cloudflare/circl v1.6.2 h1:hL7VBpHHKzrV5WTfHCaBsgx/HGbBYlgrwvNXEVDYYsQ=
github.com/cloudflare/circl v1.6.2/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4=
+github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s=
@@ -106,8 +109,11 @@ github.com/hashicorp/yamux v0.1.2 h1:XtB8kyFOyHXYVFnwT5C3+Bdo8gArse7j2AQ0DA0Uey8
github.com/hashicorp/yamux v0.1.2/go.mod h1:C+zze2n6e/7wshOZep2A70/aQU6QBRWJO/G6FT1wIns=
github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI=
github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/ivanpirog/coloredcobra v1.0.1 h1:aURSdEmlR90/tSiWS0dMjdwOvCVUeYLfltLfbgNxrN4=
+github.com/ivanpirog/coloredcobra v1.0.1/go.mod h1:iho4nEKcnwZFiniGSdcgdvRgZNjxm+h20acv8vqmN6Q=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jhump/protoreflect v1.17.0 h1:qOEr613fac2lOuTgWN4tPAtLL7fUSbuJL5X5XumQh94=
@@ -155,8 +161,10 @@ github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8=
github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY=
+github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
@@ -268,5 +276,6 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/sample/sqlserver/sqlserver.tf b/sample/sqlserver/sqlserver.tf
index 365a2005..847678bb 100644
--- a/sample/sqlserver/sqlserver.tf
+++ b/sample/sqlserver/sqlserver.tf
@@ -34,10 +34,10 @@ resource "stackitprivatepreview_sqlserverflexalpha_instance" "msh-sna-001" {
#keyring_id = stackit_kms_keyring.keyring.keyring_id
#key_version = 1
# key with scope public
- key_id = "fe039bcf-8d7b-431a-801d-9e81371a6b7b"
+ kek_key_id = "fe039bcf-8d7b-431a-801d-9e81371a6b7b"
# key_id = var.key_id
- keyring_id = var.keyring_id
- key_version = var.key_version
+ kek_key_ring_id = var.keyring_id
+ kek_key_version = var.key_version
service_account = var.sa_email
}
network = {
diff --git a/service_specs/postgres-flex/alpha/database_config.yml b/service_specs/postgres-flex/alpha/database_config.yml
index 8211b1a7..7af8d47b 100644
--- a/service_specs/postgres-flex/alpha/database_config.yml
+++ b/service_specs/postgres-flex/alpha/database_config.yml
@@ -18,6 +18,11 @@ resources:
method: DELETE
data_sources:
+ database:
+ read:
+ path: /v3alpha1/projects/{projectId}/regions/{region}/instances/{instanceId}/databases/{databaseId}
+ method: GET
+
databases:
read:
path: /v3alpha1/projects/{projectId}/regions/{region}/instances/{instanceId}/databases
diff --git a/service_specs/sqlserverflex/alpha/instance_config.yml b/service_specs/sqlserverflex/alpha/instance_config.yml
index bef39890..c7ae4c2a 100644
--- a/service_specs/sqlserverflex/alpha/instance_config.yml
+++ b/service_specs/sqlserverflex/alpha/instance_config.yml
@@ -1,4 +1,3 @@
-
provider:
name: stackitprivatepreview
@@ -18,6 +17,11 @@ resources:
method: DELETE
data_sources:
+ instances:
+ read:
+ path: /v3alpha1/projects/{projectId}/regions/{region}/instances
+ method: GET
+
instance:
read:
path: /v3alpha1/projects/{projectId}/regions/{region}/instances/{instanceId}
diff --git a/stackit/internal/services/sqlserverflexalpha/instance/datasource.go b/stackit/internal/services/sqlserverflexalpha/instance/datasource.go
index 5b0fb0fd..9765d99a 100644
--- a/stackit/internal/services/sqlserverflexalpha/instance/datasource.go
+++ b/stackit/internal/services/sqlserverflexalpha/instance/datasource.go
@@ -7,21 +7,17 @@ import (
"fmt"
"net/http"
- "github.com/hashicorp/terraform-plugin-framework/types"
"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/conversion"
+ sqlserverflexalpha "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/instance/datasources_gen"
+ sqlserverflexalpha2 "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/instance/resources_gen"
sqlserverflexUtils "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/utils"
sqlserverflex "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexalpha"
"github.com/hashicorp/terraform-plugin-framework/datasource"
- "github.com/hashicorp/terraform-plugin-framework/schema/validator"
- "github.com/hashicorp/terraform-plugin-framework/types/basetypes"
"github.com/hashicorp/terraform-plugin-log/tflog"
"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core"
"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/utils"
- "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/validate"
-
- "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
)
// Ensure the implementation satisfies the expected interfaces.
@@ -62,165 +58,167 @@ func (r *instanceDataSource) Configure(ctx context.Context, req datasource.Confi
}
// Schema defines the schema for the data source.
-func (r *instanceDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
- descriptions := map[string]string{
- "main": "SQLServer Flex ALPHA instance resource schema. Must have a `region` specified in the provider configuration.",
- "id": "Terraform's internal resource ID. It is structured as \"`project_id`,`region`,`instance_id`\".",
- "instance_id": "ID of the SQLServer Flex instance.",
- "project_id": "STACKIT project ID to which the instance is associated.",
- "name": "Instance name.",
- "access_scope": "The access scope of the instance. (e.g. SNA)",
- "acl": "The Access Control List (ACL) for the SQLServer Flex instance.",
- "backup_schedule": `The backup schedule. Should follow the cron scheduling system format (e.g. "0 0 * * *")`,
- "region": "The resource region. If not defined, the provider region is used.",
- "encryption": "The encryption block.",
- "network": "The network block.",
- "keyring_id": "STACKIT KMS - KeyRing ID of the encryption key to use.",
- "key_id": "STACKIT KMS - Key ID of the encryption key to use.",
- "key_version": "STACKIT KMS - Key version to use in the encryption key.",
- "service:account": "STACKIT KMS - service account to use in the encryption key.",
- "instance_address": "The returned instance IP address of the SQLServer Flex instance.",
- "router_address": "The returned router IP address of the SQLServer Flex instance.",
- }
+func (r *instanceDataSource) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
+ //descriptions := map[string]string{
+ // "main": "SQLServer Flex ALPHA instance resource schema. Must have a `region` specified in the provider configuration.",
+ // "id": "Terraform's internal resource ID. It is structured as \"`project_id`,`region`,`instance_id`\".",
+ // "instance_id": "ID of the SQLServer Flex instance.",
+ // "project_id": "STACKIT project ID to which the instance is associated.",
+ // "name": "Instance name.",
+ // "access_scope": "The access scope of the instance. (e.g. SNA)",
+ // "acl": "The Access Control List (ACL) for the SQLServer Flex instance.",
+ // "backup_schedule": `The backup schedule. Should follow the cron scheduling system format (e.g. "0 0 * * *")`,
+ // "region": "The resource region. If not defined, the provider region is used.",
+ // "encryption": "The encryption block.",
+ // "network": "The network block.",
+ // "keyring_id": "STACKIT KMS - KeyRing ID of the encryption key to use.",
+ // "key_id": "STACKIT KMS - Key ID of the encryption key to use.",
+ // "key_version": "STACKIT KMS - Key version to use in the encryption key.",
+ // "service_account": "STACKIT KMS - service account to use in the encryption key.",
+ // "instance_address": "The returned instance IP address of the SQLServer Flex instance.",
+ // "router_address": "The returned router IP address of the SQLServer Flex instance.",
+ //}
- resp.Schema = schema.Schema{
- Description: descriptions["main"],
- Attributes: map[string]schema.Attribute{
- "id": schema.StringAttribute{
- Description: descriptions["id"],
- Computed: true,
- },
- "instance_id": schema.StringAttribute{
- Description: descriptions["instance_id"],
- Required: true,
- Validators: []validator.String{
- validate.UUID(),
- validate.NoSeparator(),
- },
- },
- "project_id": schema.StringAttribute{
- Description: descriptions["project_id"],
- Required: true,
- Validators: []validator.String{
- validate.UUID(),
- validate.NoSeparator(),
- },
- },
- "name": schema.StringAttribute{
- Description: descriptions["name"],
- Computed: true,
- },
- "backup_schedule": schema.StringAttribute{
- Description: descriptions["backup_schedule"],
- Computed: true,
- },
- "is_deletable": schema.BoolAttribute{
- Description: descriptions["is_deletable"],
- Computed: true,
- },
- "flavor": schema.SingleNestedAttribute{
- Computed: true,
- Attributes: map[string]schema.Attribute{
- "id": schema.StringAttribute{
- Computed: true,
- },
- "description": schema.StringAttribute{
- Computed: true,
- },
- "cpu": schema.Int64Attribute{
- Computed: true,
- },
- "ram": schema.Int64Attribute{
- Computed: true,
- },
- "node_type": schema.StringAttribute{
- Computed: true,
- },
- },
- },
- "replicas": schema.Int64Attribute{
- Computed: true,
- },
- "storage": schema.SingleNestedAttribute{
- Computed: true,
- Attributes: map[string]schema.Attribute{
- "class": schema.StringAttribute{
- Computed: true,
- },
- "size": schema.Int64Attribute{
- Computed: true,
- },
- },
- },
- "version": schema.StringAttribute{
- Computed: true,
- },
- "status": schema.StringAttribute{
- Computed: true,
- },
- "edition": schema.StringAttribute{
- Computed: true,
- },
- "retention_days": schema.Int64Attribute{
- Computed: true,
- },
- "region": schema.StringAttribute{
- // the region cannot be found, so it has to be passed
- Optional: true,
- Description: descriptions["region"],
- },
- "encryption": schema.SingleNestedAttribute{
- Computed: true,
- Attributes: map[string]schema.Attribute{
- "key_id": schema.StringAttribute{
- Description: descriptions["key_id"],
- Computed: true,
- },
- "key_version": schema.StringAttribute{
- Description: descriptions["key_version"],
- Computed: true,
- },
- "keyring_id": schema.StringAttribute{
- Description: descriptions["keyring_id"],
- Computed: true,
- },
- "service_account": schema.StringAttribute{
- Description: descriptions["service_account"],
- Computed: true,
- },
- },
- Description: descriptions["encryption"],
- },
- "network": schema.SingleNestedAttribute{
- Computed: true,
- Attributes: map[string]schema.Attribute{
- "access_scope": schema.StringAttribute{
- Description: descriptions["access_scope"],
- Computed: true,
- },
- "instance_address": schema.StringAttribute{
- Description: descriptions["instance_address"],
- Computed: true,
- },
- "router_address": schema.StringAttribute{
- Description: descriptions["router_address"],
- Computed: true,
- },
- "acl": schema.ListAttribute{
- Description: descriptions["acl"],
- ElementType: types.StringType,
- Computed: true,
- },
- },
- Description: descriptions["network"],
- },
- },
- }
+ resp.Schema = sqlserverflexalpha.InstanceDataSourceSchema(ctx)
+
+ //resp.Schema = schema.Schema{
+ // Description: descriptions["main"],
+ // Attributes: map[string]schema.Attribute{
+ // "id": schema.StringAttribute{
+ // Description: descriptions["id"],
+ // Computed: true,
+ // },
+ // "instance_id": schema.StringAttribute{
+ // Description: descriptions["instance_id"],
+ // Required: true,
+ // Validators: []validator.String{
+ // validate.UUID(),
+ // validate.NoSeparator(),
+ // },
+ // },
+ // "project_id": schema.StringAttribute{
+ // Description: descriptions["project_id"],
+ // Required: true,
+ // Validators: []validator.String{
+ // validate.UUID(),
+ // validate.NoSeparator(),
+ // },
+ // },
+ // "name": schema.StringAttribute{
+ // Description: descriptions["name"],
+ // Computed: true,
+ // },
+ // "backup_schedule": schema.StringAttribute{
+ // Description: descriptions["backup_schedule"],
+ // Computed: true,
+ // },
+ // "is_deletable": schema.BoolAttribute{
+ // Description: descriptions["is_deletable"],
+ // Computed: true,
+ // },
+ // "flavor": schema.SingleNestedAttribute{
+ // Computed: true,
+ // Attributes: map[string]schema.Attribute{
+ // "id": schema.StringAttribute{
+ // Computed: true,
+ // },
+ // "description": schema.StringAttribute{
+ // Computed: true,
+ // },
+ // "cpu": schema.Int64Attribute{
+ // Computed: true,
+ // },
+ // "ram": schema.Int64Attribute{
+ // Computed: true,
+ // },
+ // "node_type": schema.StringAttribute{
+ // Computed: true,
+ // },
+ // },
+ // },
+ // "replicas": schema.Int64Attribute{
+ // Computed: true,
+ // },
+ // "storage": schema.SingleNestedAttribute{
+ // Computed: true,
+ // Attributes: map[string]schema.Attribute{
+ // "class": schema.StringAttribute{
+ // Computed: true,
+ // },
+ // "size": schema.Int64Attribute{
+ // Computed: true,
+ // },
+ // },
+ // },
+ // "version": schema.StringAttribute{
+ // Computed: true,
+ // },
+ // "status": schema.StringAttribute{
+ // Computed: true,
+ // },
+ // "edition": schema.StringAttribute{
+ // Computed: true,
+ // },
+ // "retention_days": schema.Int64Attribute{
+ // Computed: true,
+ // },
+ // "region": schema.StringAttribute{
+ // // the region cannot be found, so it has to be passed
+ // Optional: true,
+ // Description: descriptions["region"],
+ // },
+ // "encryption": schema.SingleNestedAttribute{
+ // Computed: true,
+ // Attributes: map[string]schema.Attribute{
+ // "key_id": schema.StringAttribute{
+ // Description: descriptions["key_id"],
+ // Computed: true,
+ // },
+ // "key_version": schema.StringAttribute{
+ // Description: descriptions["key_version"],
+ // Computed: true,
+ // },
+ // "keyring_id": schema.StringAttribute{
+ // Description: descriptions["keyring_id"],
+ // Computed: true,
+ // },
+ // "service_account": schema.StringAttribute{
+ // Description: descriptions["service_account"],
+ // Computed: true,
+ // },
+ // },
+ // Description: descriptions["encryption"],
+ // },
+ // "network": schema.SingleNestedAttribute{
+ // Computed: true,
+ // Attributes: map[string]schema.Attribute{
+ // "access_scope": schema.StringAttribute{
+ // Description: descriptions["access_scope"],
+ // Computed: true,
+ // },
+ // "instance_address": schema.StringAttribute{
+ // Description: descriptions["instance_address"],
+ // Computed: true,
+ // },
+ // "router_address": schema.StringAttribute{
+ // Description: descriptions["router_address"],
+ // Computed: true,
+ // },
+ // "acl": schema.ListAttribute{
+ // Description: descriptions["acl"],
+ // ElementType: types.StringType,
+ // Computed: true,
+ // },
+ // },
+ // Description: descriptions["network"],
+ // },
+ // },
+ //}
}
// Read refreshes the Terraform state with the latest data.
func (r *instanceDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform
- var model Model
+ var model sqlserverflexalpha2.InstanceModel
diags := req.Config.Get(ctx, &model)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
@@ -253,34 +251,35 @@ func (r *instanceDataSource) Read(ctx context.Context, req datasource.ReadReques
ctx = core.LogResponse(ctx)
- var storage = &storageModel{}
- if !model.Storage.IsNull() && !model.Storage.IsUnknown() {
- diags = model.Storage.As(ctx, storage, basetypes.ObjectAsOptions{})
- resp.Diagnostics.Append(diags...)
- if resp.Diagnostics.HasError() {
- return
- }
- }
+ //var storage = &storageModel{}
+ //if !model.Storage.IsNull() && !model.Storage.IsUnknown() {
+ // diags = model.Storage.As(ctx, storage, basetypes.ObjectAsOptions{})
+ // resp.Diagnostics.Append(diags...)
+ // if resp.Diagnostics.HasError() {
+ // return
+ // }
+ //}
+ //
+ //var encryption = &encryptionModel{}
+ //if !model.Encryption.IsNull() && !model.Encryption.IsUnknown() {
+ // diags = model.Encryption.As(ctx, encryption, basetypes.ObjectAsOptions{})
+ // resp.Diagnostics.Append(diags...)
+ // if resp.Diagnostics.HasError() {
+ // return
+ // }
+ //}
+ //
+ //var network = &networkModel{}
+ //if !model.Network.IsNull() && !model.Network.IsUnknown() {
+ // diags = model.Network.As(ctx, network, basetypes.ObjectAsOptions{})
+ // resp.Diagnostics.Append(diags...)
+ // if resp.Diagnostics.HasError() {
+ // return
+ // }
+ //}
- var encryption = &encryptionModel{}
- if !model.Encryption.IsNull() && !model.Encryption.IsUnknown() {
- diags = model.Encryption.As(ctx, encryption, basetypes.ObjectAsOptions{})
- resp.Diagnostics.Append(diags...)
- if resp.Diagnostics.HasError() {
- return
- }
- }
-
- var network = &networkModel{}
- if !model.Network.IsNull() && !model.Network.IsUnknown() {
- diags = model.Network.As(ctx, network, basetypes.ObjectAsOptions{})
- resp.Diagnostics.Append(diags...)
- if resp.Diagnostics.HasError() {
- return
- }
- }
-
- err = mapFields(ctx, instanceResp, &model, storage, encryption, network, region)
+ err = mapResponseToModel(ctx, instanceResp, &model, resp.Diagnostics)
+ //err = mapFields(ctx, instanceResp, &model, storage, encryption, network, region)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Processing API payload: %v", err))
return
diff --git a/stackit/internal/services/sqlserverflexalpha/instance/datasources_gen/instance_data_source_gen.go b/stackit/internal/services/sqlserverflexalpha/instance/datasources_gen/instance_data_source_gen.go
new file mode 100644
index 00000000..dcf7f6dd
--- /dev/null
+++ b/stackit/internal/services/sqlserverflexalpha/instance/datasources_gen/instance_data_source_gen.go
@@ -0,0 +1,1579 @@
+// Code generated by terraform-plugin-framework-generator DO NOT EDIT.
+
+package sqlserverflexalpha
+
+import (
+ "context"
+ "fmt"
+ "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/schema/validator"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-framework/types/basetypes"
+ "github.com/hashicorp/terraform-plugin-go/tftypes"
+ "strings"
+
+ "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+)
+
+func InstanceDataSourceSchema(ctx context.Context) schema.Schema {
+ return schema.Schema{
+ Attributes: map[string]schema.Attribute{
+ "backup_schedule": schema.StringAttribute{
+ Computed: true,
+ Description: "The schedule for on what time and how often the database backup will be created. The schedule is written as a cron schedule.",
+ MarkdownDescription: "The schedule for on what time and how often the database backup will be created. The schedule is written as a cron schedule.",
+ },
+ "edition": schema.StringAttribute{
+ Computed: true,
+ Description: "Edition of the MSSQL server instance",
+ MarkdownDescription: "Edition of the MSSQL server instance",
+ },
+ "encryption": schema.SingleNestedAttribute{
+ Attributes: map[string]schema.Attribute{
+ "kek_key_id": schema.StringAttribute{
+ Computed: true,
+ Description: "The key identifier",
+ MarkdownDescription: "The key identifier",
+ },
+ "kek_key_ring_id": schema.StringAttribute{
+ Computed: true,
+ Description: "The keyring identifier",
+ MarkdownDescription: "The keyring identifier",
+ },
+ "kek_key_version": schema.StringAttribute{
+ Computed: true,
+ Description: "The key version",
+ MarkdownDescription: "The key version",
+ },
+ "service_account": schema.StringAttribute{
+ Computed: true,
+ },
+ },
+ CustomType: EncryptionType{
+ ObjectType: types.ObjectType{
+ AttrTypes: EncryptionValue{}.AttributeTypes(ctx),
+ },
+ },
+ Computed: true,
+ Description: "this defines which key to use for storage encryption",
+ MarkdownDescription: "this defines which key to use for storage encryption",
+ },
+ "flavor_id": schema.StringAttribute{
+ Computed: true,
+ Description: "The id of the instance flavor.",
+ MarkdownDescription: "The id of the instance flavor.",
+ },
+ "id": schema.StringAttribute{
+ Computed: true,
+ Description: "The ID of the instance.",
+ MarkdownDescription: "The ID of the instance.",
+ },
+ "instance_id": schema.StringAttribute{
+ Required: true,
+ Description: "The ID of the instance.",
+ MarkdownDescription: "The ID of the instance.",
+ },
+ "is_deletable": schema.BoolAttribute{
+ Computed: true,
+ Description: "Whether the instance can be deleted or not.",
+ MarkdownDescription: "Whether the instance can be deleted or not.",
+ },
+ "name": schema.StringAttribute{
+ Computed: true,
+ Description: "The name of the instance.",
+ MarkdownDescription: "The name of the instance.",
+ },
+ "network": schema.SingleNestedAttribute{
+ Attributes: map[string]schema.Attribute{
+ "access_scope": schema.StringAttribute{
+ Computed: true,
+ Description: "The network access scope of the instance\n\n⚠️ **Note:** This feature is in private preview. Supplying this object is only permitted for enabled accounts. If your account does not have access, the request will be rejected.\n",
+ MarkdownDescription: "The network access scope of the instance\n\n⚠️ **Note:** This feature is in private preview. Supplying this object is only permitted for enabled accounts. If your account does not have access, the request will be rejected.\n",
+ },
+ "acl": schema.ListAttribute{
+ ElementType: types.StringType,
+ Computed: true,
+ Description: "List of IPV4 cidr.",
+ MarkdownDescription: "List of IPV4 cidr.",
+ },
+ "instance_address": schema.StringAttribute{
+ Computed: true,
+ },
+ "router_address": schema.StringAttribute{
+ Computed: true,
+ },
+ },
+ CustomType: NetworkType{
+ ObjectType: types.ObjectType{
+ AttrTypes: NetworkValue{}.AttributeTypes(ctx),
+ },
+ },
+ Computed: true,
+ Description: "The access configuration of the instance",
+ MarkdownDescription: "The access configuration of the instance",
+ },
+ "project_id": schema.StringAttribute{
+ Required: true,
+ Description: "The STACKIT project ID.",
+ MarkdownDescription: "The STACKIT project ID.",
+ },
+ "region": schema.StringAttribute{
+ Required: true,
+ Description: "The region which should be addressed",
+ MarkdownDescription: "The region which should be addressed",
+ Validators: []validator.String{
+ stringvalidator.OneOf(
+ "eu01",
+ ),
+ },
+ },
+ "replicas": schema.Int64Attribute{
+ Computed: true,
+ Description: "How many replicas the instance should have.",
+ MarkdownDescription: "How many replicas the instance should have.",
+ },
+ "retention_days": schema.Int64Attribute{
+ Computed: true,
+ Description: "The days for how long the backup files should be stored before cleaned up. 30 to 365",
+ MarkdownDescription: "The days for how long the backup files should be stored before cleaned up. 30 to 365",
+ },
+ "status": schema.StringAttribute{
+ Computed: true,
+ },
+ "storage": schema.SingleNestedAttribute{
+ Attributes: map[string]schema.Attribute{
+ "class": schema.StringAttribute{
+ Computed: true,
+ Description: "The storage class for the storage.",
+ MarkdownDescription: "The storage class for the storage.",
+ },
+ "size": schema.Int64Attribute{
+ Computed: true,
+ Description: "The storage size in Gigabytes.",
+ MarkdownDescription: "The storage size in Gigabytes.",
+ },
+ },
+ CustomType: StorageType{
+ ObjectType: types.ObjectType{
+ AttrTypes: StorageValue{}.AttributeTypes(ctx),
+ },
+ },
+ Computed: true,
+ Description: "The object containing information about the storage size and class.",
+ MarkdownDescription: "The object containing information about the storage size and class.",
+ },
+ "version": schema.StringAttribute{
+ Computed: true,
+ Description: "The sqlserver version used for the instance.",
+ MarkdownDescription: "The sqlserver version used for the instance.",
+ },
+ },
+ }
+}
+
+type InstanceModel struct {
+ BackupSchedule types.String `tfsdk:"backup_schedule"`
+ Edition types.String `tfsdk:"edition"`
+ Encryption EncryptionValue `tfsdk:"encryption"`
+ FlavorId types.String `tfsdk:"flavor_id"`
+ Id types.String `tfsdk:"id"`
+ InstanceId types.String `tfsdk:"instance_id"`
+ IsDeletable types.Bool `tfsdk:"is_deletable"`
+ Name types.String `tfsdk:"name"`
+ Network NetworkValue `tfsdk:"network"`
+ ProjectId types.String `tfsdk:"project_id"`
+ Region types.String `tfsdk:"region"`
+ Replicas types.Int64 `tfsdk:"replicas"`
+ RetentionDays types.Int64 `tfsdk:"retention_days"`
+ Status types.String `tfsdk:"status"`
+ Storage StorageValue `tfsdk:"storage"`
+ Version types.String `tfsdk:"version"`
+}
+
+var _ basetypes.ObjectTypable = EncryptionType{}
+
+type EncryptionType struct {
+ basetypes.ObjectType
+}
+
+func (t EncryptionType) Equal(o attr.Type) bool {
+ other, ok := o.(EncryptionType)
+
+ if !ok {
+ return false
+ }
+
+ return t.ObjectType.Equal(other.ObjectType)
+}
+
+func (t EncryptionType) String() string {
+ return "EncryptionType"
+}
+
+func (t EncryptionType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ attributes := in.Attributes()
+
+ kekKeyIdAttribute, ok := attributes["kek_key_id"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `kek_key_id is missing from object`)
+
+ return nil, diags
+ }
+
+ kekKeyIdVal, ok := kekKeyIdAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`kek_key_id expected to be basetypes.StringValue, was: %T`, kekKeyIdAttribute))
+ }
+
+ kekKeyRingIdAttribute, ok := attributes["kek_key_ring_id"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `kek_key_ring_id is missing from object`)
+
+ return nil, diags
+ }
+
+ kekKeyRingIdVal, ok := kekKeyRingIdAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`kek_key_ring_id expected to be basetypes.StringValue, was: %T`, kekKeyRingIdAttribute))
+ }
+
+ kekKeyVersionAttribute, ok := attributes["kek_key_version"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `kek_key_version is missing from object`)
+
+ return nil, diags
+ }
+
+ kekKeyVersionVal, ok := kekKeyVersionAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`kek_key_version expected to be basetypes.StringValue, was: %T`, kekKeyVersionAttribute))
+ }
+
+ serviceAccountAttribute, ok := attributes["service_account"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `service_account is missing from object`)
+
+ return nil, diags
+ }
+
+ serviceAccountVal, ok := serviceAccountAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`service_account expected to be basetypes.StringValue, was: %T`, serviceAccountAttribute))
+ }
+
+ if diags.HasError() {
+ return nil, diags
+ }
+
+ return EncryptionValue{
+ KekKeyId: kekKeyIdVal,
+ KekKeyRingId: kekKeyRingIdVal,
+ KekKeyVersion: kekKeyVersionVal,
+ ServiceAccount: serviceAccountVal,
+ state: attr.ValueStateKnown,
+ }, diags
+}
+
+func NewEncryptionValueNull() EncryptionValue {
+ return EncryptionValue{
+ state: attr.ValueStateNull,
+ }
+}
+
+func NewEncryptionValueUnknown() EncryptionValue {
+ return EncryptionValue{
+ state: attr.ValueStateUnknown,
+ }
+}
+
+func NewEncryptionValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (EncryptionValue, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521
+ ctx := context.Background()
+
+ for name, attributeType := range attributeTypes {
+ attribute, ok := attributes[name]
+
+ if !ok {
+ diags.AddError(
+ "Missing EncryptionValue Attribute Value",
+ "While creating a EncryptionValue value, a missing attribute value was detected. "+
+ "A EncryptionValue must contain values for all attributes, even if null or unknown. "+
+ "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+ fmt.Sprintf("EncryptionValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()),
+ )
+
+ continue
+ }
+
+ if !attributeType.Equal(attribute.Type(ctx)) {
+ diags.AddError(
+ "Invalid EncryptionValue Attribute Type",
+ "While creating a EncryptionValue value, an invalid attribute value was detected. "+
+ "A EncryptionValue must use a matching attribute type for the value. "+
+ "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+ fmt.Sprintf("EncryptionValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+
+ fmt.Sprintf("EncryptionValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)),
+ )
+ }
+ }
+
+ for name := range attributes {
+ _, ok := attributeTypes[name]
+
+ if !ok {
+ diags.AddError(
+ "Extra EncryptionValue Attribute Value",
+ "While creating a EncryptionValue value, an extra attribute value was detected. "+
+ "A EncryptionValue must not contain values beyond the expected attribute types. "+
+ "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+ fmt.Sprintf("Extra EncryptionValue Attribute Name: %s", name),
+ )
+ }
+ }
+
+ if diags.HasError() {
+ return NewEncryptionValueUnknown(), diags
+ }
+
+ kekKeyIdAttribute, ok := attributes["kek_key_id"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `kek_key_id is missing from object`)
+
+ return NewEncryptionValueUnknown(), diags
+ }
+
+ kekKeyIdVal, ok := kekKeyIdAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`kek_key_id expected to be basetypes.StringValue, was: %T`, kekKeyIdAttribute))
+ }
+
+ kekKeyRingIdAttribute, ok := attributes["kek_key_ring_id"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `kek_key_ring_id is missing from object`)
+
+ return NewEncryptionValueUnknown(), diags
+ }
+
+ kekKeyRingIdVal, ok := kekKeyRingIdAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`kek_key_ring_id expected to be basetypes.StringValue, was: %T`, kekKeyRingIdAttribute))
+ }
+
+ kekKeyVersionAttribute, ok := attributes["kek_key_version"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `kek_key_version is missing from object`)
+
+ return NewEncryptionValueUnknown(), diags
+ }
+
+ kekKeyVersionVal, ok := kekKeyVersionAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`kek_key_version expected to be basetypes.StringValue, was: %T`, kekKeyVersionAttribute))
+ }
+
+ serviceAccountAttribute, ok := attributes["service_account"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `service_account is missing from object`)
+
+ return NewEncryptionValueUnknown(), diags
+ }
+
+ serviceAccountVal, ok := serviceAccountAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`service_account expected to be basetypes.StringValue, was: %T`, serviceAccountAttribute))
+ }
+
+ if diags.HasError() {
+ return NewEncryptionValueUnknown(), diags
+ }
+
+ return EncryptionValue{
+ KekKeyId: kekKeyIdVal,
+ KekKeyRingId: kekKeyRingIdVal,
+ KekKeyVersion: kekKeyVersionVal,
+ ServiceAccount: serviceAccountVal,
+ state: attr.ValueStateKnown,
+ }, diags
+}
+
+func NewEncryptionValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) EncryptionValue {
+ object, diags := NewEncryptionValue(attributeTypes, attributes)
+
+ if diags.HasError() {
+ // This could potentially be added to the diag package.
+ diagsStrings := make([]string, 0, len(diags))
+
+ for _, diagnostic := range diags {
+ diagsStrings = append(diagsStrings, fmt.Sprintf(
+ "%s | %s | %s",
+ diagnostic.Severity(),
+ diagnostic.Summary(),
+ diagnostic.Detail()))
+ }
+
+ panic("NewEncryptionValueMust received error(s): " + strings.Join(diagsStrings, "\n"))
+ }
+
+ return object
+}
+
+func (t EncryptionType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) {
+ if in.Type() == nil {
+ return NewEncryptionValueNull(), nil
+ }
+
+ if !in.Type().Equal(t.TerraformType(ctx)) {
+ return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type())
+ }
+
+ if !in.IsKnown() {
+ return NewEncryptionValueUnknown(), nil
+ }
+
+ if in.IsNull() {
+ return NewEncryptionValueNull(), nil
+ }
+
+ attributes := map[string]attr.Value{}
+
+ val := map[string]tftypes.Value{}
+
+ err := in.As(&val)
+
+ if err != nil {
+ return nil, err
+ }
+
+ for k, v := range val {
+ a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v)
+
+ if err != nil {
+ return nil, err
+ }
+
+ attributes[k] = a
+ }
+
+ return NewEncryptionValueMust(EncryptionValue{}.AttributeTypes(ctx), attributes), nil
+}
+
+func (t EncryptionType) ValueType(ctx context.Context) attr.Value {
+ return EncryptionValue{}
+}
+
+var _ basetypes.ObjectValuable = EncryptionValue{}
+
+type EncryptionValue struct {
+ KekKeyId basetypes.StringValue `tfsdk:"kek_key_id"`
+ KekKeyRingId basetypes.StringValue `tfsdk:"kek_key_ring_id"`
+ KekKeyVersion basetypes.StringValue `tfsdk:"kek_key_version"`
+ ServiceAccount basetypes.StringValue `tfsdk:"service_account"`
+ state attr.ValueState
+}
+
+func (v EncryptionValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) {
+ attrTypes := make(map[string]tftypes.Type, 4)
+
+ var val tftypes.Value
+ var err error
+
+ attrTypes["kek_key_id"] = basetypes.StringType{}.TerraformType(ctx)
+ attrTypes["kek_key_ring_id"] = basetypes.StringType{}.TerraformType(ctx)
+ attrTypes["kek_key_version"] = basetypes.StringType{}.TerraformType(ctx)
+ attrTypes["service_account"] = basetypes.StringType{}.TerraformType(ctx)
+
+ objectType := tftypes.Object{AttributeTypes: attrTypes}
+
+ switch v.state {
+ case attr.ValueStateKnown:
+ vals := make(map[string]tftypes.Value, 4)
+
+ val, err = v.KekKeyId.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["kek_key_id"] = val
+
+ val, err = v.KekKeyRingId.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["kek_key_ring_id"] = val
+
+ val, err = v.KekKeyVersion.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["kek_key_version"] = val
+
+ val, err = v.ServiceAccount.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["service_account"] = val
+
+ if err := tftypes.ValidateValue(objectType, vals); err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ return tftypes.NewValue(objectType, vals), nil
+ case attr.ValueStateNull:
+ return tftypes.NewValue(objectType, nil), nil
+ case attr.ValueStateUnknown:
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), nil
+ default:
+ panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state))
+ }
+}
+
+func (v EncryptionValue) IsNull() bool {
+ return v.state == attr.ValueStateNull
+}
+
+func (v EncryptionValue) IsUnknown() bool {
+ return v.state == attr.ValueStateUnknown
+}
+
+func (v EncryptionValue) String() string {
+ return "EncryptionValue"
+}
+
+func (v EncryptionValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ attributeTypes := map[string]attr.Type{
+ "kek_key_id": basetypes.StringType{},
+ "kek_key_ring_id": basetypes.StringType{},
+ "kek_key_version": basetypes.StringType{},
+ "service_account": basetypes.StringType{},
+ }
+
+ if v.IsNull() {
+ return types.ObjectNull(attributeTypes), diags
+ }
+
+ if v.IsUnknown() {
+ return types.ObjectUnknown(attributeTypes), diags
+ }
+
+ objVal, diags := types.ObjectValue(
+ attributeTypes,
+ map[string]attr.Value{
+ "kek_key_id": v.KekKeyId,
+ "kek_key_ring_id": v.KekKeyRingId,
+ "kek_key_version": v.KekKeyVersion,
+ "service_account": v.ServiceAccount,
+ })
+
+ return objVal, diags
+}
+
+func (v EncryptionValue) Equal(o attr.Value) bool {
+ other, ok := o.(EncryptionValue)
+
+ if !ok {
+ return false
+ }
+
+ if v.state != other.state {
+ return false
+ }
+
+ if v.state != attr.ValueStateKnown {
+ return true
+ }
+
+ if !v.KekKeyId.Equal(other.KekKeyId) {
+ return false
+ }
+
+ if !v.KekKeyRingId.Equal(other.KekKeyRingId) {
+ return false
+ }
+
+ if !v.KekKeyVersion.Equal(other.KekKeyVersion) {
+ return false
+ }
+
+ if !v.ServiceAccount.Equal(other.ServiceAccount) {
+ return false
+ }
+
+ return true
+}
+
+func (v EncryptionValue) Type(ctx context.Context) attr.Type {
+ return EncryptionType{
+ basetypes.ObjectType{
+ AttrTypes: v.AttributeTypes(ctx),
+ },
+ }
+}
+
+func (v EncryptionValue) AttributeTypes(ctx context.Context) map[string]attr.Type {
+ return map[string]attr.Type{
+ "kek_key_id": basetypes.StringType{},
+ "kek_key_ring_id": basetypes.StringType{},
+ "kek_key_version": basetypes.StringType{},
+ "service_account": basetypes.StringType{},
+ }
+}
+
+var _ basetypes.ObjectTypable = NetworkType{}
+
+type NetworkType struct {
+ basetypes.ObjectType
+}
+
+func (t NetworkType) Equal(o attr.Type) bool {
+ other, ok := o.(NetworkType)
+
+ if !ok {
+ return false
+ }
+
+ return t.ObjectType.Equal(other.ObjectType)
+}
+
+func (t NetworkType) String() string {
+ return "NetworkType"
+}
+
+func (t NetworkType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ attributes := in.Attributes()
+
+ accessScopeAttribute, ok := attributes["access_scope"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `access_scope is missing from object`)
+
+ return nil, diags
+ }
+
+ accessScopeVal, ok := accessScopeAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`access_scope expected to be basetypes.StringValue, was: %T`, accessScopeAttribute))
+ }
+
+ aclAttribute, ok := attributes["acl"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `acl is missing from object`)
+
+ return nil, diags
+ }
+
+ aclVal, ok := aclAttribute.(basetypes.ListValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`acl expected to be basetypes.ListValue, was: %T`, aclAttribute))
+ }
+
+ instanceAddressAttribute, ok := attributes["instance_address"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `instance_address is missing from object`)
+
+ return nil, diags
+ }
+
+ instanceAddressVal, ok := instanceAddressAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`instance_address expected to be basetypes.StringValue, was: %T`, instanceAddressAttribute))
+ }
+
+ routerAddressAttribute, ok := attributes["router_address"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `router_address is missing from object`)
+
+ return nil, diags
+ }
+
+ routerAddressVal, ok := routerAddressAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`router_address expected to be basetypes.StringValue, was: %T`, routerAddressAttribute))
+ }
+
+ if diags.HasError() {
+ return nil, diags
+ }
+
+ return NetworkValue{
+ AccessScope: accessScopeVal,
+ Acl: aclVal,
+ InstanceAddress: instanceAddressVal,
+ RouterAddress: routerAddressVal,
+ state: attr.ValueStateKnown,
+ }, diags
+}
+
+func NewNetworkValueNull() NetworkValue {
+ return NetworkValue{
+ state: attr.ValueStateNull,
+ }
+}
+
+func NewNetworkValueUnknown() NetworkValue {
+ return NetworkValue{
+ state: attr.ValueStateUnknown,
+ }
+}
+
+func NewNetworkValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (NetworkValue, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521
+ ctx := context.Background()
+
+ for name, attributeType := range attributeTypes {
+ attribute, ok := attributes[name]
+
+ if !ok {
+ diags.AddError(
+ "Missing NetworkValue Attribute Value",
+ "While creating a NetworkValue value, a missing attribute value was detected. "+
+ "A NetworkValue must contain values for all attributes, even if null or unknown. "+
+ "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+ fmt.Sprintf("NetworkValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()),
+ )
+
+ continue
+ }
+
+ if !attributeType.Equal(attribute.Type(ctx)) {
+ diags.AddError(
+ "Invalid NetworkValue Attribute Type",
+ "While creating a NetworkValue value, an invalid attribute value was detected. "+
+ "A NetworkValue must use a matching attribute type for the value. "+
+ "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+ fmt.Sprintf("NetworkValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+
+ fmt.Sprintf("NetworkValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)),
+ )
+ }
+ }
+
+ for name := range attributes {
+ _, ok := attributeTypes[name]
+
+ if !ok {
+ diags.AddError(
+ "Extra NetworkValue Attribute Value",
+ "While creating a NetworkValue value, an extra attribute value was detected. "+
+ "A NetworkValue must not contain values beyond the expected attribute types. "+
+ "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+ fmt.Sprintf("Extra NetworkValue Attribute Name: %s", name),
+ )
+ }
+ }
+
+ if diags.HasError() {
+ return NewNetworkValueUnknown(), diags
+ }
+
+ accessScopeAttribute, ok := attributes["access_scope"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `access_scope is missing from object`)
+
+ return NewNetworkValueUnknown(), diags
+ }
+
+ accessScopeVal, ok := accessScopeAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`access_scope expected to be basetypes.StringValue, was: %T`, accessScopeAttribute))
+ }
+
+ aclAttribute, ok := attributes["acl"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `acl is missing from object`)
+
+ return NewNetworkValueUnknown(), diags
+ }
+
+ aclVal, ok := aclAttribute.(basetypes.ListValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`acl expected to be basetypes.ListValue, was: %T`, aclAttribute))
+ }
+
+ instanceAddressAttribute, ok := attributes["instance_address"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `instance_address is missing from object`)
+
+ return NewNetworkValueUnknown(), diags
+ }
+
+ instanceAddressVal, ok := instanceAddressAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`instance_address expected to be basetypes.StringValue, was: %T`, instanceAddressAttribute))
+ }
+
+ routerAddressAttribute, ok := attributes["router_address"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `router_address is missing from object`)
+
+ return NewNetworkValueUnknown(), diags
+ }
+
+ routerAddressVal, ok := routerAddressAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`router_address expected to be basetypes.StringValue, was: %T`, routerAddressAttribute))
+ }
+
+ if diags.HasError() {
+ return NewNetworkValueUnknown(), diags
+ }
+
+ return NetworkValue{
+ AccessScope: accessScopeVal,
+ Acl: aclVal,
+ InstanceAddress: instanceAddressVal,
+ RouterAddress: routerAddressVal,
+ state: attr.ValueStateKnown,
+ }, diags
+}
+
+func NewNetworkValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) NetworkValue {
+ object, diags := NewNetworkValue(attributeTypes, attributes)
+
+ if diags.HasError() {
+ // This could potentially be added to the diag package.
+ diagsStrings := make([]string, 0, len(diags))
+
+ for _, diagnostic := range diags {
+ diagsStrings = append(diagsStrings, fmt.Sprintf(
+ "%s | %s | %s",
+ diagnostic.Severity(),
+ diagnostic.Summary(),
+ diagnostic.Detail()))
+ }
+
+ panic("NewNetworkValueMust received error(s): " + strings.Join(diagsStrings, "\n"))
+ }
+
+ return object
+}
+
+func (t NetworkType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) {
+ if in.Type() == nil {
+ return NewNetworkValueNull(), nil
+ }
+
+ if !in.Type().Equal(t.TerraformType(ctx)) {
+ return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type())
+ }
+
+ if !in.IsKnown() {
+ return NewNetworkValueUnknown(), nil
+ }
+
+ if in.IsNull() {
+ return NewNetworkValueNull(), nil
+ }
+
+ attributes := map[string]attr.Value{}
+
+ val := map[string]tftypes.Value{}
+
+ err := in.As(&val)
+
+ if err != nil {
+ return nil, err
+ }
+
+ for k, v := range val {
+ a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v)
+
+ if err != nil {
+ return nil, err
+ }
+
+ attributes[k] = a
+ }
+
+ return NewNetworkValueMust(NetworkValue{}.AttributeTypes(ctx), attributes), nil
+}
+
+func (t NetworkType) ValueType(ctx context.Context) attr.Value {
+ return NetworkValue{}
+}
+
+var _ basetypes.ObjectValuable = NetworkValue{}
+
+type NetworkValue struct {
+ AccessScope basetypes.StringValue `tfsdk:"access_scope"`
+ Acl basetypes.ListValue `tfsdk:"acl"`
+ InstanceAddress basetypes.StringValue `tfsdk:"instance_address"`
+ RouterAddress basetypes.StringValue `tfsdk:"router_address"`
+ state attr.ValueState
+}
+
+func (v NetworkValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) {
+ attrTypes := make(map[string]tftypes.Type, 4)
+
+ var val tftypes.Value
+ var err error
+
+ attrTypes["access_scope"] = basetypes.StringType{}.TerraformType(ctx)
+ attrTypes["acl"] = basetypes.ListType{
+ ElemType: types.StringType,
+ }.TerraformType(ctx)
+ attrTypes["instance_address"] = basetypes.StringType{}.TerraformType(ctx)
+ attrTypes["router_address"] = basetypes.StringType{}.TerraformType(ctx)
+
+ objectType := tftypes.Object{AttributeTypes: attrTypes}
+
+ switch v.state {
+ case attr.ValueStateKnown:
+ vals := make(map[string]tftypes.Value, 4)
+
+ val, err = v.AccessScope.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["access_scope"] = val
+
+ val, err = v.Acl.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["acl"] = val
+
+ val, err = v.InstanceAddress.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["instance_address"] = val
+
+ val, err = v.RouterAddress.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["router_address"] = val
+
+ if err := tftypes.ValidateValue(objectType, vals); err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ return tftypes.NewValue(objectType, vals), nil
+ case attr.ValueStateNull:
+ return tftypes.NewValue(objectType, nil), nil
+ case attr.ValueStateUnknown:
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), nil
+ default:
+ panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state))
+ }
+}
+
+func (v NetworkValue) IsNull() bool {
+ return v.state == attr.ValueStateNull
+}
+
+func (v NetworkValue) IsUnknown() bool {
+ return v.state == attr.ValueStateUnknown
+}
+
+func (v NetworkValue) String() string {
+ return "NetworkValue"
+}
+
+func (v NetworkValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ var aclVal basetypes.ListValue
+ switch {
+ case v.Acl.IsUnknown():
+ aclVal = types.ListUnknown(types.StringType)
+ case v.Acl.IsNull():
+ aclVal = types.ListNull(types.StringType)
+ default:
+ var d diag.Diagnostics
+ aclVal, d = types.ListValue(types.StringType, v.Acl.Elements())
+ diags.Append(d...)
+ }
+
+ if diags.HasError() {
+ return types.ObjectUnknown(map[string]attr.Type{
+ "access_scope": basetypes.StringType{},
+ "acl": basetypes.ListType{
+ ElemType: types.StringType,
+ },
+ "instance_address": basetypes.StringType{},
+ "router_address": basetypes.StringType{},
+ }), diags
+ }
+
+ attributeTypes := map[string]attr.Type{
+ "access_scope": basetypes.StringType{},
+ "acl": basetypes.ListType{
+ ElemType: types.StringType,
+ },
+ "instance_address": basetypes.StringType{},
+ "router_address": basetypes.StringType{},
+ }
+
+ if v.IsNull() {
+ return types.ObjectNull(attributeTypes), diags
+ }
+
+ if v.IsUnknown() {
+ return types.ObjectUnknown(attributeTypes), diags
+ }
+
+ objVal, diags := types.ObjectValue(
+ attributeTypes,
+ map[string]attr.Value{
+ "access_scope": v.AccessScope,
+ "acl": aclVal,
+ "instance_address": v.InstanceAddress,
+ "router_address": v.RouterAddress,
+ })
+
+ return objVal, diags
+}
+
+func (v NetworkValue) Equal(o attr.Value) bool {
+ other, ok := o.(NetworkValue)
+
+ if !ok {
+ return false
+ }
+
+ if v.state != other.state {
+ return false
+ }
+
+ if v.state != attr.ValueStateKnown {
+ return true
+ }
+
+ if !v.AccessScope.Equal(other.AccessScope) {
+ return false
+ }
+
+ if !v.Acl.Equal(other.Acl) {
+ return false
+ }
+
+ if !v.InstanceAddress.Equal(other.InstanceAddress) {
+ return false
+ }
+
+ if !v.RouterAddress.Equal(other.RouterAddress) {
+ return false
+ }
+
+ return true
+}
+
+func (v NetworkValue) Type(ctx context.Context) attr.Type {
+ return NetworkType{
+ basetypes.ObjectType{
+ AttrTypes: v.AttributeTypes(ctx),
+ },
+ }
+}
+
+func (v NetworkValue) AttributeTypes(ctx context.Context) map[string]attr.Type {
+ return map[string]attr.Type{
+ "access_scope": basetypes.StringType{},
+ "acl": basetypes.ListType{
+ ElemType: types.StringType,
+ },
+ "instance_address": basetypes.StringType{},
+ "router_address": basetypes.StringType{},
+ }
+}
+
+var _ basetypes.ObjectTypable = StorageType{}
+
+type StorageType struct {
+ basetypes.ObjectType
+}
+
+func (t StorageType) Equal(o attr.Type) bool {
+ other, ok := o.(StorageType)
+
+ if !ok {
+ return false
+ }
+
+ return t.ObjectType.Equal(other.ObjectType)
+}
+
+func (t StorageType) String() string {
+ return "StorageType"
+}
+
+func (t StorageType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ attributes := in.Attributes()
+
+ classAttribute, ok := attributes["class"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `class is missing from object`)
+
+ return nil, diags
+ }
+
+ classVal, ok := classAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`class expected to be basetypes.StringValue, was: %T`, classAttribute))
+ }
+
+ sizeAttribute, ok := attributes["size"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `size is missing from object`)
+
+ return nil, diags
+ }
+
+ sizeVal, ok := sizeAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`size expected to be basetypes.Int64Value, was: %T`, sizeAttribute))
+ }
+
+ if diags.HasError() {
+ return nil, diags
+ }
+
+ return StorageValue{
+ Class: classVal,
+ Size: sizeVal,
+ state: attr.ValueStateKnown,
+ }, diags
+}
+
+func NewStorageValueNull() StorageValue {
+ return StorageValue{
+ state: attr.ValueStateNull,
+ }
+}
+
+func NewStorageValueUnknown() StorageValue {
+ return StorageValue{
+ state: attr.ValueStateUnknown,
+ }
+}
+
+func NewStorageValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (StorageValue, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521
+ ctx := context.Background()
+
+ for name, attributeType := range attributeTypes {
+ attribute, ok := attributes[name]
+
+ if !ok {
+ diags.AddError(
+ "Missing StorageValue Attribute Value",
+ "While creating a StorageValue value, a missing attribute value was detected. "+
+ "A StorageValue must contain values for all attributes, even if null or unknown. "+
+ "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+ fmt.Sprintf("StorageValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()),
+ )
+
+ continue
+ }
+
+ if !attributeType.Equal(attribute.Type(ctx)) {
+ diags.AddError(
+ "Invalid StorageValue Attribute Type",
+ "While creating a StorageValue value, an invalid attribute value was detected. "+
+ "A StorageValue must use a matching attribute type for the value. "+
+ "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+ fmt.Sprintf("StorageValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+
+ fmt.Sprintf("StorageValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)),
+ )
+ }
+ }
+
+ for name := range attributes {
+ _, ok := attributeTypes[name]
+
+ if !ok {
+ diags.AddError(
+ "Extra StorageValue Attribute Value",
+ "While creating a StorageValue value, an extra attribute value was detected. "+
+ "A StorageValue must not contain values beyond the expected attribute types. "+
+ "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+ fmt.Sprintf("Extra StorageValue Attribute Name: %s", name),
+ )
+ }
+ }
+
+ if diags.HasError() {
+ return NewStorageValueUnknown(), diags
+ }
+
+ classAttribute, ok := attributes["class"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `class is missing from object`)
+
+ return NewStorageValueUnknown(), diags
+ }
+
+ classVal, ok := classAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`class expected to be basetypes.StringValue, was: %T`, classAttribute))
+ }
+
+ sizeAttribute, ok := attributes["size"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `size is missing from object`)
+
+ return NewStorageValueUnknown(), diags
+ }
+
+ sizeVal, ok := sizeAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`size expected to be basetypes.Int64Value, was: %T`, sizeAttribute))
+ }
+
+ if diags.HasError() {
+ return NewStorageValueUnknown(), diags
+ }
+
+ return StorageValue{
+ Class: classVal,
+ Size: sizeVal,
+ state: attr.ValueStateKnown,
+ }, diags
+}
+
+func NewStorageValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) StorageValue {
+ object, diags := NewStorageValue(attributeTypes, attributes)
+
+ if diags.HasError() {
+ // This could potentially be added to the diag package.
+ diagsStrings := make([]string, 0, len(diags))
+
+ for _, diagnostic := range diags {
+ diagsStrings = append(diagsStrings, fmt.Sprintf(
+ "%s | %s | %s",
+ diagnostic.Severity(),
+ diagnostic.Summary(),
+ diagnostic.Detail()))
+ }
+
+ panic("NewStorageValueMust received error(s): " + strings.Join(diagsStrings, "\n"))
+ }
+
+ return object
+}
+
+func (t StorageType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) {
+ if in.Type() == nil {
+ return NewStorageValueNull(), nil
+ }
+
+ if !in.Type().Equal(t.TerraformType(ctx)) {
+ return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type())
+ }
+
+ if !in.IsKnown() {
+ return NewStorageValueUnknown(), nil
+ }
+
+ if in.IsNull() {
+ return NewStorageValueNull(), nil
+ }
+
+ attributes := map[string]attr.Value{}
+
+ val := map[string]tftypes.Value{}
+
+ err := in.As(&val)
+
+ if err != nil {
+ return nil, err
+ }
+
+ for k, v := range val {
+ a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v)
+
+ if err != nil {
+ return nil, err
+ }
+
+ attributes[k] = a
+ }
+
+ return NewStorageValueMust(StorageValue{}.AttributeTypes(ctx), attributes), nil
+}
+
+func (t StorageType) ValueType(ctx context.Context) attr.Value {
+ return StorageValue{}
+}
+
+var _ basetypes.ObjectValuable = StorageValue{}
+
+type StorageValue struct {
+ Class basetypes.StringValue `tfsdk:"class"`
+ Size basetypes.Int64Value `tfsdk:"size"`
+ state attr.ValueState
+}
+
+func (v StorageValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) {
+ attrTypes := make(map[string]tftypes.Type, 2)
+
+ var val tftypes.Value
+ var err error
+
+ attrTypes["class"] = basetypes.StringType{}.TerraformType(ctx)
+ attrTypes["size"] = basetypes.Int64Type{}.TerraformType(ctx)
+
+ objectType := tftypes.Object{AttributeTypes: attrTypes}
+
+ switch v.state {
+ case attr.ValueStateKnown:
+ vals := make(map[string]tftypes.Value, 2)
+
+ val, err = v.Class.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["class"] = val
+
+ val, err = v.Size.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["size"] = val
+
+ if err := tftypes.ValidateValue(objectType, vals); err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ return tftypes.NewValue(objectType, vals), nil
+ case attr.ValueStateNull:
+ return tftypes.NewValue(objectType, nil), nil
+ case attr.ValueStateUnknown:
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), nil
+ default:
+ panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state))
+ }
+}
+
+func (v StorageValue) IsNull() bool {
+ return v.state == attr.ValueStateNull
+}
+
+func (v StorageValue) IsUnknown() bool {
+ return v.state == attr.ValueStateUnknown
+}
+
+func (v StorageValue) String() string {
+ return "StorageValue"
+}
+
+func (v StorageValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ attributeTypes := map[string]attr.Type{
+ "class": basetypes.StringType{},
+ "size": basetypes.Int64Type{},
+ }
+
+ if v.IsNull() {
+ return types.ObjectNull(attributeTypes), diags
+ }
+
+ if v.IsUnknown() {
+ return types.ObjectUnknown(attributeTypes), diags
+ }
+
+ objVal, diags := types.ObjectValue(
+ attributeTypes,
+ map[string]attr.Value{
+ "class": v.Class,
+ "size": v.Size,
+ })
+
+ return objVal, diags
+}
+
+func (v StorageValue) Equal(o attr.Value) bool {
+ other, ok := o.(StorageValue)
+
+ if !ok {
+ return false
+ }
+
+ if v.state != other.state {
+ return false
+ }
+
+ if v.state != attr.ValueStateKnown {
+ return true
+ }
+
+ if !v.Class.Equal(other.Class) {
+ return false
+ }
+
+ if !v.Size.Equal(other.Size) {
+ return false
+ }
+
+ return true
+}
+
+func (v StorageValue) Type(ctx context.Context) attr.Type {
+ return StorageType{
+ basetypes.ObjectType{
+ AttrTypes: v.AttributeTypes(ctx),
+ },
+ }
+}
+
+func (v StorageValue) AttributeTypes(ctx context.Context) map[string]attr.Type {
+ return map[string]attr.Type{
+ "class": basetypes.StringType{},
+ "size": basetypes.Int64Type{},
+ }
+}
diff --git a/stackit/internal/services/sqlserverflexalpha/instance/datasources_gen/instances_data_source_gen.go b/stackit/internal/services/sqlserverflexalpha/instance/datasources_gen/instances_data_source_gen.go
new file mode 100644
index 00000000..33df0a5d
--- /dev/null
+++ b/stackit/internal/services/sqlserverflexalpha/instance/datasources_gen/instances_data_source_gen.go
@@ -0,0 +1,1172 @@
+// Code generated by terraform-plugin-framework-generator DO NOT EDIT.
+
+package sqlserverflexalpha
+
+import (
+ "context"
+ "fmt"
+ "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/schema/validator"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-framework/types/basetypes"
+ "github.com/hashicorp/terraform-plugin-go/tftypes"
+ "strings"
+
+ "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+)
+
+func InstancesDataSourceSchema(ctx context.Context) schema.Schema {
+ return schema.Schema{
+ Attributes: map[string]schema.Attribute{
+ "instances": schema.ListNestedAttribute{
+ NestedObject: schema.NestedAttributeObject{
+ Attributes: map[string]schema.Attribute{
+ "id": schema.StringAttribute{
+ Computed: true,
+ Description: "The ID of the instance.",
+ MarkdownDescription: "The ID of the instance.",
+ },
+ "is_deletable": schema.BoolAttribute{
+ Computed: true,
+ Description: "Whether the instance can be deleted or not.",
+ MarkdownDescription: "Whether the instance can be deleted or not.",
+ },
+ "name": schema.StringAttribute{
+ Computed: true,
+ Description: "The name of the instance.",
+ MarkdownDescription: "The name of the instance.",
+ },
+ "status": schema.StringAttribute{
+ Computed: true,
+ },
+ },
+ CustomType: InstancesType{
+ ObjectType: types.ObjectType{
+ AttrTypes: InstancesValue{}.AttributeTypes(ctx),
+ },
+ },
+ },
+ Computed: true,
+ Description: "List of owned instances and their current status.",
+ MarkdownDescription: "List of owned instances and their current status.",
+ },
+ "page": schema.Int64Attribute{
+ Optional: true,
+ Computed: true,
+ Description: "Number of the page of items list to be returned.",
+ MarkdownDescription: "Number of the page of items list to be returned.",
+ },
+ "pagination": schema.SingleNestedAttribute{
+ Attributes: map[string]schema.Attribute{
+ "page": schema.Int64Attribute{
+ Computed: true,
+ },
+ "size": schema.Int64Attribute{
+ Computed: true,
+ },
+ "sort": schema.StringAttribute{
+ Computed: true,
+ },
+ "total_pages": schema.Int64Attribute{
+ Computed: true,
+ },
+ "total_rows": schema.Int64Attribute{
+ Computed: true,
+ },
+ },
+ CustomType: PaginationType{
+ ObjectType: types.ObjectType{
+ AttrTypes: PaginationValue{}.AttributeTypes(ctx),
+ },
+ },
+ Computed: true,
+ },
+ "project_id": schema.StringAttribute{
+ Required: true,
+ Description: "The STACKIT project ID.",
+ MarkdownDescription: "The STACKIT project ID.",
+ },
+ "region": schema.StringAttribute{
+ Required: true,
+ Description: "The region which should be addressed",
+ MarkdownDescription: "The region which should be addressed",
+ Validators: []validator.String{
+ stringvalidator.OneOf(
+ "eu01",
+ ),
+ },
+ },
+ "size": schema.Int64Attribute{
+ Optional: true,
+ Computed: true,
+ Description: "Number of items to be returned on each page.",
+ MarkdownDescription: "Number of items to be returned on each page.",
+ },
+ "sort": schema.StringAttribute{
+ Optional: true,
+ Computed: true,
+ Description: "Sorting of the items to be returned on each page.",
+ MarkdownDescription: "Sorting of the items to be returned on each page.",
+ Validators: []validator.String{
+ stringvalidator.OneOf(
+ "index.desc",
+ "index.asc",
+ "id.desc",
+ "id.asc",
+ "is_deletable.desc",
+ "is_deletable.asc",
+ "name.asc",
+ "name.desc",
+ "status.asc",
+ "status.desc",
+ ),
+ },
+ },
+ },
+ }
+}
+
+type InstancesModel struct {
+ Instances types.List `tfsdk:"instances"`
+ Page types.Int64 `tfsdk:"page"`
+ Pagination PaginationValue `tfsdk:"pagination"`
+ ProjectId types.String `tfsdk:"project_id"`
+ Region types.String `tfsdk:"region"`
+ Size types.Int64 `tfsdk:"size"`
+ Sort types.String `tfsdk:"sort"`
+}
+
+var _ basetypes.ObjectTypable = InstancesType{}
+
+type InstancesType struct {
+ basetypes.ObjectType
+}
+
+func (t InstancesType) Equal(o attr.Type) bool {
+ other, ok := o.(InstancesType)
+
+ if !ok {
+ return false
+ }
+
+ return t.ObjectType.Equal(other.ObjectType)
+}
+
+func (t InstancesType) String() string {
+ return "InstancesType"
+}
+
+func (t InstancesType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ attributes := in.Attributes()
+
+ idAttribute, ok := attributes["id"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `id is missing from object`)
+
+ return nil, diags
+ }
+
+ idVal, ok := idAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`id expected to be basetypes.StringValue, was: %T`, idAttribute))
+ }
+
+ isDeletableAttribute, ok := attributes["is_deletable"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `is_deletable is missing from object`)
+
+ return nil, diags
+ }
+
+ isDeletableVal, ok := isDeletableAttribute.(basetypes.BoolValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`is_deletable expected to be basetypes.BoolValue, was: %T`, isDeletableAttribute))
+ }
+
+ nameAttribute, ok := attributes["name"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `name is missing from object`)
+
+ return nil, diags
+ }
+
+ nameVal, ok := nameAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`name expected to be basetypes.StringValue, was: %T`, nameAttribute))
+ }
+
+ statusAttribute, ok := attributes["status"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `status is missing from object`)
+
+ return nil, diags
+ }
+
+ statusVal, ok := statusAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`status expected to be basetypes.StringValue, was: %T`, statusAttribute))
+ }
+
+ if diags.HasError() {
+ return nil, diags
+ }
+
+ return InstancesValue{
+ Id: idVal,
+ IsDeletable: isDeletableVal,
+ Name: nameVal,
+ Status: statusVal,
+ state: attr.ValueStateKnown,
+ }, diags
+}
+
+func NewInstancesValueNull() InstancesValue {
+ return InstancesValue{
+ state: attr.ValueStateNull,
+ }
+}
+
+func NewInstancesValueUnknown() InstancesValue {
+ return InstancesValue{
+ state: attr.ValueStateUnknown,
+ }
+}
+
+func NewInstancesValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (InstancesValue, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521
+ ctx := context.Background()
+
+ for name, attributeType := range attributeTypes {
+ attribute, ok := attributes[name]
+
+ if !ok {
+ diags.AddError(
+ "Missing InstancesValue Attribute Value",
+ "While creating a InstancesValue value, a missing attribute value was detected. "+
+ "A InstancesValue must contain values for all attributes, even if null or unknown. "+
+ "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+ fmt.Sprintf("InstancesValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()),
+ )
+
+ continue
+ }
+
+ if !attributeType.Equal(attribute.Type(ctx)) {
+ diags.AddError(
+ "Invalid InstancesValue Attribute Type",
+ "While creating a InstancesValue value, an invalid attribute value was detected. "+
+ "A InstancesValue must use a matching attribute type for the value. "+
+ "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+ fmt.Sprintf("InstancesValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+
+ fmt.Sprintf("InstancesValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)),
+ )
+ }
+ }
+
+ for name := range attributes {
+ _, ok := attributeTypes[name]
+
+ if !ok {
+ diags.AddError(
+ "Extra InstancesValue Attribute Value",
+ "While creating a InstancesValue value, an extra attribute value was detected. "+
+ "A InstancesValue must not contain values beyond the expected attribute types. "+
+ "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+ fmt.Sprintf("Extra InstancesValue Attribute Name: %s", name),
+ )
+ }
+ }
+
+ if diags.HasError() {
+ return NewInstancesValueUnknown(), diags
+ }
+
+ idAttribute, ok := attributes["id"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `id is missing from object`)
+
+ return NewInstancesValueUnknown(), diags
+ }
+
+ idVal, ok := idAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`id expected to be basetypes.StringValue, was: %T`, idAttribute))
+ }
+
+ isDeletableAttribute, ok := attributes["is_deletable"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `is_deletable is missing from object`)
+
+ return NewInstancesValueUnknown(), diags
+ }
+
+ isDeletableVal, ok := isDeletableAttribute.(basetypes.BoolValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`is_deletable expected to be basetypes.BoolValue, was: %T`, isDeletableAttribute))
+ }
+
+ nameAttribute, ok := attributes["name"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `name is missing from object`)
+
+ return NewInstancesValueUnknown(), diags
+ }
+
+ nameVal, ok := nameAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`name expected to be basetypes.StringValue, was: %T`, nameAttribute))
+ }
+
+ statusAttribute, ok := attributes["status"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `status is missing from object`)
+
+ return NewInstancesValueUnknown(), diags
+ }
+
+ statusVal, ok := statusAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`status expected to be basetypes.StringValue, was: %T`, statusAttribute))
+ }
+
+ if diags.HasError() {
+ return NewInstancesValueUnknown(), diags
+ }
+
+ return InstancesValue{
+ Id: idVal,
+ IsDeletable: isDeletableVal,
+ Name: nameVal,
+ Status: statusVal,
+ state: attr.ValueStateKnown,
+ }, diags
+}
+
+func NewInstancesValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) InstancesValue {
+ object, diags := NewInstancesValue(attributeTypes, attributes)
+
+ if diags.HasError() {
+ // This could potentially be added to the diag package.
+ diagsStrings := make([]string, 0, len(diags))
+
+ for _, diagnostic := range diags {
+ diagsStrings = append(diagsStrings, fmt.Sprintf(
+ "%s | %s | %s",
+ diagnostic.Severity(),
+ diagnostic.Summary(),
+ diagnostic.Detail()))
+ }
+
+ panic("NewInstancesValueMust received error(s): " + strings.Join(diagsStrings, "\n"))
+ }
+
+ return object
+}
+
+func (t InstancesType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) {
+ if in.Type() == nil {
+ return NewInstancesValueNull(), nil
+ }
+
+ if !in.Type().Equal(t.TerraformType(ctx)) {
+ return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type())
+ }
+
+ if !in.IsKnown() {
+ return NewInstancesValueUnknown(), nil
+ }
+
+ if in.IsNull() {
+ return NewInstancesValueNull(), nil
+ }
+
+ attributes := map[string]attr.Value{}
+
+ val := map[string]tftypes.Value{}
+
+ err := in.As(&val)
+
+ if err != nil {
+ return nil, err
+ }
+
+ for k, v := range val {
+ a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v)
+
+ if err != nil {
+ return nil, err
+ }
+
+ attributes[k] = a
+ }
+
+ return NewInstancesValueMust(InstancesValue{}.AttributeTypes(ctx), attributes), nil
+}
+
+func (t InstancesType) ValueType(ctx context.Context) attr.Value {
+ return InstancesValue{}
+}
+
+var _ basetypes.ObjectValuable = InstancesValue{}
+
+type InstancesValue struct {
+ Id basetypes.StringValue `tfsdk:"id"`
+ IsDeletable basetypes.BoolValue `tfsdk:"is_deletable"`
+ Name basetypes.StringValue `tfsdk:"name"`
+ Status basetypes.StringValue `tfsdk:"status"`
+ state attr.ValueState
+}
+
+func (v InstancesValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) {
+ attrTypes := make(map[string]tftypes.Type, 4)
+
+ var val tftypes.Value
+ var err error
+
+ attrTypes["id"] = basetypes.StringType{}.TerraformType(ctx)
+ attrTypes["is_deletable"] = basetypes.BoolType{}.TerraformType(ctx)
+ attrTypes["name"] = basetypes.StringType{}.TerraformType(ctx)
+ attrTypes["status"] = basetypes.StringType{}.TerraformType(ctx)
+
+ objectType := tftypes.Object{AttributeTypes: attrTypes}
+
+ switch v.state {
+ case attr.ValueStateKnown:
+ vals := make(map[string]tftypes.Value, 4)
+
+ val, err = v.Id.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["id"] = val
+
+ val, err = v.IsDeletable.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["is_deletable"] = val
+
+ val, err = v.Name.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["name"] = val
+
+ val, err = v.Status.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["status"] = val
+
+ if err := tftypes.ValidateValue(objectType, vals); err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ return tftypes.NewValue(objectType, vals), nil
+ case attr.ValueStateNull:
+ return tftypes.NewValue(objectType, nil), nil
+ case attr.ValueStateUnknown:
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), nil
+ default:
+ panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state))
+ }
+}
+
+func (v InstancesValue) IsNull() bool {
+ return v.state == attr.ValueStateNull
+}
+
+func (v InstancesValue) IsUnknown() bool {
+ return v.state == attr.ValueStateUnknown
+}
+
+func (v InstancesValue) String() string {
+ return "InstancesValue"
+}
+
+func (v InstancesValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ attributeTypes := map[string]attr.Type{
+ "id": basetypes.StringType{},
+ "is_deletable": basetypes.BoolType{},
+ "name": basetypes.StringType{},
+ "status": basetypes.StringType{},
+ }
+
+ if v.IsNull() {
+ return types.ObjectNull(attributeTypes), diags
+ }
+
+ if v.IsUnknown() {
+ return types.ObjectUnknown(attributeTypes), diags
+ }
+
+ objVal, diags := types.ObjectValue(
+ attributeTypes,
+ map[string]attr.Value{
+ "id": v.Id,
+ "is_deletable": v.IsDeletable,
+ "name": v.Name,
+ "status": v.Status,
+ })
+
+ return objVal, diags
+}
+
+func (v InstancesValue) Equal(o attr.Value) bool {
+ other, ok := o.(InstancesValue)
+
+ if !ok {
+ return false
+ }
+
+ if v.state != other.state {
+ return false
+ }
+
+ if v.state != attr.ValueStateKnown {
+ return true
+ }
+
+ if !v.Id.Equal(other.Id) {
+ return false
+ }
+
+ if !v.IsDeletable.Equal(other.IsDeletable) {
+ return false
+ }
+
+ if !v.Name.Equal(other.Name) {
+ return false
+ }
+
+ if !v.Status.Equal(other.Status) {
+ return false
+ }
+
+ return true
+}
+
+func (v InstancesValue) Type(ctx context.Context) attr.Type {
+ return InstancesType{
+ basetypes.ObjectType{
+ AttrTypes: v.AttributeTypes(ctx),
+ },
+ }
+}
+
+func (v InstancesValue) AttributeTypes(ctx context.Context) map[string]attr.Type {
+ return map[string]attr.Type{
+ "id": basetypes.StringType{},
+ "is_deletable": basetypes.BoolType{},
+ "name": basetypes.StringType{},
+ "status": basetypes.StringType{},
+ }
+}
+
+var _ basetypes.ObjectTypable = PaginationType{}
+
+type PaginationType struct {
+ basetypes.ObjectType
+}
+
+func (t PaginationType) Equal(o attr.Type) bool {
+ other, ok := o.(PaginationType)
+
+ if !ok {
+ return false
+ }
+
+ return t.ObjectType.Equal(other.ObjectType)
+}
+
+func (t PaginationType) String() string {
+ return "PaginationType"
+}
+
+func (t PaginationType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ attributes := in.Attributes()
+
+ pageAttribute, ok := attributes["page"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `page is missing from object`)
+
+ return nil, diags
+ }
+
+ pageVal, ok := pageAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`page expected to be basetypes.Int64Value, was: %T`, pageAttribute))
+ }
+
+ sizeAttribute, ok := attributes["size"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `size is missing from object`)
+
+ return nil, diags
+ }
+
+ sizeVal, ok := sizeAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`size expected to be basetypes.Int64Value, was: %T`, sizeAttribute))
+ }
+
+ sortAttribute, ok := attributes["sort"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `sort is missing from object`)
+
+ return nil, diags
+ }
+
+ sortVal, ok := sortAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`sort expected to be basetypes.StringValue, was: %T`, sortAttribute))
+ }
+
+ totalPagesAttribute, ok := attributes["total_pages"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `total_pages is missing from object`)
+
+ return nil, diags
+ }
+
+ totalPagesVal, ok := totalPagesAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`total_pages expected to be basetypes.Int64Value, was: %T`, totalPagesAttribute))
+ }
+
+ totalRowsAttribute, ok := attributes["total_rows"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `total_rows is missing from object`)
+
+ return nil, diags
+ }
+
+ totalRowsVal, ok := totalRowsAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`total_rows expected to be basetypes.Int64Value, was: %T`, totalRowsAttribute))
+ }
+
+ if diags.HasError() {
+ return nil, diags
+ }
+
+ return PaginationValue{
+ Page: pageVal,
+ Size: sizeVal,
+ Sort: sortVal,
+ TotalPages: totalPagesVal,
+ TotalRows: totalRowsVal,
+ state: attr.ValueStateKnown,
+ }, diags
+}
+
+func NewPaginationValueNull() PaginationValue {
+ return PaginationValue{
+ state: attr.ValueStateNull,
+ }
+}
+
+func NewPaginationValueUnknown() PaginationValue {
+ return PaginationValue{
+ state: attr.ValueStateUnknown,
+ }
+}
+
+func NewPaginationValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (PaginationValue, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521
+ ctx := context.Background()
+
+ for name, attributeType := range attributeTypes {
+ attribute, ok := attributes[name]
+
+ if !ok {
+ diags.AddError(
+ "Missing PaginationValue Attribute Value",
+ "While creating a PaginationValue value, a missing attribute value was detected. "+
+ "A PaginationValue must contain values for all attributes, even if null or unknown. "+
+ "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+ fmt.Sprintf("PaginationValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()),
+ )
+
+ continue
+ }
+
+ if !attributeType.Equal(attribute.Type(ctx)) {
+ diags.AddError(
+ "Invalid PaginationValue Attribute Type",
+ "While creating a PaginationValue value, an invalid attribute value was detected. "+
+ "A PaginationValue must use a matching attribute type for the value. "+
+ "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+ fmt.Sprintf("PaginationValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+
+ fmt.Sprintf("PaginationValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)),
+ )
+ }
+ }
+
+ for name := range attributes {
+ _, ok := attributeTypes[name]
+
+ if !ok {
+ diags.AddError(
+ "Extra PaginationValue Attribute Value",
+ "While creating a PaginationValue value, an extra attribute value was detected. "+
+ "A PaginationValue must not contain values beyond the expected attribute types. "+
+ "This is always an issue with the provider and should be reported to the provider developers.\n\n"+
+ fmt.Sprintf("Extra PaginationValue Attribute Name: %s", name),
+ )
+ }
+ }
+
+ if diags.HasError() {
+ return NewPaginationValueUnknown(), diags
+ }
+
+ pageAttribute, ok := attributes["page"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `page is missing from object`)
+
+ return NewPaginationValueUnknown(), diags
+ }
+
+ pageVal, ok := pageAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`page expected to be basetypes.Int64Value, was: %T`, pageAttribute))
+ }
+
+ sizeAttribute, ok := attributes["size"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `size is missing from object`)
+
+ return NewPaginationValueUnknown(), diags
+ }
+
+ sizeVal, ok := sizeAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`size expected to be basetypes.Int64Value, was: %T`, sizeAttribute))
+ }
+
+ sortAttribute, ok := attributes["sort"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `sort is missing from object`)
+
+ return NewPaginationValueUnknown(), diags
+ }
+
+ sortVal, ok := sortAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`sort expected to be basetypes.StringValue, was: %T`, sortAttribute))
+ }
+
+ totalPagesAttribute, ok := attributes["total_pages"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `total_pages is missing from object`)
+
+ return NewPaginationValueUnknown(), diags
+ }
+
+ totalPagesVal, ok := totalPagesAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`total_pages expected to be basetypes.Int64Value, was: %T`, totalPagesAttribute))
+ }
+
+ totalRowsAttribute, ok := attributes["total_rows"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `total_rows is missing from object`)
+
+ return NewPaginationValueUnknown(), diags
+ }
+
+ totalRowsVal, ok := totalRowsAttribute.(basetypes.Int64Value)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`total_rows expected to be basetypes.Int64Value, was: %T`, totalRowsAttribute))
+ }
+
+ if diags.HasError() {
+ return NewPaginationValueUnknown(), diags
+ }
+
+ return PaginationValue{
+ Page: pageVal,
+ Size: sizeVal,
+ Sort: sortVal,
+ TotalPages: totalPagesVal,
+ TotalRows: totalRowsVal,
+ state: attr.ValueStateKnown,
+ }, diags
+}
+
+func NewPaginationValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) PaginationValue {
+ object, diags := NewPaginationValue(attributeTypes, attributes)
+
+ if diags.HasError() {
+ // This could potentially be added to the diag package.
+ diagsStrings := make([]string, 0, len(diags))
+
+ for _, diagnostic := range diags {
+ diagsStrings = append(diagsStrings, fmt.Sprintf(
+ "%s | %s | %s",
+ diagnostic.Severity(),
+ diagnostic.Summary(),
+ diagnostic.Detail()))
+ }
+
+ panic("NewPaginationValueMust received error(s): " + strings.Join(diagsStrings, "\n"))
+ }
+
+ return object
+}
+
+func (t PaginationType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) {
+ if in.Type() == nil {
+ return NewPaginationValueNull(), nil
+ }
+
+ if !in.Type().Equal(t.TerraformType(ctx)) {
+ return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type())
+ }
+
+ if !in.IsKnown() {
+ return NewPaginationValueUnknown(), nil
+ }
+
+ if in.IsNull() {
+ return NewPaginationValueNull(), nil
+ }
+
+ attributes := map[string]attr.Value{}
+
+ val := map[string]tftypes.Value{}
+
+ err := in.As(&val)
+
+ if err != nil {
+ return nil, err
+ }
+
+ for k, v := range val {
+ a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v)
+
+ if err != nil {
+ return nil, err
+ }
+
+ attributes[k] = a
+ }
+
+ return NewPaginationValueMust(PaginationValue{}.AttributeTypes(ctx), attributes), nil
+}
+
+func (t PaginationType) ValueType(ctx context.Context) attr.Value {
+ return PaginationValue{}
+}
+
+var _ basetypes.ObjectValuable = PaginationValue{}
+
+type PaginationValue struct {
+ Page basetypes.Int64Value `tfsdk:"page"`
+ Size basetypes.Int64Value `tfsdk:"size"`
+ Sort basetypes.StringValue `tfsdk:"sort"`
+ TotalPages basetypes.Int64Value `tfsdk:"total_pages"`
+ TotalRows basetypes.Int64Value `tfsdk:"total_rows"`
+ state attr.ValueState
+}
+
+func (v PaginationValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) {
+ attrTypes := make(map[string]tftypes.Type, 5)
+
+ var val tftypes.Value
+ var err error
+
+ attrTypes["page"] = basetypes.Int64Type{}.TerraformType(ctx)
+ attrTypes["size"] = basetypes.Int64Type{}.TerraformType(ctx)
+ attrTypes["sort"] = basetypes.StringType{}.TerraformType(ctx)
+ attrTypes["total_pages"] = basetypes.Int64Type{}.TerraformType(ctx)
+ attrTypes["total_rows"] = basetypes.Int64Type{}.TerraformType(ctx)
+
+ objectType := tftypes.Object{AttributeTypes: attrTypes}
+
+ switch v.state {
+ case attr.ValueStateKnown:
+ vals := make(map[string]tftypes.Value, 5)
+
+ val, err = v.Page.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["page"] = val
+
+ val, err = v.Size.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["size"] = val
+
+ val, err = v.Sort.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["sort"] = val
+
+ val, err = v.TotalPages.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["total_pages"] = val
+
+ val, err = v.TotalRows.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["total_rows"] = val
+
+ if err := tftypes.ValidateValue(objectType, vals); err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ return tftypes.NewValue(objectType, vals), nil
+ case attr.ValueStateNull:
+ return tftypes.NewValue(objectType, nil), nil
+ case attr.ValueStateUnknown:
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), nil
+ default:
+ panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state))
+ }
+}
+
+func (v PaginationValue) IsNull() bool {
+ return v.state == attr.ValueStateNull
+}
+
+func (v PaginationValue) IsUnknown() bool {
+ return v.state == attr.ValueStateUnknown
+}
+
+func (v PaginationValue) String() string {
+ return "PaginationValue"
+}
+
+func (v PaginationValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) {
+ var diags diag.Diagnostics
+
+ attributeTypes := map[string]attr.Type{
+ "page": basetypes.Int64Type{},
+ "size": basetypes.Int64Type{},
+ "sort": basetypes.StringType{},
+ "total_pages": basetypes.Int64Type{},
+ "total_rows": basetypes.Int64Type{},
+ }
+
+ if v.IsNull() {
+ return types.ObjectNull(attributeTypes), diags
+ }
+
+ if v.IsUnknown() {
+ return types.ObjectUnknown(attributeTypes), diags
+ }
+
+ objVal, diags := types.ObjectValue(
+ attributeTypes,
+ map[string]attr.Value{
+ "page": v.Page,
+ "size": v.Size,
+ "sort": v.Sort,
+ "total_pages": v.TotalPages,
+ "total_rows": v.TotalRows,
+ })
+
+ return objVal, diags
+}
+
+func (v PaginationValue) Equal(o attr.Value) bool {
+ other, ok := o.(PaginationValue)
+
+ if !ok {
+ return false
+ }
+
+ if v.state != other.state {
+ return false
+ }
+
+ if v.state != attr.ValueStateKnown {
+ return true
+ }
+
+ if !v.Page.Equal(other.Page) {
+ return false
+ }
+
+ if !v.Size.Equal(other.Size) {
+ return false
+ }
+
+ if !v.Sort.Equal(other.Sort) {
+ return false
+ }
+
+ if !v.TotalPages.Equal(other.TotalPages) {
+ return false
+ }
+
+ if !v.TotalRows.Equal(other.TotalRows) {
+ return false
+ }
+
+ return true
+}
+
+func (v PaginationValue) Type(ctx context.Context) attr.Type {
+ return PaginationType{
+ basetypes.ObjectType{
+ AttrTypes: v.AttributeTypes(ctx),
+ },
+ }
+}
+
+func (v PaginationValue) AttributeTypes(ctx context.Context) map[string]attr.Type {
+ return map[string]attr.Type{
+ "page": basetypes.Int64Type{},
+ "size": basetypes.Int64Type{},
+ "sort": basetypes.StringType{},
+ "total_pages": basetypes.Int64Type{},
+ "total_rows": basetypes.Int64Type{},
+ }
+}
diff --git a/stackit/internal/services/sqlserverflexalpha/instance/functions.go b/stackit/internal/services/sqlserverflexalpha/instance/functions.go
index b451eb70..ee75cd21 100644
--- a/stackit/internal/services/sqlserverflexalpha/instance/functions.go
+++ b/stackit/internal/services/sqlserverflexalpha/instance/functions.go
@@ -6,202 +6,299 @@ import (
"math"
"github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/resource"
"github.com/hashicorp/terraform-plugin-framework/types"
sqlserverflex "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexalpha"
"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/conversion"
- "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core"
- "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/utils"
+ sqlserverflexResGen "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/instance/resources_gen"
)
-func mapFields(
+func mapResponseToModel(
ctx context.Context,
resp *sqlserverflex.GetInstanceResponse,
- model *Model,
- storage *storageModel,
- encryption *encryptionModel,
- network *networkModel,
- region string,
+ m *sqlserverflexResGen.InstanceModel,
+ tfDiags diag.Diagnostics,
) error {
- if resp == nil {
- return fmt.Errorf("response input is nil")
- }
- if model == nil {
- return fmt.Errorf("model input is nil")
- }
- instance := resp
-
- var instanceId string
- if model.InstanceId.ValueString() != "" {
- instanceId = model.InstanceId.ValueString()
- } else if instance.Id != nil {
- instanceId = *instance.Id
- } else {
- return fmt.Errorf("instance id not present")
- }
-
- var storageValues map[string]attr.Value
- if instance.Storage == nil {
- storageValues = map[string]attr.Value{
- "class": storage.Class,
- "size": storage.Size,
- }
- } else {
- storageValues = map[string]attr.Value{
- "class": types.StringValue(*instance.Storage.Class),
- "size": types.Int64PointerValue(instance.Storage.Size),
- }
- }
- storageObject, diags := types.ObjectValue(storageTypes, storageValues)
+ m.BackupSchedule = types.StringValue(resp.GetBackupSchedule())
+ m.Edition = types.StringValue(string(resp.GetEdition()))
+ m.Encryption = handleEncryption(m, resp)
+ m.FlavorId = types.StringValue(resp.GetFlavorId())
+ m.Id = types.StringValue(resp.GetId())
+ m.InstanceId = types.StringValue(resp.GetId())
+ m.IsDeletable = types.BoolValue(resp.GetIsDeletable())
+ m.Name = types.StringValue(resp.GetName())
+ netAcl, diags := types.ListValueFrom(ctx, types.StringType, resp.Network.GetAcl())
+ tfDiags.Append(diags...)
if diags.HasError() {
- return fmt.Errorf("creating storage: %w", core.DiagsToError(diags))
+ return fmt.Errorf(
+ "error converting network acl response value",
+ )
}
-
- var encryptionValues map[string]attr.Value
- if instance.Encryption == nil {
- encryptionValues = map[string]attr.Value{
- "keyring_id": encryption.KeyRingId,
- "key_id": encryption.KeyId,
- "key_version": encryption.KeyVersion,
- "service_account": encryption.ServiceAccount,
- }
- } else {
- encryptionValues = map[string]attr.Value{
- "keyring_id": types.StringValue(*instance.Encryption.KekKeyRingId),
- "key_id": types.StringValue(*instance.Encryption.KekKeyId),
- "key_version": types.StringValue(*instance.Encryption.KekKeyVersion),
- "service_account": types.StringValue(*instance.Encryption.ServiceAccount),
- }
- }
- encryptionObject, diags := types.ObjectValue(encryptionTypes, encryptionValues)
+ net, diags := sqlserverflexResGen.NewNetworkValue(
+ sqlserverflexResGen.NetworkValue{}.AttributeTypes(ctx),
+ map[string]attr.Value{
+ "access_scope": types.StringValue(string(resp.Network.GetAccessScope())),
+ "acl": netAcl,
+ "instance_address": types.StringValue(resp.Network.GetInstanceAddress()),
+ "router_address": types.StringValue(resp.Network.GetRouterAddress()),
+ },
+ )
+ tfDiags.Append(diags...)
if diags.HasError() {
- return fmt.Errorf("creating encryption: %w", core.DiagsToError(diags))
+		return fmt.Errorf(
+			"error converting network response value: access_scope=%v, acl=%v, instance_address=%v, router_address=%v",
+			resp.Network.GetAccessScope(),
+			netAcl,
+			resp.Network.GetInstanceAddress(),
+			resp.Network.GetRouterAddress(),
+		)
}
+ m.Network = net
+ m.Replicas = types.Int64Value(int64(resp.GetReplicas()))
+ m.RetentionDays = types.Int64Value(resp.GetRetentionDays())
+ m.Status = types.StringValue(string(resp.GetStatus()))
- var networkValues map[string]attr.Value
- if instance.Network == nil {
- networkValues = map[string]attr.Value{
- "acl": network.ACL,
- "access_scope": network.AccessScope,
- "instance_address": network.InstanceAddress,
- "router_address": network.RouterAddress,
- }
- } else {
- aclList, diags := types.ListValueFrom(ctx, types.StringType, *instance.Network.Acl)
- if diags.HasError() {
- return fmt.Errorf("creating network (acl list): %w", core.DiagsToError(diags))
- }
-
- var routerAddress string
- if instance.Network.RouterAddress != nil {
- routerAddress = *instance.Network.RouterAddress
- diags.AddWarning("field missing while mapping fields", "router_address was empty in API response")
- }
- if instance.Network.InstanceAddress == nil {
- return fmt.Errorf("creating network: no instance address returned")
- }
- networkValues = map[string]attr.Value{
- "acl": aclList,
- "access_scope": types.StringValue(string(*instance.Network.AccessScope)),
- "instance_address": types.StringValue(*instance.Network.InstanceAddress),
- "router_address": types.StringValue(routerAddress),
- }
- }
- networkObject, diags := types.ObjectValue(networkTypes, networkValues)
+ stor, diags := sqlserverflexResGen.NewStorageValue(
+ sqlserverflexResGen.StorageValue{}.AttributeTypes(ctx),
+ map[string]attr.Value{
+ "class": types.StringValue(resp.Storage.GetClass()),
+ "size": types.Int64Value(resp.Storage.GetSize()),
+ },
+ )
+ tfDiags.Append(diags...)
if diags.HasError() {
- return fmt.Errorf("creating network: %w", core.DiagsToError(diags))
+ return fmt.Errorf("error converting storage response value")
}
+ m.Storage = stor
- simplifiedModelBackupSchedule := utils.SimplifyBackupSchedule(model.BackupSchedule.ValueString())
- // If the value returned by the API is different from the one in the model after simplification,
- // we update the model so that it causes an error in Terraform
- if simplifiedModelBackupSchedule != types.StringPointerValue(instance.BackupSchedule).ValueString() {
- model.BackupSchedule = types.StringPointerValue(instance.BackupSchedule)
- }
-
- if instance.Replicas == nil {
- return fmt.Errorf("instance has no replicas set")
- }
-
- if instance.RetentionDays == nil {
- return fmt.Errorf("instance has no retention days set")
- }
-
- if instance.Version == nil {
- return fmt.Errorf("instance has no version set")
- }
-
- if instance.Edition == nil {
- return fmt.Errorf("instance has no edition set")
- }
-
- if instance.Status == nil {
- return fmt.Errorf("instance has no status set")
- }
-
- if instance.IsDeletable == nil {
- return fmt.Errorf("instance has no IsDeletable set")
- }
-
- model.Id = utils.BuildInternalTerraformId(model.ProjectId.ValueString(), region, instanceId)
- model.InstanceId = types.StringValue(instanceId)
- model.Name = types.StringPointerValue(instance.Name)
- model.FlavorId = types.StringPointerValue(instance.FlavorId)
- model.Replicas = types.Int64Value(int64(*instance.Replicas))
- model.Storage = storageObject
- model.Version = types.StringValue(string(*instance.Version))
- model.Edition = types.StringValue(string(*instance.Edition))
- model.Region = types.StringValue(region)
- model.Encryption = encryptionObject
- model.Network = networkObject
- model.RetentionDays = types.Int64Value(*instance.RetentionDays)
- model.Status = types.StringValue(string(*instance.Status))
- model.IsDeletable = types.BoolValue(*instance.IsDeletable)
+ m.Version = types.StringValue(string(resp.GetVersion()))
return nil
}
+// handleEncryption derives the EncryptionValue to store in the model from the
+// API response. When the response carries no complete encryption block, the
+// model's existing value is kept (or a null value is returned if the model has
+// none), so a user-omitted optional block does not produce a diff.
+func handleEncryption(
+	m *sqlserverflexResGen.InstanceModel,
+	resp *sqlserverflex.GetInstanceResponse,
+) sqlserverflexResGen.EncryptionValue {
+	// Treat encryption as absent unless every field is present in the response.
+	if !resp.HasEncryption() ||
+		resp.Encryption == nil ||
+		resp.Encryption.KekKeyId == nil ||
+		resp.Encryption.KekKeyRingId == nil ||
+		resp.Encryption.KekKeyVersion == nil ||
+		resp.Encryption.ServiceAccount == nil {
+
+		if m.Encryption.IsNull() || m.Encryption.IsUnknown() {
+			return sqlserverflexResGen.NewEncryptionValueNull()
+		}
+		return m.Encryption
+	}
+
+	// NOTE(review): enc starts as a *null* value and only its exported fields
+	// are assigned below; if the generated type tracks an internal value state
+	// (as framework codegen usually does), this value may still report
+	// IsNull() == true despite populated fields. Confirm whether the generated
+	// NewEncryptionValue(...) constructor should be used here instead.
+	enc := sqlserverflexResGen.NewEncryptionValueNull()
+	if kVal, ok := resp.Encryption.GetKekKeyIdOk(); ok {
+		enc.KekKeyId = types.StringValue(kVal)
+	}
+	if kkVal, ok := resp.Encryption.GetKekKeyRingIdOk(); ok {
+		enc.KekKeyRingId = types.StringValue(kkVal)
+	}
+	if kkvVal, ok := resp.Encryption.GetKekKeyVersionOk(); ok {
+		enc.KekKeyVersion = types.StringValue(kkvVal)
+	}
+	if sa, ok := resp.Encryption.GetServiceAccountOk(); ok {
+		enc.ServiceAccount = types.StringValue(sa)
+	}
+	return enc
+}
+
+//func mapFields(
+// ctx context.Context,
+// resp *sqlserverflex.GetInstanceResponse,
+// model *Model,
+// storage *storageModel,
+// encryption *encryptionModel,
+// network *networkModel,
+// region string,
+//) error {
+// if resp == nil {
+// return fmt.Errorf("response input is nil")
+// }
+// if model == nil {
+// return fmt.Errorf("model input is nil")
+// }
+// instance := resp
+//
+// var instanceId string
+// if model.InstanceId.ValueString() != "" {
+// instanceId = model.InstanceId.ValueString()
+// } else if instance.Id != nil {
+// instanceId = *instance.Id
+// } else {
+// return fmt.Errorf("instance id not present")
+// }
+//
+// var storageValues map[string]attr.Value
+// if instance.Storage == nil {
+// storageValues = map[string]attr.Value{
+// "class": storage.Class,
+// "size": storage.Size,
+// }
+// } else {
+// storageValues = map[string]attr.Value{
+// "class": types.StringValue(*instance.Storage.Class),
+// "size": types.Int64PointerValue(instance.Storage.Size),
+// }
+// }
+// storageObject, diags := types.ObjectValue(storageTypes, storageValues)
+// if diags.HasError() {
+// return fmt.Errorf("creating storage: %w", core.DiagsToError(diags))
+// }
+//
+// var encryptionValues map[string]attr.Value
+// if instance.Encryption == nil {
+// encryptionValues = map[string]attr.Value{
+// "keyring_id": encryption.KeyRingId,
+// "key_id": encryption.KeyId,
+// "key_version": encryption.KeyVersion,
+// "service_account": encryption.ServiceAccount,
+// }
+// } else {
+// encryptionValues = map[string]attr.Value{
+// "keyring_id": types.StringValue(*instance.Encryption.KekKeyRingId),
+// "key_id": types.StringValue(*instance.Encryption.KekKeyId),
+// "key_version": types.StringValue(*instance.Encryption.KekKeyVersion),
+// "service_account": types.StringValue(*instance.Encryption.ServiceAccount),
+// }
+// }
+// encryptionObject, diags := types.ObjectValue(encryptionTypes, encryptionValues)
+// if diags.HasError() {
+// return fmt.Errorf("creating encryption: %w", core.DiagsToError(diags))
+// }
+//
+// var networkValues map[string]attr.Value
+// if instance.Network == nil {
+// networkValues = map[string]attr.Value{
+// "acl": network.ACL,
+// "access_scope": network.AccessScope,
+// "instance_address": network.InstanceAddress,
+// "router_address": network.RouterAddress,
+// }
+// } else {
+// aclList, diags := types.ListValueFrom(ctx, types.StringType, *instance.Network.Acl)
+// if diags.HasError() {
+// return fmt.Errorf("creating network (acl list): %w", core.DiagsToError(diags))
+// }
+//
+// var routerAddress string
+// if instance.Network.RouterAddress != nil {
+// routerAddress = *instance.Network.RouterAddress
+// diags.AddWarning("field missing while mapping fields", "router_address was empty in API response")
+// }
+// if instance.Network.InstanceAddress == nil {
+// return fmt.Errorf("creating network: no instance address returned")
+// }
+// networkValues = map[string]attr.Value{
+// "acl": aclList,
+// "access_scope": types.StringValue(string(*instance.Network.AccessScope)),
+// "instance_address": types.StringValue(*instance.Network.InstanceAddress),
+// "router_address": types.StringValue(routerAddress),
+// }
+// }
+// networkObject, diags := types.ObjectValue(networkTypes, networkValues)
+// if diags.HasError() {
+// return fmt.Errorf("creating network: %w", core.DiagsToError(diags))
+// }
+//
+// simplifiedModelBackupSchedule := utils.SimplifyBackupSchedule(model.BackupSchedule.ValueString())
+// // If the value returned by the API is different from the one in the model after simplification,
+// // we update the model so that it causes an error in Terraform
+// if simplifiedModelBackupSchedule != types.StringPointerValue(instance.BackupSchedule).ValueString() {
+// model.BackupSchedule = types.StringPointerValue(instance.BackupSchedule)
+// }
+//
+// if instance.Replicas == nil {
+// return fmt.Errorf("instance has no replicas set")
+// }
+//
+// if instance.RetentionDays == nil {
+// return fmt.Errorf("instance has no retention days set")
+// }
+//
+// if instance.Version == nil {
+// return fmt.Errorf("instance has no version set")
+// }
+//
+// if instance.Edition == nil {
+// return fmt.Errorf("instance has no edition set")
+// }
+//
+// if instance.Status == nil {
+// return fmt.Errorf("instance has no status set")
+// }
+//
+// if instance.IsDeletable == nil {
+// return fmt.Errorf("instance has no IsDeletable set")
+// }
+//
+// model.Id = utils.BuildInternalTerraformId(model.ProjectId.ValueString(), region, instanceId)
+// model.InstanceId = types.StringValue(instanceId)
+// model.Name = types.StringPointerValue(instance.Name)
+// model.FlavorId = types.StringPointerValue(instance.FlavorId)
+// model.Replicas = types.Int64Value(int64(*instance.Replicas))
+// model.Storage = storageObject
+// model.Version = types.StringValue(string(*instance.Version))
+// model.Edition = types.StringValue(string(*instance.Edition))
+// model.Region = types.StringValue(region)
+// model.Encryption = encryptionObject
+// model.Network = networkObject
+// model.RetentionDays = types.Int64Value(*instance.RetentionDays)
+// model.Status = types.StringValue(string(*instance.Status))
+// model.IsDeletable = types.BoolValue(*instance.IsDeletable)
+// return nil
+//}
+
func toCreatePayload(
- model *Model,
- storage *storageModel,
- encryption *encryptionModel,
- network *networkModel,
+ ctx context.Context,
+ model *sqlserverflexResGen.InstanceModel,
) (*sqlserverflex.CreateInstanceRequestPayload, error) {
if model == nil {
return nil, fmt.Errorf("nil model")
}
storagePayload := &sqlserverflex.CreateInstanceRequestPayloadGetStorageArgType{}
- if storage != nil {
- storagePayload.Class = conversion.StringValueToPointer(storage.Class)
- storagePayload.Size = conversion.Int64ValueToPointer(storage.Size)
+ if !model.Storage.IsNull() && !model.Storage.IsUnknown() {
+ storagePayload.Class = model.Storage.Class.ValueStringPointer()
+ storagePayload.Size = model.Storage.Size.ValueInt64Pointer()
}
- var encryptionPayload *sqlserverflex.CreateInstanceRequestPayloadGetEncryptionArgType
- if encryption != nil &&
- !encryption.KeyId.IsNull() && !encryption.KeyId.IsUnknown() &&
- !encryption.KeyRingId.IsNull() && !encryption.KeyRingId.IsUnknown() &&
- !encryption.KeyVersion.IsNull() && !encryption.KeyVersion.IsUnknown() &&
- !encryption.ServiceAccount.IsNull() && !encryption.ServiceAccount.IsUnknown() {
+ var encryptionPayload *sqlserverflex.CreateInstanceRequestPayloadGetEncryptionArgType = nil
+ if !model.Encryption.IsNull() && !model.Encryption.IsUnknown() &&
+		!model.Encryption.KekKeyId.IsNull() && !model.Encryption.KekKeyId.IsUnknown() && model.Encryption.KekKeyId.ValueString() != "" &&
+ !model.Encryption.KekKeyRingId.IsNull() && !model.Encryption.KekKeyRingId.IsUnknown() && model.Encryption.KekKeyRingId.ValueString() != "" &&
+ !model.Encryption.KekKeyVersion.IsNull() && !model.Encryption.KekKeyVersion.IsUnknown() && model.Encryption.KekKeyVersion.ValueString() != "" &&
+ !model.Encryption.ServiceAccount.IsNull() && !model.Encryption.ServiceAccount.IsUnknown() && model.Encryption.ServiceAccount.ValueString() != "" {
encryptionPayload = &sqlserverflex.CreateInstanceRequestPayloadGetEncryptionArgType{
- KekKeyId: conversion.StringValueToPointer(encryption.KeyId),
- KekKeyRingId: conversion.StringValueToPointer(encryption.KeyVersion),
- KekKeyVersion: conversion.StringValueToPointer(encryption.KeyRingId),
- ServiceAccount: conversion.StringValueToPointer(encryption.ServiceAccount),
- }
- }
-
- var aclElements []string
- if network != nil && !network.ACL.IsNull() && !network.ACL.IsUnknown() {
- aclElements = make([]string, 0, len(network.ACL.Elements()))
- diags := network.ACL.ElementsAs(context.TODO(), &aclElements, false)
- if diags.HasError() {
- return nil, fmt.Errorf("creating network: %w", core.DiagsToError(diags))
+ KekKeyId: model.Encryption.KekKeyId.ValueStringPointer(),
+			KekKeyRingId:   model.Encryption.KekKeyRingId.ValueStringPointer(),
+			KekKeyVersion:  model.Encryption.KekKeyVersion.ValueStringPointer(),
+ ServiceAccount: model.Encryption.ServiceAccount.ValueStringPointer(),
}
}
networkPayload := &sqlserverflex.CreateInstanceRequestPayloadGetNetworkArgType{}
- if network != nil {
- networkPayload.AccessScope = sqlserverflex.CreateInstanceRequestPayloadNetworkGetAccessScopeAttributeType(conversion.StringValueToPointer(network.AccessScope))
- networkPayload.Acl = &aclElements
+ if !model.Network.IsNull() && !model.Network.IsUnknown() {
+ networkPayload.AccessScope = sqlserverflex.CreateInstanceRequestPayloadNetworkGetAccessScopeAttributeType(
+ model.Network.AccessScope.ValueStringPointer(),
+ )
+
+ var resList []string
+ diags := model.Network.Acl.ElementsAs(ctx, &resList, false)
+ if diags.HasError() {
+ return nil, fmt.Errorf("error converting network acl list")
+ }
+ networkPayload.Acl = &resList
}
return &sqlserverflex.CreateInstanceRequestPayload{
@@ -216,66 +313,80 @@ func toCreatePayload(
}, nil
}
-//nolint:unused // TODO: remove if not needed later
-func toUpdatePartiallyPayload(
- model *Model,
- storage *storageModel,
- network *networkModel,
-) (*sqlserverflex.UpdateInstancePartiallyRequestPayload, error) {
- if model == nil {
- return nil, fmt.Errorf("nil model")
- }
-
- storagePayload := &sqlserverflex.UpdateInstanceRequestPayloadGetStorageArgType{}
- if storage != nil {
- storagePayload.Size = conversion.Int64ValueToPointer(storage.Size)
- }
-
- var aclElements []string
- if network != nil && !network.ACL.IsNull() && !network.ACL.IsUnknown() {
- aclElements = make([]string, 0, len(network.ACL.Elements()))
- diags := network.ACL.ElementsAs(context.TODO(), &aclElements, false)
- if diags.HasError() {
- return nil, fmt.Errorf("creating network: %w", core.DiagsToError(diags))
- }
- }
-
- networkPayload := &sqlserverflex.UpdateInstancePartiallyRequestPayloadGetNetworkArgType{}
- if network != nil {
- networkPayload.AccessScope = sqlserverflex.UpdateInstancePartiallyRequestPayloadNetworkGetAccessScopeAttributeType(conversion.StringValueToPointer(network.AccessScope))
- networkPayload.Acl = &aclElements
- }
-
- if model.Replicas.ValueInt64() > math.MaxInt32 {
- return nil, fmt.Errorf("replica count too big: %d", model.Replicas.ValueInt64())
- }
- replCount := int32(model.Replicas.ValueInt64()) // nolint:gosec // check is performed above
- return &sqlserverflex.UpdateInstancePartiallyRequestPayload{
- BackupSchedule: conversion.StringValueToPointer(model.BackupSchedule),
- FlavorId: conversion.StringValueToPointer(model.FlavorId),
- Name: conversion.StringValueToPointer(model.Name),
- Network: networkPayload,
- Replicas: sqlserverflex.UpdateInstancePartiallyRequestPayloadGetReplicasAttributeType(&replCount),
- RetentionDays: conversion.Int64ValueToPointer(model.RetentionDays),
- Storage: storagePayload,
- Version: sqlserverflex.UpdateInstancePartiallyRequestPayloadGetVersionAttributeType(conversion.StringValueToPointer(model.Version)),
- }, nil
-}
+////nolint:unused // TODO: remove if not needed later
+//func toUpdatePartiallyPayload(
+// model *Model,
+// storage *storageModel,
+// network *networkModel,
+//) (*sqlserverflex.UpdateInstancePartiallyRequestPayload, error) {
+// if model == nil {
+// return nil, fmt.Errorf("nil model")
+// }
+//
+// storagePayload := &sqlserverflex.UpdateInstanceRequestPayloadGetStorageArgType{}
+// if storage != nil {
+// storagePayload.Size = conversion.Int64ValueToPointer(storage.Size)
+// }
+//
+// var aclElements []string
+// if network != nil && !network.ACL.IsNull() && !network.ACL.IsUnknown() {
+// aclElements = make([]string, 0, len(network.ACL.Elements()))
+// diags := network.ACL.ElementsAs(context.TODO(), &aclElements, false)
+// if diags.HasError() {
+// return nil, fmt.Errorf("creating network: %w", core.DiagsToError(diags))
+// }
+// }
+//
+// networkPayload := &sqlserverflex.UpdateInstancePartiallyRequestPayloadGetNetworkArgType{}
+// if network != nil {
+// networkPayload.AccessScope = sqlserverflex.UpdateInstancePartiallyRequestPayloadNetworkGetAccessScopeAttributeType(conversion.StringValueToPointer(network.AccessScope))
+// networkPayload.Acl = &aclElements
+// }
+//
+// if model.Replicas.ValueInt64() > math.MaxInt32 {
+// return nil, fmt.Errorf("replica count too big: %d", model.Replicas.ValueInt64())
+// }
+// replCount := int32(model.Replicas.ValueInt64()) // nolint:gosec // check is performed above
+// return &sqlserverflex.UpdateInstancePartiallyRequestPayload{
+// BackupSchedule: conversion.StringValueToPointer(model.BackupSchedule),
+// FlavorId: conversion.StringValueToPointer(model.FlavorId),
+// Name: conversion.StringValueToPointer(model.Name),
+// Network: networkPayload,
+// Replicas: sqlserverflex.UpdateInstancePartiallyRequestPayloadGetReplicasAttributeType(&replCount),
+// RetentionDays: conversion.Int64ValueToPointer(model.RetentionDays),
+// Storage: storagePayload,
+// Version: sqlserverflex.UpdateInstancePartiallyRequestPayloadGetVersionAttributeType(conversion.StringValueToPointer(model.Version)),
+// }, nil
+//}
// TODO: check func with his args
func toUpdatePayload(
- _ *Model,
- _ *storageModel,
- _ *networkModel,
+ ctx context.Context,
+ m *sqlserverflexResGen.InstanceModel,
+ resp *resource.UpdateResponse,
) (*sqlserverflex.UpdateInstanceRequestPayload, error) {
+	if m.Replicas.ValueInt64() > math.MaxUint32 || m.Replicas.ValueInt64() < 0 {
+		return nil, fmt.Errorf("replicas value out of range for uint32: %d", m.Replicas.ValueInt64())
+ }
+ replVal := sqlserverflex.Replicas(uint32(m.Replicas.ValueInt64()))
+
+ var netAcl []string
+ diags := m.Network.Acl.ElementsAs(ctx, &netAcl, false)
+ resp.Diagnostics.Append(diags...)
+ if diags.HasError() {
+ return nil, fmt.Errorf("error converting model network acl value")
+ }
return &sqlserverflex.UpdateInstanceRequestPayload{
- BackupSchedule: nil,
- FlavorId: nil,
- Name: nil,
- Network: nil,
- Replicas: nil,
- RetentionDays: nil,
- Storage: nil,
- Version: nil,
+ BackupSchedule: m.BackupSchedule.ValueStringPointer(),
+ FlavorId: m.FlavorId.ValueStringPointer(),
+ Name: m.Name.ValueStringPointer(),
+ Network: &sqlserverflex.CreateInstanceRequestPayloadNetwork{
+ AccessScope: sqlserverflex.CreateInstanceRequestPayloadNetworkGetAccessScopeAttributeType(m.Network.AccessScope.ValueStringPointer()),
+ Acl: &netAcl,
+ },
+ Replicas: &replVal,
+ RetentionDays: m.RetentionDays.ValueInt64Pointer(),
+ Storage: &sqlserverflex.StorageUpdate{Size: m.Storage.Size.ValueInt64Pointer()},
+ Version: sqlserverflex.UpdateInstanceRequestPayloadGetVersionAttributeType(m.Version.ValueStringPointer()),
}, nil
}
diff --git a/stackit/internal/services/sqlserverflexalpha/instance/planModifiers.yaml b/stackit/internal/services/sqlserverflexalpha/instance/planModifiers.yaml
index 2e72ba16..71d4cbe4 100644
--- a/stackit/internal/services/sqlserverflexalpha/instance/planModifiers.yaml
+++ b/stackit/internal/services/sqlserverflexalpha/instance/planModifiers.yaml
@@ -21,7 +21,6 @@ fields:
- name: 'name'
modifiers:
- 'UseStateForUnknown'
- - 'RequiresReplace'
- name: 'backup_schedule'
modifiers:
@@ -31,24 +30,28 @@ fields:
validators:
- validate.NoSeparator
modifiers:
+ - 'UseStateForUnknown'
- 'RequiresReplace'
- name: 'encryption.kek_key_version'
validators:
- validate.NoSeparator
modifiers:
+ - 'UseStateForUnknown'
- 'RequiresReplace'
- name: 'encryption.kek_key_ring_id'
validators:
- validate.NoSeparator
modifiers:
+ - 'UseStateForUnknown'
- 'RequiresReplace'
- name: 'encryption.service_account'
validators:
- validate.NoSeparator
modifiers:
+ - 'UseStateForUnknown'
- 'RequiresReplace'
- name: 'network.access_scope'
@@ -76,6 +79,7 @@ fields:
- name: 'region'
modifiers:
+ - 'UseStateForUnknown'
- 'RequiresReplace'
- name: 'retention_days'
diff --git a/stackit/internal/services/sqlserverflexalpha/instance/resource.go b/stackit/internal/services/sqlserverflexalpha/instance/resource.go
index 092805f3..66436cac 100644
--- a/stackit/internal/services/sqlserverflexalpha/instance/resource.go
+++ b/stackit/internal/services/sqlserverflexalpha/instance/resource.go
@@ -4,35 +4,25 @@ package sqlserverflex
import (
"context"
+ _ "embed"
"fmt"
"net/http"
- "regexp"
"strings"
"time"
- "github.com/hashicorp/terraform-plugin-framework/resource/schema/boolplanmodifier"
- "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/identityschema"
+ postgresflexUtils "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/postgresflexalpha/utils"
+ sqlserverflexalpha2 "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/instance/resources_gen"
sqlserverflexUtils "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/utils"
- "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
- "github.com/hashicorp/terraform-plugin-framework/attr"
"github.com/hashicorp/terraform-plugin-framework/path"
- "github.com/hashicorp/terraform-plugin-framework/schema/validator"
- "github.com/hashicorp/terraform-plugin-framework/types/basetypes"
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+ "github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-log/tflog"
+ "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexalpha"
"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/conversion"
"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core"
"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/utils"
- "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/validate"
-
- "github.com/hashicorp/terraform-plugin-framework/resource"
- "github.com/hashicorp/terraform-plugin-framework/resource/schema"
- "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier"
- "github.com/hashicorp/terraform-plugin-framework/resource/schema/objectplanmodifier"
- "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
- "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
- "github.com/hashicorp/terraform-plugin-framework/types"
- "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexalpha"
wait "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/wait/sqlserverflexalpha"
"github.com/stackitcloud/stackit-sdk-go/core/oapierror"
@@ -44,6 +34,7 @@ var (
_ resource.ResourceWithConfigure = &instanceResource{}
_ resource.ResourceWithImportState = &instanceResource{}
_ resource.ResourceWithModifyPlan = &instanceResource{}
+ _ resource.ResourceWithIdentity = &instanceResource{}
)
//nolint:unused // TODO: remove if not needed later
@@ -52,63 +43,10 @@ var validNodeTypes []string = []string{
"Replica",
}
-type Model struct {
- Id types.String `tfsdk:"id"` // needed by TF
- InstanceId types.String `tfsdk:"instance_id"`
- ProjectId types.String `tfsdk:"project_id"`
- Name types.String `tfsdk:"name"`
- BackupSchedule types.String `tfsdk:"backup_schedule"`
- FlavorId types.String `tfsdk:"flavor_id"`
- Encryption types.Object `tfsdk:"encryption"`
- IsDeletable types.Bool `tfsdk:"is_deletable"`
- Storage types.Object `tfsdk:"storage"`
- Status types.String `tfsdk:"status"`
- Version types.String `tfsdk:"version"`
- Replicas types.Int64 `tfsdk:"replicas"`
- Region types.String `tfsdk:"region"`
- Network types.Object `tfsdk:"network"`
- Edition types.String `tfsdk:"edition"`
- RetentionDays types.Int64 `tfsdk:"retention_days"`
-}
-
-type encryptionModel struct {
- KeyRingId types.String `tfsdk:"keyring_id"`
- KeyId types.String `tfsdk:"key_id"`
- KeyVersion types.String `tfsdk:"key_version"`
- ServiceAccount types.String `tfsdk:"service_account"`
-}
-
-var encryptionTypes = map[string]attr.Type{
- "keyring_id": basetypes.StringType{},
- "key_id": basetypes.StringType{},
- "key_version": basetypes.StringType{},
- "service_account": basetypes.StringType{},
-}
-
-type networkModel struct {
- ACL types.List `tfsdk:"acl"`
- AccessScope types.String `tfsdk:"access_scope"`
- InstanceAddress types.String `tfsdk:"instance_address"`
- RouterAddress types.String `tfsdk:"router_address"`
-}
-
-var networkTypes = map[string]attr.Type{
- "acl": basetypes.ListType{ElemType: types.StringType},
- "access_scope": basetypes.StringType{},
- "instance_address": basetypes.StringType{},
- "router_address": basetypes.StringType{},
-}
-
-// Struct corresponding to Model.Storage
-type storageModel struct {
- Class types.String `tfsdk:"class"`
- Size types.Int64 `tfsdk:"size"`
-}
-
-// Types corresponding to storageModel
-var storageTypes = map[string]attr.Type{
- "class": basetypes.StringType{},
- "size": basetypes.Int64Type{},
+type InstanceResourceIdentityModel struct {
+ ProjectID types.String `tfsdk:"project_id"`
+ Region types.String `tfsdk:"region"`
+ InstanceID types.String `tfsdk:"instance_id"`
}
// NewInstanceResource is a helper function to simplify the provider implementation.
@@ -154,17 +92,21 @@ func (r *instanceResource) ModifyPlan(
req resource.ModifyPlanRequest,
resp *resource.ModifyPlanResponse,
) { // nolint:gocritic // function signature required by Terraform
- var configModel Model
+
// skip initial empty configuration to avoid follow-up errors
if req.Config.Raw.IsNull() {
return
}
+ var configModel sqlserverflexalpha2.InstanceModel
resp.Diagnostics.Append(req.Config.Get(ctx, &configModel)...)
if resp.Diagnostics.HasError() {
return
}
- var planModel Model
+ if req.Plan.Raw.IsNull() {
+ return
+ }
+ var planModel sqlserverflexalpha2.InstanceModel
resp.Diagnostics.Append(req.Plan.Get(ctx, &planModel)...)
if resp.Diagnostics.HasError() {
return
@@ -175,257 +117,303 @@ func (r *instanceResource) ModifyPlan(
return
}
+ var identityModel InstanceResourceIdentityModel
+ identityModel.ProjectID = planModel.ProjectId
+ identityModel.Region = planModel.Region
+ if !planModel.InstanceId.IsNull() && !planModel.InstanceId.IsUnknown() {
+ identityModel.InstanceID = planModel.InstanceId
+ }
+
+ resp.Diagnostics.Append(resp.Identity.Set(ctx, identityModel)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
resp.Diagnostics.Append(resp.Plan.Set(ctx, planModel)...)
if resp.Diagnostics.HasError() {
return
}
}
+//go:embed planModifiers.yaml
+var modifiersFileByte []byte
+
// Schema defines the schema for the resource.
-func (r *instanceResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
- descriptions := map[string]string{
- "main": "SQLServer Flex ALPHA instance resource schema. Must have a `region` specified in the provider configuration.",
- "id": "Terraform's internal resource ID. It is structured as \"`project_id`,`region`,`instance_id`\".",
- "instance_id": "ID of the SQLServer Flex instance.",
- "project_id": "STACKIT project ID to which the instance is associated.",
- "name": "Instance name.",
- "access_scope": "The access scope of the instance. (SNA | PUBLIC)",
- "flavor_id": "The flavor ID of the instance.",
- "acl": "The Access Control List (ACL) for the SQLServer Flex instance.",
- "backup_schedule": `The backup schedule. Should follow the cron scheduling system format (e.g. "0 0 * * *")`,
- "region": "The resource region. If not defined, the provider region is used.",
- "encryption": "The encryption block.",
- "replicas": "The number of replicas of the SQLServer Flex instance.",
- "network": "The network block.",
- "keyring_id": "STACKIT KMS - KeyRing ID of the encryption key to use.",
- "key_id": "STACKIT KMS - Key ID of the encryption key to use.",
- "key_version": "STACKIT KMS - Key version to use in the encryption key.",
- "service:account": "STACKIT KMS - service account to use in the encryption key.",
- "instance_address": "The returned instance IP address of the SQLServer Flex instance.",
- "router_address": "The returned router IP address of the SQLServer Flex instance.",
+func (r *instanceResource) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
+ //descriptions := map[string]string{
+ // "main": "SQLServer Flex ALPHA instance resource schema. Must have a `region` specified in the provider configuration.",
+ // "id": "Terraform's internal resource ID. It is structured as \"`project_id`,`region`,`instance_id`\".",
+ // "instance_id": "ID of the SQLServer Flex instance.",
+ // "project_id": "STACKIT project ID to which the instance is associated.",
+ // "name": "Instance name.",
+ // "access_scope": "The access scope of the instance. (SNA | PUBLIC)",
+ // "flavor_id": "The flavor ID of the instance.",
+ // "acl": "The Access Control List (ACL) for the SQLServer Flex instance.",
+ // "backup_schedule": `The backup schedule. Should follow the cron scheduling system format (e.g. "0 0 * * *")`,
+ // "region": "The resource region. If not defined, the provider region is used.",
+ // "encryption": "The encryption block.",
+ // "replicas": "The number of replicas of the SQLServer Flex instance.",
+ // "network": "The network block.",
+ // "keyring_id": "STACKIT KMS - KeyRing ID of the encryption key to use.",
+ // "key_id": "STACKIT KMS - Key ID of the encryption key to use.",
+ // "key_version": "STACKIT KMS - Key version to use in the encryption key.",
+ // "service_account": "STACKIT KMS - service account to use in the encryption key.",
+ // "instance_address": "The returned instance IP address of the SQLServer Flex instance.",
+ // "router_address": "The returned router IP address of the SQLServer Flex instance.",
+ //}
+
+ schema := sqlserverflexalpha2.InstanceResourceSchema(ctx)
+
+ fields, err := postgresflexUtils.ReadModifiersConfig(modifiersFileByte)
+ if err != nil {
+ resp.Diagnostics.AddError("error during read modifiers config file", err.Error())
+ return
}
- resp.Schema = schema.Schema{
- Description: descriptions["main"],
- Attributes: map[string]schema.Attribute{
- "id": schema.StringAttribute{
- Description: descriptions["id"],
- Computed: true,
- PlanModifiers: []planmodifier.String{
- stringplanmodifier.UseStateForUnknown(),
- },
+ err = postgresflexUtils.AddPlanModifiersToResourceSchema(fields, &schema)
+ if err != nil {
+ resp.Diagnostics.AddError("error adding plan modifiers", err.Error())
+ return
+ }
+ resp.Schema = schema
+
+ //resp.Schema = schema.Schema{
+ // Description: descriptions["main"],
+ // Attributes: map[string]schema.Attribute{
+ // "id": schema.StringAttribute{
+ // Description: descriptions["id"],
+ // Computed: true,
+ // PlanModifiers: []planmodifier.String{
+ // stringplanmodifier.UseStateForUnknown(),
+ // },
+ // },
+ // "instance_id": schema.StringAttribute{
+ // Description: descriptions["instance_id"],
+ // Computed: true,
+ // PlanModifiers: []planmodifier.String{
+ // stringplanmodifier.UseStateForUnknown(),
+ // },
+ // Validators: []validator.String{
+ // validate.UUID(),
+ // validate.NoSeparator(),
+ // },
+ // },
+ // "project_id": schema.StringAttribute{
+ // Description: descriptions["project_id"],
+ // Required: true,
+ // PlanModifiers: []planmodifier.String{
+ // stringplanmodifier.RequiresReplace(),
+ // },
+ // Validators: []validator.String{
+ // validate.UUID(),
+ // validate.NoSeparator(),
+ // },
+ // },
+ // "name": schema.StringAttribute{
+ // Description: descriptions["name"],
+ // Required: true,
+ // Validators: []validator.String{
+ // stringvalidator.LengthAtLeast(1),
+ // stringvalidator.RegexMatches(
+ // regexp.MustCompile("^[a-z]([-a-z0-9]*[a-z0-9])?$"),
+ // "must start with a letter, must have lower case letters, numbers or hyphens, and no hyphen at the end",
+ // ),
+ // },
+ // },
+ // "backup_schedule": schema.StringAttribute{
+ // Description: descriptions["backup_schedule"],
+ // Optional: true,
+ // Computed: true,
+ // PlanModifiers: []planmodifier.String{
+ // stringplanmodifier.UseStateForUnknown(),
+ // },
+ // },
+ // "is_deletable": schema.BoolAttribute{
+ // Description: descriptions["is_deletable"],
+ // Optional: true,
+ // Computed: true,
+ // PlanModifiers: []planmodifier.Bool{
+ // boolplanmodifier.UseStateForUnknown(),
+ // },
+ // },
+ // "flavor_id": schema.StringAttribute{
+ // PlanModifiers: []planmodifier.String{
+ // stringplanmodifier.RequiresReplace(),
+ // stringplanmodifier.UseStateForUnknown(),
+ // },
+ // Required: true,
+ // },
+ // "replicas": schema.Int64Attribute{
+ // Computed: true,
+ // PlanModifiers: []planmodifier.Int64{
+ // int64planmodifier.UseStateForUnknown(),
+ // },
+ // },
+ // "storage": schema.SingleNestedAttribute{
+ // Optional: true,
+ // Computed: true,
+ // PlanModifiers: []planmodifier.Object{
+ // objectplanmodifier.UseStateForUnknown(),
+ // },
+ // Attributes: map[string]schema.Attribute{
+ // "class": schema.StringAttribute{
+ // Optional: true,
+ // Computed: true,
+ // PlanModifiers: []planmodifier.String{
+ // stringplanmodifier.RequiresReplace(),
+ // stringplanmodifier.UseStateForUnknown(),
+ // },
+ // },
+ // "size": schema.Int64Attribute{
+ // Optional: true,
+ // Computed: true,
+ // PlanModifiers: []planmodifier.Int64{
+ // int64planmodifier.UseStateForUnknown(),
+ // },
+ // },
+ // },
+ // },
+ // "version": schema.StringAttribute{
+ // Optional: true,
+ // Computed: true,
+ // PlanModifiers: []planmodifier.String{
+ // stringplanmodifier.RequiresReplace(),
+ // stringplanmodifier.UseStateForUnknown(),
+ // },
+ // },
+ // "edition": schema.StringAttribute{
+ // Computed: true,
+ // PlanModifiers: []planmodifier.String{
+ // stringplanmodifier.RequiresReplace(),
+ // stringplanmodifier.UseStateForUnknown(),
+ // },
+ // },
+ // "retention_days": schema.Int64Attribute{
+ // Optional: true,
+ // Computed: true,
+ // PlanModifiers: []planmodifier.Int64{
+ // int64planmodifier.UseStateForUnknown(),
+ // },
+ // },
+ // "region": schema.StringAttribute{
+ // Optional: true,
+ // // must be computed to allow for storing the override value from the provider
+ // Computed: true,
+ // Description: descriptions["region"],
+ // PlanModifiers: []planmodifier.String{
+ // stringplanmodifier.RequiresReplace(),
+ // },
+ // },
+ // "status": schema.StringAttribute{
+ // Optional: true,
+ // // must be computed to allow for storing the override value from the provider
+ // Computed: true,
+ // Description: descriptions["status"],
+ // },
+ // "encryption": schema.SingleNestedAttribute{
+ // Optional: true,
+ // PlanModifiers: []planmodifier.Object{
+ // objectplanmodifier.RequiresReplace(),
+ // objectplanmodifier.UseStateForUnknown(),
+ // },
+ // Attributes: map[string]schema.Attribute{
+ // "key_id": schema.StringAttribute{
+ // Description: descriptions["key_id"],
+ // Required: true,
+ // PlanModifiers: []planmodifier.String{
+ // stringplanmodifier.RequiresReplace(),
+ // },
+ // Validators: []validator.String{
+ // validate.NoSeparator(),
+ // },
+ // },
+ // "key_version": schema.StringAttribute{
+ // Description: descriptions["key_version"],
+ // Required: true,
+ // PlanModifiers: []planmodifier.String{
+ // stringplanmodifier.RequiresReplace(),
+ // },
+ // Validators: []validator.String{
+ // validate.NoSeparator(),
+ // },
+ // },
+ // "keyring_id": schema.StringAttribute{
+ // Description: descriptions["keyring_id"],
+ // Required: true,
+ // PlanModifiers: []planmodifier.String{
+ // stringplanmodifier.RequiresReplace(),
+ // },
+ // Validators: []validator.String{
+ // validate.NoSeparator(),
+ // },
+ // },
+ // "service_account": schema.StringAttribute{
+ // Description: descriptions["service_account"],
+ // Required: true,
+ // PlanModifiers: []planmodifier.String{
+ // stringplanmodifier.RequiresReplace(),
+ // },
+ // Validators: []validator.String{
+ // validate.NoSeparator(),
+ // },
+ // },
+ // },
+ // Description: descriptions["encryption"],
+ // },
+ // "network": schema.SingleNestedAttribute{
+ // Required: true,
+ // Attributes: map[string]schema.Attribute{
+ // "access_scope": schema.StringAttribute{
+ // Description: descriptions["access_scope"],
+ // Required: true,
+ // PlanModifiers: []planmodifier.String{
+ // stringplanmodifier.RequiresReplace(),
+ // stringplanmodifier.UseStateForUnknown(),
+ // },
+ // Validators: []validator.String{
+ // validate.NoSeparator(),
+ // },
+ // },
+ // "acl": schema.ListAttribute{
+ // Description: descriptions["acl"],
+ // ElementType: types.StringType,
+ // Required: true,
+ // PlanModifiers: []planmodifier.List{
+ // listplanmodifier.UseStateForUnknown(),
+ // },
+ // },
+ // "instance_address": schema.StringAttribute{
+ // Description: descriptions["instance_address"],
+ // Computed: true,
+ // PlanModifiers: []planmodifier.String{
+ // stringplanmodifier.UseStateForUnknown(),
+ // },
+ // },
+ // "router_address": schema.StringAttribute{
+ // Description: descriptions["router_address"],
+ // Computed: true,
+ // PlanModifiers: []planmodifier.String{
+ // stringplanmodifier.UseStateForUnknown(),
+ // },
+ // },
+ // },
+ // Description: descriptions["network"],
+ // },
+ // },
+ //}
+}
+
+func (r *instanceResource) IdentitySchema(_ context.Context, _ resource.IdentitySchemaRequest, resp *resource.IdentitySchemaResponse) {
+ resp.IdentitySchema = identityschema.Schema{
+ Attributes: map[string]identityschema.Attribute{
+ "project_id": identityschema.StringAttribute{
+ RequiredForImport: true, // must be set during import by the practitioner
},
- "instance_id": schema.StringAttribute{
- Description: descriptions["instance_id"],
- Computed: true,
- PlanModifiers: []planmodifier.String{
- stringplanmodifier.UseStateForUnknown(),
- },
- Validators: []validator.String{
- validate.UUID(),
- validate.NoSeparator(),
- },
+ "region": identityschema.StringAttribute{
+ RequiredForImport: true, // can be defaulted by the provider configuration
},
- "project_id": schema.StringAttribute{
- Description: descriptions["project_id"],
- Required: true,
- PlanModifiers: []planmodifier.String{
- stringplanmodifier.RequiresReplace(),
- },
- Validators: []validator.String{
- validate.UUID(),
- validate.NoSeparator(),
- },
- },
- "name": schema.StringAttribute{
- Description: descriptions["name"],
- Required: true,
- Validators: []validator.String{
- stringvalidator.LengthAtLeast(1),
- stringvalidator.RegexMatches(
- regexp.MustCompile("^[a-z]([-a-z0-9]*[a-z0-9])?$"),
- "must start with a letter, must have lower case letters, numbers or hyphens, and no hyphen at the end",
- ),
- },
- },
- "backup_schedule": schema.StringAttribute{
- Description: descriptions["backup_schedule"],
- Optional: true,
- Computed: true,
- PlanModifiers: []planmodifier.String{
- stringplanmodifier.UseStateForUnknown(),
- },
- },
- "is_deletable": schema.BoolAttribute{
- Description: descriptions["is_deletable"],
- Optional: true,
- Computed: true,
- PlanModifiers: []planmodifier.Bool{
- boolplanmodifier.UseStateForUnknown(),
- },
- },
- "flavor_id": schema.StringAttribute{
- PlanModifiers: []planmodifier.String{
- stringplanmodifier.RequiresReplace(),
- stringplanmodifier.UseStateForUnknown(),
- },
- Required: true,
- },
- "replicas": schema.Int64Attribute{
- Computed: true,
- PlanModifiers: []planmodifier.Int64{
- int64planmodifier.UseStateForUnknown(),
- },
- },
- "storage": schema.SingleNestedAttribute{
- Optional: true,
- Computed: true,
- PlanModifiers: []planmodifier.Object{
- objectplanmodifier.UseStateForUnknown(),
- },
- Attributes: map[string]schema.Attribute{
- "class": schema.StringAttribute{
- Optional: true,
- Computed: true,
- PlanModifiers: []planmodifier.String{
- stringplanmodifier.RequiresReplace(),
- stringplanmodifier.UseStateForUnknown(),
- },
- },
- "size": schema.Int64Attribute{
- Optional: true,
- Computed: true,
- PlanModifiers: []planmodifier.Int64{
- int64planmodifier.UseStateForUnknown(),
- },
- },
- },
- },
- "version": schema.StringAttribute{
- Optional: true,
- Computed: true,
- PlanModifiers: []planmodifier.String{
- stringplanmodifier.RequiresReplace(),
- stringplanmodifier.UseStateForUnknown(),
- },
- },
- "edition": schema.StringAttribute{
- Computed: true,
- PlanModifiers: []planmodifier.String{
- stringplanmodifier.RequiresReplace(),
- stringplanmodifier.UseStateForUnknown(),
- },
- },
- "retention_days": schema.Int64Attribute{
- Optional: true,
- Computed: true,
- PlanModifiers: []planmodifier.Int64{
- int64planmodifier.UseStateForUnknown(),
- },
- },
- "region": schema.StringAttribute{
- Optional: true,
- // must be computed to allow for storing the override value from the provider
- Computed: true,
- Description: descriptions["region"],
- PlanModifiers: []planmodifier.String{
- stringplanmodifier.RequiresReplace(),
- },
- },
- "status": schema.StringAttribute{
- Optional: true,
- // must be computed to allow for storing the override value from the provider
- Computed: true,
- Description: descriptions["status"],
- },
- "encryption": schema.SingleNestedAttribute{
- Optional: true,
- PlanModifiers: []planmodifier.Object{
- objectplanmodifier.RequiresReplace(),
- objectplanmodifier.UseStateForUnknown(),
- },
- Attributes: map[string]schema.Attribute{
- "key_id": schema.StringAttribute{
- Description: descriptions["key_id"],
- Required: true,
- PlanModifiers: []planmodifier.String{
- stringplanmodifier.RequiresReplace(),
- },
- Validators: []validator.String{
- validate.NoSeparator(),
- },
- },
- "key_version": schema.StringAttribute{
- Description: descriptions["key_version"],
- Required: true,
- PlanModifiers: []planmodifier.String{
- stringplanmodifier.RequiresReplace(),
- },
- Validators: []validator.String{
- validate.NoSeparator(),
- },
- },
- "keyring_id": schema.StringAttribute{
- Description: descriptions["keyring_id"],
- Required: true,
- PlanModifiers: []planmodifier.String{
- stringplanmodifier.RequiresReplace(),
- },
- Validators: []validator.String{
- validate.NoSeparator(),
- },
- },
- "service_account": schema.StringAttribute{
- Description: descriptions["service_account"],
- Required: true,
- PlanModifiers: []planmodifier.String{
- stringplanmodifier.RequiresReplace(),
- },
- Validators: []validator.String{
- validate.NoSeparator(),
- },
- },
- },
- Description: descriptions["encryption"],
- },
- "network": schema.SingleNestedAttribute{
- Required: true,
- Attributes: map[string]schema.Attribute{
- "access_scope": schema.StringAttribute{
- Description: descriptions["access_scope"],
- Required: true,
- PlanModifiers: []planmodifier.String{
- stringplanmodifier.RequiresReplace(),
- stringplanmodifier.UseStateForUnknown(),
- },
- Validators: []validator.String{
- validate.NoSeparator(),
- },
- },
- "acl": schema.ListAttribute{
- Description: descriptions["acl"],
- ElementType: types.StringType,
- Required: true,
- PlanModifiers: []planmodifier.List{
- listplanmodifier.UseStateForUnknown(),
- },
- },
- "instance_address": schema.StringAttribute{
- Description: descriptions["instance_address"],
- Computed: true,
- PlanModifiers: []planmodifier.String{
- stringplanmodifier.UseStateForUnknown(),
- },
- },
- "router_address": schema.StringAttribute{
- Description: descriptions["router_address"],
- Computed: true,
- PlanModifiers: []planmodifier.String{
- stringplanmodifier.UseStateForUnknown(),
- },
- },
- },
- Description: descriptions["network"],
+ "instance_id": identityschema.StringAttribute{
+ RequiredForImport: true, // must be set during import by the practitioner
},
},
}
@@ -437,49 +425,29 @@ func (r *instanceResource) Create(
req resource.CreateRequest,
resp *resource.CreateResponse,
) { // nolint:gocritic // function signature required by Terraform
- var model Model
+ var model sqlserverflexalpha2.InstanceModel
diags := req.Plan.Get(ctx, &model)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
return
}
+ // Read identity data
+ var identityData InstanceResourceIdentityModel
+ resp.Diagnostics.Append(req.Identity.Get(ctx, &identityData)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
ctx = core.InitProviderContext(ctx)
- projectId := model.ProjectId.ValueString()
- region := model.Region.ValueString()
+ projectId := identityData.ProjectID.ValueString()
+ region := identityData.Region.ValueString()
ctx = tflog.SetField(ctx, "project_id", projectId)
ctx = tflog.SetField(ctx, "region", region)
- var storage = &storageModel{}
- if !model.Storage.IsNull() && !model.Storage.IsUnknown() {
- diags = model.Storage.As(ctx, storage, basetypes.ObjectAsOptions{})
- resp.Diagnostics.Append(diags...)
- if resp.Diagnostics.HasError() {
- return
- }
- }
-
- var encryption = &encryptionModel{}
- if !model.Encryption.IsNull() && !model.Encryption.IsUnknown() {
- diags = model.Encryption.As(ctx, encryption, basetypes.ObjectAsOptions{})
- resp.Diagnostics.Append(diags...)
- if resp.Diagnostics.HasError() {
- return
- }
- }
-
- var network = &networkModel{}
- if !model.Network.IsNull() && !model.Network.IsUnknown() {
- diags = model.Network.As(ctx, network, basetypes.ObjectAsOptions{})
- resp.Diagnostics.Append(diags...)
- if resp.Diagnostics.HasError() {
- return
- }
- }
-
// Generate API request body from model
- payload, err := toCreatePayload(&model, storage, encryption, network)
+ payload, err := toCreatePayload(ctx, &model)
if err != nil {
core.LogAndAddError(
ctx,
@@ -503,6 +471,18 @@ func (r *instanceResource) Create(
ctx = core.LogResponse(ctx)
instanceId := *createResp.Id
+
+ // Set data returned by API in identity
+ identity := InstanceResourceIdentityModel{
+ ProjectID: types.StringValue(projectId),
+ Region: types.StringValue(region),
+ InstanceID: types.StringValue(instanceId),
+ }
+ resp.Diagnostics.Append(resp.Identity.Set(ctx, identity)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
utils.SetAndLogStateFields(
ctx, &resp.Diagnostics, &resp.State, map[string]any{
"id": utils.BuildInternalTerraformId(projectId, region, instanceId),
@@ -521,7 +501,11 @@ func (r *instanceResource) Create(
projectId,
instanceId,
region,
- ).SetSleepBeforeWait(30 * time.Second).WaitWithContext(ctx)
+ ).SetSleepBeforeWait(
+ 30 * time.Second,
+ ).SetTimeout(
+ 90 * time.Minute,
+ ).WaitWithContext(ctx)
if err != nil {
core.LogAndAddError(
ctx,
@@ -543,7 +527,8 @@ func (r *instanceResource) Create(
}
// Map response body to schema
- err = mapFields(ctx, waitResp, &model, storage, encryption, network, region)
+ // err = mapFields(ctx, waitResp, &model, storage, encryption, network, region)
+ err = mapResponseToModel(ctx, waitResp, &model, resp.Diagnostics)
if err != nil {
core.LogAndAddError(
ctx,
@@ -560,11 +545,6 @@ func (r *instanceResource) Create(
return
}
- // After the instance creation, database might not be ready to accept connections immediately.
- // That is why we add a sleep
- // TODO - can get removed?
- time.Sleep(120 * time.Second)
-
tflog.Info(ctx, "SQLServer Flex instance created")
}
@@ -574,13 +554,20 @@ func (r *instanceResource) Read(
req resource.ReadRequest,
resp *resource.ReadResponse,
) { // nolint:gocritic // function signature required by Terraform
- var model Model
+ var model sqlserverflexalpha2.InstanceModel
diags := req.State.Get(ctx, &model)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
return
}
+ // Read identity data
+ var identityData InstanceResourceIdentityModel
+ resp.Diagnostics.Append(req.Identity.Get(ctx, &identityData)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
ctx = core.InitProviderContext(ctx)
projectId := model.ProjectId.ValueString()
@@ -591,33 +578,6 @@ func (r *instanceResource) Read(
ctx = tflog.SetField(ctx, "instance_id", instanceId)
ctx = tflog.SetField(ctx, "region", region)
- var storage = &storageModel{}
- if !model.Storage.IsNull() && !model.Storage.IsUnknown() {
- diags = model.Storage.As(ctx, storage, basetypes.ObjectAsOptions{})
- resp.Diagnostics.Append(diags...)
- if resp.Diagnostics.HasError() {
- return
- }
- }
-
- var encryption = &encryptionModel{}
- if !model.Encryption.IsNull() && !model.Encryption.IsUnknown() {
- diags = model.Encryption.As(ctx, encryption, basetypes.ObjectAsOptions{})
- resp.Diagnostics.Append(diags...)
- if resp.Diagnostics.HasError() {
- return
- }
- }
-
- var network = &networkModel{}
- if !model.Network.IsNull() && !model.Network.IsUnknown() {
- diags = model.Network.As(ctx, network, basetypes.ObjectAsOptions{})
- resp.Diagnostics.Append(diags...)
- if resp.Diagnostics.HasError() {
- return
- }
- }
-
instanceResp, err := r.client.GetInstanceRequest(ctx, projectId, region, instanceId).Execute()
if err != nil {
oapiErr, ok := err.(*oapierror.GenericOpenAPIError) //nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped
@@ -632,7 +592,8 @@ func (r *instanceResource) Read(
ctx = core.LogResponse(ctx)
// Map response body to schema
- err = mapFields(ctx, instanceResp, &model, storage, encryption, network, region)
+ // err = mapFields(ctx, instanceResp, &model, storage, encryption, network, region)
+ err = mapResponseToModel(ctx, instanceResp, &model, resp.Diagnostics)
if err != nil {
core.LogAndAddError(
ctx,
@@ -648,6 +609,17 @@ func (r *instanceResource) Read(
if resp.Diagnostics.HasError() {
return
}
+
+ // Set data returned by API in identity
+ identity := InstanceResourceIdentityModel{
+ ProjectID: types.StringValue(projectId),
+ Region: types.StringValue(region),
+ InstanceID: types.StringValue(instanceId),
+ }
+ resp.Diagnostics.Append(resp.Identity.Set(ctx, identity)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
tflog.Info(ctx, "SQLServer Flex instance read")
}
@@ -658,7 +630,7 @@ func (r *instanceResource) Update(
resp *resource.UpdateResponse,
) { // nolint:gocritic // function signature required by Terraform
// Retrieve values from plan
- var model Model
+ var model sqlserverflexalpha2.InstanceModel
diags := req.Plan.Get(ctx, &model)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
@@ -675,35 +647,8 @@ func (r *instanceResource) Update(
ctx = tflog.SetField(ctx, "instance_id", instanceId)
ctx = tflog.SetField(ctx, "region", region)
- var storage = &storageModel{}
- if !model.Storage.IsNull() && !model.Storage.IsUnknown() {
- diags = model.Storage.As(ctx, storage, basetypes.ObjectAsOptions{})
- resp.Diagnostics.Append(diags...)
- if resp.Diagnostics.HasError() {
- return
- }
- }
-
- var encryption = &encryptionModel{}
- if !model.Encryption.IsNull() && !model.Encryption.IsUnknown() {
- diags = model.Encryption.As(ctx, encryption, basetypes.ObjectAsOptions{})
- resp.Diagnostics.Append(diags...)
- if resp.Diagnostics.HasError() {
- return
- }
- }
-
- var network = &networkModel{}
- if !model.Network.IsNull() && !model.Network.IsUnknown() {
- diags = model.Network.As(ctx, network, basetypes.ObjectAsOptions{})
- resp.Diagnostics.Append(diags...)
- if resp.Diagnostics.HasError() {
- return
- }
- }
-
// Generate API request body from model
- payload, err := toUpdatePayload(&model, storage, network)
+ payload, err := toUpdatePayload(ctx, &model, resp)
if err != nil {
core.LogAndAddError(
ctx,
@@ -739,7 +684,8 @@ func (r *instanceResource) Update(
}
// Map response body to schema
- err = mapFields(ctx, waitResp, &model, storage, encryption, network, region)
+ err = mapResponseToModel(ctx, waitResp, &model, resp.Diagnostics)
+ // err = mapFields(ctx, waitResp, &model, storage, encryption, network, region)
if err != nil {
core.LogAndAddError(
ctx,
@@ -764,7 +710,7 @@ func (r *instanceResource) Delete(
resp *resource.DeleteResponse,
) { // nolint:gocritic // function signature required by Terraform
// Retrieve values from state
- var model Model
+ var model sqlserverflexalpha2.InstanceModel
diags := req.State.Get(ctx, &model)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
diff --git a/stackit/internal/services/sqlserverflexalpha/instance/resource_msh_test.go.bak b/stackit/internal/services/sqlserverflexalpha/instance/resource_msh_test.go.bak
deleted file mode 100644
index 7a968fe5..00000000
--- a/stackit/internal/services/sqlserverflexalpha/instance/resource_msh_test.go.bak
+++ /dev/null
@@ -1,280 +0,0 @@
-package sqlserverflex
-
-import (
- "context"
- "reflect"
- "testing"
-
- "github.com/hashicorp/terraform-plugin-framework/resource"
- "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core"
-
- sqlserverflex "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexalpha"
-)
-
-func TestNewInstanceResource(t *testing.T) {
- tests := []struct {
- name string
- want resource.Resource
- }{
- // TODO: Add test cases.
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if got := NewInstanceResource(); !reflect.DeepEqual(got, tt.want) {
- t.Errorf("NewInstanceResource() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func Test_instanceResource_Configure(t *testing.T) {
- type fields struct {
- client *sqlserverflex.APIClient
- providerData core.ProviderData
- }
- type args struct {
- ctx context.Context
- req resource.ConfigureRequest
- resp *resource.ConfigureResponse
- }
- tests := []struct {
- name string
- fields fields
- args args
- }{
- // TODO: Add test cases.
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- r := &instanceResource{
- client: tt.fields.client,
- providerData: tt.fields.providerData,
- }
- r.Configure(tt.args.ctx, tt.args.req, tt.args.resp)
- })
- }
-}
-
-func Test_instanceResource_Create(t *testing.T) {
- type fields struct {
- client *sqlserverflex.APIClient
- providerData core.ProviderData
- }
- type args struct {
- ctx context.Context
- req resource.CreateRequest
- resp *resource.CreateResponse
- }
- tests := []struct {
- name string
- fields fields
- args args
- }{
- // TODO: Add test cases.
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- r := &instanceResource{
- client: tt.fields.client,
- providerData: tt.fields.providerData,
- }
- r.Create(tt.args.ctx, tt.args.req, tt.args.resp)
- })
- }
-}
-
-func Test_instanceResource_Delete(t *testing.T) {
- type fields struct {
- client *sqlserverflex.APIClient
- providerData core.ProviderData
- }
- type args struct {
- ctx context.Context
- req resource.DeleteRequest
- resp *resource.DeleteResponse
- }
- tests := []struct {
- name string
- fields fields
- args args
- }{
- // TODO: Add test cases.
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- r := &instanceResource{
- client: tt.fields.client,
- providerData: tt.fields.providerData,
- }
- r.Delete(tt.args.ctx, tt.args.req, tt.args.resp)
- })
- }
-}
-
-func Test_instanceResource_ImportState(t *testing.T) {
- type fields struct {
- client *sqlserverflex.APIClient
- providerData core.ProviderData
- }
- type args struct {
- ctx context.Context
- req resource.ImportStateRequest
- resp *resource.ImportStateResponse
- }
- tests := []struct {
- name string
- fields fields
- args args
- }{
- // TODO: Add test cases.
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- r := &instanceResource{
- client: tt.fields.client,
- providerData: tt.fields.providerData,
- }
- r.ImportState(tt.args.ctx, tt.args.req, tt.args.resp)
- })
- }
-}
-
-func Test_instanceResource_Metadata(t *testing.T) {
- type fields struct {
- client *sqlserverflex.APIClient
- providerData core.ProviderData
- }
- type args struct {
- in0 context.Context
- req resource.MetadataRequest
- resp *resource.MetadataResponse
- }
- tests := []struct {
- name string
- fields fields
- args args
- }{
- // TODO: Add test cases.
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- r := &instanceResource{
- client: tt.fields.client,
- providerData: tt.fields.providerData,
- }
- r.Metadata(tt.args.in0, tt.args.req, tt.args.resp)
- })
- }
-}
-
-func Test_instanceResource_ModifyPlan(t *testing.T) {
- type fields struct {
- client *sqlserverflex.APIClient
- providerData core.ProviderData
- }
- type args struct {
- ctx context.Context
- req resource.ModifyPlanRequest
- resp *resource.ModifyPlanResponse
- }
- tests := []struct {
- name string
- fields fields
- args args
- }{
- // TODO: Add test cases.
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- r := &instanceResource{
- client: tt.fields.client,
- providerData: tt.fields.providerData,
- }
- r.ModifyPlan(tt.args.ctx, tt.args.req, tt.args.resp)
- })
- }
-}
-
-func Test_instanceResource_Read(t *testing.T) {
- type fields struct {
- client *sqlserverflex.APIClient
- providerData core.ProviderData
- }
- type args struct {
- ctx context.Context
- req resource.ReadRequest
- resp *resource.ReadResponse
- }
- tests := []struct {
- name string
- fields fields
- args args
- }{
- // TODO: Add test cases.
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- r := &instanceResource{
- client: tt.fields.client,
- providerData: tt.fields.providerData,
- }
- r.Read(tt.args.ctx, tt.args.req, tt.args.resp)
- })
- }
-}
-
-func Test_instanceResource_Schema(t *testing.T) {
- type fields struct {
- client *sqlserverflex.APIClient
- providerData core.ProviderData
- }
- type args struct {
- in0 context.Context
- in1 resource.SchemaRequest
- resp *resource.SchemaResponse
- }
- tests := []struct {
- name string
- fields fields
- args args
- }{
- // TODO: Add test cases.
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- r := &instanceResource{
- client: tt.fields.client,
- providerData: tt.fields.providerData,
- }
- r.Schema(tt.args.in0, tt.args.in1, tt.args.resp)
- })
- }
-}
-
-func Test_instanceResource_Update(t *testing.T) {
- type fields struct {
- client *sqlserverflex.APIClient
- providerData core.ProviderData
- }
- type args struct {
- ctx context.Context
- req resource.UpdateRequest
- resp *resource.UpdateResponse
- }
- tests := []struct {
- name string
- fields fields
- args args
- }{
- // TODO: Add test cases.
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- r := &instanceResource{
- client: tt.fields.client,
- providerData: tt.fields.providerData,
- }
- r.Update(tt.args.ctx, tt.args.req, tt.args.resp)
- })
- }
-}
diff --git a/stackit/internal/services/sqlserverflexalpha/instance/resources_gen/instance_resource_gen.go b/stackit/internal/services/sqlserverflexalpha/instance/resources_gen/instance_resource_gen.go
index 58cbf8d1..671c7fd3 100644
--- a/stackit/internal/services/sqlserverflexalpha/instance/resources_gen/instance_resource_gen.go
+++ b/stackit/internal/services/sqlserverflexalpha/instance/resources_gen/instance_resource_gen.go
@@ -26,6 +26,11 @@ func InstanceResourceSchema(ctx context.Context) schema.Schema {
Description: "The schedule for on what time and how often the database backup will be created. The schedule is written as a cron schedule.",
MarkdownDescription: "The schedule for on what time and how often the database backup will be created. The schedule is written as a cron schedule.",
},
+ "edition": schema.StringAttribute{
+ Computed: true,
+ Description: "Edition of the MSSQL server instance",
+ MarkdownDescription: "Edition of the MSSQL server instance",
+ },
"encryption": schema.SingleNestedAttribute{
Attributes: map[string]schema.Attribute{
"kek_key_id": schema.StringAttribute{
@@ -73,6 +78,11 @@ func InstanceResourceSchema(ctx context.Context) schema.Schema {
Description: "The ID of the instance.",
MarkdownDescription: "The ID of the instance.",
},
+ "is_deletable": schema.BoolAttribute{
+ Computed: true,
+ Description: "Whether the instance can be deleted or not.",
+ MarkdownDescription: "Whether the instance can be deleted or not.",
+ },
"name": schema.StringAttribute{
Required: true,
Description: "The name of the instance.",
@@ -99,6 +109,12 @@ func InstanceResourceSchema(ctx context.Context) schema.Schema {
Description: "List of IPV4 cidr.",
MarkdownDescription: "List of IPV4 cidr.",
},
+ "instance_address": schema.StringAttribute{
+ Computed: true,
+ },
+ "router_address": schema.StringAttribute{
+ Computed: true,
+ },
},
CustomType: NetworkType{
ObjectType: types.ObjectType{
@@ -126,11 +142,19 @@ func InstanceResourceSchema(ctx context.Context) schema.Schema {
),
},
},
+ "replicas": schema.Int64Attribute{
+ Computed: true,
+ Description: "How many replicas the instance should have.",
+ MarkdownDescription: "How many replicas the instance should have.",
+ },
"retention_days": schema.Int64Attribute{
Required: true,
Description: "The days for how long the backup files should be stored before cleaned up. 30 to 365",
MarkdownDescription: "The days for how long the backup files should be stored before cleaned up. 30 to 365",
},
+ "status": schema.StringAttribute{
+ Computed: true,
+ },
"storage": schema.SingleNestedAttribute{
Attributes: map[string]schema.Attribute{
"class": schema.StringAttribute{
@@ -169,15 +193,19 @@ func InstanceResourceSchema(ctx context.Context) schema.Schema {
type InstanceModel struct {
BackupSchedule types.String `tfsdk:"backup_schedule"`
+ Edition types.String `tfsdk:"edition"`
Encryption EncryptionValue `tfsdk:"encryption"`
FlavorId types.String `tfsdk:"flavor_id"`
Id types.String `tfsdk:"id"`
InstanceId types.String `tfsdk:"instance_id"`
+ IsDeletable types.Bool `tfsdk:"is_deletable"`
Name types.String `tfsdk:"name"`
Network NetworkValue `tfsdk:"network"`
ProjectId types.String `tfsdk:"project_id"`
Region types.String `tfsdk:"region"`
+ Replicas types.Int64 `tfsdk:"replicas"`
RetentionDays types.Int64 `tfsdk:"retention_days"`
+ Status types.String `tfsdk:"status"`
Storage StorageValue `tfsdk:"storage"`
Version types.String `tfsdk:"version"`
}
@@ -732,14 +760,52 @@ func (t NetworkType) ValueFromObject(ctx context.Context, in basetypes.ObjectVal
fmt.Sprintf(`acl expected to be basetypes.ListValue, was: %T`, aclAttribute))
}
+ instanceAddressAttribute, ok := attributes["instance_address"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `instance_address is missing from object`)
+
+ return nil, diags
+ }
+
+ instanceAddressVal, ok := instanceAddressAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`instance_address expected to be basetypes.StringValue, was: %T`, instanceAddressAttribute))
+ }
+
+ routerAddressAttribute, ok := attributes["router_address"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `router_address is missing from object`)
+
+ return nil, diags
+ }
+
+ routerAddressVal, ok := routerAddressAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`router_address expected to be basetypes.StringValue, was: %T`, routerAddressAttribute))
+ }
+
if diags.HasError() {
return nil, diags
}
return NetworkValue{
- AccessScope: accessScopeVal,
- Acl: aclVal,
- state: attr.ValueStateKnown,
+ AccessScope: accessScopeVal,
+ Acl: aclVal,
+ InstanceAddress: instanceAddressVal,
+ RouterAddress: routerAddressVal,
+ state: attr.ValueStateKnown,
}, diags
}
@@ -842,14 +908,52 @@ func NewNetworkValue(attributeTypes map[string]attr.Type, attributes map[string]
fmt.Sprintf(`acl expected to be basetypes.ListValue, was: %T`, aclAttribute))
}
+ instanceAddressAttribute, ok := attributes["instance_address"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `instance_address is missing from object`)
+
+ return NewNetworkValueUnknown(), diags
+ }
+
+ instanceAddressVal, ok := instanceAddressAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`instance_address expected to be basetypes.StringValue, was: %T`, instanceAddressAttribute))
+ }
+
+ routerAddressAttribute, ok := attributes["router_address"]
+
+ if !ok {
+ diags.AddError(
+ "Attribute Missing",
+ `router_address is missing from object`)
+
+ return NewNetworkValueUnknown(), diags
+ }
+
+ routerAddressVal, ok := routerAddressAttribute.(basetypes.StringValue)
+
+ if !ok {
+ diags.AddError(
+ "Attribute Wrong Type",
+ fmt.Sprintf(`router_address expected to be basetypes.StringValue, was: %T`, routerAddressAttribute))
+ }
+
if diags.HasError() {
return NewNetworkValueUnknown(), diags
}
return NetworkValue{
- AccessScope: accessScopeVal,
- Acl: aclVal,
- state: attr.ValueStateKnown,
+ AccessScope: accessScopeVal,
+ Acl: aclVal,
+ InstanceAddress: instanceAddressVal,
+ RouterAddress: routerAddressVal,
+ state: attr.ValueStateKnown,
}, diags
}
@@ -921,13 +1025,15 @@ func (t NetworkType) ValueType(ctx context.Context) attr.Value {
var _ basetypes.ObjectValuable = NetworkValue{}
type NetworkValue struct {
- AccessScope basetypes.StringValue `tfsdk:"access_scope"`
- Acl basetypes.ListValue `tfsdk:"acl"`
- state attr.ValueState
+ AccessScope basetypes.StringValue `tfsdk:"access_scope"`
+ Acl basetypes.ListValue `tfsdk:"acl"`
+ InstanceAddress basetypes.StringValue `tfsdk:"instance_address"`
+ RouterAddress basetypes.StringValue `tfsdk:"router_address"`
+ state attr.ValueState
}
func (v NetworkValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) {
- attrTypes := make(map[string]tftypes.Type, 2)
+ attrTypes := make(map[string]tftypes.Type, 4)
var val tftypes.Value
var err error
@@ -936,12 +1042,14 @@ func (v NetworkValue) ToTerraformValue(ctx context.Context) (tftypes.Value, erro
attrTypes["acl"] = basetypes.ListType{
ElemType: types.StringType,
}.TerraformType(ctx)
+ attrTypes["instance_address"] = basetypes.StringType{}.TerraformType(ctx)
+ attrTypes["router_address"] = basetypes.StringType{}.TerraformType(ctx)
objectType := tftypes.Object{AttributeTypes: attrTypes}
switch v.state {
case attr.ValueStateKnown:
- vals := make(map[string]tftypes.Value, 2)
+ vals := make(map[string]tftypes.Value, 4)
val, err = v.AccessScope.ToTerraformValue(ctx)
@@ -959,6 +1067,22 @@ func (v NetworkValue) ToTerraformValue(ctx context.Context) (tftypes.Value, erro
vals["acl"] = val
+ val, err = v.InstanceAddress.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["instance_address"] = val
+
+ val, err = v.RouterAddress.ToTerraformValue(ctx)
+
+ if err != nil {
+ return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+ }
+
+ vals["router_address"] = val
+
if err := tftypes.ValidateValue(objectType, vals); err != nil {
return tftypes.NewValue(objectType, tftypes.UnknownValue), err
}
@@ -1006,6 +1130,8 @@ func (v NetworkValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue,
"acl": basetypes.ListType{
ElemType: types.StringType,
},
+ "instance_address": basetypes.StringType{},
+ "router_address": basetypes.StringType{},
}), diags
}
@@ -1014,6 +1140,8 @@ func (v NetworkValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue,
"acl": basetypes.ListType{
ElemType: types.StringType,
},
+ "instance_address": basetypes.StringType{},
+ "router_address": basetypes.StringType{},
}
if v.IsNull() {
@@ -1027,8 +1155,10 @@ func (v NetworkValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue,
objVal, diags := types.ObjectValue(
attributeTypes,
map[string]attr.Value{
- "access_scope": v.AccessScope,
- "acl": aclVal,
+ "access_scope": v.AccessScope,
+ "acl": aclVal,
+ "instance_address": v.InstanceAddress,
+ "router_address": v.RouterAddress,
})
return objVal, diags
@@ -1057,6 +1187,14 @@ func (v NetworkValue) Equal(o attr.Value) bool {
return false
}
+ if !v.InstanceAddress.Equal(other.InstanceAddress) {
+ return false
+ }
+
+ if !v.RouterAddress.Equal(other.RouterAddress) {
+ return false
+ }
+
return true
}
@@ -1074,6 +1212,8 @@ func (v NetworkValue) AttributeTypes(ctx context.Context) map[string]attr.Type {
"acl": basetypes.ListType{
ElemType: types.StringType,
},
+ "instance_address": basetypes.StringType{},
+ "router_address": basetypes.StringType{},
}
}
diff --git a/stackit/internal/wait/sqlserverflexalpha/wait.go b/stackit/internal/wait/sqlserverflexalpha/wait.go
index 7484cbe9..21bb8f70 100644
--- a/stackit/internal/wait/sqlserverflexalpha/wait.go
+++ b/stackit/internal/wait/sqlserverflexalpha/wait.go
@@ -44,17 +44,24 @@ func CreateInstanceWaitHandler(ctx context.Context, a APIClientInstanceInterface
}
switch strings.ToLower(string(*s.Status)) {
case strings.ToLower(InstanceStateSuccess):
- if s.Network.InstanceAddress == nil {
- tflog.Info(ctx, "Waiting for instance_address")
- return false, nil, nil
- }
- if s.Network.RouterAddress == nil {
- tflog.Info(ctx, "Waiting for router_address")
- return false, nil, nil
+ if s.Network.AccessScope != nil && *s.Network.AccessScope == "SNA" {
+ if s.Network.InstanceAddress == nil {
+ tflog.Info(ctx, "Waiting for instance_address")
+ return false, nil, nil
+ }
+ if s.Network.RouterAddress == nil {
+ tflog.Info(ctx, "Waiting for router_address")
+ return false, nil, nil
+ }
}
return true, s, nil
case strings.ToLower(InstanceStateUnknown), strings.ToLower(InstanceStateFailed):
return true, s, fmt.Errorf("create failed for instance with id %s", instanceId)
+ case strings.ToLower(InstanceStatePending), strings.ToLower(InstanceStateProcessing):
+ tflog.Info(ctx, "request is being handled", map[string]interface{}{
+ "status": *s.Status,
+ })
+ return false, nil, nil
default:
tflog.Info(ctx, "Wait (create) received unknown status", map[string]interface{}{
"instanceId": instanceId,
@@ -63,8 +70,6 @@ func CreateInstanceWaitHandler(ctx context.Context, a APIClientInstanceInterface
return false, s, nil
}
})
- handler.SetTimeout(45 * time.Minute)
- handler.SetSleepBeforeWait(15 * time.Second)
return handler
}
@@ -83,6 +88,11 @@ func UpdateInstanceWaitHandler(ctx context.Context, a APIClientInstanceInterface
return true, s, nil
case strings.ToLower(InstanceStateUnknown), strings.ToLower(InstanceStateFailed):
return true, s, fmt.Errorf("update failed for instance with id %s", instanceId)
+ case strings.ToLower(InstanceStatePending), strings.ToLower(InstanceStateProcessing):
+ tflog.Info(ctx, "request is being handled", map[string]interface{}{
+ "status": *s.Status,
+ })
+ return false, nil, nil
default:
tflog.Info(ctx, "Wait (update) received unknown status", map[string]interface{}{
"instanceId": instanceId,