fix: builder fix

fix: sqlserver upgrade to generated files
This commit is contained in:
Marcel S. Henselin 2026-02-03 13:58:39 +01:00
parent 48d3dcb526
commit 762c39fbbd
19 changed files with 3931 additions and 1126 deletions

1
.gitignore vendored
View file

@ -45,3 +45,4 @@ dist
.secrets .secrets
pkg_gen pkg_gen
/release/

View file

@ -35,10 +35,15 @@ type version struct {
minor int minor int
} }
func Build() error { type Builder struct {
SkipClone bool
SkipCleanup bool
}
func (b *Builder) Build() error {
slog.Info("Starting Builder") slog.Info("Starting Builder")
slog.Info("Checking needed commands available") slog.Info(" ... Checking needed commands available")
err := checkCommands([]string{"tfplugingen-framework", "tfplugingen-openapi"}) err := checkCommands([]string{"tfplugingen-framework", "tfplugingen-openapi"})
if err != nil { if err != nil {
return err return err
@ -51,28 +56,33 @@ func Build() error {
if root == nil || *root == "" { if root == nil || *root == "" {
return fmt.Errorf("unable to determine root directory from git") return fmt.Errorf("unable to determine root directory from git")
} }
slog.Info("Using root directory", "dir", *root) slog.Info(" ... using root directory", "dir", *root)
slog.Info("Cleaning up old generator directory") if !b.SkipCleanup {
err = os.RemoveAll(path.Join(*root, GEN_REPO_NAME)) slog.Info("Cleaning up old generator directory")
if err != nil { err = os.RemoveAll(path.Join(*root, GEN_REPO_NAME))
return err if err != nil {
} return err
}
slog.Info("Cleaning up old packages directory") slog.Info("Cleaning up old packages directory")
err = os.RemoveAll(path.Join(*root, "pkg_gen")) err = os.RemoveAll(path.Join(*root, "pkg_gen"))
if err != nil { if err != nil {
return err return err
}
} }
slog.Info("Creating generator dir", "dir", fmt.Sprintf("%s/%s", *root, GEN_REPO_NAME)) slog.Info("Creating generator dir", "dir", fmt.Sprintf("%s/%s", *root, GEN_REPO_NAME))
genDir, err := createGeneratorDir(*root, GEN_REPO, GEN_REPO_NAME) genDir := path.Join(*root, GEN_REPO_NAME)
if err != nil { if !b.SkipClone {
return err err = createGeneratorDir(GEN_REPO, genDir)
if err != nil {
return err
}
} }
slog.Info("Creating oas dir", "dir", fmt.Sprintf("%s/%s", *root, OAS_REPO_NAME)) slog.Info("Creating oas dir", "dir", fmt.Sprintf("%s/%s", *root, OAS_REPO_NAME))
repoDir, err := createRepoDir(genDir, OAS_REPO, OAS_REPO_NAME) repoDir, err := createRepoDir(genDir, OAS_REPO, OAS_REPO_NAME, !b.SkipClone)
if err != nil { if err != nil {
return fmt.Errorf("%s", err.Error()) return fmt.Errorf("%s", err.Error())
} }
@ -118,10 +128,12 @@ func Build() error {
} }
} }
slog.Info("Cleaning up", "dir", repoDir) if !b.SkipCleanup {
err = os.RemoveAll(filepath.Dir(repoDir)) slog.Info("Cleaning up", "dir", repoDir)
if err != nil { err = os.RemoveAll(filepath.Dir(repoDir))
return fmt.Errorf("%s", err.Error()) if err != nil {
return fmt.Errorf("%s", err.Error())
}
} }
slog.Info("Changing dir", "dir", genDir) slog.Info("Changing dir", "dir", genDir)
@ -191,30 +203,16 @@ func Build() error {
if item.IsDir() { if item.IsDir() {
slog.Info(" -> package", "name", item.Name()) slog.Info(" -> package", "name", item.Name())
tgtDir := path.Join(*root, "pkg_gen", item.Name()) tgtDir := path.Join(*root, "pkg_gen", item.Name())
// no backup needed as we generate new if fileExists(tgtDir) {
//bakName := fmt.Sprintf("%s.%s", item.Name(), time.Now().Format("20060102-150405")) delErr := os.RemoveAll(tgtDir)
//if _, err = os.Stat(tgtDir); !os.IsNotExist(err) { if delErr != nil {
// err = os.Rename( return delErr
// tgtDir, }
// path.Join(*root, "pkg", bakName), }
// )
// if err != nil {
// return err
// }
//}
err = os.Rename(path.Join(srcDir, item.Name()), tgtDir) err = os.Rename(path.Join(srcDir, item.Name()), tgtDir)
if err != nil { if err != nil {
return err return err
} }
// wait is placed outside now
//if _, err = os.Stat(path.Join(*root, "pkg", bakName, "wait")); !os.IsNotExist(err) {
// slog.Info(" Copying wait subfolder")
// err = os.Rename(path.Join(*root, "pkg", bakName, "wait"), path.Join(tgtDir, "wait"))
// if err != nil {
// return err
// }
//}
} }
} }
@ -238,17 +236,19 @@ func Build() error {
return err return err
} }
slog.Info("Finally removing temporary files and directories") if !b.SkipCleanup {
//err = os.RemoveAll(path.Join(*root, "generated")) slog.Info("Finally removing temporary files and directories")
//if err != nil { err = os.RemoveAll(path.Join(*root, "generated"))
// slog.Error("RemoveAll", "dir", path.Join(*root, "generated"), "err", err) if err != nil {
// return err slog.Error("RemoveAll", "dir", path.Join(*root, "generated"), "err", err)
//} return err
}
err = os.RemoveAll(path.Join(*root, GEN_REPO_NAME)) err = os.RemoveAll(path.Join(*root, GEN_REPO_NAME))
if err != nil { if err != nil {
slog.Error("RemoveAll", "dir", path.Join(*root, GEN_REPO_NAME), "err", err) slog.Error("RemoveAll", "dir", path.Join(*root, GEN_REPO_NAME), "err", err)
return err return err
}
} }
slog.Info("Done") slog.Info("Done")
@ -443,7 +443,7 @@ func generateServiceFiles(rootDir, generatorDir string) error {
fileName := matches[0][0] fileName := matches[0][0]
resource := matches[0][1] resource := matches[0][1]
slog.Info( slog.Info(
"Found service spec", " found service spec",
"name", "name",
specFile.Name(), specFile.Name(),
"service", "service",
@ -452,137 +452,147 @@ func generateServiceFiles(rootDir, generatorDir string) error {
resource, resource,
) )
//for _, part := range []string{"alpha", "beta"} {
oasFile := path.Join(generatorDir, "oas", fmt.Sprintf("%s%s.json", service.Name(), svcVersion.Name())) oasFile := path.Join(generatorDir, "oas", fmt.Sprintf("%s%s.json", service.Name(), svcVersion.Name()))
// slog.Info("oas file", "path", oasFile, "version", svcVersion.Name()) if _, oasErr := os.Stat(oasFile); os.IsNotExist(oasErr) {
if _, err = os.Stat(oasFile); !os.IsNotExist(err) { slog.Warn(" coulc not find matching oas", "svc", service.Name(), "version", svcVersion.Name())
slog.Info("found matching oas", "svc", service.Name(), "version", svcVersion.Name()) continue
scName := fmt.Sprintf("%s%s", service.Name(), svcVersion.Name()) }
scName = strings.ReplaceAll(scName, "-", "")
err = os.MkdirAll(path.Join(rootDir, "generated", "internal", "services", scName, resource), 0755)
if err != nil {
return err
}
// slog.Info("Generating openapi spec json") scName := fmt.Sprintf("%s%s", service.Name(), svcVersion.Name())
specJsonFile := path.Join(rootDir, "generated", "specs", fmt.Sprintf("%s_%s_spec.json", scName, resource)) scName = strings.ReplaceAll(scName, "-", "")
err = os.MkdirAll(path.Join(rootDir, "generated", "internal", "services", scName, resource), 0755)
if err != nil {
return err
}
var stdOut, stdErr bytes.Buffer // slog.Info("Generating openapi spec json")
specJsonFile := path.Join(rootDir, "generated", "specs", fmt.Sprintf("%s_%s_spec.json", scName, resource))
// noqa:gosec var stdOut, stdErr bytes.Buffer
cmd := exec.Command(
"tfplugingen-openapi", // noqa:gosec
"generate", cmd := exec.Command(
"--config", "tfplugingen-openapi",
path.Join(rootDir, "service_specs", service.Name(), svcVersion.Name(), fileName), "generate",
"--output", "--config",
specJsonFile, path.Join(rootDir, "service_specs", service.Name(), svcVersion.Name(), fileName),
oasFile, "--output",
specJsonFile,
oasFile,
)
cmd.Stdout = &stdOut
cmd.Stderr = &stdErr
if err = cmd.Start(); err != nil {
slog.Error(
"tfplugingen-openapi generate",
"error",
err,
"stdOut",
stdOut.String(),
"stdErr",
stdErr.String(),
) )
cmd.Stdout = &stdOut return err
cmd.Stderr = &stdErr }
if err = cmd.Start(); err != nil { if err = cmd.Wait(); err != nil {
slog.Error("tfplugingen-openapi generate", "error", err) var exitErr *exec.ExitError
return err if errors.As(err, &exitErr) {
slog.Error("tfplugingen-openapi generate", "code", exitErr.ExitCode(), "error", err, "stdout", stdOut.String(), "stderr", stdErr.String())
return fmt.Errorf("%s", stdErr.String())
} }
if err = cmd.Wait(); err != nil {
var exitErr *exec.ExitError
if errors.As(err, &exitErr) {
slog.Error("tfplugingen-openapi generate", "code", exitErr.ExitCode(), "error", err, "stdout", stdOut.String(), "stderr", stdErr.String())
return fmt.Errorf("%s", stdErr.String())
}
if err != nil {
slog.Error("tfplugingen-openapi generate", "err", err, "stdout", stdOut.String(), "stderr", stdErr.String())
return err
}
}
// slog.Info("Creating terraform svc resource files folder")
tgtFolder := path.Join(rootDir, "generated", "internal", "services", scName, resource, "resources_gen")
err = os.MkdirAll(tgtFolder, 0755)
if err != nil { if err != nil {
slog.Error("tfplugingen-openapi generate", "err", err, "stdout", stdOut.String(), "stderr", stdErr.String())
return err
}
}
if stdOut.Len() > 0 {
slog.Warn(" command output", "stdout", stdOut.String(), "stderr", stdErr.String())
}
// slog.Info("Creating terraform svc resource files folder")
tgtFolder := path.Join(rootDir, "generated", "internal", "services", scName, resource, "resources_gen")
err = os.MkdirAll(tgtFolder, 0755)
if err != nil {
return err
}
// slog.Info("Generating terraform svc resource files")
// noqa:gosec
cmd2 := exec.Command(
"tfplugingen-framework",
"generate",
"resources",
"--input",
specJsonFile,
"--output",
tgtFolder,
"--package",
scName,
)
cmd2.Stdout = &stdOut
cmd2.Stderr = &stdErr
if err = cmd2.Start(); err != nil {
slog.Error("tfplugingen-framework generate resources", "error", err)
return err
}
if err = cmd2.Wait(); err != nil {
var exitErr *exec.ExitError
if errors.As(err, &exitErr) {
slog.Error("tfplugingen-framework generate resources", "code", exitErr.ExitCode(), "error", err, "stdout", stdOut.String(), "stderr", stdErr.String())
return fmt.Errorf("%s", stdErr.String())
}
if err != nil {
slog.Error("tfplugingen-framework generate resources", "err", err, "stdout", stdOut.String(), "stderr", stdErr.String())
return err
}
}
// slog.Info("Creating terraform svc datasource files folder")
tgtFolder = path.Join(rootDir, "generated", "internal", "services", scName, resource, "datasources_gen")
err = os.MkdirAll(tgtFolder, 0755)
if err != nil {
return err
}
// slog.Info("Generating terraform svc resource files")
// noqa:gosec
cmd3 := exec.Command(
"tfplugingen-framework",
"generate",
"data-sources",
"--input",
specJsonFile,
"--output",
tgtFolder,
"--package",
scName,
)
var stdOut3, stdErr3 bytes.Buffer
cmd3.Stdout = &stdOut3
cmd3.Stderr = &stdErr3
if err = cmd3.Start(); err != nil {
slog.Error("tfplugingen-framework generate data-sources", "error", err)
return err
}
if err = cmd3.Wait(); err != nil {
var exitErr *exec.ExitError
if errors.As(err, &exitErr) {
slog.Error("tfplugingen-framework generate data-sources", "code", exitErr.ExitCode(), "error", err, "stdout", stdOut.String(), "stderr", stdErr.String())
return fmt.Errorf("%s", stdErr.String())
}
if err != nil {
slog.Error("tfplugingen-framework generate data-sources", "err", err, "stdout", stdOut.String(), "stderr", stdErr.String())
return err return err
} }
// slog.Info("Generating terraform svc resource files")
// noqa:gosec
cmd2 := exec.Command(
"tfplugingen-framework",
"generate",
"resources",
"--input",
specJsonFile,
"--output",
tgtFolder,
"--package",
scName,
)
cmd2.Stdout = &stdOut
cmd2.Stderr = &stdErr
if err = cmd2.Start(); err != nil {
slog.Error("tfplugingen-framework generate resources", "error", err)
return err
}
if err = cmd2.Wait(); err != nil {
var exitErr *exec.ExitError
if errors.As(err, &exitErr) {
slog.Error("tfplugingen-framework generate resources", "code", exitErr.ExitCode(), "error", err, "stdout", stdOut.String(), "stderr", stdErr.String())
return fmt.Errorf("%s", stdErr.String())
}
if err != nil {
slog.Error("tfplugingen-framework generate resources", "err", err, "stdout", stdOut.String(), "stderr", stdErr.String())
return err
}
}
// slog.Info("Creating terraform svc datasource files folder")
tgtFolder = path.Join(rootDir, "generated", "internal", "services", scName, resource, "datasources_gen")
err = os.MkdirAll(tgtFolder, 0755)
if err != nil {
return err
}
// slog.Info("Generating terraform svc resource files")
// noqa:gosec
cmd3 := exec.Command(
"tfplugingen-framework",
"generate",
"data-sources",
"--input",
specJsonFile,
"--output",
tgtFolder,
"--package",
scName,
)
var stdOut3, stdErr3 bytes.Buffer
cmd3.Stdout = &stdOut3
cmd3.Stderr = &stdErr3
if err = cmd3.Start(); err != nil {
slog.Error("tfplugingen-framework generate data-sources", "error", err)
return err
}
if err = cmd3.Wait(); err != nil {
var exitErr *exec.ExitError
if errors.As(err, &exitErr) {
slog.Error("tfplugingen-framework generate data-sources", "code", exitErr.ExitCode(), "error", err, "stdout", stdOut.String(), "stderr", stdErr.String())
return fmt.Errorf("%s", stdErr.String())
}
if err != nil {
slog.Error("tfplugingen-framework generate data-sources", "err", err, "stdout", stdOut.String(), "stderr", stdErr.String())
return err
}
}
} }
//}
} }
} }
} }
@ -595,7 +605,7 @@ func checkCommands(commands []string) error {
if !commandExists(commandName) { if !commandExists(commandName) {
return fmt.Errorf("missing command %s", commandName) return fmt.Errorf("missing command %s", commandName)
} }
slog.Info("found", "command", commandName) slog.Info(" found", "command", commandName)
} }
return nil return nil
} }
@ -728,32 +738,35 @@ func handleVersion(service string, match []string) (*string, *version, error) {
return &resStr, &version{verString: verString, major: majVer, minor: minVer}, nil return &resStr, &version{verString: verString, major: majVer, minor: minVer}, nil
} }
func createRepoDir(root, repoUrl, repoName string) (string, error) { func createRepoDir(root, repoUrl, repoName string, doClone bool) (string, error) {
oasTmpDir, err := os.MkdirTemp(root, "oas-tmp") targetDir := path.Join(root, repoName)
if err != nil { if doClone {
return "", err if fileExists(targetDir) {
} slog.Warn("target dir exists - skipping", "targetDir", targetDir)
targetDir := path.Join(oasTmpDir, repoName) return targetDir, nil
_, err = git.Clone( }
clone.Repository(repoUrl), _, err := git.Clone(
clone.Directory(targetDir), clone.Repository(repoUrl),
) clone.Directory(targetDir),
if err != nil { )
return "", err if err != nil {
return "", err
}
} }
return targetDir, nil return targetDir, nil
} }
func createGeneratorDir(root, repoUrl, repoName string) (string, error) { func createGeneratorDir(repoUrl, targetDir string) error {
targetDir := path.Join(root, repoName) if !fileExists(targetDir) {
_, err := git.Clone( _, cloneErr := git.Clone(
clone.Repository(repoUrl), clone.Repository(repoUrl),
clone.Directory(targetDir), clone.Directory(targetDir),
) )
if err != nil { if cloneErr != nil {
return "", err return cloneErr
}
} }
return targetDir, nil return nil
} }
func getRoot() (*string, error) { func getRoot() (*string, error) {

View file

@ -3,6 +3,7 @@ package {{.PackageName}}
import ( import (
"context" "context"
"github.com/hashicorp/terraform-plugin-framework/resource/identityschema"
"github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource"
"github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-framework/types"
@ -17,6 +18,7 @@ var (
_ resource.ResourceWithConfigure = &{{.NameCamel}}Resource{} _ resource.ResourceWithConfigure = &{{.NameCamel}}Resource{}
_ resource.ResourceWithImportState = &{{.NameCamel}}Resource{} _ resource.ResourceWithImportState = &{{.NameCamel}}Resource{}
_ resource.ResourceWithModifyPlan = &{{.NameCamel}}Resource{} _ resource.ResourceWithModifyPlan = &{{.NameCamel}}Resource{}
_ resource.ResourceWithIdentity = &{{.NameCamel}}Resource{}
) )
func New{{.NamePascal}}Resource() resource.Resource { func New{{.NamePascal}}Resource() resource.Resource {
@ -28,6 +30,13 @@ type {{.NameCamel}}Resource struct{
providerData core.ProviderData providerData core.ProviderData
} }
type InstanceResourceIdentityModel struct {
ProjectID types.String `tfsdk:"project_id"`
Region types.String `tfsdk:"region"`
InstanceID types.String `tfsdk:"instance_id"`
// TODO: implement further needed parts
}
func (r *{{.NameCamel}}Resource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { func (r *{{.NameCamel}}Resource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
resp.TypeName = req.ProviderTypeName + "_{{.PackageName}}_{{.NameSnake}}" resp.TypeName = req.ProviderTypeName + "_{{.PackageName}}_{{.NameSnake}}"
} }
@ -36,6 +45,23 @@ func (r *{{.NameCamel}}Resource) Schema(ctx context.Context, req resource.Schema
resp.Schema = {{.PackageName}}Gen.{{.NamePascal}}ResourceSchema(ctx) resp.Schema = {{.PackageName}}Gen.{{.NamePascal}}ResourceSchema(ctx)
} }
func (r *instanceResource) IdentitySchema(_ context.Context, _ resource.IdentitySchemaRequest, resp *resource.IdentitySchemaResponse) {
resp.IdentitySchema = identityschema.Schema{
Attributes: map[string]identityschema.Attribute{
"project_id": identityschema.StringAttribute{
RequiredForImport: true, // must be set during import by the practitioner
},
"region": identityschema.StringAttribute{
RequiredForImport: true, // can be defaulted by the provider configuration
},
"instance_id": identityschema.StringAttribute{
RequiredForImport: true, // can be defaulted by the provider configuration
},
},
}
}
// Configure adds the provider configured client to the resource. // Configure adds the provider configured client to the resource.
func (r *{{.NameCamel}}Resource) Configure( func (r *{{.NameCamel}}Resource) Configure(
ctx context.Context, ctx context.Context,
@ -81,6 +107,19 @@ func (r *{{.NameCamel}}Resource) Create(ctx context.Context, req resource.Create
// Example data value setting // Example data value setting
data.{{.NameCamel | ucfirst}}Id = types.StringValue("id-from-response") data.{{.NameCamel | ucfirst}}Id = types.StringValue("id-from-response")
// TODO: Set data returned by API in identity
identity := InstanceResourceIdentityModel{
ProjectID: types.StringValue(projectId),
Region: types.StringValue(region),
InstanceID: types.StringValue(instanceId),
}
resp.Diagnostics.Append(resp.Identity.Set(ctx, identity)...)
if resp.Diagnostics.HasError() {
return
}
// TODO: implement wait handler if needed
// Save data into Terraform state // Save data into Terraform state
resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
@ -93,6 +132,13 @@ func (r *{{.NameCamel}}Resource) Read(ctx context.Context, req resource.ReadRequ
// Read Terraform prior state data into the model // Read Terraform prior state data into the model
resp.Diagnostics.Append(req.State.Get(ctx, &data)...) resp.Diagnostics.Append(req.State.Get(ctx, &data)...)
// Read identity data
var identityData InstanceResourceIdentityModel
resp.Diagnostics.Append(req.Identity.Get(ctx, &identityData)...)
if resp.Diagnostics.HasError() {
return
}
if resp.Diagnostics.HasError() { if resp.Diagnostics.HasError() {
return return
} }

View file

@ -5,13 +5,29 @@ import (
"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/cmd/cmd/build" "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/cmd/cmd/build"
) )
func NewBuildCmd() *cobra.Command { var (
return &cobra.Command{ skipCleanup bool
Use: "build", skipClone bool
Short: "Build the necessary boilerplate", )
Long: `...`,
RunE: func(cmd *cobra.Command, args []string) error { var buildCmd = &cobra.Command{
return build.Build() Use: "build",
}, Short: "Build the necessary boilerplate",
} Long: `...`,
RunE: func(cmd *cobra.Command, args []string) error {
b := build.Builder{
SkipClone: skipClone,
SkipCleanup: skipCleanup,
}
return b.Build()
},
}
func NewBuildCmd() *cobra.Command {
return buildCmd
}
func init() { // nolint: gochecknoinits
buildCmd.Flags().BoolVarP(&skipCleanup, "skip-clean", "c", false, "Skip cleanup steps")
buildCmd.Flags().BoolVarP(&skipClone, "skip-clone", "g", false, "Skip cloning from git")
} }

View file

@ -2,17 +2,30 @@ package main
import ( import (
"log" "log"
"log/slog"
"os" "os"
"github.com/MatusOllah/slogcolor"
cc "github.com/ivanpirog/coloredcobra"
"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/cmd/cmd" "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/cmd/cmd"
) )
func main() { func main() {
slog.SetDefault(slog.New(slogcolor.NewHandler(os.Stderr, slogcolor.DefaultOptions)))
rootCmd := cmd.NewRootCmd() rootCmd := cmd.NewRootCmd()
//rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") //rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)")
//rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "author name for copyright attribution") //rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "author name for copyright attribution")
//rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "name of license for the project") //rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "name of license for the project")
cc.Init(&cc.Config{
RootCmd: rootCmd,
Headings: cc.HiCyan + cc.Bold + cc.Underline,
Commands: cc.HiYellow + cc.Bold,
Example: cc.Italic,
ExecName: cc.Bold,
Flags: cc.Bold,
})
rootCmd.SetOut(os.Stdout) rootCmd.SetOut(os.Stdout)
rootCmd.AddCommand( rootCmd.AddCommand(

View file

@ -30,11 +30,12 @@ data "stackitprivatepreview_postgresflexalpha_instance" "example" {
### Read-Only ### Read-Only
- `acl` (List of String) List of IPV4 cidr.
- `backup_schedule` (String) The schedule for on what time and how often the database backup will be created. The schedule is written as a cron schedule. - `backup_schedule` (String) The schedule for on what time and how often the database backup will be created. The schedule is written as a cron schedule.
- `connection_info` (Attributes) The DNS name and port in the instance overview (see [below for nested schema](#nestedatt--connection_info)) - `connection_info` (Attributes) The DNS name and port in the instance overview (see [below for nested schema](#nestedatt--connection_info))
- `encryption` (Attributes) The configuration for instance's volume and backup storage encryption. - `encryption` (Attributes) The configuration for instance's volume and backup storage encryption.
**Note:** This feature is in private preview. Supplying this object is only permitted for enabled accounts. If your account does not have access, the request will be rejected. (see [below for nested schema](#nestedatt--encryption)) **Note:** This feature is in private preview. Supplying this object is only permitted for enabled accounts. If your account does not have access, the request will be rejected. (see [below for nested schema](#nestedatt--encryption))
- `flavor_id` (String) The id of the instance flavor. - `flavor_id` (String) The id of the instance flavor.
- `id` (String) The ID of the instance. - `id` (String) The ID of the instance.
- `is_deletable` (Boolean) Whether the instance can be deleted or not. - `is_deletable` (Boolean) Whether the instance can be deleted or not.

View file

@ -3,12 +3,12 @@
page_title: "stackitprivatepreview_sqlserverflexalpha_instance Data Source - stackitprivatepreview" page_title: "stackitprivatepreview_sqlserverflexalpha_instance Data Source - stackitprivatepreview"
subcategory: "" subcategory: ""
description: |- description: |-
SQLServer Flex ALPHA instance resource schema. Must have a region specified in the provider configuration.
--- ---
# stackitprivatepreview_sqlserverflexalpha_instance (Data Source) # stackitprivatepreview_sqlserverflexalpha_instance (Data Source)
SQLServer Flex ALPHA instance resource schema. Must have a `region` specified in the provider configuration.
## Example Usage ## Example Usage
@ -24,61 +24,48 @@ data "stackitprivatepreview_sqlserverflexalpha_instance" "example" {
### Required ### Required
- `instance_id` (String) ID of the SQLServer Flex instance. - `instance_id` (String) The ID of the instance.
- `project_id` (String) STACKIT project ID to which the instance is associated. - `project_id` (String) The STACKIT project ID.
- `region` (String) The region which should be addressed
### Optional
- `region` (String) The resource region. If not defined, the provider region is used.
### Read-Only ### Read-Only
- `backup_schedule` (String) The backup schedule. Should follow the cron scheduling system format (e.g. "0 0 * * *") - `backup_schedule` (String) The schedule for on what time and how often the database backup will be created. The schedule is written as a cron schedule.
- `edition` (String) - `edition` (String) Edition of the MSSQL server instance
- `encryption` (Attributes) The encryption block. (see [below for nested schema](#nestedatt--encryption)) - `encryption` (Attributes) this defines which key to use for storage encryption (see [below for nested schema](#nestedatt--encryption))
- `flavor` (Attributes) (see [below for nested schema](#nestedatt--flavor)) - `flavor_id` (String) The id of the instance flavor.
- `id` (String) Terraform's internal resource ID. It is structured as "`project_id`,`region`,`instance_id`". - `id` (String) The ID of the instance.
- `is_deletable` (Boolean) - `is_deletable` (Boolean) Whether the instance can be deleted or not.
- `name` (String) Instance name. - `name` (String) The name of the instance.
- `network` (Attributes) The network block. (see [below for nested schema](#nestedatt--network)) - `network` (Attributes) The access configuration of the instance (see [below for nested schema](#nestedatt--network))
- `replicas` (Number) - `replicas` (Number) How many replicas the instance should have.
- `retention_days` (Number) - `retention_days` (Number) The days for how long the backup files should be stored before cleaned up. 30 to 365
- `status` (String) - `status` (String)
- `storage` (Attributes) (see [below for nested schema](#nestedatt--storage)) - `storage` (Attributes) The object containing information about the storage size and class. (see [below for nested schema](#nestedatt--storage))
- `version` (String) - `version` (String) The sqlserver version used for the instance.
<a id="nestedatt--encryption"></a> <a id="nestedatt--encryption"></a>
### Nested Schema for `encryption` ### Nested Schema for `encryption`
Read-Only: Read-Only:
- `key_id` (String) STACKIT KMS - Key ID of the encryption key to use. - `kek_key_id` (String) The key identifier
- `key_version` (String) STACKIT KMS - Key version to use in the encryption key. - `kek_key_ring_id` (String) The keyring identifier
- `keyring_id` (String) STACKIT KMS - KeyRing ID of the encryption key to use. - `kek_key_version` (String) The key version
- `service_account` (String) - `service_account` (String)
<a id="nestedatt--flavor"></a>
### Nested Schema for `flavor`
Read-Only:
- `cpu` (Number)
- `description` (String)
- `id` (String)
- `node_type` (String)
- `ram` (Number)
<a id="nestedatt--network"></a> <a id="nestedatt--network"></a>
### Nested Schema for `network` ### Nested Schema for `network`
Read-Only: Read-Only:
- `access_scope` (String) The access scope of the instance. (e.g. SNA) - `access_scope` (String) The network access scope of the instance
- `acl` (List of String) The Access Control List (ACL) for the SQLServer Flex instance.
- `instance_address` (String) The returned instance IP address of the SQLServer Flex instance. ⚠️ **Note:** This feature is in private preview. Supplying this object is only permitted for enabled accounts. If your account does not have access, the request will be rejected.
- `router_address` (String) The returned router IP address of the SQLServer Flex instance. - `acl` (List of String) List of IPV4 cidr.
- `instance_address` (String)
- `router_address` (String)
<a id="nestedatt--storage"></a> <a id="nestedatt--storage"></a>
@ -86,5 +73,5 @@ Read-Only:
Read-Only: Read-Only:
- `class` (String) - `class` (String) The storage class for the storage.
- `size` (Number) - `size` (Number) The storage size in Gigabytes.

View file

@ -55,13 +55,14 @@ import {
- `encryption` (Attributes) The configuration for instance's volume and backup storage encryption. - `encryption` (Attributes) The configuration for instance's volume and backup storage encryption.
**Note:** This feature is in private preview. Supplying this object is only permitted for enabled accounts. If your account does not have access, the request will be rejected. (see [below for nested schema](#nestedatt--encryption)) **Note:** This feature is in private preview. Supplying this object is only permitted for enabled accounts. If your account does not have access, the request will be rejected. (see [below for nested schema](#nestedatt--encryption))
- `instance_id` (String) The ID of the instance. - `instance_id` (String) The ID of the instance.
- `project_id` (String) The STACKIT project ID. - `project_id` (String) The STACKIT project ID.
- `region` (String) The region which should be addressed - `region` (String) The region which should be addressed
### Read-Only ### Read-Only
- `acl` (List of String) List of IPV4 cidr.
- `connection_info` (Attributes) The DNS name and port in the instance overview (see [below for nested schema](#nestedatt--connection_info)) - `connection_info` (Attributes) The DNS name and port in the instance overview (see [below for nested schema](#nestedatt--connection_info))
- `id` (String) The ID of the instance. - `id` (String) The ID of the instance.
- `is_deletable` (Boolean) Whether the instance can be deleted or not. - `is_deletable` (Boolean) Whether the instance can be deleted or not.
@ -77,6 +78,9 @@ Required:
Optional: Optional:
- `access_scope` (String) The access scope of the instance. It defines if the instance is public or airgapped. - `access_scope` (String) The access scope of the instance. It defines if the instance is public or airgapped.
Read-Only:
- `instance_address` (String) - `instance_address` (String)
- `router_address` (String) - `router_address` (String)

View file

@ -3,12 +3,12 @@
page_title: "stackitprivatepreview_sqlserverflexalpha_instance Resource - stackitprivatepreview" page_title: "stackitprivatepreview_sqlserverflexalpha_instance Resource - stackitprivatepreview"
subcategory: "" subcategory: ""
description: |- description: |-
SQLServer Flex ALPHA instance resource schema. Must have a region specified in the provider configuration.
--- ---
# stackitprivatepreview_sqlserverflexalpha_instance (Resource) # stackitprivatepreview_sqlserverflexalpha_instance (Resource)
SQLServer Flex ALPHA instance resource schema. Must have a `region` specified in the provider configuration.
## Example Usage ## Example Usage
@ -41,41 +41,55 @@ import {
### Required ### Required
- `flavor_id` (String) - `backup_schedule` (String) The schedule for on what time and how often the database backup will be created. The schedule is written as a cron schedule.
- `name` (String) Instance name. - `flavor_id` (String) The id of the instance flavor.
- `network` (Attributes) The network block. (see [below for nested schema](#nestedatt--network)) - `name` (String) The name of the instance.
- `project_id` (String) STACKIT project ID to which the instance is associated. - `network` (Attributes) the network configuration of the instance. (see [below for nested schema](#nestedatt--network))
- `retention_days` (Number) The days for how long the backup files should be stored before cleaned up. 30 to 365
- `storage` (Attributes) The object containing information about the storage size and class. (see [below for nested schema](#nestedatt--storage))
- `version` (String) The sqlserver version used for the instance.
### Optional ### Optional
- `backup_schedule` (String) The backup schedule. Should follow the cron scheduling system format (e.g. "0 0 * * *") - `encryption` (Attributes) this defines which key to use for storage encryption (see [below for nested schema](#nestedatt--encryption))
- `encryption` (Attributes) The encryption block. (see [below for nested schema](#nestedatt--encryption)) - `instance_id` (String) The ID of the instance.
- `is_deletable` (Boolean) - `project_id` (String) The STACKIT project ID.
- `region` (String) The resource region. If not defined, the provider region is used. - `region` (String) The region which should be addressed
- `retention_days` (Number)
- `status` (String)
- `storage` (Attributes) (see [below for nested schema](#nestedatt--storage))
- `version` (String)
### Read-Only ### Read-Only
- `edition` (String) - `edition` (String) Edition of the MSSQL server instance
- `id` (String) Terraform's internal resource ID. It is structured as "`project_id`,`region`,`instance_id`". - `id` (String) The ID of the instance.
- `instance_id` (String) ID of the SQLServer Flex instance. - `is_deletable` (Boolean) Whether the instance can be deleted or not.
- `replicas` (Number) - `replicas` (Number) How many replicas the instance should have.
- `status` (String)
<a id="nestedatt--network"></a> <a id="nestedatt--network"></a>
### Nested Schema for `network` ### Nested Schema for `network`
Required: Required:
- `access_scope` (String) The access scope of the instance. (SNA | PUBLIC) - `acl` (List of String) List of IPV4 cidr.
- `acl` (List of String) The Access Control List (ACL) for the SQLServer Flex instance.
Optional:
- `access_scope` (String) The network access scope of the instance
⚠️ **Note:** This feature is in private preview. Supplying this object is only permitted for enabled accounts. If your account does not have access, the request will be rejected.
Read-Only: Read-Only:
- `instance_address` (String) The returned instance IP address of the SQLServer Flex instance. - `instance_address` (String)
- `router_address` (String) The returned router IP address of the SQLServer Flex instance. - `router_address` (String)
<a id="nestedatt--storage"></a>
### Nested Schema for `storage`
Required:
- `class` (String) The storage class for the storage.
- `size` (Number) The storage size in Gigabytes.
<a id="nestedatt--encryption"></a> <a id="nestedatt--encryption"></a>
@ -83,16 +97,7 @@ Read-Only:
Required: Required:
- `key_id` (String) STACKIT KMS - Key ID of the encryption key to use. - `kek_key_id` (String) The key identifier
- `key_version` (String) STACKIT KMS - Key version to use in the encryption key. - `kek_key_ring_id` (String) The keyring identifier
- `keyring_id` (String) STACKIT KMS - KeyRing ID of the encryption key to use. - `kek_key_version` (String) The key version
- `service_account` (String) - `service_account` (String)
<a id="nestedatt--storage"></a>
### Nested Schema for `storage`
Optional:
- `class` (String)
- `size` (Number)

2
go.mod
View file

@ -3,6 +3,7 @@ module tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stac
go 1.25.6 go 1.25.6
require ( require (
github.com/MatusOllah/slogcolor v1.7.0
github.com/google/go-cmp v0.7.0 github.com/google/go-cmp v0.7.0
github.com/google/uuid v1.6.0 github.com/google/uuid v1.6.0
github.com/hashicorp/terraform-plugin-framework v1.17.0 github.com/hashicorp/terraform-plugin-framework v1.17.0
@ -11,6 +12,7 @@ require (
github.com/hashicorp/terraform-plugin-log v0.10.0 github.com/hashicorp/terraform-plugin-log v0.10.0
github.com/hashicorp/terraform-plugin-testing v1.14.0 github.com/hashicorp/terraform-plugin-testing v1.14.0
github.com/iancoleman/strcase v0.3.0 github.com/iancoleman/strcase v0.3.0
github.com/ivanpirog/coloredcobra v1.0.1
github.com/ldez/go-git-cmd-wrapper/v2 v2.9.1 github.com/ldez/go-git-cmd-wrapper/v2 v2.9.1
github.com/spf13/cobra v1.10.2 github.com/spf13/cobra v1.10.2
github.com/stackitcloud/stackit-sdk-go/core v0.21.0 github.com/stackitcloud/stackit-sdk-go/core v0.21.0

9
go.sum
View file

@ -1,5 +1,7 @@
dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/MatusOllah/slogcolor v1.7.0 h1:Nrd7yBPv2EBEEBEwl7WEPRmMd1ozZzw2jm8SLMYDbKs=
github.com/MatusOllah/slogcolor v1.7.0/go.mod h1:5y1H50XuQIBvuYTJlmokWi+4FuPiJN5L7Z0jM4K4bYA=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw= github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw=
@ -13,6 +15,7 @@ github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/
github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c= github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c=
github.com/cloudflare/circl v1.6.2 h1:hL7VBpHHKzrV5WTfHCaBsgx/HGbBYlgrwvNXEVDYYsQ= github.com/cloudflare/circl v1.6.2 h1:hL7VBpHHKzrV5WTfHCaBsgx/HGbBYlgrwvNXEVDYYsQ=
github.com/cloudflare/circl v1.6.2/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4= github.com/cloudflare/circl v1.6.2/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4=
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s=
@ -106,8 +109,11 @@ github.com/hashicorp/yamux v0.1.2 h1:XtB8kyFOyHXYVFnwT5C3+Bdo8gArse7j2AQ0DA0Uey8
github.com/hashicorp/yamux v0.1.2/go.mod h1:C+zze2n6e/7wshOZep2A70/aQU6QBRWJO/G6FT1wIns= github.com/hashicorp/yamux v0.1.2/go.mod h1:C+zze2n6e/7wshOZep2A70/aQU6QBRWJO/G6FT1wIns=
github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI=
github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/ivanpirog/coloredcobra v1.0.1 h1:aURSdEmlR90/tSiWS0dMjdwOvCVUeYLfltLfbgNxrN4=
github.com/ivanpirog/coloredcobra v1.0.1/go.mod h1:iho4nEKcnwZFiniGSdcgdvRgZNjxm+h20acv8vqmN6Q=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jhump/protoreflect v1.17.0 h1:qOEr613fac2lOuTgWN4tPAtLL7fUSbuJL5X5XumQh94= github.com/jhump/protoreflect v1.17.0 h1:qOEr613fac2lOuTgWN4tPAtLL7fUSbuJL5X5XumQh94=
@ -155,8 +161,10 @@ github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8= github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8=
github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY= github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY=
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
@ -268,5 +276,6 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View file

@ -1,4 +1,3 @@
provider: provider:
name: stackitprivatepreview name: stackitprivatepreview
@ -18,6 +17,11 @@ resources:
method: DELETE method: DELETE
data_sources: data_sources:
instances:
read:
path: /v3alpha1/projects/{projectId}/regions/{region}/instances
method: GET
instance: instance:
read: read:
path: /v3alpha1/projects/{projectId}/regions/{region}/instances/{instanceId} path: /v3alpha1/projects/{projectId}/regions/{region}/instances/{instanceId}

View file

@ -7,21 +7,17 @@ import (
"fmt" "fmt"
"net/http" "net/http"
"github.com/hashicorp/terraform-plugin-framework/types"
"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/conversion" "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/conversion"
sqlserverflexalpha "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/instance/datasources_gen"
sqlserverflexalpha2 "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/instance/resources_gen"
sqlserverflexUtils "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/utils" sqlserverflexUtils "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/utils"
sqlserverflex "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexalpha" sqlserverflex "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexalpha"
"github.com/hashicorp/terraform-plugin-framework/datasource" "github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/schema/validator"
"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
"github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-plugin-log/tflog"
"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core" "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core"
"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/utils" "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/utils"
"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/validate"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
) )
// Ensure the implementation satisfies the expected interfaces. // Ensure the implementation satisfies the expected interfaces.
@ -62,165 +58,167 @@ func (r *instanceDataSource) Configure(ctx context.Context, req datasource.Confi
} }
// Schema defines the schema for the data source. // Schema defines the schema for the data source.
func (r *instanceDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { func (r *instanceDataSource) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
descriptions := map[string]string{ //descriptions := map[string]string{
"main": "SQLServer Flex ALPHA instance resource schema. Must have a `region` specified in the provider configuration.", // "main": "SQLServer Flex ALPHA instance resource schema. Must have a `region` specified in the provider configuration.",
"id": "Terraform's internal resource ID. It is structured as \"`project_id`,`region`,`instance_id`\".", // "id": "Terraform's internal resource ID. It is structured as \"`project_id`,`region`,`instance_id`\".",
"instance_id": "ID of the SQLServer Flex instance.", // "instance_id": "ID of the SQLServer Flex instance.",
"project_id": "STACKIT project ID to which the instance is associated.", // "project_id": "STACKIT project ID to which the instance is associated.",
"name": "Instance name.", // "name": "Instance name.",
"access_scope": "The access scope of the instance. (e.g. SNA)", // "access_scope": "The access scope of the instance. (e.g. SNA)",
"acl": "The Access Control List (ACL) for the SQLServer Flex instance.", // "acl": "The Access Control List (ACL) for the SQLServer Flex instance.",
"backup_schedule": `The backup schedule. Should follow the cron scheduling system format (e.g. "0 0 * * *")`, // "backup_schedule": `The backup schedule. Should follow the cron scheduling system format (e.g. "0 0 * * *")`,
"region": "The resource region. If not defined, the provider region is used.", // "region": "The resource region. If not defined, the provider region is used.",
"encryption": "The encryption block.", // "encryption": "The encryption block.",
"network": "The network block.", // "network": "The network block.",
"keyring_id": "STACKIT KMS - KeyRing ID of the encryption key to use.", // "keyring_id": "STACKIT KMS - KeyRing ID of the encryption key to use.",
"key_id": "STACKIT KMS - Key ID of the encryption key to use.", // "key_id": "STACKIT KMS - Key ID of the encryption key to use.",
"key_version": "STACKIT KMS - Key version to use in the encryption key.", // "key_version": "STACKIT KMS - Key version to use in the encryption key.",
"service:account": "STACKIT KMS - service account to use in the encryption key.", // "service:account": "STACKIT KMS - service account to use in the encryption key.",
"instance_address": "The returned instance IP address of the SQLServer Flex instance.", // "instance_address": "The returned instance IP address of the SQLServer Flex instance.",
"router_address": "The returned router IP address of the SQLServer Flex instance.", // "router_address": "The returned router IP address of the SQLServer Flex instance.",
} //}
resp.Schema = schema.Schema{ resp.Schema = sqlserverflexalpha.InstanceDataSourceSchema(ctx)
Description: descriptions["main"],
Attributes: map[string]schema.Attribute{ //resp.Schema = schema.Schema{
"id": schema.StringAttribute{ // Description: descriptions["main"],
Description: descriptions["id"], // Attributes: map[string]schema.Attribute{
Computed: true, // "id": schema.StringAttribute{
}, // Description: descriptions["id"],
"instance_id": schema.StringAttribute{ // Computed: true,
Description: descriptions["instance_id"], // },
Required: true, // "instance_id": schema.StringAttribute{
Validators: []validator.String{ // Description: descriptions["instance_id"],
validate.UUID(), // Required: true,
validate.NoSeparator(), // Validators: []validator.String{
}, // validate.UUID(),
}, // validate.NoSeparator(),
"project_id": schema.StringAttribute{ // },
Description: descriptions["project_id"], // },
Required: true, // "project_id": schema.StringAttribute{
Validators: []validator.String{ // Description: descriptions["project_id"],
validate.UUID(), // Required: true,
validate.NoSeparator(), // Validators: []validator.String{
}, // validate.UUID(),
}, // validate.NoSeparator(),
"name": schema.StringAttribute{ // },
Description: descriptions["name"], // },
Computed: true, // "name": schema.StringAttribute{
}, // Description: descriptions["name"],
"backup_schedule": schema.StringAttribute{ // Computed: true,
Description: descriptions["backup_schedule"], // },
Computed: true, // "backup_schedule": schema.StringAttribute{
}, // Description: descriptions["backup_schedule"],
"is_deletable": schema.BoolAttribute{ // Computed: true,
Description: descriptions["is_deletable"], // },
Computed: true, // "is_deletable": schema.BoolAttribute{
}, // Description: descriptions["is_deletable"],
"flavor": schema.SingleNestedAttribute{ // Computed: true,
Computed: true, // },
Attributes: map[string]schema.Attribute{ // "flavor": schema.SingleNestedAttribute{
"id": schema.StringAttribute{ // Computed: true,
Computed: true, // Attributes: map[string]schema.Attribute{
}, // "id": schema.StringAttribute{
"description": schema.StringAttribute{ // Computed: true,
Computed: true, // },
}, // "description": schema.StringAttribute{
"cpu": schema.Int64Attribute{ // Computed: true,
Computed: true, // },
}, // "cpu": schema.Int64Attribute{
"ram": schema.Int64Attribute{ // Computed: true,
Computed: true, // },
}, // "ram": schema.Int64Attribute{
"node_type": schema.StringAttribute{ // Computed: true,
Computed: true, // },
}, // "node_type": schema.StringAttribute{
}, // Computed: true,
}, // },
"replicas": schema.Int64Attribute{ // },
Computed: true, // },
}, // "replicas": schema.Int64Attribute{
"storage": schema.SingleNestedAttribute{ // Computed: true,
Computed: true, // },
Attributes: map[string]schema.Attribute{ // "storage": schema.SingleNestedAttribute{
"class": schema.StringAttribute{ // Computed: true,
Computed: true, // Attributes: map[string]schema.Attribute{
}, // "class": schema.StringAttribute{
"size": schema.Int64Attribute{ // Computed: true,
Computed: true, // },
}, // "size": schema.Int64Attribute{
}, // Computed: true,
}, // },
"version": schema.StringAttribute{ // },
Computed: true, // },
}, // "version": schema.StringAttribute{
"status": schema.StringAttribute{ // Computed: true,
Computed: true, // },
}, // "status": schema.StringAttribute{
"edition": schema.StringAttribute{ // Computed: true,
Computed: true, // },
}, // "edition": schema.StringAttribute{
"retention_days": schema.Int64Attribute{ // Computed: true,
Computed: true, // },
}, // "retention_days": schema.Int64Attribute{
"region": schema.StringAttribute{ // Computed: true,
// the region cannot be found, so it has to be passed // },
Optional: true, // "region": schema.StringAttribute{
Description: descriptions["region"], // // the region cannot be found, so it has to be passed
}, // Optional: true,
"encryption": schema.SingleNestedAttribute{ // Description: descriptions["region"],
Computed: true, // },
Attributes: map[string]schema.Attribute{ // "encryption": schema.SingleNestedAttribute{
"key_id": schema.StringAttribute{ // Computed: true,
Description: descriptions["key_id"], // Attributes: map[string]schema.Attribute{
Computed: true, // "key_id": schema.StringAttribute{
}, // Description: descriptions["key_id"],
"key_version": schema.StringAttribute{ // Computed: true,
Description: descriptions["key_version"], // },
Computed: true, // "key_version": schema.StringAttribute{
}, // Description: descriptions["key_version"],
"keyring_id": schema.StringAttribute{ // Computed: true,
Description: descriptions["keyring_id"], // },
Computed: true, // "keyring_id": schema.StringAttribute{
}, // Description: descriptions["keyring_id"],
"service_account": schema.StringAttribute{ // Computed: true,
Description: descriptions["service_account"], // },
Computed: true, // "service_account": schema.StringAttribute{
}, // Description: descriptions["service_account"],
}, // Computed: true,
Description: descriptions["encryption"], // },
}, // },
"network": schema.SingleNestedAttribute{ // Description: descriptions["encryption"],
Computed: true, // },
Attributes: map[string]schema.Attribute{ // "network": schema.SingleNestedAttribute{
"access_scope": schema.StringAttribute{ // Computed: true,
Description: descriptions["access_scope"], // Attributes: map[string]schema.Attribute{
Computed: true, // "access_scope": schema.StringAttribute{
}, // Description: descriptions["access_scope"],
"instance_address": schema.StringAttribute{ // Computed: true,
Description: descriptions["instance_address"], // },
Computed: true, // "instance_address": schema.StringAttribute{
}, // Description: descriptions["instance_address"],
"router_address": schema.StringAttribute{ // Computed: true,
Description: descriptions["router_address"], // },
Computed: true, // "router_address": schema.StringAttribute{
}, // Description: descriptions["router_address"],
"acl": schema.ListAttribute{ // Computed: true,
Description: descriptions["acl"], // },
ElementType: types.StringType, // "acl": schema.ListAttribute{
Computed: true, // Description: descriptions["acl"],
}, // ElementType: types.StringType,
}, // Computed: true,
Description: descriptions["network"], // },
}, // },
}, // Description: descriptions["network"],
} // },
// },
//}
} }
// Read refreshes the Terraform state with the latest data. // Read refreshes the Terraform state with the latest data.
func (r *instanceDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform func (r *instanceDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform
var model Model var model sqlserverflexalpha2.InstanceModel
diags := req.Config.Get(ctx, &model) diags := req.Config.Get(ctx, &model)
resp.Diagnostics.Append(diags...) resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() { if resp.Diagnostics.HasError() {
@ -253,34 +251,35 @@ func (r *instanceDataSource) Read(ctx context.Context, req datasource.ReadReques
ctx = core.LogResponse(ctx) ctx = core.LogResponse(ctx)
var storage = &storageModel{} //var storage = &storageModel{}
if !model.Storage.IsNull() && !model.Storage.IsUnknown() { //if !model.Storage.IsNull() && !model.Storage.IsUnknown() {
diags = model.Storage.As(ctx, storage, basetypes.ObjectAsOptions{}) // diags = model.Storage.As(ctx, storage, basetypes.ObjectAsOptions{})
resp.Diagnostics.Append(diags...) // resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() { // if resp.Diagnostics.HasError() {
return // return
} // }
} //}
//
//var encryption = &encryptionModel{}
//if !model.Encryption.IsNull() && !model.Encryption.IsUnknown() {
// diags = model.Encryption.As(ctx, encryption, basetypes.ObjectAsOptions{})
// resp.Diagnostics.Append(diags...)
// if resp.Diagnostics.HasError() {
// return
// }
//}
//
//var network = &networkModel{}
//if !model.Network.IsNull() && !model.Network.IsUnknown() {
// diags = model.Network.As(ctx, network, basetypes.ObjectAsOptions{})
// resp.Diagnostics.Append(diags...)
// if resp.Diagnostics.HasError() {
// return
// }
//}
var encryption = &encryptionModel{} err = mapDataSourceResponseToModel(ctx, instanceResp, &model, resp)
if !model.Encryption.IsNull() && !model.Encryption.IsUnknown() { //err = mapFields(ctx, instanceResp, &model, storage, encryption, network, region)
diags = model.Encryption.As(ctx, encryption, basetypes.ObjectAsOptions{})
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
return
}
}
var network = &networkModel{}
if !model.Network.IsNull() && !model.Network.IsUnknown() {
diags = model.Network.As(ctx, network, basetypes.ObjectAsOptions{})
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
return
}
}
err = mapFields(ctx, instanceResp, &model, storage, encryption, network, region)
if err != nil { if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Processing API payload: %v", err)) core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Processing API payload: %v", err))
return return

View file

@ -6,202 +6,397 @@ import (
"math" "math"
"github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/attr"
"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/resource"
"github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-framework/types"
sqlserverflex "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexalpha" sqlserverflex "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexalpha"
"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/conversion" "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/conversion"
"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core" sqlserverflexResGen "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/instance/resources_gen"
"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/utils"
) )
func mapFields( func mapCreateResponseToModel(
ctx context.Context, ctx context.Context,
resp *sqlserverflex.GetInstanceResponse, resp *sqlserverflex.GetInstanceResponse,
model *Model, m *sqlserverflexResGen.InstanceModel,
storage *storageModel, tfResp *resource.CreateResponse,
encryption *encryptionModel,
network *networkModel,
region string,
) error { ) error {
if resp == nil { m.BackupSchedule = types.StringValue(resp.GetBackupSchedule())
return fmt.Errorf("response input is nil")
}
if model == nil {
return fmt.Errorf("model input is nil")
}
instance := resp
var instanceId string if resp.HasEncryption() {
if model.InstanceId.ValueString() != "" { m.Encryption = sqlserverflexResGen.NewEncryptionValueMust(
instanceId = model.InstanceId.ValueString() m.Encryption.AttributeTypes(ctx),
} else if instance.Id != nil { map[string]attr.Value{
instanceId = *instance.Id "kek_key_id": types.StringValue(resp.Encryption.GetKekKeyId()),
} else { "kek_key_ring_id": types.StringValue(resp.Encryption.GetKekKeyRingId()),
return fmt.Errorf("instance id not present") "kek_key_version": types.StringValue(resp.Encryption.GetKekKeyVersion()),
"service_account": types.StringValue(resp.Encryption.GetServiceAccount()),
},
)
} }
var storageValues map[string]attr.Value m.FlavorId = types.StringValue(resp.GetFlavorId())
if instance.Storage == nil { m.Id = types.StringValue(resp.GetId())
storageValues = map[string]attr.Value{ m.InstanceId = types.StringValue(resp.GetId())
"class": storage.Class, m.Name = types.StringValue(resp.GetName())
"size": storage.Size,
} netAcl, diags := types.ListValueFrom(ctx, types.StringType, resp.Network.GetAcl())
} else { tfResp.Diagnostics.Append(diags...)
storageValues = map[string]attr.Value{
"class": types.StringValue(*instance.Storage.Class),
"size": types.Int64PointerValue(instance.Storage.Size),
}
}
storageObject, diags := types.ObjectValue(storageTypes, storageValues)
if diags.HasError() { if diags.HasError() {
return fmt.Errorf("creating storage: %w", core.DiagsToError(diags)) return fmt.Errorf("error converting api response value")
} }
var encryptionValues map[string]attr.Value m.Network = sqlserverflexResGen.NetworkValue{
if instance.Encryption == nil { AccessScope: types.StringValue(string(resp.Network.GetAccessScope())),
encryptionValues = map[string]attr.Value{ Acl: netAcl,
"keyring_id": encryption.KeyRingId, InstanceAddress: types.StringValue(resp.Network.GetInstanceAddress()),
"key_id": encryption.KeyId, RouterAddress: types.StringValue(resp.Network.GetRouterAddress()),
"key_version": encryption.KeyVersion,
"service_account": encryption.ServiceAccount,
}
} else {
encryptionValues = map[string]attr.Value{
"keyring_id": types.StringValue(*instance.Encryption.KekKeyRingId),
"key_id": types.StringValue(*instance.Encryption.KekKeyId),
"key_version": types.StringValue(*instance.Encryption.KekKeyVersion),
"service_account": types.StringValue(*instance.Encryption.ServiceAccount),
}
}
encryptionObject, diags := types.ObjectValue(encryptionTypes, encryptionValues)
if diags.HasError() {
return fmt.Errorf("creating encryption: %w", core.DiagsToError(diags))
} }
var networkValues map[string]attr.Value m.RetentionDays = types.Int64Value(resp.GetRetentionDays())
if instance.Network == nil { m.Storage = sqlserverflexResGen.StorageValue{
networkValues = map[string]attr.Value{ Class: types.StringValue(resp.Storage.GetClass()),
"acl": network.ACL, Size: types.Int64Value(resp.Storage.GetSize()),
"access_scope": network.AccessScope,
"instance_address": network.InstanceAddress,
"router_address": network.RouterAddress,
}
} else {
aclList, diags := types.ListValueFrom(ctx, types.StringType, *instance.Network.Acl)
if diags.HasError() {
return fmt.Errorf("creating network (acl list): %w", core.DiagsToError(diags))
}
var routerAddress string
if instance.Network.RouterAddress != nil {
routerAddress = *instance.Network.RouterAddress
diags.AddWarning("field missing while mapping fields", "router_address was empty in API response")
}
if instance.Network.InstanceAddress == nil {
return fmt.Errorf("creating network: no instance address returned")
}
networkValues = map[string]attr.Value{
"acl": aclList,
"access_scope": types.StringValue(string(*instance.Network.AccessScope)),
"instance_address": types.StringValue(*instance.Network.InstanceAddress),
"router_address": types.StringValue(routerAddress),
}
}
networkObject, diags := types.ObjectValue(networkTypes, networkValues)
if diags.HasError() {
return fmt.Errorf("creating network: %w", core.DiagsToError(diags))
} }
m.Status = types.StringValue(string(resp.GetStatus()))
m.Version = types.StringValue(string(resp.GetVersion()))
simplifiedModelBackupSchedule := utils.SimplifyBackupSchedule(model.BackupSchedule.ValueString())
// If the value returned by the API is different from the one in the model after simplification,
// we update the model so that it causes an error in Terraform
if simplifiedModelBackupSchedule != types.StringPointerValue(instance.BackupSchedule).ValueString() {
model.BackupSchedule = types.StringPointerValue(instance.BackupSchedule)
}
if instance.Replicas == nil {
return fmt.Errorf("instance has no replicas set")
}
if instance.RetentionDays == nil {
return fmt.Errorf("instance has no retention days set")
}
if instance.Version == nil {
return fmt.Errorf("instance has no version set")
}
if instance.Edition == nil {
return fmt.Errorf("instance has no edition set")
}
if instance.Status == nil {
return fmt.Errorf("instance has no status set")
}
if instance.IsDeletable == nil {
return fmt.Errorf("instance has no IsDeletable set")
}
model.Id = utils.BuildInternalTerraformId(model.ProjectId.ValueString(), region, instanceId)
model.InstanceId = types.StringValue(instanceId)
model.Name = types.StringPointerValue(instance.Name)
model.FlavorId = types.StringPointerValue(instance.FlavorId)
model.Replicas = types.Int64Value(int64(*instance.Replicas))
model.Storage = storageObject
model.Version = types.StringValue(string(*instance.Version))
model.Edition = types.StringValue(string(*instance.Edition))
model.Region = types.StringValue(region)
model.Encryption = encryptionObject
model.Network = networkObject
model.RetentionDays = types.Int64Value(*instance.RetentionDays)
model.Status = types.StringValue(string(*instance.Status))
model.IsDeletable = types.BoolValue(*instance.IsDeletable)
return nil return nil
} }
// mapReadResponseToModel copies the fields of a GetInstanceResponse into the
// generated instance model during a Read operation. Conversion diagnostics are
// appended to tfResp; a non-nil error is returned when the ACL list cannot be
// converted. Fields not present in the response (encryption) keep their
// current model value.
func mapReadResponseToModel(
	ctx context.Context,
	resp *sqlserverflex.GetInstanceResponse,
	m *sqlserverflexResGen.InstanceModel,
	tfResp *resource.ReadResponse,
) error {
	m.BackupSchedule = types.StringValue(resp.GetBackupSchedule())

	// Only overwrite the encryption block when the API actually returned one.
	if resp.HasEncryption() {
		encryptionAttrs := map[string]attr.Value{
			"kek_key_id":      types.StringValue(resp.Encryption.GetKekKeyId()),
			"kek_key_ring_id": types.StringValue(resp.Encryption.GetKekKeyRingId()),
			"kek_key_version": types.StringValue(resp.Encryption.GetKekKeyVersion()),
			"service_account": types.StringValue(resp.Encryption.GetServiceAccount()),
		}
		m.Encryption = sqlserverflexResGen.NewEncryptionValueMust(m.Encryption.AttributeTypes(ctx), encryptionAttrs)
	}

	m.FlavorId = types.StringValue(resp.GetFlavorId())
	// NOTE(review): both Id and InstanceId are set to the raw instance id,
	// while the legacy mapFields built a composite terraform id from
	// project/region/instance — confirm this is intended.
	m.Id = types.StringValue(resp.GetId())
	m.InstanceId = types.StringValue(resp.GetId())
	m.Name = types.StringValue(resp.GetName())

	aclValue, aclDiags := types.ListValueFrom(ctx, types.StringType, resp.Network.GetAcl())
	tfResp.Diagnostics.Append(aclDiags...)
	if aclDiags.HasError() {
		return fmt.Errorf("error converting api response value")
	}
	m.Network = sqlserverflexResGen.NetworkValue{
		AccessScope:     types.StringValue(string(resp.Network.GetAccessScope())),
		Acl:             aclValue,
		InstanceAddress: types.StringValue(resp.Network.GetInstanceAddress()),
		RouterAddress:   types.StringValue(resp.Network.GetRouterAddress()),
	}

	m.RetentionDays = types.Int64Value(resp.GetRetentionDays())
	m.Storage = sqlserverflexResGen.StorageValue{
		Class: types.StringValue(resp.Storage.GetClass()),
		Size:  types.Int64Value(resp.Storage.GetSize()),
	}
	m.Status = types.StringValue(string(resp.GetStatus()))
	m.Version = types.StringValue(string(resp.GetVersion()))
	return nil
}
// mapUpdateResponseToModel copies the fields of a GetInstanceResponse into the
// generated instance model during an Update operation. It mirrors
// mapReadResponseToModel except for the response type that receives
// diagnostics. Returns a non-nil error when the ACL list cannot be converted.
func mapUpdateResponseToModel(
	ctx context.Context,
	resp *sqlserverflex.GetInstanceResponse,
	m *sqlserverflexResGen.InstanceModel,
	tfResp *resource.UpdateResponse,
) error {
	m.BackupSchedule = types.StringValue(resp.GetBackupSchedule())

	// Encryption is optional in the response; keep the model's current value
	// when the API did not return one.
	if resp.HasEncryption() {
		encValues := map[string]attr.Value{
			"kek_key_id":      types.StringValue(resp.Encryption.GetKekKeyId()),
			"kek_key_ring_id": types.StringValue(resp.Encryption.GetKekKeyRingId()),
			"kek_key_version": types.StringValue(resp.Encryption.GetKekKeyVersion()),
			"service_account": types.StringValue(resp.Encryption.GetServiceAccount()),
		}
		m.Encryption = sqlserverflexResGen.NewEncryptionValueMust(m.Encryption.AttributeTypes(ctx), encValues)
	}

	m.FlavorId = types.StringValue(resp.GetFlavorId())
	// NOTE(review): Id and InstanceId both carry the raw instance id here,
	// unlike the legacy composite terraform id — confirm intended.
	m.Id = types.StringValue(resp.GetId())
	m.InstanceId = types.StringValue(resp.GetId())
	m.Name = types.StringValue(resp.GetName())

	aclList, aclDiags := types.ListValueFrom(ctx, types.StringType, resp.Network.GetAcl())
	tfResp.Diagnostics.Append(aclDiags...)
	if aclDiags.HasError() {
		return fmt.Errorf("error converting api response value")
	}
	m.Network = sqlserverflexResGen.NetworkValue{
		AccessScope:     types.StringValue(string(resp.Network.GetAccessScope())),
		Acl:             aclList,
		InstanceAddress: types.StringValue(resp.Network.GetInstanceAddress()),
		RouterAddress:   types.StringValue(resp.Network.GetRouterAddress()),
	}

	m.RetentionDays = types.Int64Value(resp.GetRetentionDays())
	m.Storage = sqlserverflexResGen.StorageValue{
		Class: types.StringValue(resp.Storage.GetClass()),
		Size:  types.Int64Value(resp.Storage.GetSize()),
	}
	m.Status = types.StringValue(string(resp.GetStatus()))
	m.Version = types.StringValue(string(resp.GetVersion()))
	return nil
}
// mapDataSourceResponseToModel copies the fields of a GetInstanceResponse into
// the generated instance model for the data source Read path. It mirrors
// mapReadResponseToModel but appends diagnostics to a datasource.ReadResponse.
// Returns a non-nil error when the ACL list cannot be converted.
func mapDataSourceResponseToModel(
	ctx context.Context,
	resp *sqlserverflex.GetInstanceResponse,
	m *sqlserverflexResGen.InstanceModel,
	tfResp *datasource.ReadResponse,
) error {
	m.BackupSchedule = types.StringValue(resp.GetBackupSchedule())

	// The encryption block is only refreshed when present in the response.
	if resp.HasEncryption() {
		encAttrs := map[string]attr.Value{
			"kek_key_id":      types.StringValue(resp.Encryption.GetKekKeyId()),
			"kek_key_ring_id": types.StringValue(resp.Encryption.GetKekKeyRingId()),
			"kek_key_version": types.StringValue(resp.Encryption.GetKekKeyVersion()),
			"service_account": types.StringValue(resp.Encryption.GetServiceAccount()),
		}
		m.Encryption = sqlserverflexResGen.NewEncryptionValueMust(m.Encryption.AttributeTypes(ctx), encAttrs)
	}

	m.FlavorId = types.StringValue(resp.GetFlavorId())
	// NOTE(review): Id and InstanceId are both the raw instance id — the
	// legacy mapper used a composite project/region/instance id; confirm.
	m.Id = types.StringValue(resp.GetId())
	m.InstanceId = types.StringValue(resp.GetId())
	m.Name = types.StringValue(resp.GetName())

	aclVal, aclDiags := types.ListValueFrom(ctx, types.StringType, resp.Network.GetAcl())
	tfResp.Diagnostics.Append(aclDiags...)
	if aclDiags.HasError() {
		return fmt.Errorf("error converting api response value")
	}
	m.Network = sqlserverflexResGen.NetworkValue{
		AccessScope:     types.StringValue(string(resp.Network.GetAccessScope())),
		Acl:             aclVal,
		InstanceAddress: types.StringValue(resp.Network.GetInstanceAddress()),
		RouterAddress:   types.StringValue(resp.Network.GetRouterAddress()),
	}

	m.RetentionDays = types.Int64Value(resp.GetRetentionDays())
	m.Storage = sqlserverflexResGen.StorageValue{
		Class: types.StringValue(resp.Storage.GetClass()),
		Size:  types.Int64Value(resp.Storage.GetSize()),
	}
	m.Status = types.StringValue(string(resp.GetStatus()))
	m.Version = types.StringValue(string(resp.GetVersion()))
	return nil
}
//func mapFields(
// ctx context.Context,
// resp *sqlserverflex.GetInstanceResponse,
// model *Model,
// storage *storageModel,
// encryption *encryptionModel,
// network *networkModel,
// region string,
//) error {
// if resp == nil {
// return fmt.Errorf("response input is nil")
// }
// if model == nil {
// return fmt.Errorf("model input is nil")
// }
// instance := resp
//
// var instanceId string
// if model.InstanceId.ValueString() != "" {
// instanceId = model.InstanceId.ValueString()
// } else if instance.Id != nil {
// instanceId = *instance.Id
// } else {
// return fmt.Errorf("instance id not present")
// }
//
// var storageValues map[string]attr.Value
// if instance.Storage == nil {
// storageValues = map[string]attr.Value{
// "class": storage.Class,
// "size": storage.Size,
// }
// } else {
// storageValues = map[string]attr.Value{
// "class": types.StringValue(*instance.Storage.Class),
// "size": types.Int64PointerValue(instance.Storage.Size),
// }
// }
// storageObject, diags := types.ObjectValue(storageTypes, storageValues)
// if diags.HasError() {
// return fmt.Errorf("creating storage: %w", core.DiagsToError(diags))
// }
//
// var encryptionValues map[string]attr.Value
// if instance.Encryption == nil {
// encryptionValues = map[string]attr.Value{
// "keyring_id": encryption.KeyRingId,
// "key_id": encryption.KeyId,
// "key_version": encryption.KeyVersion,
// "service_account": encryption.ServiceAccount,
// }
// } else {
// encryptionValues = map[string]attr.Value{
// "keyring_id": types.StringValue(*instance.Encryption.KekKeyRingId),
// "key_id": types.StringValue(*instance.Encryption.KekKeyId),
// "key_version": types.StringValue(*instance.Encryption.KekKeyVersion),
// "service_account": types.StringValue(*instance.Encryption.ServiceAccount),
// }
// }
// encryptionObject, diags := types.ObjectValue(encryptionTypes, encryptionValues)
// if diags.HasError() {
// return fmt.Errorf("creating encryption: %w", core.DiagsToError(diags))
// }
//
// var networkValues map[string]attr.Value
// if instance.Network == nil {
// networkValues = map[string]attr.Value{
// "acl": network.ACL,
// "access_scope": network.AccessScope,
// "instance_address": network.InstanceAddress,
// "router_address": network.RouterAddress,
// }
// } else {
// aclList, diags := types.ListValueFrom(ctx, types.StringType, *instance.Network.Acl)
// if diags.HasError() {
// return fmt.Errorf("creating network (acl list): %w", core.DiagsToError(diags))
// }
//
// var routerAddress string
// if instance.Network.RouterAddress != nil {
// routerAddress = *instance.Network.RouterAddress
// diags.AddWarning("field missing while mapping fields", "router_address was empty in API response")
// }
// if instance.Network.InstanceAddress == nil {
// return fmt.Errorf("creating network: no instance address returned")
// }
// networkValues = map[string]attr.Value{
// "acl": aclList,
// "access_scope": types.StringValue(string(*instance.Network.AccessScope)),
// "instance_address": types.StringValue(*instance.Network.InstanceAddress),
// "router_address": types.StringValue(routerAddress),
// }
// }
// networkObject, diags := types.ObjectValue(networkTypes, networkValues)
// if diags.HasError() {
// return fmt.Errorf("creating network: %w", core.DiagsToError(diags))
// }
//
// simplifiedModelBackupSchedule := utils.SimplifyBackupSchedule(model.BackupSchedule.ValueString())
// // If the value returned by the API is different from the one in the model after simplification,
// // we update the model so that it causes an error in Terraform
// if simplifiedModelBackupSchedule != types.StringPointerValue(instance.BackupSchedule).ValueString() {
// model.BackupSchedule = types.StringPointerValue(instance.BackupSchedule)
// }
//
// if instance.Replicas == nil {
// return fmt.Errorf("instance has no replicas set")
// }
//
// if instance.RetentionDays == nil {
// return fmt.Errorf("instance has no retention days set")
// }
//
// if instance.Version == nil {
// return fmt.Errorf("instance has no version set")
// }
//
// if instance.Edition == nil {
// return fmt.Errorf("instance has no edition set")
// }
//
// if instance.Status == nil {
// return fmt.Errorf("instance has no status set")
// }
//
// if instance.IsDeletable == nil {
// return fmt.Errorf("instance has no IsDeletable set")
// }
//
// model.Id = utils.BuildInternalTerraformId(model.ProjectId.ValueString(), region, instanceId)
// model.InstanceId = types.StringValue(instanceId)
// model.Name = types.StringPointerValue(instance.Name)
// model.FlavorId = types.StringPointerValue(instance.FlavorId)
// model.Replicas = types.Int64Value(int64(*instance.Replicas))
// model.Storage = storageObject
// model.Version = types.StringValue(string(*instance.Version))
// model.Edition = types.StringValue(string(*instance.Edition))
// model.Region = types.StringValue(region)
// model.Encryption = encryptionObject
// model.Network = networkObject
// model.RetentionDays = types.Int64Value(*instance.RetentionDays)
// model.Status = types.StringValue(string(*instance.Status))
// model.IsDeletable = types.BoolValue(*instance.IsDeletable)
// return nil
//}
func toCreatePayload( func toCreatePayload(
model *Model, model *sqlserverflexResGen.InstanceModel,
storage *storageModel,
encryption *encryptionModel,
network *networkModel,
) (*sqlserverflex.CreateInstanceRequestPayload, error) { ) (*sqlserverflex.CreateInstanceRequestPayload, error) {
if model == nil { if model == nil {
return nil, fmt.Errorf("nil model") return nil, fmt.Errorf("nil model")
} }
storagePayload := &sqlserverflex.CreateInstanceRequestPayloadGetStorageArgType{} storagePayload := &sqlserverflex.CreateInstanceRequestPayloadGetStorageArgType{}
if storage != nil { if !model.Storage.IsNull() && !model.Storage.IsUnknown() {
storagePayload.Class = conversion.StringValueToPointer(storage.Class) storagePayload.Class = model.Storage.Class.ValueStringPointer()
storagePayload.Size = conversion.Int64ValueToPointer(storage.Size) storagePayload.Size = model.Storage.Size.ValueInt64Pointer()
} }
var encryptionPayload *sqlserverflex.CreateInstanceRequestPayloadGetEncryptionArgType = nil var encryptionPayload *sqlserverflex.CreateInstanceRequestPayloadGetEncryptionArgType = nil
if encryption != nil && if !model.Encryption.IsNull() && !model.Encryption.IsUnknown() &&
!encryption.KeyId.IsNull() && !encryption.KeyId.IsUnknown() && encryption.KeyId.ValueString() != "" && !model.Encryption.KekKeyId.IsNull() && model.Encryption.KekKeyId.IsUnknown() && model.Encryption.KekKeyId.ValueString() != "" &&
!encryption.KeyRingId.IsNull() && !encryption.KeyRingId.IsUnknown() && encryption.KeyRingId.ValueString() != "" && !model.Encryption.KekKeyRingId.IsNull() && !model.Encryption.KekKeyRingId.IsUnknown() && model.Encryption.KekKeyRingId.ValueString() != "" &&
!encryption.KeyVersion.IsNull() && !encryption.KeyVersion.IsUnknown() && encryption.KeyVersion.ValueString() != "" && !model.Encryption.KekKeyVersion.IsNull() && !model.Encryption.KekKeyVersion.IsUnknown() && model.Encryption.KekKeyVersion.ValueString() != "" &&
!encryption.ServiceAccount.IsNull() && !encryption.ServiceAccount.IsUnknown() && encryption.ServiceAccount.ValueString() != "" { !model.Encryption.ServiceAccount.IsNull() && !model.Encryption.ServiceAccount.IsUnknown() && model.Encryption.ServiceAccount.ValueString() != "" {
encryptionPayload = &sqlserverflex.CreateInstanceRequestPayloadGetEncryptionArgType{ encryptionPayload = &sqlserverflex.CreateInstanceRequestPayloadGetEncryptionArgType{
KekKeyId: conversion.StringValueToPointer(encryption.KeyId), KekKeyId: model.Encryption.KekKeyId.ValueStringPointer(),
KekKeyRingId: conversion.StringValueToPointer(encryption.KeyVersion), KekKeyRingId: model.Encryption.KekKeyVersion.ValueStringPointer(),
KekKeyVersion: conversion.StringValueToPointer(encryption.KeyRingId), KekKeyVersion: model.Encryption.KekKeyRingId.ValueStringPointer(),
ServiceAccount: conversion.StringValueToPointer(encryption.ServiceAccount), ServiceAccount: model.Encryption.ServiceAccount.ValueStringPointer(),
}
}
var aclElements []string
if network != nil && !network.ACL.IsNull() && !network.ACL.IsUnknown() {
aclElements = make([]string, 0, len(network.ACL.Elements()))
diags := network.ACL.ElementsAs(context.TODO(), &aclElements, false)
if diags.HasError() {
return nil, fmt.Errorf("creating network: %w", core.DiagsToError(diags))
} }
} }
networkPayload := &sqlserverflex.CreateInstanceRequestPayloadGetNetworkArgType{} networkPayload := &sqlserverflex.CreateInstanceRequestPayloadGetNetworkArgType{}
if network != nil { if !model.Network.IsNull() && !model.Network.IsUnknown() {
networkPayload.AccessScope = sqlserverflex.CreateInstanceRequestPayloadNetworkGetAccessScopeAttributeType(conversion.StringValueToPointer(network.AccessScope)) networkPayload.AccessScope = sqlserverflex.CreateInstanceRequestPayloadNetworkGetAccessScopeAttributeType(
networkPayload.Acl = &aclElements model.Network.AccessScope.ValueStringPointer(),
)
var resList []string
aclList := model.Network.Acl.Elements()
for _, aclItem := range aclList {
if !aclItem.IsNull() && !aclItem.IsUnknown() {
resList = append(resList, aclItem.String())
}
}
networkPayload.Acl = &resList
} }
return &sqlserverflex.CreateInstanceRequestPayload{ return &sqlserverflex.CreateInstanceRequestPayload{
@ -216,66 +411,80 @@ func toCreatePayload(
}, nil }, nil
} }
//nolint:unused // TODO: remove if not needed later ////nolint:unused // TODO: remove if not needed later
func toUpdatePartiallyPayload( //func toUpdatePartiallyPayload(
model *Model, // model *Model,
storage *storageModel, // storage *storageModel,
network *networkModel, // network *networkModel,
) (*sqlserverflex.UpdateInstancePartiallyRequestPayload, error) { //) (*sqlserverflex.UpdateInstancePartiallyRequestPayload, error) {
if model == nil { // if model == nil {
return nil, fmt.Errorf("nil model") // return nil, fmt.Errorf("nil model")
} // }
//
storagePayload := &sqlserverflex.UpdateInstanceRequestPayloadGetStorageArgType{} // storagePayload := &sqlserverflex.UpdateInstanceRequestPayloadGetStorageArgType{}
if storage != nil { // if storage != nil {
storagePayload.Size = conversion.Int64ValueToPointer(storage.Size) // storagePayload.Size = conversion.Int64ValueToPointer(storage.Size)
} // }
//
var aclElements []string // var aclElements []string
if network != nil && !network.ACL.IsNull() && !network.ACL.IsUnknown() { // if network != nil && !network.ACL.IsNull() && !network.ACL.IsUnknown() {
aclElements = make([]string, 0, len(network.ACL.Elements())) // aclElements = make([]string, 0, len(network.ACL.Elements()))
diags := network.ACL.ElementsAs(context.TODO(), &aclElements, false) // diags := network.ACL.ElementsAs(context.TODO(), &aclElements, false)
if diags.HasError() { // if diags.HasError() {
return nil, fmt.Errorf("creating network: %w", core.DiagsToError(diags)) // return nil, fmt.Errorf("creating network: %w", core.DiagsToError(diags))
} // }
} // }
//
networkPayload := &sqlserverflex.UpdateInstancePartiallyRequestPayloadGetNetworkArgType{} // networkPayload := &sqlserverflex.UpdateInstancePartiallyRequestPayloadGetNetworkArgType{}
if network != nil { // if network != nil {
networkPayload.AccessScope = sqlserverflex.UpdateInstancePartiallyRequestPayloadNetworkGetAccessScopeAttributeType(conversion.StringValueToPointer(network.AccessScope)) // networkPayload.AccessScope = sqlserverflex.UpdateInstancePartiallyRequestPayloadNetworkGetAccessScopeAttributeType(conversion.StringValueToPointer(network.AccessScope))
networkPayload.Acl = &aclElements // networkPayload.Acl = &aclElements
} // }
//
if model.Replicas.ValueInt64() > math.MaxInt32 { // if model.Replicas.ValueInt64() > math.MaxInt32 {
return nil, fmt.Errorf("replica count too big: %d", model.Replicas.ValueInt64()) // return nil, fmt.Errorf("replica count too big: %d", model.Replicas.ValueInt64())
} // }
replCount := int32(model.Replicas.ValueInt64()) // nolint:gosec // check is performed above // replCount := int32(model.Replicas.ValueInt64()) // nolint:gosec // check is performed above
return &sqlserverflex.UpdateInstancePartiallyRequestPayload{ // return &sqlserverflex.UpdateInstancePartiallyRequestPayload{
BackupSchedule: conversion.StringValueToPointer(model.BackupSchedule), // BackupSchedule: conversion.StringValueToPointer(model.BackupSchedule),
FlavorId: conversion.StringValueToPointer(model.FlavorId), // FlavorId: conversion.StringValueToPointer(model.FlavorId),
Name: conversion.StringValueToPointer(model.Name), // Name: conversion.StringValueToPointer(model.Name),
Network: networkPayload, // Network: networkPayload,
Replicas: sqlserverflex.UpdateInstancePartiallyRequestPayloadGetReplicasAttributeType(&replCount), // Replicas: sqlserverflex.UpdateInstancePartiallyRequestPayloadGetReplicasAttributeType(&replCount),
RetentionDays: conversion.Int64ValueToPointer(model.RetentionDays), // RetentionDays: conversion.Int64ValueToPointer(model.RetentionDays),
Storage: storagePayload, // Storage: storagePayload,
Version: sqlserverflex.UpdateInstancePartiallyRequestPayloadGetVersionAttributeType(conversion.StringValueToPointer(model.Version)), // Version: sqlserverflex.UpdateInstancePartiallyRequestPayloadGetVersionAttributeType(conversion.StringValueToPointer(model.Version)),
}, nil // }, nil
} //}
// TODO: check func with his args // TODO: check func with his args
func toUpdatePayload( func toUpdatePayload(
_ *Model, ctx context.Context,
_ *storageModel, m *sqlserverflexResGen.InstanceModel,
_ *networkModel, resp *resource.UpdateResponse,
) (*sqlserverflex.UpdateInstanceRequestPayload, error) { ) (*sqlserverflex.UpdateInstanceRequestPayload, error) {
if m.Replicas.ValueInt64() > math.MaxUint32 {
return nil, fmt.Errorf("replicas value is too big for uint32")
}
replVal := sqlserverflex.Replicas(uint32(m.Replicas.ValueInt64()))
var netAcl []string
diags := m.Network.Acl.ElementsAs(ctx, &netAcl, false)
resp.Diagnostics.Append(diags...)
if diags.HasError() {
return nil, fmt.Errorf("error converting model network acl value")
}
return &sqlserverflex.UpdateInstanceRequestPayload{ return &sqlserverflex.UpdateInstanceRequestPayload{
BackupSchedule: nil, BackupSchedule: m.BackupSchedule.ValueStringPointer(),
FlavorId: nil, FlavorId: m.FlavorId.ValueStringPointer(),
Name: nil, Name: m.Name.ValueStringPointer(),
Network: nil, Network: &sqlserverflex.CreateInstanceRequestPayloadNetwork{
Replicas: nil, AccessScope: sqlserverflex.CreateInstanceRequestPayloadNetworkGetAccessScopeAttributeType(m.Network.AccessScope.ValueStringPointer()),
RetentionDays: nil, Acl: &netAcl,
Storage: nil, },
Version: nil, Replicas: &replVal,
RetentionDays: m.RetentionDays.ValueInt64Pointer(),
Storage: &sqlserverflex.StorageUpdate{Size: m.Storage.Size.ValueInt64Pointer()},
Version: sqlserverflex.UpdateInstanceRequestPayloadGetVersionAttributeType(m.Version.ValueStringPointer()),
}, nil }, nil
} }

View file

@ -10,15 +10,14 @@ import (
"strings" "strings"
"time" "time"
"github.com/hashicorp/terraform-plugin-framework/resource/identityschema"
postgresflexUtils "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/postgresflexalpha/utils" postgresflexUtils "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/postgresflexalpha/utils"
sqlserverflexalpha2 "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/instance/resources_gen" sqlserverflexalpha2 "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/instance/resources_gen"
sqlserverflexUtils "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/utils" sqlserverflexUtils "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/services/sqlserverflexalpha/utils"
"github.com/hashicorp/terraform-plugin-framework/attr"
"github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/path"
"github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource"
"github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
"github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-plugin-log/tflog"
"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexalpha" "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexalpha"
"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/conversion" "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/conversion"
@ -35,6 +34,7 @@ var (
_ resource.ResourceWithConfigure = &instanceResource{} _ resource.ResourceWithConfigure = &instanceResource{}
_ resource.ResourceWithImportState = &instanceResource{} _ resource.ResourceWithImportState = &instanceResource{}
_ resource.ResourceWithModifyPlan = &instanceResource{} _ resource.ResourceWithModifyPlan = &instanceResource{}
_ resource.ResourceWithIdentity = &instanceResource{}
) )
//nolint:unused // TODO: remove if not needed later //nolint:unused // TODO: remove if not needed later
@ -43,63 +43,10 @@ var validNodeTypes []string = []string{
"Replica", "Replica",
} }
type Model struct { type InstanceResourceIdentityModel struct {
Id types.String `tfsdk:"id"` // needed by TF ProjectID types.String `tfsdk:"project_id"`
InstanceId types.String `tfsdk:"instance_id"` Region types.String `tfsdk:"region"`
ProjectId types.String `tfsdk:"project_id"` InstanceID types.String `tfsdk:"instance_id"`
Name types.String `tfsdk:"name"`
BackupSchedule types.String `tfsdk:"backup_schedule"`
FlavorId types.String `tfsdk:"flavor_id"`
Encryption types.Object `tfsdk:"encryption"`
IsDeletable types.Bool `tfsdk:"is_deletable"`
Storage types.Object `tfsdk:"storage"`
Status types.String `tfsdk:"status"`
Version types.String `tfsdk:"version"`
Replicas types.Int64 `tfsdk:"replicas"`
Region types.String `tfsdk:"region"`
Network types.Object `tfsdk:"network"`
Edition types.String `tfsdk:"edition"`
RetentionDays types.Int64 `tfsdk:"retention_days"`
}
type encryptionModel struct {
KeyRingId types.String `tfsdk:"keyring_id"`
KeyId types.String `tfsdk:"key_id"`
KeyVersion types.String `tfsdk:"key_version"`
ServiceAccount types.String `tfsdk:"service_account"`
}
var encryptionTypes = map[string]attr.Type{
"keyring_id": basetypes.StringType{},
"key_id": basetypes.StringType{},
"key_version": basetypes.StringType{},
"service_account": basetypes.StringType{},
}
type networkModel struct {
ACL types.List `tfsdk:"acl"`
AccessScope types.String `tfsdk:"access_scope"`
InstanceAddress types.String `tfsdk:"instance_address"`
RouterAddress types.String `tfsdk:"router_address"`
}
var networkTypes = map[string]attr.Type{
"acl": basetypes.ListType{ElemType: types.StringType},
"access_scope": basetypes.StringType{},
"instance_address": basetypes.StringType{},
"router_address": basetypes.StringType{},
}
// Struct corresponding to Model.Storage
type storageModel struct {
Class types.String `tfsdk:"class"`
Size types.Int64 `tfsdk:"size"`
}
// Types corresponding to storageModel
var storageTypes = map[string]attr.Type{
"class": basetypes.StringType{},
"size": basetypes.Int64Type{},
} }
// NewInstanceResource is a helper function to simplify the provider implementation. // NewInstanceResource is a helper function to simplify the provider implementation.
@ -145,7 +92,7 @@ func (r *instanceResource) ModifyPlan(
req resource.ModifyPlanRequest, req resource.ModifyPlanRequest,
resp *resource.ModifyPlanResponse, resp *resource.ModifyPlanResponse,
) { // nolint:gocritic // function signature required by Terraform ) { // nolint:gocritic // function signature required by Terraform
var configModel Model var configModel sqlserverflexalpha2.InstanceModel
// skip initial empty configuration to avoid follow-up errors // skip initial empty configuration to avoid follow-up errors
if req.Config.Raw.IsNull() { if req.Config.Raw.IsNull() {
return return
@ -155,7 +102,7 @@ func (r *instanceResource) ModifyPlan(
return return
} }
var planModel Model var planModel sqlserverflexalpha2.InstanceModel
resp.Diagnostics.Append(req.Plan.Get(ctx, &planModel)...) resp.Diagnostics.Append(req.Plan.Get(ctx, &planModel)...)
if resp.Diagnostics.HasError() { if resp.Diagnostics.HasError() {
return return
@ -440,6 +387,22 @@ func (r *instanceResource) Schema(ctx context.Context, _ resource.SchemaRequest,
//} //}
} }
func (r *instanceResource) IdentitySchema(_ context.Context, _ resource.IdentitySchemaRequest, resp *resource.IdentitySchemaResponse) {
resp.IdentitySchema = identityschema.Schema{
Attributes: map[string]identityschema.Attribute{
"project_id": identityschema.StringAttribute{
RequiredForImport: true, // must be set during import by the practitioner
},
"region": identityschema.StringAttribute{
RequiredForImport: true, // can be defaulted by the provider configuration
},
"instance_id": identityschema.StringAttribute{
RequiredForImport: true, // can be defaulted by the provider configuration
},
},
}
}
// Create creates the resource and sets the initial Terraform state. // Create creates the resource and sets the initial Terraform state.
func (r *instanceResource) Create( func (r *instanceResource) Create(
ctx context.Context, ctx context.Context,
@ -460,35 +423,8 @@ func (r *instanceResource) Create(
ctx = tflog.SetField(ctx, "project_id", projectId) ctx = tflog.SetField(ctx, "project_id", projectId)
ctx = tflog.SetField(ctx, "region", region) ctx = tflog.SetField(ctx, "region", region)
var storage = &storageModel{}
if !model.Storage.IsNull() && !model.Storage.IsUnknown() {
diags = model.Storage.As(ctx, storage, basetypes.ObjectAsOptions{})
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
return
}
}
var encryption = &encryptionModel{}
if !model.Encryption.IsNull() && !model.Encryption.IsUnknown() {
diags = model.Encryption.As(ctx, encryption, basetypes.ObjectAsOptions{})
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
return
}
}
var network = &networkModel{}
if !model.Network.IsNull() && !model.Network.IsUnknown() {
diags = model.Network.As(ctx, network, basetypes.ObjectAsOptions{})
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
return
}
}
// Generate API request body from model // Generate API request body from model
payload, err := toCreatePayload(&model, storage, encryption, network) payload, err := toCreatePayload(&model)
if err != nil { if err != nil {
core.LogAndAddError( core.LogAndAddError(
ctx, ctx,
@ -552,7 +488,8 @@ func (r *instanceResource) Create(
} }
// Map response body to schema // Map response body to schema
err = mapFields(ctx, waitResp, &model, storage, encryption, network, region) // err = mapFields(ctx, waitResp, &model, storage, encryption, network, region)
err = mapCreateResponseToModel(ctx, waitResp, &model, resp)
if err != nil { if err != nil {
core.LogAndAddError( core.LogAndAddError(
ctx, ctx,
@ -578,7 +515,7 @@ func (r *instanceResource) Read(
req resource.ReadRequest, req resource.ReadRequest,
resp *resource.ReadResponse, resp *resource.ReadResponse,
) { // nolint:gocritic // function signature required by Terraform ) { // nolint:gocritic // function signature required by Terraform
var model Model var model sqlserverflexalpha2.InstanceModel
diags := req.State.Get(ctx, &model) diags := req.State.Get(ctx, &model)
resp.Diagnostics.Append(diags...) resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() { if resp.Diagnostics.HasError() {
@ -595,33 +532,6 @@ func (r *instanceResource) Read(
ctx = tflog.SetField(ctx, "instance_id", instanceId) ctx = tflog.SetField(ctx, "instance_id", instanceId)
ctx = tflog.SetField(ctx, "region", region) ctx = tflog.SetField(ctx, "region", region)
var storage = &storageModel{}
if !model.Storage.IsNull() && !model.Storage.IsUnknown() {
diags = model.Storage.As(ctx, storage, basetypes.ObjectAsOptions{})
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
return
}
}
var encryption = &encryptionModel{}
if !model.Encryption.IsNull() && !model.Encryption.IsUnknown() {
diags = model.Encryption.As(ctx, encryption, basetypes.ObjectAsOptions{})
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
return
}
}
var network = &networkModel{}
if !model.Network.IsNull() && !model.Network.IsUnknown() {
diags = model.Network.As(ctx, network, basetypes.ObjectAsOptions{})
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
return
}
}
instanceResp, err := r.client.GetInstanceRequest(ctx, projectId, region, instanceId).Execute() instanceResp, err := r.client.GetInstanceRequest(ctx, projectId, region, instanceId).Execute()
if err != nil { if err != nil {
oapiErr, ok := err.(*oapierror.GenericOpenAPIError) //nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped oapiErr, ok := err.(*oapierror.GenericOpenAPIError) //nolint:errorlint //complaining that error.As should be used to catch wrapped errors, but this error should not be wrapped
@ -636,7 +546,8 @@ func (r *instanceResource) Read(
ctx = core.LogResponse(ctx) ctx = core.LogResponse(ctx)
// Map response body to schema // Map response body to schema
err = mapFields(ctx, instanceResp, &model, storage, encryption, network, region) // err = mapFields(ctx, instanceResp, &model, storage, encryption, network, region)
err = mapReadResponseToModel(ctx, instanceResp, &model, resp)
if err != nil { if err != nil {
core.LogAndAddError( core.LogAndAddError(
ctx, ctx,
@ -662,7 +573,7 @@ func (r *instanceResource) Update(
resp *resource.UpdateResponse, resp *resource.UpdateResponse,
) { // nolint:gocritic // function signature required by Terraform ) { // nolint:gocritic // function signature required by Terraform
// Retrieve values from plan // Retrieve values from plan
var model Model var model sqlserverflexalpha2.InstanceModel
diags := req.Plan.Get(ctx, &model) diags := req.Plan.Get(ctx, &model)
resp.Diagnostics.Append(diags...) resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() { if resp.Diagnostics.HasError() {
@ -679,35 +590,8 @@ func (r *instanceResource) Update(
ctx = tflog.SetField(ctx, "instance_id", instanceId) ctx = tflog.SetField(ctx, "instance_id", instanceId)
ctx = tflog.SetField(ctx, "region", region) ctx = tflog.SetField(ctx, "region", region)
var storage = &storageModel{}
if !model.Storage.IsNull() && !model.Storage.IsUnknown() {
diags = model.Storage.As(ctx, storage, basetypes.ObjectAsOptions{})
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
return
}
}
var encryption = &encryptionModel{}
if !model.Encryption.IsNull() && !model.Encryption.IsUnknown() {
diags = model.Encryption.As(ctx, encryption, basetypes.ObjectAsOptions{})
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
return
}
}
var network = &networkModel{}
if !model.Network.IsNull() && !model.Network.IsUnknown() {
diags = model.Network.As(ctx, network, basetypes.ObjectAsOptions{})
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
return
}
}
// Generate API request body from model // Generate API request body from model
payload, err := toUpdatePayload(&model, storage, network) payload, err := toUpdatePayload(ctx, &model, resp)
if err != nil { if err != nil {
core.LogAndAddError( core.LogAndAddError(
ctx, ctx,
@ -743,7 +627,8 @@ func (r *instanceResource) Update(
} }
// Map response body to schema // Map response body to schema
err = mapFields(ctx, waitResp, &model, storage, encryption, network, region) err = mapUpdateResponseToModel(ctx, waitResp, &model, resp)
// err = mapFields(ctx, waitResp, &model, storage, encryption, network, region)
if err != nil { if err != nil {
core.LogAndAddError( core.LogAndAddError(
ctx, ctx,
@ -768,7 +653,7 @@ func (r *instanceResource) Delete(
resp *resource.DeleteResponse, resp *resource.DeleteResponse,
) { // nolint:gocritic // function signature required by Terraform ) { // nolint:gocritic // function signature required by Terraform
// Retrieve values from state // Retrieve values from state
var model Model var model sqlserverflexalpha2.InstanceModel
diags := req.State.Get(ctx, &model) diags := req.State.Get(ctx, &model)
resp.Diagnostics.Append(diags...) resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() { if resp.Diagnostics.HasError() {

View file

@ -1,280 +0,0 @@
package sqlserverflex
import (
"context"
"reflect"
"testing"
"github.com/hashicorp/terraform-plugin-framework/resource"
"tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/stackit/internal/core"
sqlserverflex "tf-provider.git.onstackit.cloud/stackit-dev-tools/terraform-provider-stackitprivatepreview/pkg_gen/sqlserverflexalpha"
)
// TestNewInstanceResource compares the constructor's result against an
// expected resource via reflect.DeepEqual. The case table is empty until
// concrete scenarios are added.
func TestNewInstanceResource(t *testing.T) {
	var cases []struct {
		name string
		want resource.Resource
	}
	// TODO: populate cases with concrete scenarios.
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := NewInstanceResource()
			if !reflect.DeepEqual(got, tc.want) {
				t.Errorf("NewInstanceResource() = %v, want %v", got, tc.want)
			}
		})
	}
}
// Test_instanceResource_Configure is a table-driven scaffold exercising
// instanceResource.Configure. The case table is empty until scenarios
// are added.
func Test_instanceResource_Configure(t *testing.T) {
	type fields struct {
		client       *sqlserverflex.APIClient
		providerData core.ProviderData
	}
	type args struct {
		ctx  context.Context
		req  resource.ConfigureRequest
		resp *resource.ConfigureResponse
	}
	var cases []struct {
		name   string
		fields fields
		args   args
	}
	// TODO: populate cases with concrete scenarios.
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			res := &instanceResource{
				client:       tc.fields.client,
				providerData: tc.fields.providerData,
			}
			res.Configure(tc.args.ctx, tc.args.req, tc.args.resp)
		})
	}
}
// Test_instanceResource_Create is a table-driven scaffold exercising
// instanceResource.Create. The case table is empty until scenarios are
// added.
func Test_instanceResource_Create(t *testing.T) {
	type fields struct {
		client       *sqlserverflex.APIClient
		providerData core.ProviderData
	}
	type args struct {
		ctx  context.Context
		req  resource.CreateRequest
		resp *resource.CreateResponse
	}
	var cases []struct {
		name   string
		fields fields
		args   args
	}
	// TODO: populate cases with concrete scenarios.
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			res := &instanceResource{
				client:       tc.fields.client,
				providerData: tc.fields.providerData,
			}
			res.Create(tc.args.ctx, tc.args.req, tc.args.resp)
		})
	}
}
// Test_instanceResource_Delete is a table-driven scaffold exercising
// instanceResource.Delete. The case table is empty until scenarios are
// added.
func Test_instanceResource_Delete(t *testing.T) {
	type fields struct {
		client       *sqlserverflex.APIClient
		providerData core.ProviderData
	}
	type args struct {
		ctx  context.Context
		req  resource.DeleteRequest
		resp *resource.DeleteResponse
	}
	var cases []struct {
		name   string
		fields fields
		args   args
	}
	// TODO: populate cases with concrete scenarios.
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			res := &instanceResource{
				client:       tc.fields.client,
				providerData: tc.fields.providerData,
			}
			res.Delete(tc.args.ctx, tc.args.req, tc.args.resp)
		})
	}
}
// Test_instanceResource_ImportState is a table-driven scaffold exercising
// instanceResource.ImportState. The case table is empty until scenarios
// are added.
func Test_instanceResource_ImportState(t *testing.T) {
	type fields struct {
		client       *sqlserverflex.APIClient
		providerData core.ProviderData
	}
	type args struct {
		ctx  context.Context
		req  resource.ImportStateRequest
		resp *resource.ImportStateResponse
	}
	var cases []struct {
		name   string
		fields fields
		args   args
	}
	// TODO: populate cases with concrete scenarios.
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			res := &instanceResource{
				client:       tc.fields.client,
				providerData: tc.fields.providerData,
			}
			res.ImportState(tc.args.ctx, tc.args.req, tc.args.resp)
		})
	}
}
// Test_instanceResource_Metadata is a table-driven scaffold exercising
// instanceResource.Metadata. The case table is empty until scenarios are
// added.
func Test_instanceResource_Metadata(t *testing.T) {
	type fields struct {
		client       *sqlserverflex.APIClient
		providerData core.ProviderData
	}
	type args struct {
		in0  context.Context
		req  resource.MetadataRequest
		resp *resource.MetadataResponse
	}
	var cases []struct {
		name   string
		fields fields
		args   args
	}
	// TODO: populate cases with concrete scenarios.
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			res := &instanceResource{
				client:       tc.fields.client,
				providerData: tc.fields.providerData,
			}
			res.Metadata(tc.args.in0, tc.args.req, tc.args.resp)
		})
	}
}
// Test_instanceResource_ModifyPlan is a table-driven scaffold exercising
// instanceResource.ModifyPlan. The case table is empty until scenarios
// are added.
func Test_instanceResource_ModifyPlan(t *testing.T) {
	type fields struct {
		client       *sqlserverflex.APIClient
		providerData core.ProviderData
	}
	type args struct {
		ctx  context.Context
		req  resource.ModifyPlanRequest
		resp *resource.ModifyPlanResponse
	}
	var cases []struct {
		name   string
		fields fields
		args   args
	}
	// TODO: populate cases with concrete scenarios.
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			res := &instanceResource{
				client:       tc.fields.client,
				providerData: tc.fields.providerData,
			}
			res.ModifyPlan(tc.args.ctx, tc.args.req, tc.args.resp)
		})
	}
}
// Test_instanceResource_Read is a table-driven scaffold exercising
// instanceResource.Read. The case table is empty until scenarios are
// added.
func Test_instanceResource_Read(t *testing.T) {
	type fields struct {
		client       *sqlserverflex.APIClient
		providerData core.ProviderData
	}
	type args struct {
		ctx  context.Context
		req  resource.ReadRequest
		resp *resource.ReadResponse
	}
	var cases []struct {
		name   string
		fields fields
		args   args
	}
	// TODO: populate cases with concrete scenarios.
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			res := &instanceResource{
				client:       tc.fields.client,
				providerData: tc.fields.providerData,
			}
			res.Read(tc.args.ctx, tc.args.req, tc.args.resp)
		})
	}
}
// Test_instanceResource_Schema is a table-driven scaffold exercising
// instanceResource.Schema. The case table is empty until scenarios are
// added.
func Test_instanceResource_Schema(t *testing.T) {
	type fields struct {
		client       *sqlserverflex.APIClient
		providerData core.ProviderData
	}
	type args struct {
		in0  context.Context
		in1  resource.SchemaRequest
		resp *resource.SchemaResponse
	}
	var cases []struct {
		name   string
		fields fields
		args   args
	}
	// TODO: populate cases with concrete scenarios.
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			res := &instanceResource{
				client:       tc.fields.client,
				providerData: tc.fields.providerData,
			}
			res.Schema(tc.args.in0, tc.args.in1, tc.args.resp)
		})
	}
}
// Test_instanceResource_Update is a table-driven scaffold exercising
// instanceResource.Update. The case table is empty until scenarios are
// added.
func Test_instanceResource_Update(t *testing.T) {
	type fields struct {
		client       *sqlserverflex.APIClient
		providerData core.ProviderData
	}
	type args struct {
		ctx  context.Context
		req  resource.UpdateRequest
		resp *resource.UpdateResponse
	}
	var cases []struct {
		name   string
		fields fields
		args   args
	}
	// TODO: populate cases with concrete scenarios.
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			res := &instanceResource{
				client:       tc.fields.client,
				providerData: tc.fields.providerData,
			}
			res.Update(tc.args.ctx, tc.args.req, tc.args.resp)
		})
	}
}

View file

@ -26,6 +26,11 @@ func InstanceResourceSchema(ctx context.Context) schema.Schema {
Description: "The schedule for on what time and how often the database backup will be created. The schedule is written as a cron schedule.", Description: "The schedule for on what time and how often the database backup will be created. The schedule is written as a cron schedule.",
MarkdownDescription: "The schedule for on what time and how often the database backup will be created. The schedule is written as a cron schedule.", MarkdownDescription: "The schedule for on what time and how often the database backup will be created. The schedule is written as a cron schedule.",
}, },
"edition": schema.StringAttribute{
Computed: true,
Description: "Edition of the MSSQL server instance",
MarkdownDescription: "Edition of the MSSQL server instance",
},
"encryption": schema.SingleNestedAttribute{ "encryption": schema.SingleNestedAttribute{
Attributes: map[string]schema.Attribute{ Attributes: map[string]schema.Attribute{
"kek_key_id": schema.StringAttribute{ "kek_key_id": schema.StringAttribute{
@ -73,6 +78,11 @@ func InstanceResourceSchema(ctx context.Context) schema.Schema {
Description: "The ID of the instance.", Description: "The ID of the instance.",
MarkdownDescription: "The ID of the instance.", MarkdownDescription: "The ID of the instance.",
}, },
"is_deletable": schema.BoolAttribute{
Computed: true,
Description: "Whether the instance can be deleted or not.",
MarkdownDescription: "Whether the instance can be deleted or not.",
},
"name": schema.StringAttribute{ "name": schema.StringAttribute{
Required: true, Required: true,
Description: "The name of the instance.", Description: "The name of the instance.",
@ -99,6 +109,12 @@ func InstanceResourceSchema(ctx context.Context) schema.Schema {
Description: "List of IPV4 cidr.", Description: "List of IPV4 cidr.",
MarkdownDescription: "List of IPV4 cidr.", MarkdownDescription: "List of IPV4 cidr.",
}, },
"instance_address": schema.StringAttribute{
Computed: true,
},
"router_address": schema.StringAttribute{
Computed: true,
},
}, },
CustomType: NetworkType{ CustomType: NetworkType{
ObjectType: types.ObjectType{ ObjectType: types.ObjectType{
@ -126,11 +142,19 @@ func InstanceResourceSchema(ctx context.Context) schema.Schema {
), ),
}, },
}, },
"replicas": schema.Int64Attribute{
Computed: true,
Description: "How many replicas the instance should have.",
MarkdownDescription: "How many replicas the instance should have.",
},
"retention_days": schema.Int64Attribute{ "retention_days": schema.Int64Attribute{
Required: true, Required: true,
Description: "The days for how long the backup files should be stored before cleaned up. 30 to 365", Description: "The days for how long the backup files should be stored before cleaned up. 30 to 365",
MarkdownDescription: "The days for how long the backup files should be stored before cleaned up. 30 to 365", MarkdownDescription: "The days for how long the backup files should be stored before cleaned up. 30 to 365",
}, },
"status": schema.StringAttribute{
Computed: true,
},
"storage": schema.SingleNestedAttribute{ "storage": schema.SingleNestedAttribute{
Attributes: map[string]schema.Attribute{ Attributes: map[string]schema.Attribute{
"class": schema.StringAttribute{ "class": schema.StringAttribute{
@ -169,15 +193,19 @@ func InstanceResourceSchema(ctx context.Context) schema.Schema {
type InstanceModel struct { type InstanceModel struct {
BackupSchedule types.String `tfsdk:"backup_schedule"` BackupSchedule types.String `tfsdk:"backup_schedule"`
Edition types.String `tfsdk:"edition"`
Encryption EncryptionValue `tfsdk:"encryption"` Encryption EncryptionValue `tfsdk:"encryption"`
FlavorId types.String `tfsdk:"flavor_id"` FlavorId types.String `tfsdk:"flavor_id"`
Id types.String `tfsdk:"id"` Id types.String `tfsdk:"id"`
InstanceId types.String `tfsdk:"instance_id"` InstanceId types.String `tfsdk:"instance_id"`
IsDeletable types.Bool `tfsdk:"is_deletable"`
Name types.String `tfsdk:"name"` Name types.String `tfsdk:"name"`
Network NetworkValue `tfsdk:"network"` Network NetworkValue `tfsdk:"network"`
ProjectId types.String `tfsdk:"project_id"` ProjectId types.String `tfsdk:"project_id"`
Region types.String `tfsdk:"region"` Region types.String `tfsdk:"region"`
Replicas types.Int64 `tfsdk:"replicas"`
RetentionDays types.Int64 `tfsdk:"retention_days"` RetentionDays types.Int64 `tfsdk:"retention_days"`
Status types.String `tfsdk:"status"`
Storage StorageValue `tfsdk:"storage"` Storage StorageValue `tfsdk:"storage"`
Version types.String `tfsdk:"version"` Version types.String `tfsdk:"version"`
} }
@ -732,14 +760,52 @@ func (t NetworkType) ValueFromObject(ctx context.Context, in basetypes.ObjectVal
fmt.Sprintf(`acl expected to be basetypes.ListValue, was: %T`, aclAttribute)) fmt.Sprintf(`acl expected to be basetypes.ListValue, was: %T`, aclAttribute))
} }
instanceAddressAttribute, ok := attributes["instance_address"]
if !ok {
diags.AddError(
"Attribute Missing",
`instance_address is missing from object`)
return nil, diags
}
instanceAddressVal, ok := instanceAddressAttribute.(basetypes.StringValue)
if !ok {
diags.AddError(
"Attribute Wrong Type",
fmt.Sprintf(`instance_address expected to be basetypes.StringValue, was: %T`, instanceAddressAttribute))
}
routerAddressAttribute, ok := attributes["router_address"]
if !ok {
diags.AddError(
"Attribute Missing",
`router_address is missing from object`)
return nil, diags
}
routerAddressVal, ok := routerAddressAttribute.(basetypes.StringValue)
if !ok {
diags.AddError(
"Attribute Wrong Type",
fmt.Sprintf(`router_address expected to be basetypes.StringValue, was: %T`, routerAddressAttribute))
}
if diags.HasError() { if diags.HasError() {
return nil, diags return nil, diags
} }
return NetworkValue{ return NetworkValue{
AccessScope: accessScopeVal, AccessScope: accessScopeVal,
Acl: aclVal, Acl: aclVal,
state: attr.ValueStateKnown, InstanceAddress: instanceAddressVal,
RouterAddress: routerAddressVal,
state: attr.ValueStateKnown,
}, diags }, diags
} }
@ -842,14 +908,52 @@ func NewNetworkValue(attributeTypes map[string]attr.Type, attributes map[string]
fmt.Sprintf(`acl expected to be basetypes.ListValue, was: %T`, aclAttribute)) fmt.Sprintf(`acl expected to be basetypes.ListValue, was: %T`, aclAttribute))
} }
instanceAddressAttribute, ok := attributes["instance_address"]
if !ok {
diags.AddError(
"Attribute Missing",
`instance_address is missing from object`)
return NewNetworkValueUnknown(), diags
}
instanceAddressVal, ok := instanceAddressAttribute.(basetypes.StringValue)
if !ok {
diags.AddError(
"Attribute Wrong Type",
fmt.Sprintf(`instance_address expected to be basetypes.StringValue, was: %T`, instanceAddressAttribute))
}
routerAddressAttribute, ok := attributes["router_address"]
if !ok {
diags.AddError(
"Attribute Missing",
`router_address is missing from object`)
return NewNetworkValueUnknown(), diags
}
routerAddressVal, ok := routerAddressAttribute.(basetypes.StringValue)
if !ok {
diags.AddError(
"Attribute Wrong Type",
fmt.Sprintf(`router_address expected to be basetypes.StringValue, was: %T`, routerAddressAttribute))
}
if diags.HasError() { if diags.HasError() {
return NewNetworkValueUnknown(), diags return NewNetworkValueUnknown(), diags
} }
return NetworkValue{ return NetworkValue{
AccessScope: accessScopeVal, AccessScope: accessScopeVal,
Acl: aclVal, Acl: aclVal,
state: attr.ValueStateKnown, InstanceAddress: instanceAddressVal,
RouterAddress: routerAddressVal,
state: attr.ValueStateKnown,
}, diags }, diags
} }
@ -921,13 +1025,15 @@ func (t NetworkType) ValueType(ctx context.Context) attr.Value {
var _ basetypes.ObjectValuable = NetworkValue{} var _ basetypes.ObjectValuable = NetworkValue{}
type NetworkValue struct { type NetworkValue struct {
AccessScope basetypes.StringValue `tfsdk:"access_scope"` AccessScope basetypes.StringValue `tfsdk:"access_scope"`
Acl basetypes.ListValue `tfsdk:"acl"` Acl basetypes.ListValue `tfsdk:"acl"`
state attr.ValueState InstanceAddress basetypes.StringValue `tfsdk:"instance_address"`
RouterAddress basetypes.StringValue `tfsdk:"router_address"`
state attr.ValueState
} }
func (v NetworkValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { func (v NetworkValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) {
attrTypes := make(map[string]tftypes.Type, 2) attrTypes := make(map[string]tftypes.Type, 4)
var val tftypes.Value var val tftypes.Value
var err error var err error
@ -936,12 +1042,14 @@ func (v NetworkValue) ToTerraformValue(ctx context.Context) (tftypes.Value, erro
attrTypes["acl"] = basetypes.ListType{ attrTypes["acl"] = basetypes.ListType{
ElemType: types.StringType, ElemType: types.StringType,
}.TerraformType(ctx) }.TerraformType(ctx)
attrTypes["instance_address"] = basetypes.StringType{}.TerraformType(ctx)
attrTypes["router_address"] = basetypes.StringType{}.TerraformType(ctx)
objectType := tftypes.Object{AttributeTypes: attrTypes} objectType := tftypes.Object{AttributeTypes: attrTypes}
switch v.state { switch v.state {
case attr.ValueStateKnown: case attr.ValueStateKnown:
vals := make(map[string]tftypes.Value, 2) vals := make(map[string]tftypes.Value, 4)
val, err = v.AccessScope.ToTerraformValue(ctx) val, err = v.AccessScope.ToTerraformValue(ctx)
@ -959,6 +1067,22 @@ func (v NetworkValue) ToTerraformValue(ctx context.Context) (tftypes.Value, erro
vals["acl"] = val vals["acl"] = val
val, err = v.InstanceAddress.ToTerraformValue(ctx)
if err != nil {
return tftypes.NewValue(objectType, tftypes.UnknownValue), err
}
vals["instance_address"] = val
val, err = v.RouterAddress.ToTerraformValue(ctx)
if err != nil {
return tftypes.NewValue(objectType, tftypes.UnknownValue), err
}
vals["router_address"] = val
if err := tftypes.ValidateValue(objectType, vals); err != nil { if err := tftypes.ValidateValue(objectType, vals); err != nil {
return tftypes.NewValue(objectType, tftypes.UnknownValue), err return tftypes.NewValue(objectType, tftypes.UnknownValue), err
} }
@ -1006,6 +1130,8 @@ func (v NetworkValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue,
"acl": basetypes.ListType{ "acl": basetypes.ListType{
ElemType: types.StringType, ElemType: types.StringType,
}, },
"instance_address": basetypes.StringType{},
"router_address": basetypes.StringType{},
}), diags }), diags
} }
@ -1014,6 +1140,8 @@ func (v NetworkValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue,
"acl": basetypes.ListType{ "acl": basetypes.ListType{
ElemType: types.StringType, ElemType: types.StringType,
}, },
"instance_address": basetypes.StringType{},
"router_address": basetypes.StringType{},
} }
if v.IsNull() { if v.IsNull() {
@ -1027,8 +1155,10 @@ func (v NetworkValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue,
objVal, diags := types.ObjectValue( objVal, diags := types.ObjectValue(
attributeTypes, attributeTypes,
map[string]attr.Value{ map[string]attr.Value{
"access_scope": v.AccessScope, "access_scope": v.AccessScope,
"acl": aclVal, "acl": aclVal,
"instance_address": v.InstanceAddress,
"router_address": v.RouterAddress,
}) })
return objVal, diags return objVal, diags
@ -1057,6 +1187,14 @@ func (v NetworkValue) Equal(o attr.Value) bool {
return false return false
} }
if !v.InstanceAddress.Equal(other.InstanceAddress) {
return false
}
if !v.RouterAddress.Equal(other.RouterAddress) {
return false
}
return true return true
} }
@ -1074,6 +1212,8 @@ func (v NetworkValue) AttributeTypes(ctx context.Context) map[string]attr.Type {
"acl": basetypes.ListType{ "acl": basetypes.ListType{
ElemType: types.StringType, ElemType: types.StringType,
}, },
"instance_address": basetypes.StringType{},
"router_address": basetypes.StringType{},
} }
} }