Logging and error handling improvements, bug fixes (#21)

- Unified logs and diagnostics:
  - Logging and adding to diagnostics is done by the highest level function (Create/Read/Update/Delete/Import) using `LogAndAddError`
  - Lower-level routines' signature changed to return error instead of writing to diagnostics
  - Standardize summary and details across services
  - Removed manual adding of relevant variables to details (they're in the context, TF adds them to logs)
- Changed validators to be closer to official implementation
- Fix logging the wrong output after waiting on async operations
- Fix Argus checking wrong diagnostics
- Fix Resource Manager not updating state after project update
- Fix unnecessary pointer in LogAndAddError
This commit is contained in:
Henrique Santos 2023-09-21 14:52:52 +01:00 committed by GitHub
parent 29b8c91999
commit 4e8514df00
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
51 changed files with 1389 additions and 1092 deletions

View file

@ -45,7 +45,7 @@ func (r *clusterDataSource) Configure(ctx context.Context, req datasource.Config
providerData, ok := req.ProviderData.(core.ProviderData)
if !ok {
resp.Diagnostics.AddError("Unexpected Data Source Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. Please report this issue to the provider developers.", req.ProviderData))
core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Expected configure type stackit.ProviderData, got %T", req.ProviderData))
return
}
@ -64,12 +64,12 @@ func (r *clusterDataSource) Configure(ctx context.Context, req datasource.Config
}
if err != nil {
resp.Diagnostics.AddError("Could not Configure API Client", err.Error())
core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Configuring client: %v", err))
return
}
tflog.Info(ctx, "SKE client configured")
r.client = apiClient
tflog.Info(ctx, "SKE client configured")
}
func (r *clusterDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
resp.Schema = schema.Schema{
@ -292,19 +292,22 @@ func (r *clusterDataSource) Read(ctx context.Context, req datasource.ReadRequest
ctx = tflog.SetField(ctx, "name", name)
clusterResp, err := r.client.GetCluster(ctx, projectId, name).Execute()
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, fmt.Sprintf("Unable to read cluster, project_id = %s, name = %s", projectId, name), err.Error())
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading cluster", fmt.Sprintf("Calling API: %v", err))
return
}
err = mapFields(ctx, clusterResp, &state)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Mapping fields", err.Error())
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading cluster", fmt.Sprintf("Processing API payload: %v", err))
return
}
r.getCredential(ctx, &diags, &state)
// Set refreshed state
diags = resp.State.Set(ctx, state)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
return
}
tflog.Info(ctx, "SKE cluster read")
}

View file

@ -148,7 +148,7 @@ func (r *clusterResource) Configure(ctx context.Context, req resource.ConfigureR
providerData, ok := req.ProviderData.(core.ProviderData)
if !ok {
resp.Diagnostics.AddError("Unexpected Resource Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. Please report this issue to the provider developers.", req.ProviderData))
core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Expected configure type stackit.ProviderData, got %T", req.ProviderData))
return
}
@ -167,12 +167,12 @@ func (r *clusterResource) Configure(ctx context.Context, req resource.ConfigureR
}
if err != nil {
resp.Diagnostics.AddError("Could not Configure API Client", err.Error())
core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Configuring client: %v", err))
return
}
tflog.Info(ctx, "SKE cluster client configured")
r.client = apiClient
tflog.Info(ctx, "SKE cluster client configured")
}
// Schema defines the schema for the resource.
@ -213,7 +213,7 @@ func (r *clusterResource) Schema(_ context.Context, _ resource.SchemaRequest, re
Description: "Kubernetes version. Must only contain major and minor version (e.g. 1.22)",
Required: true,
Validators: []validator.String{
validate.SemanticMinorVersion(),
validate.MinorVersionNumber(),
},
},
"kubernetes_version_used": schema.StringAttribute{
@ -502,8 +502,9 @@ func (r *clusterResource) Create(ctx context.Context, req resource.CreateRequest
ctx = tflog.SetField(ctx, "project_id", projectId)
ctx = tflog.SetField(ctx, "name", clusterName)
availableVersions := r.loadAvaiableVersions(ctx, &resp.Diagnostics)
if resp.Diagnostics.HasError() {
availableVersions, err := r.loadAvaiableVersions(ctx)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating cluster", fmt.Sprintf("Loading available Kubernetes versions: %v", err))
return
}
@ -512,34 +513,27 @@ func (r *clusterResource) Create(ctx context.Context, req resource.CreateRequest
return
}
// handle credential
r.getCredential(ctx, &resp.Diagnostics, &model)
if resp.Diagnostics.HasError() {
return
}
diags = resp.State.Set(ctx, model)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
return
}
tflog.Info(ctx, "SKE cluster created")
}
func (r *clusterResource) loadAvaiableVersions(ctx context.Context, diags *diag.Diagnostics) []ske.KubernetesVersion {
func (r *clusterResource) loadAvaiableVersions(ctx context.Context) ([]ske.KubernetesVersion, error) {
c := r.client
res, err := c.GetOptions(ctx).Execute()
if err != nil {
diags.AddError("Failed loading cluster available versions: getting cluster options", err.Error())
return nil
return nil, fmt.Errorf("calling API: %w", err)
}
if res.KubernetesVersions == nil {
diags.AddError("Failed loading cluster available versions: nil kubernetesVersions", err.Error())
return nil
return nil, fmt.Errorf("API response has nil kubernetesVersions")
}
return *res.KubernetesVersions
return *res.KubernetesVersions, nil
}
func (r *clusterResource) createOrUpdateCluster(ctx context.Context, diags *diag.Diagnostics, model *Cluster, availableVersions []ske.KubernetesVersion) {
@ -548,23 +542,22 @@ func (r *clusterResource) createOrUpdateCluster(ctx context.Context, diags *diag
name := model.Name.ValueString()
kubernetes, hasDeprecatedVersion, err := toKubernetesPayload(model, availableVersions)
if err != nil {
diags.AddError("Failed to create cluster config payload", err.Error())
core.LogAndAddError(ctx, diags, "Error creating/updating cluster", fmt.Sprintf("Creating cluster config API payload: %v", err))
return
}
if hasDeprecatedVersion {
warningMessage := fmt.Sprintf("Using deprecated kubernetes version %s", *kubernetes.Version)
diags.AddWarning(warningMessage, "")
diags.AddWarning("Deprecated Kubernetes version", fmt.Sprintf("Version %s of Kubernetes is deprecated, please update it", *kubernetes.Version))
}
nodePools := toNodepoolsPayload(ctx, model)
maintenance, err := toMaintenancePayload(ctx, model)
if err != nil {
diags.AddError("Failed to create maintenance payload", err.Error())
core.LogAndAddError(ctx, diags, "Error creating/updating cluster", fmt.Sprintf("Creating maintenance API payload: %v", err))
return
}
hibernations := toHibernationsPayload(model)
extensions, err := toExtensionsPayload(ctx, model)
if err != nil {
diags.AddError("Failed to create extension payload", err.Error())
core.LogAndAddError(ctx, diags, "Error creating/updating cluster", fmt.Sprintf("Creating extension API payload: %v", err))
return
}
@ -577,35 +570,42 @@ func (r *clusterResource) createOrUpdateCluster(ctx context.Context, diags *diag
}
_, err = r.client.CreateOrUpdateCluster(ctx, projectId, name).CreateOrUpdateClusterPayload(payload).Execute()
if err != nil {
diags.AddError("failed during SKE create/update", err.Error())
core.LogAndAddError(ctx, diags, "Error creating/updating cluster", fmt.Sprintf("Calling API: %v", err))
return
}
wr, err := ske.CreateOrUpdateClusterWaitHandler(ctx, r.client, projectId, name).SetTimeout(30 * time.Minute).WaitWithContext(ctx)
if err != nil {
diags.AddError("Error creating cluster", fmt.Sprintf("Cluster creation waiting: %v", err))
core.LogAndAddError(ctx, diags, "Error creating/updating cluster", fmt.Sprintf("Cluster creation waiting: %v", err))
return
}
got, ok := wr.(*ske.ClusterResponse)
if !ok {
diags.AddError("Error creating cluster", fmt.Sprintf("Wait result conversion, got %+v", got))
core.LogAndAddError(ctx, diags, "Error creating/updating cluster", fmt.Sprintf("Wait result conversion, got %+v", wr))
return
}
err = mapFields(ctx, got, model)
if err != nil {
diags.AddError("Mapping cluster fields", err.Error())
core.LogAndAddError(ctx, diags, "Error creating/updating cluster", fmt.Sprintf("Processing API payload: %v", err))
return
}
// Handle credential
err = r.getCredential(ctx, model)
if err != nil {
core.LogAndAddError(ctx, diags, "Error creating/updating cluster", fmt.Sprintf("Getting credential: %v", err))
return
}
}
func (r *clusterResource) getCredential(ctx context.Context, diags *diag.Diagnostics, model *Cluster) {
func (r *clusterResource) getCredential(ctx context.Context, model *Cluster) error {
c := r.client
res, err := c.GetCredentials(ctx, model.ProjectId.ValueString(), model.Name.ValueString()).Execute()
if err != nil {
diags.AddError("failed fetching cluster credentials", err.Error())
return
return fmt.Errorf("fetching cluster credentials: %w", err)
}
model.KubeConfig = types.StringPointerValue(res.Kubeconfig)
return nil
}
func toNodepoolsPayload(ctx context.Context, m *Cluster) []ske.Nodepool {
@ -1079,17 +1079,20 @@ func (r *clusterResource) Read(ctx context.Context, req resource.ReadRequest, re
clResp, err := r.client.GetCluster(ctx, projectId, name).Execute()
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, fmt.Sprintf("Unable to read cluster, project_id = %s, name = %s", projectId, name), err.Error())
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading cluster", fmt.Sprintf("Calling API: %v", err))
return
}
err = mapFields(ctx, clResp, &state)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields", err.Error())
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading cluster", fmt.Sprintf("Processing API payload: %v", err))
return
}
diags = resp.State.Set(ctx, state)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
return
}
tflog.Info(ctx, "SKE cluster read")
}
@ -1105,8 +1108,9 @@ func (r *clusterResource) Update(ctx context.Context, req resource.UpdateRequest
ctx = tflog.SetField(ctx, "project_id", projectId)
ctx = tflog.SetField(ctx, "name", clName)
availableVersions := r.loadAvaiableVersions(ctx, &resp.Diagnostics)
if resp.Diagnostics.HasError() {
availableVersions, err := r.loadAvaiableVersions(ctx)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating cluster", fmt.Sprintf("Loading available Kubernetes versions: %v", err))
return
}
@ -1115,14 +1119,11 @@ func (r *clusterResource) Update(ctx context.Context, req resource.UpdateRequest
return
}
// handle credential
r.getCredential(ctx, &resp.Diagnostics, &model)
diags = resp.State.Set(ctx, model)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
return
}
diags = resp.State.Set(ctx, model)
resp.Diagnostics.Append(diags...)
tflog.Info(ctx, "SKE cluster updated")
}
@ -1140,7 +1141,7 @@ func (r *clusterResource) Delete(ctx context.Context, req resource.DeleteRequest
c := r.client
_, err := c.DeleteCluster(ctx, projectId, name).Execute()
if err != nil {
resp.Diagnostics.AddError("failed deleting cluster", err.Error())
core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting cluster", fmt.Sprintf("Calling API: %v", err))
return
}
_, err = ske.DeleteClusterWaitHandler(ctx, r.client, projectId, name).SetTimeout(15 * time.Minute).WaitWithContext(ctx)
@ -1157,8 +1158,8 @@ func (r *clusterResource) ImportState(ctx context.Context, req resource.ImportSt
idParts := strings.Split(req.ID, core.Separator)
if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" {
resp.Diagnostics.AddError(
"Unexpected Import Identifier",
core.LogAndAddError(ctx, &resp.Diagnostics,
"Error importing cluster",
fmt.Sprintf("Expected import identifier with format: [project_id],[name] Got: %q", req.ID),
)
return

View file

@ -44,7 +44,7 @@ func (r *projectDataSource) Configure(ctx context.Context, req datasource.Config
providerData, ok := req.ProviderData.(core.ProviderData)
if !ok {
resp.Diagnostics.AddError("Unexpected Data Source Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. Please report this issue to the provider developers.", req.ProviderData))
core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Expected configure type stackit.ProviderData, got %T", req.ProviderData))
return
}
@ -63,12 +63,12 @@ func (r *projectDataSource) Configure(ctx context.Context, req datasource.Config
}
if err != nil {
resp.Diagnostics.AddError("Could not Configure API Client", err.Error())
core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Configuring client: %v", err))
return
}
tflog.Info(ctx, "SKE client configured")
r.client = apiClient
tflog.Info(ctx, "SKE client configured")
}
// Schema defines the schema for the resource.
@ -93,23 +93,26 @@ func (r *projectDataSource) Schema(_ context.Context, _ datasource.SchemaRequest
// Read refreshes the Terraform state with the latest data.
func (r *projectDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform
var state Model
diags := req.Config.Get(ctx, &state)
var model Model
diags := req.Config.Get(ctx, &model)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
return
}
projectId := state.ProjectId.ValueString()
projectId := model.ProjectId.ValueString()
ctx = tflog.SetField(ctx, "project_id", projectId)
_, err := r.client.GetProject(ctx, projectId).Execute()
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Unable to read project", err.Error())
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading project", fmt.Sprintf("Calling API: %v", err))
return
}
state.Id = types.StringValue(projectId)
state.ProjectId = types.StringValue(projectId)
diags = resp.State.Set(ctx, state)
model.Id = types.StringValue(projectId)
model.ProjectId = types.StringValue(projectId)
diags = resp.State.Set(ctx, model)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
return
}
tflog.Info(ctx, "SKE project read")
}

View file

@ -56,7 +56,7 @@ func (r *projectResource) Configure(ctx context.Context, req resource.ConfigureR
providerData, ok := req.ProviderData.(core.ProviderData)
if !ok {
resp.Diagnostics.AddError("Unexpected Resource Configure Type", fmt.Sprintf("Expected stackit.ProviderData, got %T. Please report this issue to the provider developers.", req.ProviderData))
core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Expected configure type stackit.ProviderData, got %T", req.ProviderData))
return
}
@ -75,12 +75,12 @@ func (r *projectResource) Configure(ctx context.Context, req resource.ConfigureR
}
if err != nil {
resp.Diagnostics.AddError("Could not Configure API Client", err.Error())
core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Configuring client: %v", err))
return
}
tflog.Info(ctx, "SKE project client configured")
r.client = apiClient
tflog.Info(ctx, "SKE project client configured")
}
// Schema returns the Terraform schema structure
@ -119,19 +119,19 @@ func (r *projectResource) Create(ctx context.Context, req resource.CreateRequest
projectId := model.ProjectId.ValueString()
_, err := r.client.CreateProject(ctx, projectId).Execute()
if err != nil {
resp.Diagnostics.AddError("failed during SKE project creation", err.Error())
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating project", fmt.Sprintf("Calling API: %v", err))
return
}
model.Id = types.StringValue(projectId)
wr, err := ske.CreateProjectWaitHandler(ctx, r.client, projectId).SetTimeout(5 * time.Minute).WaitWithContext(ctx)
if err != nil {
resp.Diagnostics.AddError("Error creating cluster", fmt.Sprintf("Project creation waiting: %v", err))
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating cluster", fmt.Sprintf("Project creation waiting: %v", err))
return
}
got, ok := wr.(*ske.ProjectResponse)
_, ok := wr.(*ske.ProjectResponse)
if !ok {
resp.Diagnostics.AddError("Error creating cluster", fmt.Sprintf("Wait result conversion, got %+v", got))
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating cluster", fmt.Sprintf("Wait result conversion, got %+v", wr))
return
}
diags := resp.State.Set(ctx, model)
@ -139,7 +139,7 @@ func (r *projectResource) Create(ctx context.Context, req resource.CreateRequest
if resp.Diagnostics.HasError() {
return
}
tflog.Info(ctx, "SKE project created or updated")
tflog.Info(ctx, "SKE project created")
}
// Read refreshes the Terraform state with the latest data.
@ -151,23 +151,25 @@ func (r *projectResource) Read(ctx context.Context, req resource.ReadRequest, re
return
}
projectId := model.ProjectId.ValueString()
// read
_, err := r.client.GetProject(ctx, projectId).Execute()
if err != nil {
resp.Diagnostics.AddError("failed during SKE project read", err.Error())
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading project", fmt.Sprintf("Calling API: %v", err))
return
}
model.Id = types.StringValue(projectId)
model.ProjectId = types.StringValue(projectId)
diags = resp.State.Set(ctx, model)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
return
}
tflog.Info(ctx, "SKE project read")
}
// Update updates the resource and sets the updated Terraform state on success.
func (r *projectResource) Update(_ context.Context, _ resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform
func (r *projectResource) Update(ctx context.Context, _ resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform
// Update shouldn't be called
resp.Diagnostics.AddError("Error updating ", "project can't be updated")
core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating project", "Project can't be updated")
}
// Delete deletes the resource and removes the Terraform state on success.
@ -183,12 +185,12 @@ func (r *projectResource) Delete(ctx context.Context, req resource.DeleteRequest
c := r.client
_, err := c.DeleteProject(ctx, projectId).Execute()
if err != nil {
resp.Diagnostics.AddError("failed deleting project", err.Error())
core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting credential", fmt.Sprintf("Calling API: %v", err))
return
}
_, err = ske.DeleteProjectWaitHandler(ctx, r.client, projectId).SetTimeout(10 * time.Minute).WaitWithContext(ctx)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting project", fmt.Sprintf("Project deletion waiting: %v", err))
core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting credential", fmt.Sprintf("Project deletion waiting: %v", err))
return
}
tflog.Info(ctx, "SKE project deleted")
@ -199,8 +201,8 @@ func (r *projectResource) Delete(ctx context.Context, req resource.DeleteRequest
func (r *projectResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { // nolint:gocritic // function signature required by Terraform
idParts := strings.Split(req.ID, core.Separator)
if len(idParts) != 1 || idParts[0] == "" {
resp.Diagnostics.AddError(
"Unexpected Import Identifier",
core.LogAndAddError(ctx, &resp.Diagnostics,
"Error importing project",
fmt.Sprintf("Expected import identifier with format: [project_id] Got: %q", req.ID),
)
return