feat(observability): add logs and traces retentions days (#1032)
* feat(observability): add logs and traces retentions days * feat(observability): add inputs to acceptance test * feat(observability): add inputs to example * feat(observability): fix docs * feat(observability): fix ModifyPlan checks after review * feat(observability): fix acceptance test max values * feat(observability): fix lint issues * feat(observability): apply suggestion
This commit is contained in:
parent
e4e2e55e94
commit
10eced46c7
8 changed files with 366 additions and 5 deletions
|
|
@ -42,6 +42,7 @@ data "stackit_observability_instance" "example" {
|
|||
- `jaeger_traces_url` (String)
|
||||
- `jaeger_ui_url` (String)
|
||||
- `logs_push_url` (String) Specifies URL for pushing logs.
|
||||
- `logs_retention_days` (Number) Specifies for how many days the logs are kept. Default is set to `7`.
|
||||
- `logs_url` (String) Specifies Logs URL.
|
||||
- `metrics_push_url` (String) Specifies URL for pushing metrics.
|
||||
- `metrics_retention_days` (Number) Specifies for how many days the raw metrics are kept. Default is set to `90`.
|
||||
|
|
@ -54,6 +55,7 @@ data "stackit_observability_instance" "example" {
|
|||
- `plan_id` (String) The Observability plan ID.
|
||||
- `plan_name` (String) Specifies the Observability plan. E.g. `Observability-Monitoring-Medium-EU01`.
|
||||
- `targets_url` (String) Specifies Targets URL.
|
||||
- `traces_retention_days` (Number) Specifies for how many days the traces are kept. Default is set to `7`.
|
||||
- `zipkin_spans_url` (String)
|
||||
|
||||
<a id="nestedatt--alert_config"></a>
|
||||
|
|
|
|||
|
|
@ -16,8 +16,10 @@ Observability instance resource schema. Must have a `region` specified in the pr
|
|||
resource "stackit_observability_instance" "example" {
|
||||
project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
|
||||
name = "example-instance"
|
||||
plan_name = "Observability-Monitoring-Medium-EU01"
|
||||
plan_name = "Observability-Starter-EU01"
|
||||
acl = ["1.1.1.1/32", "2.2.2.2/32"]
|
||||
logs_retention_days = 30
|
||||
traces_retention_days = 30
|
||||
metrics_retention_days = 90
|
||||
metrics_retention_days_5m_downsampling = 90
|
||||
metrics_retention_days_1h_downsampling = 90
|
||||
|
|
@ -43,10 +45,12 @@ import {
|
|||
|
||||
- `acl` (Set of String) The access control list for this instance. Each entry is an IP address range that is permitted to access, in CIDR notation.
|
||||
- `alert_config` (Attributes) Alert configuration for the instance. (see [below for nested schema](#nestedatt--alert_config))
|
||||
- `logs_retention_days` (Number) Specifies for how many days the logs are kept. Default is set to `7`.
|
||||
- `metrics_retention_days` (Number) Specifies for how many days the raw metrics are kept. Default is set to `90`.
|
||||
- `metrics_retention_days_1h_downsampling` (Number) Specifies for how many days the 1h downsampled metrics are kept. Must be less than the value of the 5m downsampling retention. Default is set to `90`.
|
||||
- `metrics_retention_days_5m_downsampling` (Number) Specifies for how many days the 5m downsampled metrics are kept. Must be less than the value of the general retention. Default is set to `90`.
|
||||
- `parameters` (Map of String) Additional parameters.
|
||||
- `traces_retention_days` (Number) Specifies for how many days the traces are kept. Default is set to `7`.
|
||||
|
||||
### Read-Only
|
||||
|
||||
|
|
|
|||
|
|
@ -1,8 +1,10 @@
|
|||
resource "stackit_observability_instance" "example" {
|
||||
project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
|
||||
name = "example-instance"
|
||||
plan_name = "Observability-Monitoring-Medium-EU01"
|
||||
plan_name = "Observability-Starter-EU01"
|
||||
acl = ["1.1.1.1/32", "2.2.2.2/32"]
|
||||
logs_retention_days = 30
|
||||
traces_retention_days = 30
|
||||
metrics_retention_days = 90
|
||||
metrics_retention_days_5m_downsampling = 90
|
||||
metrics_retention_days_1h_downsampling = 90
|
||||
|
|
|
|||
|
|
@ -132,6 +132,14 @@ func (d *instanceDataSource) Schema(_ context.Context, _ datasource.SchemaReques
|
|||
Computed: true,
|
||||
Sensitive: true,
|
||||
},
|
||||
"traces_retention_days": schema.Int64Attribute{
|
||||
Description: "Specifies for how many days the traces are kept. Default is set to `7`.",
|
||||
Computed: true,
|
||||
},
|
||||
"logs_retention_days": schema.Int64Attribute{
|
||||
Description: "Specifies for how many days the logs are kept. Default is set to `7`.",
|
||||
Computed: true,
|
||||
},
|
||||
"metrics_retention_days": schema.Int64Attribute{
|
||||
Description: "Specifies for how many days the raw metrics are kept. Default is set to `90`.",
|
||||
Computed: true,
|
||||
|
|
@ -454,6 +462,44 @@ func (d *instanceDataSource) Read(ctx context.Context, req datasource.ReadReques
|
|||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
|
||||
// Handle Logs Retentions
|
||||
logsRetentionResp, err := d.client.GetLogsConfigs(ctx, instanceId, projectId).Execute()
|
||||
if err != nil {
|
||||
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Calling API to get logs retention: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
err = mapLogsRetentionField(logsRetentionResp, &model)
|
||||
if err != nil {
|
||||
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Processing API response for the logs retention: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
diags = setLogsRetentions(ctx, &resp.State, &model)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
|
||||
// Handle Traces Retentions
|
||||
tracesRetentionResp, err := d.client.GetTracesConfigs(ctx, instanceId, projectId).Execute()
|
||||
if err != nil {
|
||||
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Calling API to get traces retention: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
err = mapTracesRetentionField(tracesRetentionResp, &model)
|
||||
if err != nil {
|
||||
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Processing API response for the traces retention: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
diags = setTracesRetentions(ctx, &resp.State, &model)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// There are plans where no alert matchers and receivers are present e.g. like Observability-Metrics-Endpoint-100k-EU01
|
||||
|
|
|
|||
|
|
@ -71,6 +71,8 @@ type Model struct {
|
|||
MetricsPushURL types.String `tfsdk:"metrics_push_url"`
|
||||
TargetsURL types.String `tfsdk:"targets_url"`
|
||||
AlertingURL types.String `tfsdk:"alerting_url"`
|
||||
LogsRetentionDays types.Int64 `tfsdk:"logs_retention_days"`
|
||||
TracesRetentionDays types.Int64 `tfsdk:"traces_retention_days"`
|
||||
LogsURL types.String `tfsdk:"logs_url"`
|
||||
LogsPushURL types.String `tfsdk:"logs_push_url"`
|
||||
JaegerTracesURL types.String `tfsdk:"jaeger_traces_url"`
|
||||
|
|
@ -512,6 +514,16 @@ func (r *instanceResource) Schema(_ context.Context, _ resource.SchemaRequest, r
|
|||
stringplanmodifier.UseStateForUnknown(),
|
||||
},
|
||||
},
|
||||
"traces_retention_days": schema.Int64Attribute{
|
||||
Description: "Specifies for how many days the traces are kept. Default is set to `7`.",
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
"logs_retention_days": schema.Int64Attribute{
|
||||
Description: "Specifies for how many days the logs are kept. Default is set to `7`.",
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
"metrics_retention_days": schema.Int64Attribute{
|
||||
Description: "Specifies for how many days the raw metrics are kept. Default is set to `90`.",
|
||||
Optional: true,
|
||||
|
|
@ -855,10 +867,19 @@ func (r *instanceResource) ModifyPlan(ctx context.Context, req resource.ModifyPl
|
|||
|
||||
// Plan does not support log storage and trace storage
|
||||
if plan.GetLogsStorage() == 0 && plan.GetTracesStorage() == 0 {
|
||||
logsRetentionDays := conversion.Int64ValueToPointer(configModel.LogsRetentionDays)
|
||||
tracesRetentionDays := conversion.Int64ValueToPointer(configModel.TracesRetentionDays)
|
||||
metricsRetentionDays := conversion.Int64ValueToPointer(configModel.MetricsRetentionDays)
|
||||
metricsRetentionDays5mDownsampling := conversion.Int64ValueToPointer(configModel.MetricsRetentionDays5mDownsampling)
|
||||
metricsRetentionDays1hDownsampling := conversion.Int64ValueToPointer(configModel.MetricsRetentionDays1hDownsampling)
|
||||
|
||||
// If logs retention days are set, return an error to the user
|
||||
if logsRetentionDays != nil {
|
||||
resp.Diagnostics.AddAttributeError(path.Root("logs_retention_days"), "Error validating plan", fmt.Sprintf("Plan (%s) does not support configuring logs retention days. Remove this from your config or use a different plan.", *plan.Name))
|
||||
}
|
||||
// If traces retention days are set, return an error to the user
|
||||
if tracesRetentionDays != nil {
|
||||
resp.Diagnostics.AddAttributeError(path.Root("traces_retention_days"), "Error validating plan", fmt.Sprintf("Plan (%s) does not support configuring trace retention days. Remove this from your config or use a different plan.", *plan.Name))
|
||||
}
|
||||
// If any of the metrics retention days are set, return an error to the user
|
||||
if metricsRetentionDays != nil || metricsRetentionDays5mDownsampling != nil || metricsRetentionDays1hDownsampling != nil {
|
||||
core.LogAndAddError(ctx, &resp.Diagnostics, "Error validating plan", fmt.Sprintf("Plan (%s) does not support configuring metrics retention days. Remove this from your config or use a different plan.", *plan.Name))
|
||||
|
|
@ -974,6 +995,28 @@ func (r *instanceResource) Create(ctx context.Context, req resource.CreateReques
|
|||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
|
||||
err = r.getLogsRetention(ctx, &model)
|
||||
if err != nil {
|
||||
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("%v", err))
|
||||
}
|
||||
|
||||
diags = setLogsRetentions(ctx, &resp.State, &model)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
|
||||
err = r.getTracesRetention(ctx, &model)
|
||||
if err != nil {
|
||||
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("%v", err))
|
||||
}
|
||||
|
||||
diags = setTracesRetentions(ctx, &resp.State, &model)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
// Set metric retention days to zero
|
||||
diags = setMetricsRetentionsZero(ctx, &resp.State)
|
||||
|
|
@ -981,6 +1024,18 @@ func (r *instanceResource) Create(ctx context.Context, req resource.CreateReques
|
|||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
// Set logs retention days to zero
|
||||
diags = setLogsRetentionsZero(ctx, &resp.State)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
// Set traces retention days to zero
|
||||
diags = setTracesRetentionsZero(ctx, &resp.State)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// There are plans where no alert matchers and receivers are present e.g. like Observability-Metrics-Endpoint-100k-EU01
|
||||
|
|
@ -1088,6 +1143,42 @@ func (r *instanceResource) Read(ctx context.Context, req resource.ReadRequest, r
|
|||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
|
||||
logsRetentionResp, err := r.client.GetLogsConfigs(ctx, instanceId, projectId).Execute()
|
||||
if err != nil {
|
||||
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Calling API to get logs retention: %v", err))
|
||||
return
|
||||
}
|
||||
// Map response body to schema
|
||||
err = mapLogsRetentionField(logsRetentionResp, &model)
|
||||
if err != nil {
|
||||
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Processing API response for the logs retention: %v", err))
|
||||
return
|
||||
}
|
||||
// Set state to fully populated data
|
||||
diags = setLogsRetentions(ctx, &resp.State, &model)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
|
||||
tracesRetentionResp, err := r.client.GetTracesConfigs(ctx, instanceId, projectId).Execute()
|
||||
if err != nil {
|
||||
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Calling API to get logs retention: %v", err))
|
||||
return
|
||||
}
|
||||
// Map response body to schema
|
||||
err = mapTracesRetentionField(tracesRetentionResp, &model)
|
||||
if err != nil {
|
||||
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Processing API response for the logs retention: %v", err))
|
||||
return
|
||||
}
|
||||
// Set state to fully populated data
|
||||
diags = setTracesRetentions(ctx, &resp.State, &model)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// There are plans where no alert matchers and receivers are present e.g. like Observability-Metrics-Endpoint-100k-EU01
|
||||
|
|
@ -1239,6 +1330,28 @@ func (r *instanceResource) Update(ctx context.Context, req resource.UpdateReques
|
|||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
|
||||
err = r.getLogsRetention(ctx, &model)
|
||||
if err != nil {
|
||||
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("%v", err))
|
||||
}
|
||||
|
||||
diags = setLogsRetentions(ctx, &resp.State, &model)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
|
||||
err = r.getTracesRetention(ctx, &model)
|
||||
if err != nil {
|
||||
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("%v", err))
|
||||
}
|
||||
|
||||
diags = setTracesRetentions(ctx, &resp.State, &model)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
// Set metric retention days to zero
|
||||
diags = setMetricsRetentionsZero(ctx, &resp.State)
|
||||
|
|
@ -1246,6 +1359,18 @@ func (r *instanceResource) Update(ctx context.Context, req resource.UpdateReques
|
|||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
|
||||
diags = setLogsRetentionsZero(ctx, &resp.State)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
|
||||
diags = setTracesRetentionsZero(ctx, &resp.State)
|
||||
resp.Diagnostics.Append(diags...)
|
||||
if resp.Diagnostics.HasError() {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// There are plans where no alert matchers and receivers are present e.g. like Observability-Metrics-Endpoint-100k-EU01
|
||||
|
|
@ -1396,6 +1521,56 @@ func mapACLField(aclList *observability.ListACLResponse, model *Model) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func mapLogsRetentionField(r *observability.LogsConfigResponse, model *Model) error {
|
||||
if r == nil {
|
||||
return fmt.Errorf("response input is nil")
|
||||
}
|
||||
if model == nil {
|
||||
return fmt.Errorf("model input is nil")
|
||||
}
|
||||
|
||||
if r.Config == nil {
|
||||
return fmt.Errorf("logs retention config is nil")
|
||||
}
|
||||
|
||||
if r.Config.Retention == nil {
|
||||
return fmt.Errorf("logs retention days is nil")
|
||||
}
|
||||
|
||||
stripedLogsRetentionHours := strings.TrimSuffix(*r.Config.Retention, "h")
|
||||
logsRetentionHours, err := strconv.ParseInt(stripedLogsRetentionHours, 10, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing logs retention hours: %w", err)
|
||||
}
|
||||
model.LogsRetentionDays = types.Int64Value(logsRetentionHours / 24)
|
||||
return nil
|
||||
}
|
||||
|
||||
func mapTracesRetentionField(r *observability.TracesConfigResponse, model *Model) error {
|
||||
if r == nil {
|
||||
return fmt.Errorf("response input is nil")
|
||||
}
|
||||
if model == nil {
|
||||
return fmt.Errorf("model input is nil")
|
||||
}
|
||||
|
||||
if r.Config == nil {
|
||||
return fmt.Errorf("traces retention config is nil")
|
||||
}
|
||||
|
||||
if r.Config.Retention == nil {
|
||||
return fmt.Errorf("traces retention days is nil")
|
||||
}
|
||||
|
||||
stripedTracesRetentionHours := strings.TrimSuffix(*r.Config.Retention, "h")
|
||||
tracesRetentionHours, err := strconv.ParseInt(stripedTracesRetentionHours, 10, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing traces retention hours: %w", err)
|
||||
}
|
||||
model.TracesRetentionDays = types.Int64Value(tracesRetentionHours / 24)
|
||||
return nil
|
||||
}
|
||||
|
||||
func mapMetricsRetentionField(r *observability.GetMetricsStorageRetentionResponse, model *Model) error {
|
||||
if r == nil {
|
||||
return fmt.Errorf("response input is nil")
|
||||
|
|
@ -2266,6 +2441,74 @@ func (r *instanceResource) getAlertConfigs(ctx context.Context, alertConfig *ale
|
|||
return nil
|
||||
}
|
||||
|
||||
func (r *instanceResource) getTracesRetention(ctx context.Context, model *Model) error {
|
||||
tracesRetentionDays := conversion.Int64ValueToPointer(model.TracesRetentionDays)
|
||||
projectId := model.ProjectId.ValueString()
|
||||
instanceId := model.InstanceId.ValueString()
|
||||
|
||||
if tracesRetentionDays != nil {
|
||||
tracesResp, err := r.client.GetTracesConfigs(ctx, instanceId, projectId).Execute()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Getting traces retention policy: %w", err)
|
||||
}
|
||||
if tracesResp == nil {
|
||||
return fmt.Errorf("nil response")
|
||||
}
|
||||
|
||||
retentionDays := fmt.Sprintf("%dh", *tracesRetentionDays*24)
|
||||
_, err = r.client.UpdateTracesConfigs(ctx, instanceId, projectId).UpdateTracesConfigsPayload(observability.UpdateTracesConfigsPayload{Retention: &retentionDays}).Execute()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Setting traces retention policy: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
tracesResp, err := r.client.GetTracesConfigsExecute(ctx, instanceId, projectId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Getting traces retention policy: %w", err)
|
||||
}
|
||||
|
||||
err = mapTracesRetentionField(tracesResp, model)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Processing API response for the traces retention %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *instanceResource) getLogsRetention(ctx context.Context, model *Model) error {
|
||||
logsRetentionDays := conversion.Int64ValueToPointer(model.LogsRetentionDays)
|
||||
projectId := model.ProjectId.ValueString()
|
||||
instanceId := model.InstanceId.ValueString()
|
||||
|
||||
if logsRetentionDays != nil {
|
||||
logsResp, err := r.client.GetLogsConfigs(ctx, instanceId, projectId).Execute()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Getting logs retention policy: %w", err)
|
||||
}
|
||||
if logsResp == nil {
|
||||
return fmt.Errorf("nil response")
|
||||
}
|
||||
|
||||
retentionDays := fmt.Sprintf("%dh", *logsRetentionDays*24)
|
||||
_, err = r.client.UpdateLogsConfigs(ctx, instanceId, projectId).UpdateLogsConfigsPayload(observability.UpdateLogsConfigsPayload{Retention: &retentionDays}).Execute()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Setting logs retention policy: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
logsResp, err := r.client.GetLogsConfigsExecute(ctx, instanceId, projectId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Getting logs retention policy: %w", err)
|
||||
}
|
||||
|
||||
err = mapLogsRetentionField(logsResp, model)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Processing API response for the logs retention %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *instanceResource) getMetricsRetention(ctx context.Context, model *Model) error {
|
||||
metricsRetentionDays := conversion.Int64ValueToPointer(model.MetricsRetentionDays)
|
||||
metricsRetentionDays5mDownsampling := conversion.Int64ValueToPointer(model.MetricsRetentionDays5mDownsampling)
|
||||
|
|
@ -2325,6 +2568,26 @@ func setMetricsRetentions(ctx context.Context, state *tfsdk.State, model *Model)
|
|||
return diags
|
||||
}
|
||||
|
||||
func setTracesRetentionsZero(ctx context.Context, state *tfsdk.State) (diags diag.Diagnostics) {
|
||||
diags = append(diags, state.SetAttribute(ctx, path.Root("traces_retention_days"), 0)...)
|
||||
return diags
|
||||
}
|
||||
|
||||
func setTracesRetentions(ctx context.Context, state *tfsdk.State, model *Model) (diags diag.Diagnostics) {
|
||||
diags = append(diags, state.SetAttribute(ctx, path.Root("traces_retention_days"), model.TracesRetentionDays)...)
|
||||
return diags
|
||||
}
|
||||
|
||||
func setLogsRetentionsZero(ctx context.Context, state *tfsdk.State) (diags diag.Diagnostics) {
|
||||
diags = append(diags, state.SetAttribute(ctx, path.Root("logs_retention_days"), 0)...)
|
||||
return diags
|
||||
}
|
||||
|
||||
func setLogsRetentions(ctx context.Context, state *tfsdk.State, model *Model) (diags diag.Diagnostics) {
|
||||
diags = append(diags, state.SetAttribute(ctx, path.Root("logs_retention_days"), model.LogsRetentionDays)...)
|
||||
return diags
|
||||
}
|
||||
|
||||
// setAlertConfig writes the model's alert configuration to the alert_config
// attribute in the Terraform state.
func setAlertConfig(ctx context.Context, state *tfsdk.State, model *Model) diag.Diagnostics {
	return state.SetAttribute(ctx, path.Root("alert_config"), model.AlertConfig)
}
|
||||
|
|
|
|||
|
|
@ -365,6 +365,8 @@ func TestMapFields(t *testing.T) {
|
|||
instanceResp *observability.GetInstanceResponse
|
||||
listACLResp *observability.ListACLResponse
|
||||
getMetricsRetentionResp *observability.GetMetricsStorageRetentionResponse
|
||||
getLogsRetentionResp *observability.LogsConfigResponse
|
||||
getTracesRetentionResp *observability.TracesConfigResponse
|
||||
expected Model
|
||||
isValid bool
|
||||
}{
|
||||
|
|
@ -379,6 +381,8 @@ func TestMapFields(t *testing.T) {
|
|||
MetricsRetentionTime1h: utils.Ptr("30d"),
|
||||
MetricsRetentionTime5m: utils.Ptr("7d"),
|
||||
},
|
||||
&observability.LogsConfigResponse{Config: &observability.LogsConfig{Retention: utils.Ptr("168h")}},
|
||||
&observability.TracesConfigResponse{Config: &observability.TraceConfig{Retention: utils.Ptr("168h")}},
|
||||
Model{
|
||||
Id: types.StringValue("pid,iid"),
|
||||
ProjectId: types.StringValue("pid"),
|
||||
|
|
@ -388,6 +392,8 @@ func TestMapFields(t *testing.T) {
|
|||
Name: types.StringNull(),
|
||||
Parameters: types.MapNull(types.StringType),
|
||||
ACL: types.SetNull(types.StringType),
|
||||
TracesRetentionDays: types.Int64Value(7),
|
||||
LogsRetentionDays: types.Int64Value(7),
|
||||
MetricsRetentionDays: types.Int64Value(60),
|
||||
MetricsRetentionDays1hDownsampling: types.Int64Value(30),
|
||||
MetricsRetentionDays5mDownsampling: types.Int64Value(7),
|
||||
|
|
@ -419,6 +425,8 @@ func TestMapFields(t *testing.T) {
|
|||
MetricsRetentionTime1h: utils.Ptr("30d"),
|
||||
MetricsRetentionTime5m: utils.Ptr("7d"),
|
||||
},
|
||||
&observability.LogsConfigResponse{Config: &observability.LogsConfig{Retention: utils.Ptr("168h")}},
|
||||
&observability.TracesConfigResponse{Config: &observability.TraceConfig{Retention: utils.Ptr("168h")}},
|
||||
Model{
|
||||
Id: types.StringValue("pid,iid"),
|
||||
ProjectId: types.StringValue("pid"),
|
||||
|
|
@ -430,6 +438,8 @@ func TestMapFields(t *testing.T) {
|
|||
ACL: types.SetValueMust(types.StringType, []attr.Value{
|
||||
types.StringValue("1.1.1.1/32"),
|
||||
}),
|
||||
TracesRetentionDays: types.Int64Value(7),
|
||||
LogsRetentionDays: types.Int64Value(7),
|
||||
MetricsRetentionDays: types.Int64Value(60),
|
||||
MetricsRetentionDays1hDownsampling: types.Int64Value(30),
|
||||
MetricsRetentionDays5mDownsampling: types.Int64Value(7),
|
||||
|
|
@ -457,6 +467,8 @@ func TestMapFields(t *testing.T) {
|
|||
MetricsRetentionTime1h: utils.Ptr("30d"),
|
||||
MetricsRetentionTime5m: utils.Ptr("7d"),
|
||||
},
|
||||
&observability.LogsConfigResponse{Config: &observability.LogsConfig{Retention: utils.Ptr("168h")}},
|
||||
&observability.TracesConfigResponse{Config: &observability.TraceConfig{Retention: utils.Ptr("168h")}},
|
||||
Model{
|
||||
Id: types.StringValue("pid,iid"),
|
||||
ProjectId: types.StringValue("pid"),
|
||||
|
|
@ -469,6 +481,8 @@ func TestMapFields(t *testing.T) {
|
|||
types.StringValue("1.1.1.1/32"),
|
||||
types.StringValue("8.8.8.8/32"),
|
||||
}),
|
||||
TracesRetentionDays: types.Int64Value(7),
|
||||
LogsRetentionDays: types.Int64Value(7),
|
||||
MetricsRetentionDays: types.Int64Value(60),
|
||||
MetricsRetentionDays1hDownsampling: types.Int64Value(30),
|
||||
MetricsRetentionDays5mDownsampling: types.Int64Value(7),
|
||||
|
|
@ -490,6 +504,8 @@ func TestMapFields(t *testing.T) {
|
|||
MetricsRetentionTime1h: utils.Ptr("30d"),
|
||||
MetricsRetentionTime5m: utils.Ptr("7d"),
|
||||
},
|
||||
&observability.LogsConfigResponse{Config: &observability.LogsConfig{Retention: utils.Ptr("168h")}},
|
||||
&observability.TracesConfigResponse{Config: &observability.TraceConfig{Retention: utils.Ptr("168h")}},
|
||||
Model{
|
||||
Id: types.StringValue("pid,iid"),
|
||||
ProjectId: types.StringValue("pid"),
|
||||
|
|
@ -499,6 +515,8 @@ func TestMapFields(t *testing.T) {
|
|||
Name: types.StringNull(),
|
||||
Parameters: types.MapNull(types.StringType),
|
||||
ACL: types.SetNull(types.StringType),
|
||||
TracesRetentionDays: types.Int64Value(7),
|
||||
LogsRetentionDays: types.Int64Value(7),
|
||||
MetricsRetentionDays: types.Int64Value(60),
|
||||
MetricsRetentionDays1hDownsampling: types.Int64Value(30),
|
||||
MetricsRetentionDays5mDownsampling: types.Int64Value(7),
|
||||
|
|
@ -510,6 +528,8 @@ func TestMapFields(t *testing.T) {
|
|||
nil,
|
||||
nil,
|
||||
nil,
|
||||
nil,
|
||||
nil,
|
||||
Model{},
|
||||
false,
|
||||
},
|
||||
|
|
@ -518,6 +538,8 @@ func TestMapFields(t *testing.T) {
|
|||
&observability.GetInstanceResponse{},
|
||||
nil,
|
||||
nil,
|
||||
nil,
|
||||
nil,
|
||||
Model{},
|
||||
false,
|
||||
},
|
||||
|
|
@ -532,6 +554,8 @@ func TestMapFields(t *testing.T) {
|
|||
Message: nil,
|
||||
},
|
||||
&observability.GetMetricsStorageRetentionResponse{},
|
||||
&observability.LogsConfigResponse{},
|
||||
&observability.TracesConfigResponse{},
|
||||
Model{},
|
||||
false,
|
||||
},
|
||||
|
|
@ -546,6 +570,8 @@ func TestMapFields(t *testing.T) {
|
|||
Message: nil,
|
||||
},
|
||||
nil,
|
||||
nil,
|
||||
nil,
|
||||
Model{},
|
||||
false,
|
||||
},
|
||||
|
|
@ -574,6 +600,8 @@ func TestMapFields(t *testing.T) {
|
|||
MetricsRetentionTime1h: utils.Ptr("30d"),
|
||||
MetricsRetentionTime5m: utils.Ptr("7d"),
|
||||
},
|
||||
&observability.LogsConfigResponse{Config: &observability.LogsConfig{Retention: utils.Ptr("480h")}},
|
||||
&observability.TracesConfigResponse{Config: &observability.TraceConfig{Retention: utils.Ptr("720h")}},
|
||||
Model{
|
||||
Id: types.StringValue("pid,iid"),
|
||||
ProjectId: types.StringValue("pid"),
|
||||
|
|
@ -585,6 +613,8 @@ func TestMapFields(t *testing.T) {
|
|||
ACL: types.SetValueMust(types.StringType, []attr.Value{
|
||||
types.StringValue("1.1.1.1/32"),
|
||||
}),
|
||||
LogsRetentionDays: types.Int64Value(20),
|
||||
TracesRetentionDays: types.Int64Value(30),
|
||||
MetricsRetentionDays: types.Int64Value(60),
|
||||
MetricsRetentionDays1hDownsampling: types.Int64Value(30),
|
||||
MetricsRetentionDays5mDownsampling: types.Int64Value(7),
|
||||
|
|
@ -601,10 +631,12 @@ func TestMapFields(t *testing.T) {
|
|||
err := mapFields(context.Background(), tt.instanceResp, state)
|
||||
aclErr := mapACLField(tt.listACLResp, state)
|
||||
metricsErr := mapMetricsRetentionField(tt.getMetricsRetentionResp, state)
|
||||
if !tt.isValid && err == nil && aclErr == nil && metricsErr == nil {
|
||||
logsErr := mapLogsRetentionField(tt.getLogsRetentionResp, state)
|
||||
tracesErr := mapTracesRetentionField(tt.getTracesRetentionResp, state)
|
||||
if !tt.isValid && err == nil && aclErr == nil && metricsErr == nil && logsErr == nil && tracesErr == nil {
|
||||
t.Fatalf("Should have failed")
|
||||
}
|
||||
if tt.isValid && (err != nil || aclErr != nil || metricsErr != nil) {
|
||||
if tt.isValid && (err != nil || aclErr != nil || metricsErr != nil || logsErr != nil || tracesErr != nil) {
|
||||
t.Fatalf("Should not have failed: %v", err)
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -69,6 +69,8 @@ var testConfigVarsMax = config.Variables{
|
|||
"alert_annotation": config.StringVariable("annotation1"),
|
||||
"alert_interval": config.StringVariable("5h"),
|
||||
// max instance
|
||||
"logs_retention_days": config.StringVariable("30"),
|
||||
"traces_retention_days": config.StringVariable("30"),
|
||||
"metrics_retention_days": config.StringVariable("90"),
|
||||
"metrics_retention_days_5m_downsampling": config.StringVariable("90"),
|
||||
"metrics_retention_days_1h_downsampling": config.StringVariable("90"),
|
||||
|
|
@ -501,6 +503,8 @@ func TestAccResourceMax(t *testing.T) {
|
|||
resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "otlp_traces_url"),
|
||||
resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "zipkin_spans_url"),
|
||||
|
||||
resource.TestCheckResourceAttr("stackit_observability_instance.instance", "logs_retention_days", testutil.ConvertConfigVariable(testConfigVarsMax["logs_retention_days"])),
|
||||
resource.TestCheckResourceAttr("stackit_observability_instance.instance", "traces_retention_days", testutil.ConvertConfigVariable(testConfigVarsMax["traces_retention_days"])),
|
||||
resource.TestCheckResourceAttr("stackit_observability_instance.instance", "metrics_retention_days", testutil.ConvertConfigVariable(testConfigVarsMax["metrics_retention_days"])),
|
||||
resource.TestCheckResourceAttr("stackit_observability_instance.instance", "metrics_retention_days_5m_downsampling", testutil.ConvertConfigVariable(testConfigVarsMax["metrics_retention_days_5m_downsampling"])),
|
||||
resource.TestCheckResourceAttr("stackit_observability_instance.instance", "metrics_retention_days_1h_downsampling", testutil.ConvertConfigVariable(testConfigVarsMax["metrics_retention_days_1h_downsampling"])),
|
||||
|
|
@ -669,6 +673,8 @@ func TestAccResourceMax(t *testing.T) {
|
|||
resource.TestCheckResourceAttrSet("data.stackit_observability_instance.instance", "otlp_traces_url"),
|
||||
resource.TestCheckResourceAttrSet("data.stackit_observability_instance.instance", "zipkin_spans_url"),
|
||||
|
||||
resource.TestCheckResourceAttr("data.stackit_observability_instance.instance", "logs_retention_days", testutil.ConvertConfigVariable(testConfigVarsMax["logs_retention_days"])),
|
||||
resource.TestCheckResourceAttr("data.stackit_observability_instance.instance", "traces_retention_days", testutil.ConvertConfigVariable(testConfigVarsMax["traces_retention_days"])),
|
||||
resource.TestCheckResourceAttr("data.stackit_observability_instance.instance", "metrics_retention_days", testutil.ConvertConfigVariable(testConfigVarsMax["metrics_retention_days"])),
|
||||
resource.TestCheckResourceAttr("data.stackit_observability_instance.instance", "metrics_retention_days_5m_downsampling", testutil.ConvertConfigVariable(testConfigVarsMax["metrics_retention_days_5m_downsampling"])),
|
||||
resource.TestCheckResourceAttr("data.stackit_observability_instance.instance", "metrics_retention_days_1h_downsampling", testutil.ConvertConfigVariable(testConfigVarsMax["metrics_retention_days_1h_downsampling"])),
|
||||
|
|
@ -895,6 +901,8 @@ func TestAccResourceMax(t *testing.T) {
|
|||
resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "otlp_traces_url"),
|
||||
resource.TestCheckResourceAttrSet("stackit_observability_instance.instance", "zipkin_spans_url"),
|
||||
|
||||
resource.TestCheckResourceAttr("stackit_observability_instance.instance", "logs_retention_days", testutil.ConvertConfigVariable(testConfigVarsMax["logs_retention_days"])),
|
||||
resource.TestCheckResourceAttr("stackit_observability_instance.instance", "traces_retention_days", testutil.ConvertConfigVariable(testConfigVarsMax["traces_retention_days"])),
|
||||
resource.TestCheckResourceAttr("stackit_observability_instance.instance", "metrics_retention_days", testutil.ConvertConfigVariable(testConfigVarsMax["metrics_retention_days"])),
|
||||
resource.TestCheckResourceAttr("stackit_observability_instance.instance", "metrics_retention_days_5m_downsampling", testutil.ConvertConfigVariable(testConfigVarsMax["metrics_retention_days_5m_downsampling"])),
|
||||
resource.TestCheckResourceAttr("stackit_observability_instance.instance", "metrics_retention_days_1h_downsampling", testutil.ConvertConfigVariable(testConfigVarsMax["metrics_retention_days_1h_downsampling"])),
|
||||
|
|
|
|||
|
|
@ -11,6 +11,8 @@ variable "alert_interval" {}
|
|||
|
||||
variable "instance_name" {}
|
||||
variable "plan_name" {}
|
||||
variable "logs_retention_days" {}
|
||||
variable "traces_retention_days" {}
|
||||
variable "metrics_retention_days" {}
|
||||
variable "metrics_retention_days_5m_downsampling" {}
|
||||
variable "metrics_retention_days_1h_downsampling" {}
|
||||
|
|
@ -102,6 +104,8 @@ resource "stackit_observability_instance" "instance" {
|
|||
name = var.instance_name
|
||||
plan_name = var.plan_name
|
||||
|
||||
logs_retention_days = var.logs_retention_days
|
||||
traces_retention_days = var.traces_retention_days
|
||||
metrics_retention_days = var.metrics_retention_days
|
||||
metrics_retention_days_5m_downsampling = var.metrics_retention_days_5m_downsampling
|
||||
metrics_retention_days_1h_downsampling = var.metrics_retention_days_1h_downsampling
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue