chore: rename model serving to AI model serving (#751)

This commit is contained in:
Melvin Klein 2025-03-31 10:59:49 +02:00 committed by GitHub
parent 862db91f84
commit 513808a8a0
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
5 changed files with 49 additions and 49 deletions

View file

@@ -162,7 +162,7 @@ Note: AWS specific checks must be skipped as they do not work on STACKIT. For de
- `loadbalancer_custom_endpoint` (String) Custom endpoint for the Load Balancer service
- `logme_custom_endpoint` (String) Custom endpoint for the LogMe service
- `mariadb_custom_endpoint` (String) Custom endpoint for the MariaDB service
- `modelserving_custom_endpoint` (String) Custom endpoint for the Model Serving service
- `modelserving_custom_endpoint` (String) Custom endpoint for the AI Model Serving service
- `mongodbflex_custom_endpoint` (String) Custom endpoint for the MongoDB Flex service
- `objectstorage_custom_endpoint` (String) Custom endpoint for the Object Storage service
- `observability_custom_endpoint` (String) Custom endpoint for the Observability service

View file

@@ -3,9 +3,9 @@
page_title: "stackit_modelserving_token Resource - stackit"
subcategory: ""
description: |-
Model Serving Auth Token Resource schema.
AI Model Serving Auth Token Resource schema.
Example Usage
Automatically rotate model serving token
Automatically rotate AI model serving token
resource "time_rotating" "rotate" {
rotation_days = 80
@@ -24,11 +24,11 @@ description: |-
# stackit_modelserving_token (Resource)
Model Serving Auth Token Resource schema.
AI Model Serving Auth Token Resource schema.
## Example Usage
### Automatically rotate model serving token
### Automatically rotate AI model serving token
```terraform
resource "time_rotating" "rotate" {
rotation_days = 80
@@ -52,20 +52,20 @@ resource "stackit_modelserving_token" "example" {
### Required
- `name` (String) Name of the model serving auth token.
- `project_id` (String) STACKIT project ID to which the model serving auth token is associated.
- `name` (String) Name of the AI model serving auth token.
- `project_id` (String) STACKIT project ID to which the AI model serving auth token is associated.
### Optional
- `description` (String) The description of the model serving auth token.
- `region` (String) Region to which the model serving auth token is associated. If not defined, the provider region is used
- `description` (String) The description of the AI model serving auth token.
- `region` (String) Region to which the AI model serving auth token is associated. If not defined, the provider region is used
- `rotate_when_changed` (Map of String) A map of arbitrary key/value pairs that will force recreation of the token when they change, enabling token rotation based on external conditions such as a rotating timestamp. Changing this forces a new resource to be created.
- `ttl_duration` (String) The TTL duration of the model serving auth token. E.g. 5h30m40s,5h,5h30m,30m,30s
- `ttl_duration` (String) The TTL duration of the AI model serving auth token. E.g. 5h30m40s,5h,5h30m,30m,30s
### Read-Only
- `id` (String) Terraform's internal data source. ID. It is structured as "`project_id`,`region`,`token_id`".
- `state` (String) State of the model serving auth token.
- `token` (String, Sensitive) Content of the model serving auth token.
- `token_id` (String) The model serving auth token ID.
- `valid_until` (String) The time until the model serving auth token is valid.
- `state` (String) State of the AI model serving auth token.
- `token` (String, Sensitive) Content of the AI model serving auth token.
- `token_id` (String) The AI model serving auth token ID.
- `valid_until` (String) The time until the AI model serving auth token is valid.

View file

@@ -1,8 +1,8 @@
Model Serving Auth Token Resource schema.
AI Model Serving Auth Token Resource schema.
## Example Usage
### Automatically rotate model serving token
### Automatically rotate AI model serving token
```terraform
resource "time_rotating" "rotate" {
rotation_days = 80

View file

@@ -199,7 +199,7 @@ func (r *tokenResource) Schema(_ context.Context, _ resource.SchemaRequest, resp
Computed: true,
},
"project_id": schema.StringAttribute{
Description: "STACKIT project ID to which the model serving auth token is associated.",
Description: "STACKIT project ID to which the AI model serving auth token is associated.",
Required: true,
Validators: []validator.String{
validate.UUID(),
@@ -210,13 +210,13 @@ func (r *tokenResource) Schema(_ context.Context, _ resource.SchemaRequest, resp
Optional: true,
// must be computed to allow for storing the override value from the provider
Computed: true,
Description: "Region to which the model serving auth token is associated. If not defined, the provider region is used",
Description: "Region to which the AI model serving auth token is associated. If not defined, the provider region is used",
PlanModifiers: []planmodifier.String{
stringplanmodifier.RequiresReplace(),
},
},
"token_id": schema.StringAttribute{
Description: "The model serving auth token ID.",
Description: "The AI model serving auth token ID.",
Computed: true,
Validators: []validator.String{
validate.UUID(),
@@ -224,7 +224,7 @@ func (r *tokenResource) Schema(_ context.Context, _ resource.SchemaRequest, resp
},
},
"ttl_duration": schema.StringAttribute{
Description: "The TTL duration of the model serving auth token. E.g. 5h30m40s,5h,5h30m,30m,30s",
Description: "The TTL duration of the AI model serving auth token. E.g. 5h30m40s,5h,5h30m,30m,30s",
Required: false,
Optional: true,
PlanModifiers: []planmodifier.String{
@@ -247,7 +247,7 @@ func (r *tokenResource) Schema(_ context.Context, _ resource.SchemaRequest, resp
},
},
"description": schema.StringAttribute{
Description: "The description of the model serving auth token.",
Description: "The description of the AI model serving auth token.",
Required: false,
Optional: true,
Validators: []validator.String{
@@ -255,23 +255,23 @@ func (r *tokenResource) Schema(_ context.Context, _ resource.SchemaRequest, resp
},
},
"name": schema.StringAttribute{
Description: "Name of the model serving auth token.",
Description: "Name of the AI model serving auth token.",
Required: true,
Validators: []validator.String{
stringvalidator.LengthBetween(1, 200),
},
},
"state": schema.StringAttribute{
Description: "State of the model serving auth token.",
Description: "State of the AI model serving auth token.",
Computed: true,
},
"token": schema.StringAttribute{
Description: "Content of the model serving auth token.",
Description: "Content of the AI model serving auth token.",
Computed: true,
Sensitive: true,
},
"valid_until": schema.StringAttribute{
Description: "The time until the model serving auth token is valid.",
Description: "The time until the AI model serving auth token is valid.",
Computed: true,
},
},
@@ -300,14 +300,14 @@ func (r *tokenResource) Create(ctx context.Context, req resource.CreateRequest,
ctx = tflog.SetField(ctx, "project_id", projectId)
ctx = tflog.SetField(ctx, "region", region)
// If model serving is not enabled, enable it
// If AI model serving is not enabled, enable it
err := r.serviceEnablementClient.EnableServiceRegional(ctx, region, projectId, utils.ModelServingServiceId).
Execute()
if err != nil {
var oapiErr *oapierror.GenericOpenAPIError
if errors.As(err, &oapiErr) {
if oapiErr.StatusCode == http.StatusNotFound {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error enabling model serving",
core.LogAndAddError(ctx, &resp.Diagnostics, "Error enabling AI model serving",
fmt.Sprintf("Service not available in region %s \n%v", region, err),
)
return
@@ -316,8 +316,8 @@ func (r *tokenResource) Create(ctx context.Context, req resource.CreateRequest,
core.LogAndAddError(
ctx,
&resp.Diagnostics,
"Error enabling model serving",
fmt.Sprintf("Error enabling model serving: %v", err),
"Error enabling AI model serving",
fmt.Sprintf("Error enabling AI model serving: %v", err),
)
return
}
@@ -328,8 +328,8 @@ func (r *tokenResource) Create(ctx context.Context, req resource.CreateRequest,
core.LogAndAddError(
ctx,
&resp.Diagnostics,
"Error enabling model serving",
fmt.Sprintf("Error enabling model serving: %v", err),
"Error enabling AI model serving",
fmt.Sprintf("Error enabling AI model serving: %v", err),
)
return
}
@@ -337,11 +337,11 @@ func (r *tokenResource) Create(ctx context.Context, req resource.CreateRequest,
// Generate API request body from model
payload, err := toCreatePayload(&model)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating model serving auth token", fmt.Sprintf("Creating API payload: %v", err))
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating AI model serving auth token", fmt.Sprintf("Creating API payload: %v", err))
return
}
// Create new model serving auth token
// Create new AI model serving auth token
createTokenResp, err := r.client.CreateToken(ctx, region, projectId).
CreateTokenPayload(*payload).
Execute()
@@ -349,7 +349,7 @@ func (r *tokenResource) Create(ctx context.Context, req resource.CreateRequest,
core.LogAndAddError(
ctx,
&resp.Diagnostics,
"Error creating model serving auth token",
"Error creating AI model serving auth token",
fmt.Sprintf("Calling API: %v", err),
)
return
@@ -357,14 +357,14 @@ func (r *tokenResource) Create(ctx context.Context, req resource.CreateRequest,
waitResp, err := wait.CreateModelServingWaitHandler(ctx, r.client, region, projectId, *createTokenResp.Token.Id).WaitWithContext(ctx)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating model serving auth token", fmt.Sprintf("Waiting for token to be active: %v", err))
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating AI model serving auth token", fmt.Sprintf("Waiting for token to be active: %v", err))
return
}
// Map response body to schema
err = mapCreateResponse(createTokenResp, waitResp, &model, region)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating model serving auth token", fmt.Sprintf("Processing API payload: %v", err))
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating AI model serving auth token", fmt.Sprintf("Processing API payload: %v", err))
return
}
@@ -413,21 +413,21 @@ func (r *tokenResource) Read(ctx context.Context, req resource.ReadRequest, resp
}
}
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading model serving auth token", fmt.Sprintf("Calling API: %v", err))
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading AI model serving auth token", fmt.Sprintf("Calling API: %v", err))
return
}
if getTokenResp != nil && getTokenResp.Token.State != nil &&
*getTokenResp.Token.State == inactiveState {
resp.State.RemoveResource(ctx)
core.LogAndAddWarning(ctx, &resp.Diagnostics, "Error reading model serving auth token", "Model serving auth token has expired")
core.LogAndAddWarning(ctx, &resp.Diagnostics, "Error reading AI model serving auth token", "AI model serving auth token has expired")
return
}
// Map response body to schema
err = mapGetResponse(getTokenResp, &model)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading model serving auth token", fmt.Sprintf("Processing API payload: %v", err))
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading AI model serving auth token", fmt.Sprintf("Processing API payload: %v", err))
return
}
@@ -476,11 +476,11 @@ func (r *tokenResource) Update(ctx context.Context, req resource.UpdateRequest,
// Generate API request body from model
payload, err := toUpdatePayload(&model)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating model serving auth token", fmt.Sprintf("Creating API payload: %v", err))
core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating AI model serving auth token", fmt.Sprintf("Creating API payload: %v", err))
return
}
// Update model serving auth token
// Update AI model serving auth token
updateTokenResp, err := r.client.PartialUpdateToken(ctx, region, projectId, tokenId).PartialUpdateTokenPayload(*payload).Execute()
if err != nil {
var oapiErr *oapierror.GenericOpenAPIError
@@ -495,7 +495,7 @@ func (r *tokenResource) Update(ctx context.Context, req resource.UpdateRequest,
core.LogAndAddError(
ctx,
&resp.Diagnostics,
"Error updating model serving auth token",
"Error updating AI model serving auth token",
fmt.Sprintf(
"Calling API: %v, tokenId: %s, region: %s, projectId: %s",
err,
@@ -510,13 +510,13 @@ func (r *tokenResource) Update(ctx context.Context, req resource.UpdateRequest,
if updateTokenResp != nil && updateTokenResp.Token.State != nil &&
*updateTokenResp.Token.State == inactiveState {
resp.State.RemoveResource(ctx)
core.LogAndAddWarning(ctx, &resp.Diagnostics, "Error updating model serving auth token", "Model serving auth token has expired")
core.LogAndAddWarning(ctx, &resp.Diagnostics, "Error updating AI model serving auth token", "AI model serving auth token has expired")
return
}
waitResp, err := wait.UpdateModelServingWaitHandler(ctx, r.client, region, projectId, tokenId).WaitWithContext(ctx)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating model serving auth token", fmt.Sprintf("Waiting for token to be updated: %v", err))
core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating AI model serving auth token", fmt.Sprintf("Waiting for token to be updated: %v", err))
return
}
@@ -524,7 +524,7 @@ func (r *tokenResource) Update(ctx context.Context, req resource.UpdateRequest,
model.Token = state.Token
err = mapGetResponse(waitResp, &model)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating model serving auth token", fmt.Sprintf("Processing API payload: %v", err))
core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating AI model serving auth token", fmt.Sprintf("Processing API payload: %v", err))
return
}
@@ -561,7 +561,7 @@ func (r *tokenResource) Delete(ctx context.Context, req resource.DeleteRequest,
ctx = tflog.SetField(ctx, "token_id", tokenId)
ctx = tflog.SetField(ctx, "region", region)
// Delete existing model serving auth token. We will ignore the state 'deleting' for now.
// Delete existing AI model serving auth token. We will ignore the state 'deleting' for now.
_, err := r.client.DeleteToken(ctx, region, projectId, tokenId).Execute()
if err != nil {
var oapiErr *oapierror.GenericOpenAPIError
@@ -572,14 +572,14 @@ func (r *tokenResource) Delete(ctx context.Context, req resource.DeleteRequest,
}
}
core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting model serving auth token", fmt.Sprintf("Calling API: %v", err))
core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting AI model serving auth token", fmt.Sprintf("Calling API: %v", err))
return
}
_, err = wait.DeleteModelServingWaitHandler(ctx, r.client, region, projectId, tokenId).
WaitWithContext(ctx)
if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting model serving auth token", fmt.Sprintf("Waiting for token to be deleted: %v", err))
core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting AI model serving auth token", fmt.Sprintf("Waiting for token to be deleted: %v", err))
return
}

View file

@@ -158,7 +158,7 @@ func (p *Provider) Schema(_ context.Context, _ provider.SchemaRequest, resp *pro
"dns_custom_endpoint": "Custom endpoint for the DNS service",
"iaas_custom_endpoint": "Custom endpoint for the IaaS service",
"mongodbflex_custom_endpoint": "Custom endpoint for the MongoDB Flex service",
"modelserving_custom_endpoint": "Custom endpoint for the Model Serving service",
"modelserving_custom_endpoint": "Custom endpoint for the AI Model Serving service",
"loadbalancer_custom_endpoint": "Custom endpoint for the Load Balancer service",
"logme_custom_endpoint": "Custom endpoint for the LogMe service",
"rabbitmq_custom_endpoint": "Custom endpoint for the RabbitMQ service",