chore: rename model serving to AI model serving (#751)

This commit is contained in:
Melvin Klein 2025-03-31 10:59:49 +02:00 committed by GitHub
parent 862db91f84
commit 513808a8a0
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
5 changed files with 49 additions and 49 deletions

View file

@@ -162,7 +162,7 @@ Note: AWS specific checks must be skipped as they do not work on STACKIT. For de
- `loadbalancer_custom_endpoint` (String) Custom endpoint for the Load Balancer service - `loadbalancer_custom_endpoint` (String) Custom endpoint for the Load Balancer service
- `logme_custom_endpoint` (String) Custom endpoint for the LogMe service - `logme_custom_endpoint` (String) Custom endpoint for the LogMe service
- `mariadb_custom_endpoint` (String) Custom endpoint for the MariaDB service - `mariadb_custom_endpoint` (String) Custom endpoint for the MariaDB service
- `modelserving_custom_endpoint` (String) Custom endpoint for the Model Serving service - `modelserving_custom_endpoint` (String) Custom endpoint for the AI Model Serving service
- `mongodbflex_custom_endpoint` (String) Custom endpoint for the MongoDB Flex service - `mongodbflex_custom_endpoint` (String) Custom endpoint for the MongoDB Flex service
- `objectstorage_custom_endpoint` (String) Custom endpoint for the Object Storage service - `objectstorage_custom_endpoint` (String) Custom endpoint for the Object Storage service
- `observability_custom_endpoint` (String) Custom endpoint for the Observability service - `observability_custom_endpoint` (String) Custom endpoint for the Observability service

View file

@@ -3,9 +3,9 @@
page_title: "stackit_modelserving_token Resource - stackit" page_title: "stackit_modelserving_token Resource - stackit"
subcategory: "" subcategory: ""
description: |- description: |-
Model Serving Auth Token Resource schema. AI Model Serving Auth Token Resource schema.
Example Usage Example Usage
Automatically rotate model serving token Automatically rotate AI model serving token
resource "time_rotating" "rotate" { resource "time_rotating" "rotate" {
rotation_days = 80 rotation_days = 80
@@ -24,11 +24,11 @@ description: |-
# stackit_modelserving_token (Resource) # stackit_modelserving_token (Resource)
Model Serving Auth Token Resource schema. AI Model Serving Auth Token Resource schema.
## Example Usage ## Example Usage
### Automatically rotate model serving token ### Automatically rotate AI model serving token
```terraform ```terraform
resource "time_rotating" "rotate" { resource "time_rotating" "rotate" {
rotation_days = 80 rotation_days = 80
@@ -52,20 +52,20 @@ resource "stackit_modelserving_token" "example" {
### Required ### Required
- `name` (String) Name of the model serving auth token. - `name` (String) Name of the AI model serving auth token.
- `project_id` (String) STACKIT project ID to which the model serving auth token is associated. - `project_id` (String) STACKIT project ID to which the AI model serving auth token is associated.
### Optional ### Optional
- `description` (String) The description of the model serving auth token. - `description` (String) The description of the AI model serving auth token.
- `region` (String) Region to which the model serving auth token is associated. If not defined, the provider region is used - `region` (String) Region to which the AI model serving auth token is associated. If not defined, the provider region is used
- `rotate_when_changed` (Map of String) A map of arbitrary key/value pairs that will force recreation of the token when they change, enabling token rotation based on external conditions such as a rotating timestamp. Changing this forces a new resource to be created. - `rotate_when_changed` (Map of String) A map of arbitrary key/value pairs that will force recreation of the token when they change, enabling token rotation based on external conditions such as a rotating timestamp. Changing this forces a new resource to be created.
- `ttl_duration` (String) The TTL duration of the model serving auth token. E.g. 5h30m40s,5h,5h30m,30m,30s - `ttl_duration` (String) The TTL duration of the AI model serving auth token. E.g. 5h30m40s,5h,5h30m,30m,30s
### Read-Only ### Read-Only
- `id` (String) Terraform's internal data source. ID. It is structured as "`project_id`,`region`,`token_id`". - `id` (String) Terraform's internal data source. ID. It is structured as "`project_id`,`region`,`token_id`".
- `state` (String) State of the model serving auth token. - `state` (String) State of the AI model serving auth token.
- `token` (String, Sensitive) Content of the model serving auth token. - `token` (String, Sensitive) Content of the AI model serving auth token.
- `token_id` (String) The model serving auth token ID. - `token_id` (String) The AI model serving auth token ID.
- `valid_until` (String) The time until the model serving auth token is valid. - `valid_until` (String) The time until the AI model serving auth token is valid.

View file

@@ -1,8 +1,8 @@
Model Serving Auth Token Resource schema. AI Model Serving Auth Token Resource schema.
## Example Usage ## Example Usage
### Automatically rotate model serving token ### Automatically rotate AI model serving token
```terraform ```terraform
resource "time_rotating" "rotate" { resource "time_rotating" "rotate" {
rotation_days = 80 rotation_days = 80

View file

@@ -199,7 +199,7 @@ func (r *tokenResource) Schema(_ context.Context, _ resource.SchemaRequest, resp
Computed: true, Computed: true,
}, },
"project_id": schema.StringAttribute{ "project_id": schema.StringAttribute{
Description: "STACKIT project ID to which the model serving auth token is associated.", Description: "STACKIT project ID to which the AI model serving auth token is associated.",
Required: true, Required: true,
Validators: []validator.String{ Validators: []validator.String{
validate.UUID(), validate.UUID(),
@@ -210,13 +210,13 @@ func (r *tokenResource) Schema(_ context.Context, _ resource.SchemaRequest, resp
Optional: true, Optional: true,
// must be computed to allow for storing the override value from the provider // must be computed to allow for storing the override value from the provider
Computed: true, Computed: true,
Description: "Region to which the model serving auth token is associated. If not defined, the provider region is used", Description: "Region to which the AI model serving auth token is associated. If not defined, the provider region is used",
PlanModifiers: []planmodifier.String{ PlanModifiers: []planmodifier.String{
stringplanmodifier.RequiresReplace(), stringplanmodifier.RequiresReplace(),
}, },
}, },
"token_id": schema.StringAttribute{ "token_id": schema.StringAttribute{
Description: "The model serving auth token ID.", Description: "The AI model serving auth token ID.",
Computed: true, Computed: true,
Validators: []validator.String{ Validators: []validator.String{
validate.UUID(), validate.UUID(),
@@ -224,7 +224,7 @@ func (r *tokenResource) Schema(_ context.Context, _ resource.SchemaRequest, resp
}, },
}, },
"ttl_duration": schema.StringAttribute{ "ttl_duration": schema.StringAttribute{
Description: "The TTL duration of the model serving auth token. E.g. 5h30m40s,5h,5h30m,30m,30s", Description: "The TTL duration of the AI model serving auth token. E.g. 5h30m40s,5h,5h30m,30m,30s",
Required: false, Required: false,
Optional: true, Optional: true,
PlanModifiers: []planmodifier.String{ PlanModifiers: []planmodifier.String{
@@ -247,7 +247,7 @@ func (r *tokenResource) Schema(_ context.Context, _ resource.SchemaRequest, resp
}, },
}, },
"description": schema.StringAttribute{ "description": schema.StringAttribute{
Description: "The description of the model serving auth token.", Description: "The description of the AI model serving auth token.",
Required: false, Required: false,
Optional: true, Optional: true,
Validators: []validator.String{ Validators: []validator.String{
@@ -255,23 +255,23 @@ func (r *tokenResource) Schema(_ context.Context, _ resource.SchemaRequest, resp
}, },
}, },
"name": schema.StringAttribute{ "name": schema.StringAttribute{
Description: "Name of the model serving auth token.", Description: "Name of the AI model serving auth token.",
Required: true, Required: true,
Validators: []validator.String{ Validators: []validator.String{
stringvalidator.LengthBetween(1, 200), stringvalidator.LengthBetween(1, 200),
}, },
}, },
"state": schema.StringAttribute{ "state": schema.StringAttribute{
Description: "State of the model serving auth token.", Description: "State of the AI model serving auth token.",
Computed: true, Computed: true,
}, },
"token": schema.StringAttribute{ "token": schema.StringAttribute{
Description: "Content of the model serving auth token.", Description: "Content of the AI model serving auth token.",
Computed: true, Computed: true,
Sensitive: true, Sensitive: true,
}, },
"valid_until": schema.StringAttribute{ "valid_until": schema.StringAttribute{
Description: "The time until the model serving auth token is valid.", Description: "The time until the AI model serving auth token is valid.",
Computed: true, Computed: true,
}, },
}, },
@@ -300,14 +300,14 @@ func (r *tokenResource) Create(ctx context.Context, req resource.CreateRequest,
ctx = tflog.SetField(ctx, "project_id", projectId) ctx = tflog.SetField(ctx, "project_id", projectId)
ctx = tflog.SetField(ctx, "region", region) ctx = tflog.SetField(ctx, "region", region)
// If model serving is not enabled, enable it // If AI model serving is not enabled, enable it
err := r.serviceEnablementClient.EnableServiceRegional(ctx, region, projectId, utils.ModelServingServiceId). err := r.serviceEnablementClient.EnableServiceRegional(ctx, region, projectId, utils.ModelServingServiceId).
Execute() Execute()
if err != nil { if err != nil {
var oapiErr *oapierror.GenericOpenAPIError var oapiErr *oapierror.GenericOpenAPIError
if errors.As(err, &oapiErr) { if errors.As(err, &oapiErr) {
if oapiErr.StatusCode == http.StatusNotFound { if oapiErr.StatusCode == http.StatusNotFound {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error enabling model serving", core.LogAndAddError(ctx, &resp.Diagnostics, "Error enabling AI model serving",
fmt.Sprintf("Service not available in region %s \n%v", region, err), fmt.Sprintf("Service not available in region %s \n%v", region, err),
) )
return return
@@ -316,8 +316,8 @@ func (r *tokenResource) Create(ctx context.Context, req resource.CreateRequest,
core.LogAndAddError( core.LogAndAddError(
ctx, ctx,
&resp.Diagnostics, &resp.Diagnostics,
"Error enabling model serving", "Error enabling AI model serving",
fmt.Sprintf("Error enabling model serving: %v", err), fmt.Sprintf("Error enabling AI model serving: %v", err),
) )
return return
} }
@@ -328,8 +328,8 @@ func (r *tokenResource) Create(ctx context.Context, req resource.CreateRequest,
core.LogAndAddError( core.LogAndAddError(
ctx, ctx,
&resp.Diagnostics, &resp.Diagnostics,
"Error enabling model serving", "Error enabling AI model serving",
fmt.Sprintf("Error enabling model serving: %v", err), fmt.Sprintf("Error enabling AI model serving: %v", err),
) )
return return
} }
@@ -337,11 +337,11 @@ func (r *tokenResource) Create(ctx context.Context, req resource.CreateRequest,
// Generate API request body from model // Generate API request body from model
payload, err := toCreatePayload(&model) payload, err := toCreatePayload(&model)
if err != nil { if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating model serving auth token", fmt.Sprintf("Creating API payload: %v", err)) core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating AI model serving auth token", fmt.Sprintf("Creating API payload: %v", err))
return return
} }
// Create new model serving auth token // Create new AI model serving auth token
createTokenResp, err := r.client.CreateToken(ctx, region, projectId). createTokenResp, err := r.client.CreateToken(ctx, region, projectId).
CreateTokenPayload(*payload). CreateTokenPayload(*payload).
Execute() Execute()
@@ -349,7 +349,7 @@ func (r *tokenResource) Create(ctx context.Context, req resource.CreateRequest,
core.LogAndAddError( core.LogAndAddError(
ctx, ctx,
&resp.Diagnostics, &resp.Diagnostics,
"Error creating model serving auth token", "Error creating AI model serving auth token",
fmt.Sprintf("Calling API: %v", err), fmt.Sprintf("Calling API: %v", err),
) )
return return
@@ -357,14 +357,14 @@ func (r *tokenResource) Create(ctx context.Context, req resource.CreateRequest,
waitResp, err := wait.CreateModelServingWaitHandler(ctx, r.client, region, projectId, *createTokenResp.Token.Id).WaitWithContext(ctx) waitResp, err := wait.CreateModelServingWaitHandler(ctx, r.client, region, projectId, *createTokenResp.Token.Id).WaitWithContext(ctx)
if err != nil { if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating model serving auth token", fmt.Sprintf("Waiting for token to be active: %v", err)) core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating AI model serving auth token", fmt.Sprintf("Waiting for token to be active: %v", err))
return return
} }
// Map response body to schema // Map response body to schema
err = mapCreateResponse(createTokenResp, waitResp, &model, region) err = mapCreateResponse(createTokenResp, waitResp, &model, region)
if err != nil { if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating model serving auth token", fmt.Sprintf("Processing API payload: %v", err)) core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating AI model serving auth token", fmt.Sprintf("Processing API payload: %v", err))
return return
} }
@@ -413,21 +413,21 @@ func (r *tokenResource) Read(ctx context.Context, req resource.ReadRequest, resp
} }
} }
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading model serving auth token", fmt.Sprintf("Calling API: %v", err)) core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading AI model serving auth token", fmt.Sprintf("Calling API: %v", err))
return return
} }
if getTokenResp != nil && getTokenResp.Token.State != nil && if getTokenResp != nil && getTokenResp.Token.State != nil &&
*getTokenResp.Token.State == inactiveState { *getTokenResp.Token.State == inactiveState {
resp.State.RemoveResource(ctx) resp.State.RemoveResource(ctx)
core.LogAndAddWarning(ctx, &resp.Diagnostics, "Error reading model serving auth token", "Model serving auth token has expired") core.LogAndAddWarning(ctx, &resp.Diagnostics, "Error reading AI model serving auth token", "AI model serving auth token has expired")
return return
} }
// Map response body to schema // Map response body to schema
err = mapGetResponse(getTokenResp, &model) err = mapGetResponse(getTokenResp, &model)
if err != nil { if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading model serving auth token", fmt.Sprintf("Processing API payload: %v", err)) core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading AI model serving auth token", fmt.Sprintf("Processing API payload: %v", err))
return return
} }
@@ -476,11 +476,11 @@ func (r *tokenResource) Update(ctx context.Context, req resource.UpdateRequest,
// Generate API request body from model // Generate API request body from model
payload, err := toUpdatePayload(&model) payload, err := toUpdatePayload(&model)
if err != nil { if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating model serving auth token", fmt.Sprintf("Creating API payload: %v", err)) core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating AI model serving auth token", fmt.Sprintf("Creating API payload: %v", err))
return return
} }
// Update model serving auth token // Update AI model serving auth token
updateTokenResp, err := r.client.PartialUpdateToken(ctx, region, projectId, tokenId).PartialUpdateTokenPayload(*payload).Execute() updateTokenResp, err := r.client.PartialUpdateToken(ctx, region, projectId, tokenId).PartialUpdateTokenPayload(*payload).Execute()
if err != nil { if err != nil {
var oapiErr *oapierror.GenericOpenAPIError var oapiErr *oapierror.GenericOpenAPIError
@@ -495,7 +495,7 @@ func (r *tokenResource) Update(ctx context.Context, req resource.UpdateRequest,
core.LogAndAddError( core.LogAndAddError(
ctx, ctx,
&resp.Diagnostics, &resp.Diagnostics,
"Error updating model serving auth token", "Error updating AI model serving auth token",
fmt.Sprintf( fmt.Sprintf(
"Calling API: %v, tokenId: %s, region: %s, projectId: %s", "Calling API: %v, tokenId: %s, region: %s, projectId: %s",
err, err,
@@ -510,13 +510,13 @@ func (r *tokenResource) Update(ctx context.Context, req resource.UpdateRequest,
if updateTokenResp != nil && updateTokenResp.Token.State != nil && if updateTokenResp != nil && updateTokenResp.Token.State != nil &&
*updateTokenResp.Token.State == inactiveState { *updateTokenResp.Token.State == inactiveState {
resp.State.RemoveResource(ctx) resp.State.RemoveResource(ctx)
core.LogAndAddWarning(ctx, &resp.Diagnostics, "Error updating model serving auth token", "Model serving auth token has expired") core.LogAndAddWarning(ctx, &resp.Diagnostics, "Error updating AI model serving auth token", "AI model serving auth token has expired")
return return
} }
waitResp, err := wait.UpdateModelServingWaitHandler(ctx, r.client, region, projectId, tokenId).WaitWithContext(ctx) waitResp, err := wait.UpdateModelServingWaitHandler(ctx, r.client, region, projectId, tokenId).WaitWithContext(ctx)
if err != nil { if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating model serving auth token", fmt.Sprintf("Waiting for token to be updated: %v", err)) core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating AI model serving auth token", fmt.Sprintf("Waiting for token to be updated: %v", err))
return return
} }
@@ -524,7 +524,7 @@ func (r *tokenResource) Update(ctx context.Context, req resource.UpdateRequest,
model.Token = state.Token model.Token = state.Token
err = mapGetResponse(waitResp, &model) err = mapGetResponse(waitResp, &model)
if err != nil { if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating model serving auth token", fmt.Sprintf("Processing API payload: %v", err)) core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating AI model serving auth token", fmt.Sprintf("Processing API payload: %v", err))
return return
} }
@@ -561,7 +561,7 @@ func (r *tokenResource) Delete(ctx context.Context, req resource.DeleteRequest,
ctx = tflog.SetField(ctx, "token_id", tokenId) ctx = tflog.SetField(ctx, "token_id", tokenId)
ctx = tflog.SetField(ctx, "region", region) ctx = tflog.SetField(ctx, "region", region)
// Delete existing model serving auth token. We will ignore the state 'deleting' for now. // Delete existing AI model serving auth token. We will ignore the state 'deleting' for now.
_, err := r.client.DeleteToken(ctx, region, projectId, tokenId).Execute() _, err := r.client.DeleteToken(ctx, region, projectId, tokenId).Execute()
if err != nil { if err != nil {
var oapiErr *oapierror.GenericOpenAPIError var oapiErr *oapierror.GenericOpenAPIError
@@ -572,14 +572,14 @@ func (r *tokenResource) Delete(ctx context.Context, req resource.DeleteRequest,
} }
} }
core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting model serving auth token", fmt.Sprintf("Calling API: %v", err)) core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting AI model serving auth token", fmt.Sprintf("Calling API: %v", err))
return return
} }
_, err = wait.DeleteModelServingWaitHandler(ctx, r.client, region, projectId, tokenId). _, err = wait.DeleteModelServingWaitHandler(ctx, r.client, region, projectId, tokenId).
WaitWithContext(ctx) WaitWithContext(ctx)
if err != nil { if err != nil {
core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting model serving auth token", fmt.Sprintf("Waiting for token to be deleted: %v", err)) core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting AI model serving auth token", fmt.Sprintf("Waiting for token to be deleted: %v", err))
return return
} }

View file

@@ -158,7 +158,7 @@ func (p *Provider) Schema(_ context.Context, _ provider.SchemaRequest, resp *pro
"dns_custom_endpoint": "Custom endpoint for the DNS service", "dns_custom_endpoint": "Custom endpoint for the DNS service",
"iaas_custom_endpoint": "Custom endpoint for the IaaS service", "iaas_custom_endpoint": "Custom endpoint for the IaaS service",
"mongodbflex_custom_endpoint": "Custom endpoint for the MongoDB Flex service", "mongodbflex_custom_endpoint": "Custom endpoint for the MongoDB Flex service",
"modelserving_custom_endpoint": "Custom endpoint for the Model Serving service", "modelserving_custom_endpoint": "Custom endpoint for the AI Model Serving service",
"loadbalancer_custom_endpoint": "Custom endpoint for the Load Balancer service", "loadbalancer_custom_endpoint": "Custom endpoint for the Load Balancer service",
"logme_custom_endpoint": "Custom endpoint for the LogMe service", "logme_custom_endpoint": "Custom endpoint for the LogMe service",
"rabbitmq_custom_endpoint": "Custom endpoint for the RabbitMQ service", "rabbitmq_custom_endpoint": "Custom endpoint for the RabbitMQ service",