feat: add support for custom and auto agent size #272

Merged: 18 commits, Dec 17, 2024
akp/apis/v1alpha1/cluster.go: 24 additions & 1 deletion
@@ -25,11 +25,33 @@ type ClusterSpec struct {
Data ClusterData `json:"data,omitempty"`
}

type Resources struct {
Mem string `json:"mem,omitempty"`
Cpu string `json:"cpu,omitempty"`
}

type ManagedClusterConfig struct {
SecretName string `json:"secretName,omitempty"`
SecretKey string `json:"secretKey,omitempty"`
}

type AutoScalerConfig struct {
ApplicationController *AppControllerAutoScalingConfig `json:"applicationController,omitempty"`
RepoServer *RepoServerAutoScalingConfig `json:"repoServer,omitempty"`
}

type AppControllerAutoScalingConfig struct {
ResourceMinimum *Resources `json:"resourceMinimum,omitempty"`
ResourceMaximum *Resources `json:"resourceMaximum,omitempty"`
}

type RepoServerAutoScalingConfig struct {
ResourceMinimum *Resources `json:"resourceMinimum,omitempty"`
ResourceMaximum *Resources `json:"resourceMaximum,omitempty"`
ReplicaMaximum int32 `json:"replicaMaximum,omitempty"`
ReplicaMinimum int32 `json:"replicaMinimum,omitempty"`
}

type ClusterData struct {
Size ClusterSize `json:"size,omitempty"`
AutoUpgradeDisabled *bool `json:"autoUpgradeDisabled,omitempty"`
@@ -42,5 +64,6 @@ type ClusterData struct {
EksAddonEnabled *bool `json:"eksAddonEnabled,omitempty"`
ManagedClusterConfig *ManagedClusterConfig `json:"managedClusterConfig,omitempty"`

- MultiClusterK8SDashboardEnabled *bool `json:"multiClusterK8sDashboardEnabled,omitempty"`
+ MultiClusterK8SDashboardEnabled *bool `json:"multiClusterK8sDashboardEnabled,omitempty"`
+ AutoscalerConfig *AutoScalerConfig `json:"autoscalerConfig,omitempty"`
}
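As a quick illustration of the wire format these new structs produce, here is a minimal standalone sketch; the module import path and the sample resource values are assumptions for the example, not something this PR defines:

package main

import (
	"encoding/json"
	"fmt"

	// Assumed module path for illustration; adjust to the provider's actual go.mod module.
	"github.com/akuity/terraform-provider-akp/akp/apis/v1alpha1"
)

func main() {
	// Example bounds only; real values come from the Terraform configuration.
	cfg := v1alpha1.AutoScalerConfig{
		ApplicationController: &v1alpha1.AppControllerAutoScalingConfig{
			ResourceMinimum: &v1alpha1.Resources{Mem: "1Gi", Cpu: "500m"},
			ResourceMaximum: &v1alpha1.Resources{Mem: "4Gi", Cpu: "2"},
		},
		RepoServer: &v1alpha1.RepoServerAutoScalingConfig{
			ResourceMinimum: &v1alpha1.Resources{Mem: "256Mi", Cpu: "250m"},
			ResourceMaximum: &v1alpha1.Resources{Mem: "2Gi", Cpu: "1"},
			ReplicaMinimum:  1,
			ReplicaMaximum:  5,
		},
	}
	out, _ := json.MarshalIndent(cfg, "", "  ")
	// Field names follow the json tags above: applicationController, repoServer,
	// resourceMinimum, resourceMaximum, mem, cpu, replicaMinimum, replicaMaximum.
	fmt.Println(string(out))
}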
akp/data_source_akp_cluster.go: 1 addition & 1 deletion
@@ -54,7 +54,7 @@ func (d *AkpClusterDataSource) Read(ctx context.Context, req datasource.ReadRequ
return
}
ctx = httpctx.SetAuthorizationHeader(ctx, d.akpCli.Cred.Scheme(), d.akpCli.Cred.Credential())
- refreshClusterState(ctx, &resp.Diagnostics, d.akpCli.Cli, &data, d.akpCli.OrgId, &resp.State)
+ refreshClusterState(ctx, &resp.Diagnostics, d.akpCli.Cli, &data, d.akpCli.OrgId, &resp.State, &data)
// Save data into Terraform state
resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
}
akp/data_source_akp_cluster_schema.go: 122 additions & 1 deletion
@@ -81,7 +81,7 @@ func getClusterSpecDataSourceAttributes() map[string]schema.Attribute {
func getClusterDataDataSourceAttributes() map[string]schema.Attribute {
return map[string]schema.Attribute{
"size": schema.StringAttribute{
- MarkdownDescription: "Cluster Size. One of `small`, `medium` or `large`",
+ MarkdownDescription: "Cluster Size. One of `small`, `medium`, `large`, `custom` or `auto`",
Computed: true,
},
"auto_upgrade_disabled": schema.BoolAttribute{
@@ -121,6 +121,16 @@ func getClusterDataDataSourceAttributes() map[string]schema.Attribute {
MarkdownDescription: "Enable the KubeVision feature on the managed cluster",
Computed: true,
},
"auto_agent_size_config": schema.SingleNestedAttribute{
MarkdownDescription: "Autoscaler config for auto agent size",
Computed: true,
Attributes: getAutoScalerConfigDataSourceAttributes(),
},
"custom_agent_size_config": schema.SingleNestedAttribute{
MarkdownDescription: "Custom agent size config",
Computed: true,
Attributes: getCustomAgentSizeConfigDataSourceAttributes(),
},
}
}

@@ -201,3 +211,114 @@ func getManagedClusterConfigDataSourceAttributes() map[string]schema.Attribute {
},
}
}

func getAutoScalerConfigDataSourceAttributes() map[string]schema.Attribute {
return map[string]schema.Attribute{
"application_controller": schema.SingleNestedAttribute{
Description: "Application Controller auto scaling config",
Computed: true,
Attributes: getAppControllerAutoScalingConfigDataSourceAttributes(),
},
"repo_server": schema.SingleNestedAttribute{
Description: "Repo Server auto scaling config",
Computed: true,
Attributes: getRepoServerAutoScalingConfigDataSourceAttributes(),
},
}
}

func getCustomAgentSizeConfigDataSourceAttributes() map[string]schema.Attribute {
return map[string]schema.Attribute{
"application_controller": schema.SingleNestedAttribute{
Description: "Application Controller custom agent size config",
Computed: true,
Attributes: getAppControllerCustomAgentSizeConfigDataSourceAttributes(),
},
"repo_server": schema.SingleNestedAttribute{
Description: "Repo Server custom agent size config",
Computed: true,
Attributes: getRepoServerCustomAgentSizeConfigDataSourceAttributes(),
},
}
}

func getAppControllerCustomAgentSizeConfigDataSourceAttributes() map[string]schema.Attribute {
return map[string]schema.Attribute{
"memory": schema.StringAttribute{
Description: "Memory",
Computed: true,
},
"cpu": schema.StringAttribute{
Description: "CPU",
Computed: true,
},
}
}

func getRepoServerCustomAgentSizeConfigDataSourceAttributes() map[string]schema.Attribute {
return map[string]schema.Attribute{
"memory": schema.StringAttribute{
Description: "Memory",
Computed: true,
},
"cpu": schema.StringAttribute{
Description: "CPU",
Computed: true,
},
"replicas": schema.Int64Attribute{
Description: "Replica",
Computed: true,
},
}
}

func getAppControllerAutoScalingConfigDataSourceAttributes() map[string]schema.Attribute {
return map[string]schema.Attribute{
"resource_minimum": schema.SingleNestedAttribute{
Description: "Resource minimum",
Computed: true,
Attributes: getResourcesDataSourceAttributes(),
},
"resource_maximum": schema.SingleNestedAttribute{
Description: "Resource maximum",
Computed: true,
Attributes: getResourcesDataSourceAttributes(),
},
}
}

func getRepoServerAutoScalingConfigDataSourceAttributes() map[string]schema.Attribute {
return map[string]schema.Attribute{
"resource_minimum": schema.SingleNestedAttribute{
Description: "Resource minimum",
Computed: true,
Attributes: getResourcesDataSourceAttributes(),
},
"resource_maximum": schema.SingleNestedAttribute{
Description: "Resource maximum",
Computed: true,
Attributes: getResourcesDataSourceAttributes(),
},
"replicas_maximum": schema.Int64Attribute{
Description: "Replica maximum",
Computed: true,
},
"replicas_minimum": schema.Int64Attribute{
Description: "Replica minimum",
Computed: true,
},
}
}

func getResourcesDataSourceAttributes() map[string]schema.Attribute {
return map[string]schema.Attribute{
"memory": schema.StringAttribute{
Description: "Memory",
Computed: true,
},
"cpu": schema.StringAttribute{
Description: "CPU",
Computed: true,
},
}
}
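For orientation on how computed nested attributes like these typically surface in the provider's state model: the terraform-plugin-framework convention is to back each SingleNestedAttribute with a types.Object field keyed by its tfsdk tag. A rough sketch follows; the struct and field names are assumptions, not the provider's actual model:

package model

import "github.com/hashicorp/terraform-plugin-framework/types"

// Hypothetical mirror of the new computed attributes, for illustration only.
type ClusterData struct {
	Size                  types.String `tfsdk:"size"`
	AutoAgentSizeConfig   types.Object `tfsdk:"auto_agent_size_config"`
	CustomAgentSizeConfig types.Object `tfsdk:"custom_agent_size_config"`
}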
akp/data_source_akp_clusters.go: 1 addition & 1 deletion
@@ -75,7 +75,7 @@ func (d *AkpClustersDataSource) Read(ctx context.Context, req datasource.ReadReq
stateCluster := types.Cluster{
InstanceID: data.InstanceID,
}
- stateCluster.Update(ctx, &resp.Diagnostics, cluster)
+ stateCluster.Update(ctx, &resp.Diagnostics, cluster, nil)
data.Clusters = append(data.Clusters, stateCluster)
}
// Save data into Terraform state
akp/resource_akp_cluster.go: 9 additions & 6 deletions
@@ -3,10 +3,11 @@ package akp
import (
"context"
"fmt"
"github.com/pkg/errors"
"strings"
"time"

"github.com/pkg/errors"

"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/path"
"github.com/hashicorp/terraform-plugin-framework/resource"
@@ -95,7 +96,7 @@ func (r *AkpClusterResource) Read(ctx context.Context, req resource.ReadRequest,
}

ctx = httpctx.SetAuthorizationHeader(ctx, r.akpCli.Cred.Scheme(), r.akpCli.Cred.Credential())
- err := refreshClusterState(ctx, &resp.Diagnostics, r.akpCli.Cli, &data, r.akpCli.OrgId, &resp.State)
+ err := refreshClusterState(ctx, &resp.Diagnostics, r.akpCli.Cli, &data, r.akpCli.OrgId, &resp.State, &data)
if err != nil {
resp.Diagnostics.AddError("Client Error", err.Error())
} else {
@@ -186,12 +187,14 @@ func (r *AkpClusterResource) ImportState(ctx context.Context, req resource.Impor
func (r *AkpClusterResource) upsert(ctx context.Context, diagnostics *diag.Diagnostics, plan *types.Cluster, isCreate bool) (*types.Cluster, error) {
ctx = httpctx.SetAuthorizationHeader(ctx, r.akpCli.Cred.Scheme(), r.akpCli.Cred.Credential())
apiReq := buildClusterApplyRequest(ctx, diagnostics, plan, r.akpCli.OrgId)
+ if diagnostics.HasError() {
+ return nil, nil
+ }
result, err := r.applyInstance(ctx, plan, apiReq, isCreate, r.akpCli.Cli.ApplyInstance, r.upsertKubeConfig)
if err != nil {
return result, err
}

- return result, refreshClusterState(ctx, diagnostics, r.akpCli.Cli, result, r.akpCli.OrgId, nil)
+ return result, refreshClusterState(ctx, diagnostics, r.akpCli.Cli, result, r.akpCli.OrgId, nil, plan)
}

func (r *AkpClusterResource) applyInstance(ctx context.Context, plan *types.Cluster, apiReq *argocdv1.ApplyInstanceRequest, isCreate bool, applyInstance func(context.Context, *argocdv1.ApplyInstanceRequest) (*argocdv1.ApplyInstanceResponse, error), upsertKubeConfig func(ctx context.Context, plan *types.Cluster, isCreate bool) error) (*types.Cluster, error) {
@@ -240,7 +243,7 @@ func (r *AkpClusterResource) upsertKubeConfig(ctx context.Context, plan *types.C
}

func refreshClusterState(ctx context.Context, diagnostics *diag.Diagnostics, client argocdv1.ArgoCDServiceGatewayClient, cluster *types.Cluster,
- orgID string, state *tfsdk.State) error {
+ orgID string, state *tfsdk.State, plan *types.Cluster) error {
clusterReq := &argocdv1.GetInstanceClusterRequest{
OrganizationId: orgID,
InstanceId: cluster.InstanceID.ValueString(),
@@ -258,7 +261,7 @@ func refreshClusterState(ctx context.Context, diagnostics *diag.Diagnostics, cli
return errors.Wrap(err, "Unable to read Argo CD cluster")
}
tflog.Debug(ctx, fmt.Sprintf("Get cluster response: %s", clusterResp))
- cluster.Update(ctx, diagnostics, clusterResp.GetCluster())
+ cluster.Update(ctx, diagnostics, clusterResp.GetCluster(), plan)
return nil
}
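One common reason for threading the prior plan into a refresh/Update path like this is to preserve the user's configured representation rather than the concrete values the API reports back. A sketch of that idea only; this is illustrative and not the Update logic in this repo:

package main

import "fmt"

// reconcileSize is illustrative, not code from this PR: when the user configures
// an "auto" or "custom" size, keep that intent in state instead of the concrete
// size the API currently reports, so Terraform does not see a perpetual diff.
func reconcileSize(planSize, apiSize string) string {
	if planSize == "auto" || planSize == "custom" {
		return planSize
	}
	return apiSize
}

func main() {
	fmt.Println(reconcileSize("auto", "medium")) // auto
	fmt.Println(reconcileSize("", "large"))      // large
}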
