k8s: add option to wait for pools in the wait for cluster #1193

Merged (7 commits, Jul 10, 2020)
cmd/scw/testdata/test-all-usage-k8s-cluster-wait-usage.golden (5 changes: 3 additions & 2 deletions)
@@ -10,8 +10,9 @@ EXAMPLES:
     scw k8s cluster wait 11111111-1111-1111-1111-111111111111
 
 ARGS:
-  cluster-id       ID of the cluster.
-  [region=fr-par]  Region to target. If none is passed will use default region from the config
+  cluster-id        ID of the cluster.
+  [wait-for-pools]  Wait for pools to be ready.
+  [region=fr-par]   Region to target. If none is passed will use default region from the config
 
 FLAGS:
   -h, --help   help for wait
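Based on the test added further down, the new argument is passed as a key=value pair, so waiting for a cluster and all of its pools looks like this (the cluster ID is the same placeholder as in the usage example above):

  scw k8s cluster wait 11111111-1111-1111-1111-111111111111 wait-for-pools=true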
internal/namespaces/k8s/v1/custom_cluster.go (44 changes: 40 additions & 4 deletions)
@@ -129,21 +129,53 @@ func waitForClusterFunc(action int) core.WaitFunc {
 }
 
 func k8sClusterWaitCommand() *core.Command {
+	type customClusterWaitArgs struct {
+		k8s.WaitForClusterRequest
+		WaitForPools bool
+	}
 	return &core.Command{
 		Short:     `Wait for a cluster to reach a stable state`,
 		Long:      `Wait for server to reach a stable state. This is similar to using --wait flag on other action commands, but without requiring a new action on the server.`,
 		Namespace: "k8s",
 		Resource:  "cluster",
 		Verb:      "wait",
-		ArgsType:  reflect.TypeOf(k8s.WaitForClusterRequest{}),
+		ArgsType:  reflect.TypeOf(customClusterWaitArgs{}),
 		Run: func(ctx context.Context, argsI interface{}) (i interface{}, err error) {
+			args := argsI.(*customClusterWaitArgs)
+
 			api := k8s.NewAPI(core.ExtractClient(ctx))
-			return api.WaitForCluster(&k8s.WaitForClusterRequest{
-				Region:        argsI.(*k8s.WaitForClusterRequest).Region,
-				ClusterID:     argsI.(*k8s.WaitForClusterRequest).ClusterID,
+			cluster, err := api.WaitForCluster(&k8s.WaitForClusterRequest{
+				Region:        args.Region,
+				ClusterID:     args.ClusterID,
 				Timeout:       scw.TimeDurationPtr(clusterActionTimeout),
 				RetryInterval: core.DefaultRetryInterval,
 			})
+			if err != nil {
+				return nil, err
+			}
+
+			if args.WaitForPools {
+				pools, err := api.ListPools(&k8s.ListPoolsRequest{
+					Region:    cluster.Region,
+					ClusterID: cluster.ID,
+				}, scw.WithAllPages())
+				if err != nil {
+					return cluster, err
+				}
+				for _, pool := range pools.Pools {
+					_, err := api.WaitForPool(&k8s.WaitForPoolRequest{
+						Region:        pool.Region,
+						PoolID:        pool.ID,
+						Timeout:       scw.TimeDurationPtr(poolActionTimeout),
+						RetryInterval: core.DefaultRetryInterval,
+					})
+					if err != nil {
+						return cluster, err
+					}
+				}
+			}
+
+			return cluster, nil
 		},
 		ArgSpecs: core.ArgSpecs{
 			{
@@ -152,6 +184,10 @@ func k8sClusterWaitCommand() *core.Command {
 				Required:   true,
 				Positional: true,
 			},
+			{
+				Name:  "wait-for-pools",
+				Short: "Wait for pools to be ready.",
+			},
 			core.RegionArgSpec(),
 		},
 		Examples: []*core.Example{
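For context, the same wait-then-check-pools flow can be reproduced against the scaleway-sdk-go directly, outside the CLI's core.Command plumbing. This is only a sketch: the helper name, the placeholder credentials and cluster ID, and leaving Timeout/RetryInterval to the SDK defaults are assumptions of this illustration, not part of the PR.

package main

import (
	"fmt"

	k8s "github.com/scaleway/scaleway-sdk-go/api/k8s/v1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

// waitForClusterAndPools blocks until the cluster reaches a stable state, then
// waits for each of its pools in turn. Timeout and RetryInterval are left to
// the SDK defaults here, unlike the CLI command above.
func waitForClusterAndPools(api *k8s.API, region scw.Region, clusterID string) (*k8s.Cluster, error) {
	cluster, err := api.WaitForCluster(&k8s.WaitForClusterRequest{
		Region:    region,
		ClusterID: clusterID,
	})
	if err != nil {
		return nil, err
	}

	// List every pool of the cluster (all pages) and wait for each one.
	pools, err := api.ListPools(&k8s.ListPoolsRequest{
		Region:    cluster.Region,
		ClusterID: cluster.ID,
	}, scw.WithAllPages())
	if err != nil {
		return cluster, err
	}
	for _, pool := range pools.Pools {
		if _, err := api.WaitForPool(&k8s.WaitForPoolRequest{
			Region: pool.Region,
			PoolID: pool.ID,
		}); err != nil {
			return cluster, err
		}
	}
	return cluster, nil
}

func main() {
	// Placeholder credentials and cluster ID; replace with real values.
	client, err := scw.NewClient(scw.WithAuth("SCW_ACCESS_KEY", "SCW_SECRET_KEY"))
	if err != nil {
		panic(err)
	}
	cluster, err := waitForClusterAndPools(k8s.NewAPI(client), scw.RegionFrPar, "11111111-1111-1111-1111-111111111111")
	if err != nil {
		panic(err)
	}
	fmt.Println("cluster status:", cluster.Status)
}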
internal/namespaces/k8s/v1/custom_cluster_test.go (15 changes: 14 additions & 1 deletion)
@@ -9,7 +9,7 @@ import (
 func Test_GetCluster(t *testing.T) {
 	t.Run("Simple", core.Test(&core.TestConfig{
 		Commands:   GetCommands(),
-		BeforeFunc: createCluster("Cluster", kapsuleVersion, 1),
+		BeforeFunc: createCluster("Cluster", kapsuleVersion, 1, "DEV1-M"),
 		Cmd:        "scw k8s cluster get {{ .Cluster.ID }}",
 		Check: core.TestCheckCombine(
 			core.TestCheckGolden(),
@@ -18,3 +18,16 @@ func Test_GetCluster(t *testing.T) {
 		AfterFunc: deleteCluster("Cluster"),
 	}))
 }
+
+func Test_WaitCluster(t *testing.T) {
+	t.Run("wait for pools", core.Test(&core.TestConfig{
+		Commands:   GetCommands(),
+		BeforeFunc: createCluster("Cluster", kapsuleVersion, 1, "GP1-XS"),
+		Cmd:        "scw k8s cluster wait {{ .Cluster.ID }} wait-for-pools=true",
+		Check: core.TestCheckCombine(
+			core.TestCheckGolden(),
+			core.TestCheckExitCode(0),
+		),
+		AfterFunc: deleteCluster("Cluster"),
+	}))
+}
internal/namespaces/k8s/v1/helpers_test.go (4 changes: 2 additions & 2 deletions)
@@ -19,10 +19,10 @@ const (
 
 // createCluster creates a basic cluster with "poolSize" dev1-m as nodes, the given version and
 // register it in the context Meta at metaKey.
-func createCluster(metaKey string, version string, poolSize int) core.BeforeFunc {
+func createCluster(metaKey string, version string, poolSize int, nodeType string) core.BeforeFunc {
 	return core.ExecStoreBeforeCmd(
 		metaKey,
-		fmt.Sprintf("scw k8s cluster create name=cli-test version=%s cni=cilium pools.0.node-type=DEV1-M pools.0.size=%d pools.0.name=default", version, poolSize))
+		fmt.Sprintf("scw k8s cluster create name=cli-test version=%s cni=cilium pools.0.node-type=%s pools.0.size=%d pools.0.name=default", version, nodeType, poolSize))
 }
 
 // createClusterAndWaitAndKubeconfig creates a basic cluster with 1 dev1-m as node, the given version and
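As a quick illustration of the new parameter, note that the Sprintf arguments go version, nodeType, poolSize, which differs from the helper's parameter order. This standalone sketch uses placeholder values (the version is not the real kapsuleVersion constant) and just prints the command the helper would run:

package main

import "fmt"

func main() {
	// Same format string as the updated createCluster helper; the values are placeholders.
	version, nodeType, poolSize := "1.19.4", "GP1-XS", 1
	cmd := fmt.Sprintf("scw k8s cluster create name=cli-test version=%s cni=cilium pools.0.node-type=%s pools.0.size=%d pools.0.name=default", version, nodeType, poolSize)
	fmt.Println(cmd)
	// Output:
	// scw k8s cluster create name=cli-test version=1.19.4 cni=cilium pools.0.node-type=GP1-XS pools.0.size=1 pools.0.name=default
}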