This repository has been archived by the owner on Jun 29, 2022. It is now read-only.

Commit

Merge pull request #1013 from kinvolk/invidian/cli-cleanup
cli/cmd: cleanups part 1
invidian authored Oct 6, 2020
2 parents fc57a6c + 3ab2eca commit c01551a
Showing 6 changed files with 81 additions and 50 deletions.
54 changes: 31 additions & 23 deletions cli/cmd/cluster-apply.go
@@ -51,59 +51,65 @@ func init() {
     pf.BoolVarP(&upgradeKubelets, "upgrade-kubelets", "", false, "Experimentally upgrade self-hosted kubelets")
 }
 
-//nolint:funlen
 func runClusterApply(cmd *cobra.Command, args []string) {
     contextLogger := log.WithFields(log.Fields{
         "command": "lokoctl cluster apply",
         "args":    args,
     })
 
-    ex, p, lokoConfig, assetDir := initialize(contextLogger)
+    if err := clusterApply(contextLogger); err != nil {
+        contextLogger.Fatalf("Applying cluster failed: %v", err)
+    }
+}
 
-    exists := clusterExists(contextLogger, ex)
+//nolint:funlen
+func clusterApply(contextLogger *log.Entry) error {
+    c := initialize(contextLogger)
+
+    exists := clusterExists(contextLogger, &c.terraformExecutor)
     if exists && !confirm {
         // TODO: We could plan to a file and use it when installing.
-        if err := ex.Plan(); err != nil {
-            contextLogger.Fatalf("Failed to reconcile cluster state: %v", err)
+        if err := c.terraformExecutor.Plan(); err != nil {
+            return fmt.Errorf("reconciling cluster state: %v", err)
         }
 
         if !askForConfirmation("Do you want to proceed with cluster apply?") {
             contextLogger.Println("Cluster apply cancelled")
 
-            return
+            return nil
         }
     }
 
-    if err := p.Apply(ex); err != nil {
-        contextLogger.Fatalf("Error applying cluster: %v", err)
+    if err := c.platform.Apply(&c.terraformExecutor); err != nil {
+        return fmt.Errorf("applying platform: %v", err)
    }
 
-    fmt.Printf("\nYour configurations are stored in %s\n", assetDir)
+    fmt.Printf("\nYour configurations are stored in %s\n", c.assetDir)
 
-    kubeconfig, err := getKubeconfig(contextLogger, lokoConfig, true)
+    kubeconfig, err := getKubeconfig(contextLogger, c.lokomotiveConfig, true)
     if err != nil {
-        contextLogger.Fatalf("Failed to get kubeconfig: %v", err)
+        return fmt.Errorf("getting kubeconfig: %v", err)
     }
 
-    if err := verifyCluster(kubeconfig, p.Meta().ExpectedNodes); err != nil {
-        contextLogger.Fatalf("Verify cluster: %v", err)
+    if err := verifyCluster(kubeconfig, c.platform.Meta().ExpectedNodes); err != nil {
+        return fmt.Errorf("verifying cluster: %v", err)
     }
 
     // Update all the pre installed namespaces with lokomotive specific label.
     // `lokomotive.kinvolk.io/name: <namespace_name>`.
     if err := updateInstalledNamespaces(kubeconfig); err != nil {
-        contextLogger.Fatalf("Updating installed namespace: %v", err)
+        return fmt.Errorf("updating installed namespace: %v", err)
     }
 
     // Do controlplane upgrades only if cluster already exists and it is not a managed platform.
-    if exists && !p.Meta().Managed {
+    if exists && !c.platform.Meta().Managed {
         fmt.Printf("\nEnsuring that cluster controlplane is up to date.\n")
 
         cu := controlplaneUpdater{
             kubeconfig:    kubeconfig,
-            assetDir:      assetDir,
+            assetDir:      c.assetDir,
             contextLogger: *contextLogger,
-            ex:            *ex,
+            ex:            c.terraformExecutor,
         }
 
         charts := platform.CommonControlPlaneCharts()
@@ -120,28 +126,30 @@ func runClusterApply(cmd *cobra.Command, args []string) {
         }
     }
 
-    if ph, ok := p.(platform.PlatformWithPostApplyHook); ok {
+    if ph, ok := c.platform.(platform.PlatformWithPostApplyHook); ok {
         if err := ph.PostApplyHook(kubeconfig); err != nil {
-            contextLogger.Fatalf("Running platform post install hook failed: %v", err)
+            return fmt.Errorf("running platform post install hook: %v", err)
         }
     }
 
     if skipComponents {
-        return
+        return nil
     }
 
     componentsToApply := []string{}
-    for _, component := range lokoConfig.RootConfig.Components {
+    for _, component := range c.lokomotiveConfig.RootConfig.Components {
         componentsToApply = append(componentsToApply, component.Name)
     }
 
     contextLogger.Println("Applying component configuration")
 
     if len(componentsToApply) > 0 {
-        if err := applyComponents(lokoConfig, kubeconfig, componentsToApply...); err != nil {
-            contextLogger.Fatalf("Applying component configuration failed: %v", err)
+        if err := applyComponents(c.lokomotiveConfig, kubeconfig, componentsToApply...); err != nil {
+            return fmt.Errorf("applying component configuration: %v", err)
         }
     }
+
+    return nil
 }
 
 func verifyCluster(kubeconfig []byte, expectedNodes int) error {
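The change in this file is the pattern the whole cleanup applies: the cobra Run handler only builds the context logger and turns a returned error into Fatalf, while the actual work moves into a function that returns wrapped errors. A minimal, self-contained sketch of that layout — illustrative names only; exampleCmd, exampleApply and doStep are not from the Lokomotive codebase:

package main

import (
    "fmt"

    log "github.com/sirupsen/logrus"
    "github.com/spf13/cobra"
)

var exampleCmd = &cobra.Command{
    Use: "apply",
    Run: runExampleApply,
}

// runExampleApply stays thin: it only wires up logging and process exit.
func runExampleApply(cmd *cobra.Command, args []string) {
    contextLogger := log.WithFields(log.Fields{
        "command": "example apply",
        "args":    args,
    })

    if err := exampleApply(contextLogger); err != nil {
        contextLogger.Fatalf("Applying failed: %v", err)
    }
}

// exampleApply returns errors instead of calling Fatalf, so it can be tested
// and each failure site can add context with fmt.Errorf.
func exampleApply(contextLogger *log.Entry) error {
    if err := doStep(); err != nil {
        return fmt.Errorf("running step: %w", err)
    }

    contextLogger.Println("done")

    return nil
}

func doStep() error { return nil }

func main() {
    if err := exampleCmd.Execute(); err != nil {
        log.Fatal(err)
    }
}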
23 changes: 17 additions & 6 deletions cli/cmd/cluster-destroy.go
@@ -15,6 +15,8 @@
 package cmd
 
 import (
+    "fmt"
+
     log "github.com/sirupsen/logrus"
     "github.com/spf13/cobra"
 )
@@ -40,26 +42,35 @@ func runClusterDestroy(cmd *cobra.Command, args []string) {
         "args":    args,
     })
 
-    ex, p, _, _ := initialize(contextLogger)
+    if err := clusterDestroy(contextLogger); err != nil {
+        contextLogger.Fatalf("Destroying cluster: %v", err)
+    }
+}
 
-    if !clusterExists(contextLogger, ex) {
+func clusterDestroy(contextLogger *log.Entry) error {
+    c := initialize(contextLogger)
+
+    if !clusterExists(contextLogger, &c.terraformExecutor) {
         contextLogger.Println("Cluster already destroyed, nothing to do")
 
-        return
+        return nil
     }
 
     if !confirm {
         confirmation := askForConfirmation("WARNING: This action cannot be undone. Do you really want to destroy the cluster?")
         if !confirmation {
             contextLogger.Println("Cluster destroy canceled")
-            return
+
+            return nil
         }
     }
 
-    if err := p.Destroy(ex); err != nil {
-        contextLogger.Fatalf("Error destroying cluster: %v", err)
+    if err := c.platform.Destroy(&c.terraformExecutor); err != nil {
+        return fmt.Errorf("destroying cluster: %v", err)
    }
 
     contextLogger.Println("Cluster destroyed successfully")
     contextLogger.Println("You can safely remove the assets directory now")
+
+    return nil
 }
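clusterDestroy keeps the interactive guard: unless --confirm is set, the user must answer a prompt before anything is destroyed. askForConfirmation is an existing helper in this package whose body is not part of this diff; a stand-alone prompt of that kind, assuming a simple y/N convention, might look like the following (illustrative only, not the lokoctl implementation):

package main

import (
    "bufio"
    "fmt"
    "os"
    "strings"
)

// askForConfirmation prints a question and returns true only for an explicit
// "y" or "yes" answer; anything else, including a read error, counts as "no".
func askForConfirmation(question string) bool {
    reader := bufio.NewReader(os.Stdin)

    fmt.Printf("%s [y/N]: ", question)

    answer, err := reader.ReadString('\n')
    if err != nil {
        return false
    }

    switch strings.ToLower(strings.TrimSpace(answer)) {
    case "y", "yes":
        return true
    default:
        return false
    }
}

func main() {
    if !askForConfirmation("Do you really want to destroy the cluster?") {
        fmt.Println("Cluster destroy canceled")

        return
    }

    fmt.Println("Destroying...")
}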
30 changes: 22 additions & 8 deletions cli/cmd/cluster.go
@@ -18,7 +18,7 @@ import (
     "fmt"
 
     "github.com/mitchellh/go-homedir"
-    "github.com/sirupsen/logrus"
+    log "github.com/sirupsen/logrus"
     "github.com/spf13/cobra"
     "helm.sh/helm/v3/pkg/action"
     "helm.sh/helm/v3/pkg/chart"
@@ -41,11 +41,20 @@ func init() {
     RootCmd.AddCommand(clusterCmd)
 }
 
+// cluster is a temporary helper struct to aggregate objects which are used
+// for managing the cluster and components.
+type cluster struct {
+    terraformExecutor terraform.Executor
+    platform          platform.Platform
+    lokomotiveConfig  *config.Config
+    assetDir          string
+}
+
 // initialize does common initialization actions between cluster operations
 // and returns created objects to the caller for further use.
-func initialize(contextLogger *logrus.Entry) (*terraform.Executor, platform.Platform, *config.Config, string) {
+func initialize(contextLogger *log.Entry) *cluster {
     lokoConfig, diags := getLokoConfig()
-    if len(diags) > 0 {
+    if diags.HasErrors() {
         contextLogger.Fatal(diags)
     }
 
@@ -85,12 +94,17 @@
 
     ex := initializeTerraform(contextLogger, p, b)
 
-    return ex, p, lokoConfig, assetDir
+    return &cluster{
+        terraformExecutor: *ex,
+        platform:          p,
+        lokomotiveConfig:  lokoConfig,
+        assetDir:          assetDir,
+    }
 }
 
 // initializeTerraform initialized Terraform directory using given backend and platform
 // and returns configured executor.
-func initializeTerraform(contextLogger *logrus.Entry, p platform.Platform, b backend.Backend) *terraform.Executor {
+func initializeTerraform(contextLogger *log.Entry, p platform.Platform, b backend.Backend) *terraform.Executor {
     assetDir, err := homedir.Expand(p.Meta().AssetDir)
     if err != nil {
         contextLogger.Fatalf("Error expanding path: %v", err)
@@ -131,7 +145,7 @@ func initializeTerraform(contextLogger *logrus.Entry, p platform.Platform, b bac
 // clusterExists determines if cluster has already been created by getting all
 // outputs from the Terraform. If there is any output defined, it means 'terraform apply'
 // run at least once.
-func clusterExists(contextLogger *logrus.Entry, ex *terraform.Executor) bool {
+func clusterExists(contextLogger *log.Entry, ex *terraform.Executor) bool {
     o := map[string]interface{}{}
 
     if err := ex.Output("", &o); err != nil {
@@ -144,7 +158,7 @@ func clusterExists(contextLogger *logrus.Entry, ex *terraform.Executor) bool {
 type controlplaneUpdater struct {
     kubeconfig    []byte
     assetDir      string
-    contextLogger logrus.Entry
+    contextLogger log.Entry
     ex            terraform.Executor
 }
 
@@ -176,7 +190,7 @@ func (c controlplaneUpdater) getControlplaneValues(name string) (map[string]inte
 }
 
 func (c controlplaneUpdater) upgradeComponent(component, namespace string) {
-    contextLogger := c.contextLogger.WithFields(logrus.Fields{
+    contextLogger := c.contextLogger.WithFields(log.Fields{
         "action":    "controlplane-upgrade",
         "component": component,
     })
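Two mechanical changes in cluster.go carry most of the cleanup: the logrus import is aliased to log so call sites read log.WithFields(log.Fields{...}), and initialize now returns one struct instead of four values, so callers no longer discard results with blank identifiers (the //nolint:dogsled case that disappears from utils.go below). A small self-contained sketch of the same idea, with invented names (deps, initializeDeps):

package main

import (
    "fmt"

    log "github.com/sirupsen/logrus"
)

// deps mirrors the role of the cluster struct above: it bundles what the
// initializer produces so each caller picks only the fields it needs.
type deps struct {
    name     string
    assetDir string
}

func initializeDeps(contextLogger *log.Entry) *deps {
    contextLogger.Println("initializing")

    return &deps{
        name:     "example",
        assetDir: "/tmp/assets",
    }
}

func main() {
    contextLogger := log.WithFields(log.Fields{
        "command": "example",
    })

    d := initializeDeps(contextLogger)

    // A caller that only needs the asset directory simply ignores the rest,
    // with no blank assignments and no nolint comment.
    fmt.Println(d.assetDir)
}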
10 changes: 4 additions & 6 deletions cli/cmd/component-apply.go
@@ -62,14 +62,12 @@ func runApply(cmd *cobra.Command, args []string) {
     }
 
     lokoConfig, diags := getLokoConfig()
-    if len(diags) > 0 {
+    if diags.HasErrors() {
         contextLogger.Fatal(diags)
     }
 
-    var componentsToApply []string
-    if len(args) > 0 {
-        componentsToApply = append(componentsToApply, args...)
-    } else {
+    componentsToApply := args
+    if len(componentsToApply) == 0 {
         for _, component := range lokoConfig.RootConfig.Components {
             componentsToApply = append(componentsToApply, component.Name)
         }
@@ -97,7 +95,7 @@ func applyComponents(lokoConfig *config.Config, kubeconfig []byte, componentName
 
         componentConfigBody := lokoConfig.LoadComponentConfigBody(componentName)
 
-        if diags := component.LoadConfig(componentConfigBody, lokoConfig.EvalContext); len(diags) > 0 {
+        if diags := component.LoadConfig(componentConfigBody, lokoConfig.EvalContext); diags.HasErrors() {
             fmt.Printf("%v\n", diags)
             return diags
         }
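The switch from len(diags) > 0 to diags.HasErrors() is more than cosmetic: hcl.Diagnostics can carry warnings as well as errors, and the old check aborted on any non-empty list. A minimal demonstration against the upstream github.com/hashicorp/hcl/v2 API:

package main

import (
    "fmt"

    "github.com/hashicorp/hcl/v2"
)

func main() {
    // A diagnostics list holding a single warning: non-empty, but not an error.
    diags := hcl.Diagnostics{
        &hcl.Diagnostic{
            Severity: hcl.DiagWarning,
            Summary:  "deprecated argument",
        },
    }

    // Old check: any diagnostic, including a warning, looks fatal.
    fmt.Println("len(diags) > 0:", len(diags) > 0) // true

    // New check: only real errors trip it, so warnings pass through.
    fmt.Println("diags.HasErrors():", diags.HasErrors()) // false
}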
2 changes: 1 addition & 1 deletion cli/cmd/component-delete.go
@@ -63,7 +63,7 @@ func runDelete(cmd *cobra.Command, args []string) {
     }
 
     lokoConfig, diags := getLokoConfig()
-    if len(diags) > 0 {
+    if diags.HasErrors() {
         contextLogger.Fatal(diags)
     }
 
12 changes: 6 additions & 6 deletions cli/cmd/utils.go
@@ -22,7 +22,7 @@ import (
 
     "github.com/hashicorp/hcl/v2"
     "github.com/mitchellh/go-homedir"
-    "github.com/sirupsen/logrus"
+    log "github.com/sirupsen/logrus"
     "github.com/spf13/viper"
 
     "github.com/kinvolk/lokomotive/pkg/backend"
@@ -86,7 +86,7 @@ func getConfiguredPlatform(lokoConfig *config.Config, require bool) (platform.Pl
 // getKubeconfig finds the right kubeconfig file to use for an action and returns it's content.
 //
 // If platform is required and user do not have it configured, an error is returned.
-func getKubeconfig(contextLogger *logrus.Entry, lokoConfig *config.Config, platformRequired bool) ([]byte, error) {
+func getKubeconfig(contextLogger *log.Entry, lokoConfig *config.Config, platformRequired bool) ([]byte, error) {
     sources, err := getKubeconfigSource(contextLogger, lokoConfig, platformRequired)
     if err != nil {
         return nil, fmt.Errorf("selecting kubeconfig source: %w", err)
@@ -126,7 +126,7 @@ func getKubeconfig(contextLogger *logrus.Entry, lokoConfig *config.Config, platf
 //
 // - kubeconfig from ~/.kube/config file.
 //
-func getKubeconfigSource(contextLogger *logrus.Entry, lokoConfig *config.Config, platformRequired bool) ([]string, error) { //nolint:lll
+func getKubeconfigSource(contextLogger *log.Entry, lokoConfig *config.Config, platformRequired bool) ([]string, error) { //nolint:lll
     // Always try reading platform configuration.
     p, diags := getConfiguredPlatform(lokoConfig, platformRequired)
     if diags.HasErrors() {
@@ -183,15 +183,15 @@ func getLokoConfig() (*config.Config, hcl.Diagnostics) {
 
 // readKubeconfigFromTerraformState initializes Terraform and
 // reads content of cluster kubeconfig file from the Terraform.
-func readKubeconfigFromTerraformState(contextLogger *logrus.Entry) ([]byte, error) {
+func readKubeconfigFromTerraformState(contextLogger *log.Entry) ([]byte, error) {
     contextLogger.Warn("Kubeconfig file not found in assets directory, pulling kubeconfig from " +
         "Terraform state, this might be slow. Run 'lokoctl cluster apply' to fix it.")
 
-    ex, _, _, _ := initialize(contextLogger) //nolint:dogsled
+    c := initialize(contextLogger)
 
     kubeconfig := ""
 
-    if err := ex.Output(kubeconfigTerraformOutputKey, &kubeconfig); err != nil {
+    if err := c.terraformExecutor.Output(kubeconfigTerraformOutputKey, &kubeconfig); err != nil {
         return nil, fmt.Errorf("reading kubeconfig file content from Terraform state: %w", err)
     }
 
