convert all controllers to use genericController, logContext -> log
Signed-off-by: Steve Kriss <steve@heptio.com>
skriss committed Sep 7, 2018
1 parent 92283a5 commit 94617b3
Showing 5 changed files with 169 additions and 411 deletions.
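For context: this commit replaces each controller's hand-rolled work-queue plumbing with an embedded genericController. A minimal sketch of that type, inferred from how the diff below uses it (newGenericController, syncHandler, cacheSyncWaiters, queue) rather than copied from the committed generic_controller.go — treat the exact field set as an assumption:

// Inferred shape of the embedded helper; the field set is an assumption
// based solely on usage visible in this diff.
type genericController struct {
	name             string
	queue            workqueue.RateLimitingInterface
	logger           logrus.FieldLogger
	syncHandler      func(key string) error // set by each concrete controller
	cacheSyncWaiters []cache.InformerSynced // informers to wait on before processing
}

func newGenericController(name string, logger logrus.FieldLogger) *genericController {
	return &genericController{
		name:   name,
		queue:  workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), name),
		logger: logger,
	}
}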
212 changes: 66 additions & 146 deletions pkg/controller/backup_controller.go
@@ -19,13 +19,11 @@ package controller
import (
"bytes"
"compress/gzip"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"sync"
"time"

jsonpatch "github.com/evanphx/json-patch"
@@ -36,9 +34,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"

api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/backup"
@@ -57,23 +53,20 @@ import (
const backupVersion = 1

type backupController struct {
backupper backup.Backupper
pvProviderExists bool
lister listers.BackupLister
listerSynced cache.InformerSynced
client arkv1client.BackupsGetter
syncHandler func(backupName string) error
queue workqueue.RateLimitingInterface
clock clock.Clock
logger logrus.FieldLogger
logLevel logrus.Level
newPluginManager func(logrus.FieldLogger) plugin.Manager
backupTracker BackupTracker
backupLocationLister listers.BackupStorageLocationLister
backupLocationListerSynced cache.InformerSynced
defaultBackupLocation string
metrics *metrics.ServerMetrics
newBackupStore func(*api.BackupStorageLocation, persistence.ObjectStoreGetter, logrus.FieldLogger) (persistence.BackupStore, error)
*genericController

backupper backup.Backupper
pvProviderExists bool
lister listers.BackupLister
client arkv1client.BackupsGetter
clock clock.Clock
backupLogLevel logrus.Level
newPluginManager func(logrus.FieldLogger) plugin.Manager
backupTracker BackupTracker
backupLocationLister listers.BackupStorageLocationLister
defaultBackupLocation string
metrics *metrics.ServerMetrics
newBackupStore func(*api.BackupStorageLocation, persistence.ObjectStoreGetter, logrus.FieldLogger) (persistence.BackupStore, error)
}

func NewBackupController(
@@ -82,34 +75,35 @@ func NewBackupController(
backupper backup.Backupper,
pvProviderExists bool,
logger logrus.FieldLogger,
logLevel logrus.Level,
backupLogLevel logrus.Level,
newPluginManager func(logrus.FieldLogger) plugin.Manager,
backupTracker BackupTracker,
backupLocationInformer informers.BackupStorageLocationInformer,
defaultBackupLocation string,
metrics *metrics.ServerMetrics,
) Interface {
c := &backupController{
backupper: backupper,
pvProviderExists: pvProviderExists,
lister: backupInformer.Lister(),
listerSynced: backupInformer.Informer().HasSynced,
client: client,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "backup"),
clock: &clock.RealClock{},
logger: logger,
logLevel: logLevel,
newPluginManager: newPluginManager,
backupTracker: backupTracker,
backupLocationLister: backupLocationInformer.Lister(),
backupLocationListerSynced: backupLocationInformer.Informer().HasSynced,
defaultBackupLocation: defaultBackupLocation,
metrics: metrics,
genericController: newGenericController("backup", logger),
backupper: backupper,
pvProviderExists: pvProviderExists,
lister: backupInformer.Lister(),
client: client,
clock: &clock.RealClock{},
backupLogLevel: backupLogLevel,
newPluginManager: newPluginManager,
backupTracker: backupTracker,
backupLocationLister: backupLocationInformer.Lister(),
defaultBackupLocation: defaultBackupLocation,
metrics: metrics,

newBackupStore: persistence.NewObjectBackupStore,
}

c.syncHandler = c.processBackup
c.cacheSyncWaiters = append(c.cacheSyncWaiters,
backupInformer.Informer().HasSynced,
backupLocationInformer.Informer().HasSynced,
)

backupInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
@@ -140,91 +134,17 @@ func NewBackupController(
return c
}
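The AddEventHandler body is collapsed in the hunk above. A typical shape for it, consistent with the surrounding code but reconstructed rather than copied from this commit (the phase check and log message are illustrative):

// Hypothetical reconstruction of the collapsed event handler: enqueue only
// backups that haven't been processed yet, keyed by namespace/name.
backupInformer.Informer().AddEventHandler(
	cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			backup := obj.(*api.Backup)

			// skip anything that's already past the New phase
			switch backup.Status.Phase {
			case "", api.BackupPhaseNew:
			default:
				return
			}

			key, err := cache.MetaNamespaceKeyFunc(backup)
			if err != nil {
				c.logger.WithError(err).Error("error creating queue key, item not added to queue")
				return
			}
			c.queue.Add(key)
		},
	},
)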

// Run is a blocking function that runs the specified number of worker goroutines
// to process items in the work queue. It will return when it receives on the
// ctx.Done() channel.
func (controller *backupController) Run(ctx context.Context, numWorkers int) error {
var wg sync.WaitGroup

defer func() {
controller.logger.Info("Waiting for workers to finish their work")

controller.queue.ShutDown()

// We have to wait here in the deferred function instead of at the bottom of the function body
// because we have to shut down the queue in order for the workers to shut down gracefully, and
// we want to shut down the queue via defer and not at the end of the body.
wg.Wait()

controller.logger.Info("All workers have finished")

}()

controller.logger.Info("Starting BackupController")
defer controller.logger.Info("Shutting down BackupController")

controller.logger.Info("Waiting for caches to sync")
if !cache.WaitForCacheSync(ctx.Done(), controller.listerSynced, controller.backupLocationListerSynced) {
return errors.New("timed out waiting for caches to sync")
}
controller.logger.Info("Caches are synced")

wg.Add(numWorkers)
for i := 0; i < numWorkers; i++ {
go func() {
wait.Until(controller.runWorker, time.Second, ctx.Done())
wg.Done()
}()
}

<-ctx.Done()

return nil
}

func (controller *backupController) runWorker() {
// continually take items off the queue (waits if it's
// empty) until we get a shutdown signal from the queue
for controller.processNextWorkItem() {
}
}

func (controller *backupController) processNextWorkItem() bool {
key, quit := controller.queue.Get()
if quit {
return false
}
// always call done on this item, since if it fails we'll add
// it back with rate-limiting below
defer controller.queue.Done(key)

err := controller.syncHandler(key.(string))
if err == nil {
// If you had no error, tell the queue to stop tracking history for your key. This will reset
// things like failure counts for per-item rate limiting.
controller.queue.Forget(key)
return true
}

controller.logger.WithError(err).WithField("key", key).Error("Error in syncHandler, re-adding item to queue")
// we had an error processing the item so add it back
// into the queue for re-processing with rate-limiting
controller.queue.AddRateLimited(key)

return true
}
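The Run, runWorker, and processNextWorkItem bodies deleted above are exactly the boilerplate genericController absorbs; presumably it carries the same logic once, roughly as sketched here (a presumed implementation mirroring the deleted code, not the committed one):

// Presumed genericController.Run: wait for caches, start workers, and drain
// the queue on shutdown — the same flow each controller previously repeated.
func (c *genericController) Run(ctx context.Context, numWorkers int) error {
	var wg sync.WaitGroup

	defer func() {
		// shutting down the queue unblocks the workers so they can exit
		c.queue.ShutDown()
		wg.Wait()
	}()

	if !cache.WaitForCacheSync(ctx.Done(), c.cacheSyncWaiters...) {
		return errors.New("timed out waiting for caches to sync")
	}

	wg.Add(numWorkers)
	for i := 0; i < numWorkers; i++ {
		go func() {
			defer wg.Done()
			wait.Until(c.runWorker, time.Second, ctx.Done())
		}()
	}

	<-ctx.Done()
	return nil
}

func (c *genericController) runWorker() {
	// processNextWorkItem would be the deleted queue/Forget/AddRateLimited
	// logic above, moved over unchanged.
	for c.processNextWorkItem() {
	}
}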

func (controller *backupController) processBackup(key string) error {
logContext := controller.logger.WithField("key", key)
func (c *backupController) processBackup(key string) error {
log := c.logger.WithField("key", key)

logContext.Debug("Running processBackup")
log.Debug("Running processBackup")
ns, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return errors.Wrap(err, "error splitting queue key")
}

logContext.Debug("Getting backup")
backup, err := controller.lister.Backups(ns).Get(name)
log.Debug("Getting backup")
backup, err := c.lister.Backups(ns).Get(name)
if err != nil {
return errors.Wrap(err, "error getting backup")
}
@@ -244,7 +164,7 @@ func (controller *backupController) processBackup(key string) error {
return nil
}

logContext.Debug("Cloning backup")
log.Debug("Cloning backup")
// store ref to original for creating patch
original := backup
// don't modify items in the cache
@@ -255,19 +175,19 @@

// calculate expiration
if backup.Spec.TTL.Duration > 0 {
backup.Status.Expiration = metav1.NewTime(controller.clock.Now().Add(backup.Spec.TTL.Duration))
backup.Status.Expiration = metav1.NewTime(c.clock.Now().Add(backup.Spec.TTL.Duration))
}

var backupLocation *api.BackupStorageLocation
// validation
if backupLocation, backup.Status.ValidationErrors = controller.getLocationAndValidate(backup, controller.defaultBackupLocation); len(backup.Status.ValidationErrors) > 0 {
if backupLocation, backup.Status.ValidationErrors = c.getLocationAndValidate(backup, c.defaultBackupLocation); len(backup.Status.ValidationErrors) > 0 {
backup.Status.Phase = api.BackupPhaseFailedValidation
} else {
backup.Status.Phase = api.BackupPhaseInProgress
}

// update status
updatedBackup, err := patchBackup(original, backup, controller.client)
updatedBackup, err := patchBackup(original, backup, c.client)
if err != nil {
return errors.Wrapf(err, "error updating Backup status to %s", backup.Status.Phase)
}
@@ -279,25 +199,25 @@
return nil
}

controller.backupTracker.Add(backup.Namespace, backup.Name)
defer controller.backupTracker.Delete(backup.Namespace, backup.Name)
c.backupTracker.Add(backup.Namespace, backup.Name)
defer c.backupTracker.Delete(backup.Namespace, backup.Name)

logContext.Debug("Running backup")
log.Debug("Running backup")
// execution & upload of backup
backupScheduleName := backup.GetLabels()["ark-schedule"]
controller.metrics.RegisterBackupAttempt(backupScheduleName)
c.metrics.RegisterBackupAttempt(backupScheduleName)

if err := controller.runBackup(backup, backupLocation); err != nil {
logContext.WithError(err).Error("backup failed")
if err := c.runBackup(backup, backupLocation); err != nil {
log.WithError(err).Error("backup failed")
backup.Status.Phase = api.BackupPhaseFailed
controller.metrics.RegisterBackupFailed(backupScheduleName)
c.metrics.RegisterBackupFailed(backupScheduleName)
} else {
controller.metrics.RegisterBackupSuccess(backupScheduleName)
c.metrics.RegisterBackupSuccess(backupScheduleName)
}

logContext.Debug("Updating backup's final status")
if _, err := patchBackup(original, backup, controller.client); err != nil {
logContext.WithError(err).Error("error updating backup's final status")
log.Debug("Updating backup's final status")
if _, err := patchBackup(original, backup, c.client); err != nil {
log.WithError(err).Error("error updating backup's final status")
}

return nil
@@ -327,7 +247,7 @@ func patchBackup(original, updated *api.Backup, client arkv1client.BackupsGetter
return res, nil
}
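patchBackup's body is collapsed above; the usual shape for this pattern, consistent with the jsonpatch and types imports at the top of the file, would be the two-way merge-patch sketch below (names and error messages are illustrative, not the committed body):

// Plausible body for patchBackup, reconstructed from the imports and the
// call sites in processBackup.
func patchBackup(original, updated *api.Backup, client arkv1client.BackupsGetter) (*api.Backup, error) {
	origBytes, err := json.Marshal(original)
	if err != nil {
		return nil, errors.Wrap(err, "error marshalling original backup")
	}

	updatedBytes, err := json.Marshal(updated)
	if err != nil {
		return nil, errors.Wrap(err, "error marshalling updated backup")
	}

	// a two-way JSON merge patch sends only the fields that changed
	patchBytes, err := jsonpatch.CreateMergePatch(origBytes, updatedBytes)
	if err != nil {
		return nil, errors.Wrap(err, "error creating json merge patch for backup")
	}

	res, err := client.Backups(original.Namespace).Patch(original.Name, types.MergePatchType, patchBytes)
	if err != nil {
		return nil, errors.Wrap(err, "error patching backup")
	}
	return res, nil
}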

func (controller *backupController) getLocationAndValidate(itm *api.Backup, defaultBackupLocation string) (*api.BackupStorageLocation, []string) {
func (c *backupController) getLocationAndValidate(itm *api.Backup, defaultBackupLocation string) (*api.BackupStorageLocation, []string) {
var validationErrors []string

for _, err := range collections.ValidateIncludesExcludes(itm.Spec.IncludedResources, itm.Spec.ExcludedResources) {
@@ -338,7 +258,7 @@ func (controller *backupController) getLocationAndValidate(itm *api.Backup, defaultBackupLocation string) (*api.BackupStorageLocation, []string) {
validationErrors = append(validationErrors, fmt.Sprintf("Invalid included/excluded namespace lists: %v", err))
}

if !controller.pvProviderExists && itm.Spec.SnapshotVolumes != nil && *itm.Spec.SnapshotVolumes {
if !c.pvProviderExists && itm.Spec.SnapshotVolumes != nil && *itm.Spec.SnapshotVolumes {
validationErrors = append(validationErrors, "Server is not configured for PV snapshots")
}

@@ -353,18 +273,18 @@ func (controller *backupController) getLocationAndValidate(itm *api.Backup, defaultBackupLocation string) (*api.BackupStorageLocation, []string) {
itm.Labels[api.StorageLocationLabel] = itm.Spec.StorageLocation

var backupLocation *api.BackupStorageLocation
backupLocation, err := controller.backupLocationLister.BackupStorageLocations(itm.Namespace).Get(itm.Spec.StorageLocation)
backupLocation, err := c.backupLocationLister.BackupStorageLocations(itm.Namespace).Get(itm.Spec.StorageLocation)
if err != nil {
validationErrors = append(validationErrors, fmt.Sprintf("Error getting backup storage location: %v", err))
}

return backupLocation, validationErrors
}

func (controller *backupController) runBackup(backup *api.Backup, backupLocation *api.BackupStorageLocation) error {
log := controller.logger.WithField("backup", kubeutil.NamespaceAndName(backup))
func (c *backupController) runBackup(backup *api.Backup, backupLocation *api.BackupStorageLocation) error {
log := c.logger.WithField("backup", kubeutil.NamespaceAndName(backup))
log.Info("Starting backup")
backup.Status.StartTimestamp.Time = controller.clock.Now()
backup.Status.StartTimestamp.Time = c.clock.Now()

logFile, err := ioutil.TempFile("", "")
if err != nil {
@@ -374,11 +294,11 @@ func (controller *backupController) runBackup(backup *api.Backup, backupLocation *api.BackupStorageLocation) error {
// Assuming we successfully uploaded the log file, this will have already been closed below. It is safe to call
// close multiple times. If we get an error closing this, there's not really anything we can do about it.
defer gzippedLogFile.Close()
defer closeAndRemoveFile(logFile, controller.logger)
defer closeAndRemoveFile(logFile, c.logger)

// Log the backup to both a backup log file and to stdout. This will help see what happened if the upload of the
// backup log failed for whatever reason.
logger := logging.DefaultLogger(controller.logLevel)
logger := logging.DefaultLogger(c.backupLogLevel)
logger.Out = io.MultiWriter(os.Stdout, gzippedLogFile)
log = logger.WithField("backup", kubeutil.NamespaceAndName(backup))

@@ -390,15 +310,15 @@ func (controller *backupController) runBackup(backup *api.Backup, backupLocation *api.BackupStorageLocation) error {
}
defer closeAndRemoveFile(backupFile, log)

pluginManager := controller.newPluginManager(log)
pluginManager := c.newPluginManager(log)
defer pluginManager.CleanupClients()

actions, err := pluginManager.GetBackupItemActions()
if err != nil {
return err
}

backupStore, err := controller.newBackupStore(backupLocation, pluginManager, log)
backupStore, err := c.newBackupStore(backupLocation, pluginManager, log)
if err != nil {
return err
}
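// Editor's note (assumption): newBackupStore returns a persistence.BackupStore.
// As far as this diff exercises it, the relevant contract is just PutBackup,
// used below to upload the backup JSON, tarball, and log — a hypothetical
// narrowed view, not the full interface:
//
//	type BackupStore interface {
//		PutBackup(name string, metadata, contents, log io.Reader) error
//	}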
@@ -408,7 +328,7 @@
var backupJSONToUpload, backupFileToUpload io.Reader

// Do the actual backup
if err := controller.backupper.Backup(log, backup, backupFile, actions); err != nil {
if err := c.backupper.Backup(log, backup, backupFile, actions); err != nil {
errs = append(errs, err)

backup.Status.Phase = api.BackupPhaseFailed
@@ -418,7 +338,7 @@

// Mark completion timestamp before serializing and uploading.
// Otherwise, the JSON file in object storage has a CompletionTimestamp of 'null'.
backup.Status.CompletionTimestamp.Time = controller.clock.Now()
backup.Status.CompletionTimestamp.Time = c.clock.Now()

backupJSON := new(bytes.Buffer)
if err := encode.EncodeTo(backup, "json", backupJSON); err != nil {
@@ -437,19 +357,19 @@
}

if err := gzippedLogFile.Close(); err != nil {
controller.logger.WithError(err).Error("error closing gzippedLogFile")
c.logger.WithError(err).Error("error closing gzippedLogFile")
}

if err := backupStore.PutBackup(backup.Name, backupJSONToUpload, backupFileToUpload, logFile); err != nil {
errs = append(errs, err)
}

backupScheduleName := backup.GetLabels()["ark-schedule"]
controller.metrics.SetBackupTarballSizeBytesGauge(backupScheduleName, backupSizeBytes)
c.metrics.SetBackupTarballSizeBytesGauge(backupScheduleName, backupSizeBytes)

backupDuration := backup.Status.CompletionTimestamp.Time.Sub(backup.Status.StartTimestamp.Time)
backupDurationSeconds := float64(backupDuration / time.Second)
controller.metrics.RegisterBackupDuration(backupScheduleName, backupDurationSeconds)
c.metrics.RegisterBackupDuration(backupScheduleName, backupDurationSeconds)

log.Info("Backup completed")
