restore_controller.go
/*
Copyright 2017 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller

import (
    "compress/gzip"
    "encoding/json"
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "sort"

    jsonpatch "github.com/evanphx/json-patch"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/client-go/tools/cache"

    api "github.com/heptio/ark/pkg/apis/ark/v1"
    arkv1client "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1"
    informers "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1"
    listers "github.com/heptio/ark/pkg/generated/listers/ark/v1"
    "github.com/heptio/ark/pkg/metrics"
    "github.com/heptio/ark/pkg/persistence"
    "github.com/heptio/ark/pkg/plugin"
    "github.com/heptio/ark/pkg/restore"
    "github.com/heptio/ark/pkg/util/boolptr"
    "github.com/heptio/ark/pkg/util/collections"
    kubeutil "github.com/heptio/ark/pkg/util/kube"
    "github.com/heptio/ark/pkg/util/logging"
)

// nonRestorableResources is a blacklist for the restoration process: any
// resource listed here is always excluded from restores.
var nonRestorableResources = []string{
    "nodes",
    "events",
    "events.events.k8s.io",

    // Don't ever restore backups - if appropriate, they'll be synced in from object storage.
    // https://github.com/heptio/ark/issues/622
    "backups.ark.heptio.com",

    // Restores are cluster-specific, and don't have value moving across clusters.
    // https://github.com/heptio/ark/issues/622
    "restores.ark.heptio.com",
}

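// restoreController watches for new Restore resources and processes them by
// validating the request, locating the referenced backup in object storage,
// running the restore, and uploading the resulting log and results files.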
type restoreController struct {
    *genericController

    namespace              string
    restoreClient          arkv1client.RestoresGetter
    backupClient           arkv1client.BackupsGetter
    restorer               restore.Restorer
    pvProviderExists       bool
    backupLister           listers.BackupLister
    restoreLister          listers.RestoreLister
    backupLocationLister   listers.BackupStorageLocationLister
    snapshotLocationLister listers.VolumeSnapshotLocationLister
    restoreLogLevel        logrus.Level
    defaultBackupLocation  string
    metrics                *metrics.ServerMetrics

    newPluginManager func(logger logrus.FieldLogger) plugin.Manager
    newBackupStore   func(*api.BackupStorageLocation, persistence.ObjectStoreGetter, logrus.FieldLogger) (persistence.BackupStore, error)
}

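// NewRestoreController constructs a restoreController wired to the given
// clients, informers, restorer, and metrics, and registers an informer event
// handler that enqueues only new (unprocessed) Restore objects.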
func NewRestoreController(
    namespace string,
    restoreInformer informers.RestoreInformer,
    restoreClient arkv1client.RestoresGetter,
    backupClient arkv1client.BackupsGetter,
    restorer restore.Restorer,
    backupInformer informers.BackupInformer,
    backupLocationInformer informers.BackupStorageLocationInformer,
    snapshotLocationInformer informers.VolumeSnapshotLocationInformer,
    pvProviderExists bool,
    logger logrus.FieldLogger,
    restoreLogLevel logrus.Level,
    newPluginManager func(logrus.FieldLogger) plugin.Manager,
    defaultBackupLocation string,
    metrics *metrics.ServerMetrics,
) Interface {
    c := &restoreController{
        genericController:      newGenericController("restore", logger),
        namespace:              namespace,
        restoreClient:          restoreClient,
        backupClient:           backupClient,
        restorer:               restorer,
        pvProviderExists:       pvProviderExists,
        backupLister:           backupInformer.Lister(),
        restoreLister:          restoreInformer.Lister(),
        backupLocationLister:   backupLocationInformer.Lister(),
        snapshotLocationLister: snapshotLocationInformer.Lister(),
        restoreLogLevel:        restoreLogLevel,
        defaultBackupLocation:  defaultBackupLocation,
        metrics:                metrics,

        // use variables to refer to these functions so they can be
        // replaced with fakes for testing.
        newPluginManager: newPluginManager,
        newBackupStore:   persistence.NewObjectBackupStore,
    }

    c.syncHandler = c.processRestore
    c.cacheSyncWaiters = append(c.cacheSyncWaiters,
        backupInformer.Informer().HasSynced,
        restoreInformer.Informer().HasSynced,
        backupLocationInformer.Informer().HasSynced,
        snapshotLocationInformer.Informer().HasSynced,
    )

    restoreInformer.Informer().AddEventHandler(
        cache.ResourceEventHandlerFuncs{
            AddFunc: func(obj interface{}) {
                restore := obj.(*api.Restore)

                switch restore.Status.Phase {
                case "", api.RestorePhaseNew:
                    // only process new restores
                default:
                    c.logger.WithFields(logrus.Fields{
                        "restore": kubeutil.NamespaceAndName(restore),
                        "phase":   restore.Status.Phase,
                    }).Debug("Restore is not new, skipping")
                    return
                }

                key, err := cache.MetaNamespaceKeyFunc(restore)
                if err != nil {
                    c.logger.WithError(errors.WithStack(err)).WithField("restore", restore).Error("Error creating queue key, item not added to queue")
                    return
                }
                c.queue.Add(key)
            },
        },
    )

    return c
}

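// processRestore is the controller's sync handler. It validates the Restore,
// marks it InProgress (or FailedValidation), runs the restore from the
// referenced backup, and patches the final phase, warning/error counts, and
// failure reason back to the API server.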
func (c *restoreController) processRestore(key string) error {
    log := c.logger.WithField("key", key)

    log.Debug("Running processRestore")
    ns, name, err := cache.SplitMetaNamespaceKey(key)
    if err != nil {
        log.WithError(err).Error("unable to process restore: error splitting queue key")
        // Return nil here so we don't try to process the key any more
        return nil
    }

    log.Debug("Getting Restore")
    restore, err := c.restoreLister.Restores(ns).Get(name)
    if err != nil {
        return errors.Wrap(err, "error getting Restore")
    }

    // TODO I think this is now unnecessary. We only initially place
    // item with Phase = ("" | New) into the queue. Items will only get
    // re-queued if syncHandler returns an error, which will only
    // happen if there's an error updating Phase from its initial
    // state to something else. So any time it's re-queued it will
    // still have its initial state, which we've already confirmed
    // is ("" | New)
    switch restore.Status.Phase {
    case "", api.RestorePhaseNew:
        // only process new restores
    default:
        return nil
    }

    log.Debug("Cloning Restore")
    // store ref to original for creating patch
    original := restore
    // don't modify items in the cache
    restore = restore.DeepCopy()

    pluginManager := c.newPluginManager(log)
    defer pluginManager.CleanupClients()

    actions, err := pluginManager.GetRestoreItemActions()
    if err != nil {
        return errors.Wrap(err, "error initializing restore item actions")
    }

    // validate the restore and fetch the backup
    info := c.validateAndComplete(restore, pluginManager)
    backupScheduleName := restore.Spec.ScheduleName

    // Register attempts after validation so we don't have to fetch the backup multiple times
    c.metrics.RegisterRestoreAttempt(backupScheduleName)
    if len(restore.Status.ValidationErrors) > 0 {
        restore.Status.Phase = api.RestorePhaseFailedValidation
        c.metrics.RegisterRestoreValidationFailed(backupScheduleName)
    } else {
        restore.Status.Phase = api.RestorePhaseInProgress
    }

    // patch to update status and persist to API
    updatedRestore, err := patchRestore(original, restore, c.restoreClient)
    if err != nil {
        return errors.Wrapf(err, "error updating Restore phase to %s", restore.Status.Phase)
    }
    // store ref to just-updated item for creating patch
    original = updatedRestore
    restore = updatedRestore.DeepCopy()

    if restore.Status.Phase == api.RestorePhaseFailedValidation {
        return nil
    }

    log.Debug("Running restore")
    // execution & upload of restore
    restoreWarnings, restoreErrors, restoreFailure := c.runRestore(
        restore,
        actions,
        info,
        pluginManager,
    )

    restore.Status.Warnings = len(restoreWarnings.Ark) + len(restoreWarnings.Cluster)
    for _, w := range restoreWarnings.Namespaces {
        restore.Status.Warnings += len(w)
    }

    restore.Status.Errors = len(restoreErrors.Ark) + len(restoreErrors.Cluster)
    for _, e := range restoreErrors.Namespaces {
        restore.Status.Errors += len(e)
    }

    if restoreFailure != nil {
        log.Debug("restore failed")
        restore.Status.Phase = api.RestorePhaseFailed
        restore.Status.FailureReason = restoreFailure.Error()
        c.metrics.RegisterRestoreFailed(backupScheduleName)
    } else {
        log.Debug("restore completed")
        // We got through the restore process without failing validation or restore execution
        restore.Status.Phase = api.RestorePhaseCompleted
        c.metrics.RegisterRestoreSuccess(backupScheduleName)
    }

    log.Debug("Updating Restore final status")
    if _, err = patchRestore(original, restore, c.restoreClient); err != nil {
        log.WithError(errors.WithStack(err)).Info("Error updating Restore final status")
    }

    return nil
}

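// backupInfo bundles a Backup with the backup store it was retrieved from.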
type backupInfo struct {
    backup      *api.Backup
    backupStore persistence.BackupStore
}

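// validateAndComplete validates the restore spec, defaults the excluded
// resources and the BackupName/ScheduleName fields, records any validation
// errors on the restore's status, and returns the backup (and its store) that
// the restore will be run from. An empty backupInfo is returned when
// validation fails.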
func (c *restoreController) validateAndComplete(restore *api.Restore, pluginManager plugin.Manager) backupInfo {
    // add non-restorable resources to restore's excluded resources
    excludedResources := sets.NewString(restore.Spec.ExcludedResources...)
    for _, nonrestorable := range nonRestorableResources {
        if !excludedResources.Has(nonrestorable) {
            restore.Spec.ExcludedResources = append(restore.Spec.ExcludedResources, nonrestorable)
        }
    }

    // validate that included resources don't contain any non-restorable resources
    includedResources := sets.NewString(restore.Spec.IncludedResources...)
    for _, nonRestorableResource := range nonRestorableResources {
        if includedResources.Has(nonRestorableResource) {
            restore.Status.ValidationErrors = append(restore.Status.ValidationErrors, fmt.Sprintf("%v are non-restorable resources", nonRestorableResource))
        }
    }

    // validate included/excluded resources
    for _, err := range collections.ValidateIncludesExcludes(restore.Spec.IncludedResources, restore.Spec.ExcludedResources) {
        restore.Status.ValidationErrors = append(restore.Status.ValidationErrors, fmt.Sprintf("Invalid included/excluded resource lists: %v", err))
    }

    // validate included/excluded namespaces
    for _, err := range collections.ValidateIncludesExcludes(restore.Spec.IncludedNamespaces, restore.Spec.ExcludedNamespaces) {
        restore.Status.ValidationErrors = append(restore.Status.ValidationErrors, fmt.Sprintf("Invalid included/excluded namespace lists: %v", err))
    }

    // validate that PV provider exists if we're restoring PVs
    if boolptr.IsSetToTrue(restore.Spec.RestorePVs) && !c.pvProviderExists {
        restore.Status.ValidationErrors = append(restore.Status.ValidationErrors, "Server is not configured for PV snapshot restores")
    }

    // validate that exactly one of BackupName and ScheduleName has been specified
    if !backupXorScheduleProvided(restore) {
        restore.Status.ValidationErrors = append(restore.Status.ValidationErrors, "Either a backup or schedule must be specified as a source for the restore, but not both")
        return backupInfo{}
    }

    // if ScheduleName is specified, fill in BackupName with the most recent successful backup from
    // the schedule
    if restore.Spec.ScheduleName != "" {
        selector := labels.SelectorFromSet(labels.Set(map[string]string{
            "ark-schedule": restore.Spec.ScheduleName,
        }))

        backups, err := c.backupLister.Backups(c.namespace).List(selector)
        if err != nil {
            restore.Status.ValidationErrors = append(restore.Status.ValidationErrors, "Unable to list backups for schedule")
            return backupInfo{}
        }
        if len(backups) == 0 {
            restore.Status.ValidationErrors = append(restore.Status.ValidationErrors, "No backups found for schedule")
        }

        if backup := mostRecentCompletedBackup(backups); backup != nil {
            restore.Spec.BackupName = backup.Name
        } else {
            restore.Status.ValidationErrors = append(restore.Status.ValidationErrors, "No completed backups found for schedule")
            return backupInfo{}
        }
    }

    info, err := c.fetchBackupInfo(restore.Spec.BackupName, pluginManager)
    if err != nil {
        restore.Status.ValidationErrors = append(restore.Status.ValidationErrors, fmt.Sprintf("Error retrieving backup: %v", err))
        return backupInfo{}
    }

    // Fill in the ScheduleName so it's easier to consume for metrics.
    if restore.Spec.ScheduleName == "" {
        restore.Spec.ScheduleName = info.backup.GetLabels()["ark-schedule"]
    }

    return info
}

// backupXorScheduleProvided returns true if exactly one of BackupName and
// ScheduleName is non-empty for the restore, or false otherwise.
func backupXorScheduleProvided(restore *api.Restore) bool {
    if restore.Spec.BackupName != "" && restore.Spec.ScheduleName != "" {
        return false
    }

    if restore.Spec.BackupName == "" && restore.Spec.ScheduleName == "" {
        return false
    }

    return true
}

// mostRecentCompletedBackup returns the most recent backup that's
// completed from a list of backups.
func mostRecentCompletedBackup(backups []*api.Backup) *api.Backup {
    sort.Slice(backups, func(i, j int) bool {
        // Use .After() because we want descending sort.
        return backups[i].Status.StartTimestamp.After(backups[j].Status.StartTimestamp.Time)
    })

    for _, backup := range backups {
        if backup.Status.Phase == api.BackupPhaseCompleted {
            return backup
        }
    }

    return nil
}

// fetchBackupInfo checks the backup lister for a backup that matches the given name. If it doesn't
// find it, it tries to retrieve it from one of the backup storage locations.
func (c *restoreController) fetchBackupInfo(backupName string, pluginManager plugin.Manager) (backupInfo, error) {
    backup, err := c.backupLister.Backups(c.namespace).Get(backupName)
    if err != nil {
        if !apierrors.IsNotFound(err) {
            return backupInfo{}, errors.WithStack(err)
        }

        log := c.logger.WithField("backupName", backupName)
        log.Debug("Backup not found in backupLister, checking each backup location directly, starting with default...")
        return c.fetchFromBackupStorage(backupName, pluginManager)
    }

    location, err := c.backupLocationLister.BackupStorageLocations(c.namespace).Get(backup.Spec.StorageLocation)
    if err != nil {
        return backupInfo{}, errors.WithStack(err)
    }

    backupStore, err := c.newBackupStore(location, pluginManager, c.logger)
    if err != nil {
        return backupInfo{}, err
    }

    return backupInfo{
        backup:      backup,
        backupStore: backupStore,
    }, nil
}

// fetchFromBackupStorage checks each backup storage location, starting with the default,
// looking for a backup that matches the given backup name.
func (c *restoreController) fetchFromBackupStorage(backupName string, pluginManager plugin.Manager) (backupInfo, error) {
    locations, err := c.backupLocationLister.BackupStorageLocations(c.namespace).List(labels.Everything())
    if err != nil {
        return backupInfo{}, errors.WithStack(err)
    }

    orderedLocations := orderedBackupLocations(locations, c.defaultBackupLocation)

    log := c.logger.WithField("backupName", backupName)
    for _, location := range orderedLocations {
        info, err := c.backupInfoForLocation(location, backupName, pluginManager)
        if err != nil {
            log.WithField("locationName", location.Name).WithError(err).Error("Unable to fetch backup from object storage location")
            continue
        }
        return info, nil
    }

    return backupInfo{}, errors.New("not able to fetch from backup storage")
}

// orderedBackupLocations returns a new slice with the default backup location first (if it exists),
// followed by the rest of the locations in no particular order.
func orderedBackupLocations(locations []*api.BackupStorageLocation, defaultLocationName string) []*api.BackupStorageLocation {
    var result []*api.BackupStorageLocation

    for i := range locations {
        if locations[i].Name == defaultLocationName {
            // put the default location first
            result = append(result, locations[i])
            // append everything before the default
            result = append(result, locations[:i]...)
            // append everything after the default
            result = append(result, locations[i+1:]...)

            return result
        }
    }

    return locations
}

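// backupInfoForLocation fetches the named backup's metadata from the given
// storage location and recreates the Backup API object in the controller's
// namespace so that it exists in this cluster.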
func (c *restoreController) backupInfoForLocation(location *api.BackupStorageLocation, backupName string, pluginManager plugin.Manager) (backupInfo, error) {
    backupStore, err := persistence.NewObjectBackupStore(location, pluginManager, c.logger)
    if err != nil {
        return backupInfo{}, err
    }

    backup, err := backupStore.GetBackupMetadata(backupName)
    if err != nil {
        return backupInfo{}, err
    }

    // ResourceVersion needs to be cleared in order to create the object in the API
    backup.ResourceVersion = ""
    // Clear out the namespace, in case the backup was made in a different cluster, with a different namespace
    backup.Namespace = ""

    backupCreated, err := c.backupClient.Backups(c.namespace).Create(backup)
    if err != nil {
        return backupInfo{}, errors.WithStack(err)
    }

    return backupInfo{
        backup:      backupCreated,
        backupStore: backupStore,
    }, nil
}

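// runRestore downloads the backup contents, executes the restore, and uploads
// the restore log and results to backup storage. It returns the accumulated
// warnings and errors, plus a non-nil restoreFailure when the restore as a
// whole should be marked Failed.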
func (c *restoreController) runRestore(
    restore *api.Restore,
    actions []restore.ItemAction,
    info backupInfo,
    pluginManager plugin.Manager,
) (restoreWarnings, restoreErrors api.RestoreResult, restoreFailure error) {
    logFile, err := ioutil.TempFile("", "")
    if err != nil {
        c.logger.
            WithFields(
                logrus.Fields{
                    "restore": kubeutil.NamespaceAndName(restore),
                    "backup":  restore.Spec.BackupName,
                },
            ).
            WithError(errors.WithStack(err)).
            Error("Error creating log temp file")
        restoreErrors.Ark = append(restoreErrors.Ark, err.Error())
        return
    }
    gzippedLogFile := gzip.NewWriter(logFile)
    // Assuming we successfully uploaded the log file, this will have already been closed below. It is safe to call
    // close multiple times. If we get an error closing this, there's not really anything we can do about it.
    defer gzippedLogFile.Close()
    defer closeAndRemoveFile(logFile, c.logger)

    // Log the backup to both a backup log file and to stdout. This will help see what happened if the upload of the
    // backup log failed for whatever reason.
    logger := logging.DefaultLogger(c.restoreLogLevel)
    logger.Out = io.MultiWriter(os.Stdout, gzippedLogFile)
    log := logger.WithFields(
        logrus.Fields{
            "restore": kubeutil.NamespaceAndName(restore),
            "backup":  restore.Spec.BackupName,
        })

    backupFile, err := downloadToTempFile(restore.Spec.BackupName, info.backupStore, c.logger)
    if err != nil {
        log.WithError(err).Error("Error downloading backup")
        restoreErrors.Ark = append(restoreErrors.Ark, err.Error())
        restoreFailure = err
        return
    }
    defer closeAndRemoveFile(backupFile, c.logger)

    resultsFile, err := ioutil.TempFile("", "")
    if err != nil {
        log.WithError(errors.WithStack(err)).Error("Error creating results temp file")
        restoreErrors.Ark = append(restoreErrors.Ark, err.Error())
        restoreFailure = err
        return
    }
    defer closeAndRemoveFile(resultsFile, c.logger)

    volumeSnapshots, err := info.backupStore.GetBackupVolumeSnapshots(restore.Spec.BackupName)
    if err != nil {
        log.WithError(errors.WithStack(err)).Error("Error fetching volume snapshots")
        restoreErrors.Ark = append(restoreErrors.Ark, err.Error())
        restoreFailure = err
        return
    }

    // Any return statement above this line means a total restore failure
    // Some failures after this line *may* be a total restore failure
    log.Info("starting restore")
    restoreWarnings, restoreErrors = c.restorer.Restore(log, restore, info.backup, volumeSnapshots, backupFile, actions, c.snapshotLocationLister, pluginManager)
    log.Info("restore completed")

    // Try to upload the log file. This is best-effort. If we fail, we'll add to the ark errors.
    if err := gzippedLogFile.Close(); err != nil {
        c.logger.WithError(err).Error("error closing gzippedLogFile")
    }
    // Reset the offset to 0 for reading
    if _, err = logFile.Seek(0, 0); err != nil {
        restoreErrors.Ark = append(restoreErrors.Ark, fmt.Sprintf("error resetting log file offset to 0: %v", err))
        return
    }

    if err := info.backupStore.PutRestoreLog(restore.Spec.BackupName, restore.Name, logFile); err != nil {
        restoreErrors.Ark = append(restoreErrors.Ark, fmt.Sprintf("error uploading log file to backup storage: %v", err))
    }

    m := map[string]api.RestoreResult{
        "warnings": restoreWarnings,
        "errors":   restoreErrors,
    }

    gzippedResultsFile := gzip.NewWriter(resultsFile)

    if err := json.NewEncoder(gzippedResultsFile).Encode(m); err != nil {
        log.WithError(errors.WithStack(err)).Error("Error encoding restore results")
        return
    }
    gzippedResultsFile.Close()

    if _, err = resultsFile.Seek(0, 0); err != nil {
        log.WithError(errors.WithStack(err)).Error("Error resetting results file offset to 0")
        return
    }
    if err := info.backupStore.PutRestoreResults(restore.Spec.BackupName, restore.Name, resultsFile); err != nil {
        log.WithError(errors.WithStack(err)).Error("Error uploading results file to backup storage")
    }

    return
}

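// downloadToTempFile streams the backup contents from the backup store into a
// temporary file and returns the file with its offset reset to the beginning.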
func downloadToTempFile(
    backupName string,
    backupStore persistence.BackupStore,
    logger logrus.FieldLogger,
) (*os.File, error) {
    readCloser, err := backupStore.GetBackupContents(backupName)
    if err != nil {
        return nil, err
    }
    defer readCloser.Close()

    file, err := ioutil.TempFile("", backupName)
    if err != nil {
        return nil, errors.Wrap(err, "error creating Backup temp file")
    }

    n, err := io.Copy(file, readCloser)
    if err != nil {
        return nil, errors.Wrap(err, "error copying Backup to temp file")
    }

    log := logger.WithField("backup", backupName)
    log.WithFields(logrus.Fields{
        "fileName": file.Name(),
        "bytes":    n,
    }).Debug("Copied Backup to file")

    if _, err := file.Seek(0, 0); err != nil {
        return nil, errors.Wrap(err, "error resetting Backup file offset")
    }

    return file, nil
}

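// patchRestore submits a JSON merge patch containing the differences between
// the original and updated Restore objects to the API server.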
func patchRestore(original, updated *api.Restore, client arkv1client.RestoresGetter) (*api.Restore, error) {
    origBytes, err := json.Marshal(original)
    if err != nil {
        return nil, errors.Wrap(err, "error marshalling original restore")
    }

    updatedBytes, err := json.Marshal(updated)
    if err != nil {
        return nil, errors.Wrap(err, "error marshalling updated restore")
    }

    patchBytes, err := jsonpatch.CreateMergePatch(origBytes, updatedBytes)
    if err != nil {
        return nil, errors.Wrap(err, "error creating json merge patch for restore")
    }

    res, err := client.Restores(original.Namespace).Patch(original.Name, types.MergePatchType, patchBytes)
    if err != nil {
        return nil, errors.Wrap(err, "error patching restore")
    }

    return res, nil
}