From 79c75718ca0ba7f9b005aa420c96dab31747dc2e Mon Sep 17 00:00:00 2001 From: Xun Jiang Date: Sun, 15 Oct 2023 13:23:06 +0800 Subject: [PATCH 01/10] Change controller-runtime List option from MatchingFields to ListOptions. Signed-off-by: Xun Jiang --- changelogs/unreleased/6958-blackpiglet | 1 + pkg/cmd/cli/nodeagent/server.go | 4 ++-- pkg/cmd/server/server.go | 19 +++++++++++++++---- 3 files changed, 18 insertions(+), 6 deletions(-) create mode 100644 changelogs/unreleased/6958-blackpiglet diff --git a/changelogs/unreleased/6958-blackpiglet b/changelogs/unreleased/6958-blackpiglet new file mode 100644 index 0000000000..7b402258fb --- /dev/null +++ b/changelogs/unreleased/6958-blackpiglet @@ -0,0 +1 @@ +Change controller-runtime List option from MatchingFields to ListOptions \ No newline at end of file diff --git a/pkg/cmd/cli/nodeagent/server.go b/pkg/cmd/cli/nodeagent/server.go index 105e370524..7507023fb2 100644 --- a/pkg/cmd/cli/nodeagent/server.go +++ b/pkg/cmd/cli/nodeagent/server.go @@ -419,7 +419,7 @@ func (s *nodeAgentServer) markDataDownloadsCancel(r *controller.DataDownloadReco func (s *nodeAgentServer) markInProgressPVBsFailed(client ctrlclient.Client) { pvbs := &velerov1api.PodVolumeBackupList{} - if err := client.List(s.ctx, pvbs, &ctrlclient.MatchingFields{"metadata.namespace": s.namespace}); err != nil { + if err := client.List(s.ctx, pvbs, &ctrlclient.ListOptions{Namespace: s.namespace}); err != nil { s.logger.WithError(errors.WithStack(err)).Error("failed to list podvolumebackups") return } @@ -445,7 +445,7 @@ func (s *nodeAgentServer) markInProgressPVBsFailed(client ctrlclient.Client) { func (s *nodeAgentServer) markInProgressPVRsFailed(client ctrlclient.Client) { pvrs := &velerov1api.PodVolumeRestoreList{} - if err := client.List(s.ctx, pvrs, &ctrlclient.MatchingFields{"metadata.namespace": s.namespace}); err != nil { + if err := client.List(s.ctx, pvrs, &ctrlclient.ListOptions{Namespace: s.namespace}); err != nil { s.logger.WithError(errors.WithStack(err)).Error("failed to list podvolumerestores") return } diff --git a/pkg/cmd/server/server.go b/pkg/cmd/server/server.go index bde964aad5..fe32236a2e 100644 --- a/pkg/cmd/server/server.go +++ b/pkg/cmd/server/server.go @@ -39,6 +39,7 @@ import ( corev1api "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" kubeerrs "k8s.io/apimachinery/pkg/util/errors" @@ -1056,7 +1057,7 @@ func markInProgressCRsFailed(ctx context.Context, cfg *rest.Config, scheme *runt func markInProgressBackupsFailed(ctx context.Context, client ctrlclient.Client, namespace string, log logrus.FieldLogger) { backups := &velerov1api.BackupList{} - if err := client.List(ctx, backups, &ctrlclient.MatchingFields{"metadata.namespace": namespace}); err != nil { + if err := client.List(ctx, backups, &ctrlclient.ListOptions{Namespace: namespace}); err != nil { log.WithError(errors.WithStack(err)).Error("failed to list backups") return } @@ -1081,7 +1082,7 @@ func markInProgressBackupsFailed(ctx context.Context, client ctrlclient.Client, func markInProgressRestoresFailed(ctx context.Context, client ctrlclient.Client, namespace string, log logrus.FieldLogger) { restores := &velerov1api.RestoreList{} - if err := client.List(ctx, restores, &ctrlclient.MatchingFields{"metadata.namespace": namespace}); err != nil { + if err := client.List(ctx, restores, &ctrlclient.ListOptions{Namespace: namespace}); err != 
nil { log.WithError(errors.WithStack(err)).Error("failed to list restores") return } @@ -1106,7 +1107,12 @@ func markInProgressRestoresFailed(ctx context.Context, client ctrlclient.Client, func markDataUploadsCancel(ctx context.Context, client ctrlclient.Client, backup velerov1api.Backup, log logrus.FieldLogger) { dataUploads := &velerov2alpha1api.DataUploadList{} - if err := client.List(ctx, dataUploads, &ctrlclient.MatchingFields{"metadata.namespace": backup.GetNamespace()}, &ctrlclient.MatchingLabels{velerov1api.BackupUIDLabel: string(backup.GetUID())}); err != nil { + if err := client.List(ctx, dataUploads, &ctrlclient.ListOptions{ + Namespace: backup.GetNamespace(), + LabelSelector: labels.Set(map[string]string{ + velerov1api.BackupUIDLabel: string(backup.GetUID()), + }).AsSelector(), + }); err != nil { log.WithError(errors.WithStack(err)).Error("failed to list dataUploads") return } @@ -1134,7 +1140,12 @@ func markDataUploadsCancel(ctx context.Context, client ctrlclient.Client, backup func markDataDownloadsCancel(ctx context.Context, client ctrlclient.Client, restore velerov1api.Restore, log logrus.FieldLogger) { dataDownloads := &velerov2alpha1api.DataDownloadList{} - if err := client.List(ctx, dataDownloads, &ctrlclient.MatchingFields{"metadata.namespace": restore.GetNamespace()}, &ctrlclient.MatchingLabels{velerov1api.RestoreUIDLabel: string(restore.GetUID())}); err != nil { + if err := client.List(ctx, dataDownloads, &ctrlclient.ListOptions{ + Namespace: restore.GetNamespace(), + LabelSelector: labels.Set(map[string]string{ + velerov1api.RestoreUIDLabel: string(restore.GetUID()), + }).AsSelector(), + }); err != nil { log.WithError(errors.WithStack(err)).Error("failed to list dataDownloads") return } From 70483ded9078a6ea1c9720520784096890bf8c89 Mon Sep 17 00:00:00 2001 From: lou Date: Tue, 7 Nov 2023 19:12:30 +0800 Subject: [PATCH 02/10] improve discoveryHelper.Refresh() in restore Signed-off-by: lou --- pkg/restore/restore.go | 129 ++++++++++++++++++++++------------------- 1 file changed, 69 insertions(+), 60 deletions(-) diff --git a/pkg/restore/restore.go b/pkg/restore/restore.go index 8f623800ce..0241014f20 100644 --- a/pkg/restore/restore.go +++ b/pkg/restore/restore.go @@ -502,7 +502,7 @@ func (ctx *restoreContext) execute() (results.Result, results.Result) { }() // totalItems: previously discovered items, i: iteration counter. - totalItems, processedItems, existingNamespaces := 0, 0, sets.NewString() + totalItems, processedItems, createdItems, existingNamespaces := 0, 0, 0, sets.NewString() // First restore CRDs. This is needed so that they are available in the cluster // when getOrderedResourceCollection is called again on the whole backup and @@ -525,16 +525,26 @@ func (ctx *restoreContext) execute() (results.Result, results.Result) { var w, e results.Result // Restore this resource, the update channel is set to nil, to avoid misleading value of "totalItems" // more details see #5990 - processedItems, w, e = ctx.processSelectedResource( + processedItems, createdItems, w, e = ctx.processSelectedResource( selectedResource, totalItems, processedItems, + createdItems, existingNamespaces, nil, ) warnings.Merge(&w) errs.Merge(&e) } + // If we just restored custom resource definitions (CRDs), refresh + // discovery because the restored CRDs may have created new APIs that + // didn't previously exist in the cluster, and we want to be able to + // resolve & restore instances of them in subsequent loop iterations. 
+ if createdItems > 0 { + if err := ctx.discoveryHelper.Refresh(); err != nil { + warnings.Add("", errors.Wrap(err, "refresh discovery after restoring CRDs")) + } + } // Restore everything else selectedResourceCollection, _, w, e := ctx.getOrderedResourceCollection( @@ -576,6 +586,7 @@ func (ctx *restoreContext) execute() (results.Result, results.Result) { // reset processedItems and totalItems before processing full resource list processedItems = 0 totalItems = 0 + createdItems = 0 for _, selectedResource := range selectedResourceCollection { totalItems += selectedResource.totalItems } @@ -583,10 +594,11 @@ func (ctx *restoreContext) execute() (results.Result, results.Result) { for _, selectedResource := range selectedResourceCollection { var w, e results.Result // Restore this resource - processedItems, w, e = ctx.processSelectedResource( + processedItems, createdItems, w, e = ctx.processSelectedResource( selectedResource, totalItems, processedItems, + createdItems, existingNamespaces, update, ) @@ -670,9 +682,10 @@ func (ctx *restoreContext) processSelectedResource( selectedResource restoreableResource, totalItems int, processedItems int, + createdItems int, existingNamespaces sets.String, update chan progressUpdate, -) (int, results.Result, results.Result) { +) (int, int, results.Result, results.Result) { warnings, errs := results.Result{}, results.Result{} groupResource := schema.ParseGroupResource(selectedResource.resource) @@ -728,11 +741,15 @@ func (ctx *restoreContext) processSelectedResource( continue } - w, e, _ := ctx.restoreItem(obj, groupResource, selectedItem.targetNamespace) + w, e, _, created := ctx.restoreItem(obj, groupResource, selectedItem.targetNamespace) warnings.Merge(&w) errs.Merge(&e) processedItems++ + if created { + createdItems++ + } + // totalItems keeps the count of items previously known. There // may be additional items restored by plugins. We want to include // the additional items by looking at restoredItems at the same @@ -754,16 +771,7 @@ func (ctx *restoreContext) processSelectedResource( } } - // If we just restored custom resource definitions (CRDs), refresh - // discovery because the restored CRDs may have created new APIs that - // didn't previously exist in the cluster, and we want to be able to - // resolve & restore instances of them in subsequent loop iterations. 
- if groupResource == kuberesource.CustomResourceDefinitions { - if err := ctx.discoveryHelper.Refresh(); err != nil { - warnings.Add("", errors.Wrap(err, "refresh discovery after restoring CRDs")) - } - } - return processedItems, warnings, errs + return processedItems, createdItems, warnings, errs } // getNamespace returns a namespace API object that we should attempt to @@ -1083,10 +1091,9 @@ func (ctx *restoreContext) getResource(groupResource schema.GroupResource, obj * return u, nil } -func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupResource schema.GroupResource, namespace string) (results.Result, results.Result, bool) { - warnings, errs := results.Result{}, results.Result{} - // itemExists bool is used to determine whether to include this item in the "wait for additional items" list - itemExists := false +// itemExists bool is used to determine whether to include this item in the "wait for additional items" list +// itemCreated indicates whether the item was created by this restore +func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupResource schema.GroupResource, namespace string) (warnings, errs results.Result, itemExists, itemCreated bool) { resourceID := getResourceID(groupResource, namespace, obj.GetName()) // Check if group/resource should be restored. We need to do this here since @@ -1098,7 +1105,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso "name": obj.GetName(), "groupResource": groupResource.String(), }).Info("Not restoring item because resource is excluded") - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } // Check if namespace/cluster-scoped resource should be restored. We need @@ -1114,7 +1121,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso "name": obj.GetName(), "groupResource": groupResource.String(), }).Info("Not restoring item because namespace is excluded") - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } // If the namespace scoped resource should be restored, ensure that the @@ -1124,7 +1131,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso _, nsCreated, err := kube.EnsureNamespaceExistsAndIsReady(nsToEnsure, ctx.namespaceClient, ctx.resourceTerminatingTimeout) if err != nil { errs.AddVeleroError(err) - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } // Add the newly created namespace to the list of restored items. 
if nsCreated { @@ -1142,7 +1149,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso "name": obj.GetName(), "groupResource": groupResource.String(), }).Info("Not restoring item because it's cluster-scoped") - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } } @@ -1153,11 +1160,11 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso complete, err := isCompleted(obj, groupResource) if err != nil { errs.Add(namespace, fmt.Errorf("error checking completion of %q: %v", resourceID, err)) - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } if complete { ctx.log.Infof("%s is complete - skipping", kube.NamespaceAndName(obj)) - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } name := obj.GetName() @@ -1171,7 +1178,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso if prevRestoredItemStatus, exists := ctx.restoredItems[itemKey]; exists { ctx.log.Infof("Skipping %s because it's already been restored.", resourceID) itemExists = prevRestoredItemStatus.itemExists - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } ctx.restoredItems[itemKey] = restoredItemStatus{itemExists: itemExists} defer func() { @@ -1195,13 +1202,13 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso // to the interface. if groupResource == kuberesource.Pods && obj.GetAnnotations()[v1.MirrorPodAnnotationKey] != "" { ctx.log.Infof("Not restoring pod because it's a mirror pod") - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } resourceClient, err := ctx.getResourceClient(groupResource, obj, namespace) if err != nil { errs.AddVeleroError(fmt.Errorf("error getting resource client for namespace %q, resource %q: %v", namespace, &groupResource, err)) - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } if groupResource == kuberesource.PersistentVolumes { @@ -1211,7 +1218,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso shouldRenamePV, err := shouldRenamePV(ctx, obj, resourceClient) if err != nil { errs.Add(namespace, err) - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } // Check to see if the claimRef.namespace field needs to be remapped, @@ -1219,7 +1226,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso _, err = remapClaimRefNS(ctx, obj) if err != nil { errs.Add(namespace, err) - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } var shouldRestoreSnapshot bool @@ -1229,7 +1236,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso shouldRestoreSnapshot, err = ctx.shouldRestore(name, resourceClient) if err != nil { errs.Add(namespace, errors.Wrapf(err, "error waiting on in-cluster persistentvolume %s", name)) - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } } else { // If we're renaming the PV, we're going to give it a new random name, @@ -1249,7 +1256,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso updatedObj, err := ctx.pvRestorer.executePVAction(obj) if err != nil { errs.Add(namespace, fmt.Errorf("error executing PVAction for %s: %v", resourceID, err)) - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } obj = 
updatedObj @@ -1266,7 +1273,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso pvName, err = ctx.pvRenamer(oldName) if err != nil { errs.Add(namespace, errors.Wrapf(err, "error renaming PV")) - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } } else { // VolumeSnapshotter could have modified the PV name through @@ -1293,7 +1300,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso // Return early because we don't want to restore the PV itself, we // want to dynamically re-provision it. - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated case hasDeleteReclaimPolicy(obj.Object): ctx.log.Infof("Dynamically re-provisioning persistent volume because it doesn't have a snapshot and its reclaim policy is Delete.") @@ -1301,7 +1308,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso // Return early because we don't want to restore the PV itself, we // want to dynamically re-provision it. - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated default: ctx.log.Infof("Restoring persistent volume as-is because it doesn't have a snapshot and its reclaim policy is not Delete.") @@ -1310,7 +1317,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso _, err = remapClaimRefNS(ctx, obj) if err != nil { errs.Add(namespace, err) - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } obj = resetVolumeBindingInfo(obj) // We call the pvRestorer here to clear out the PV's claimRef.UID, @@ -1318,7 +1325,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso updatedObj, err := ctx.pvRestorer.executePVAction(obj) if err != nil { errs.Add(namespace, fmt.Errorf("error executing PVAction for %s: %v", resourceID, err)) - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } obj = updatedObj } @@ -1328,7 +1335,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso // Clear out non-core metadata fields and status. 
if obj, err = resetMetadataAndStatus(obj); err != nil { errs.Add(namespace, err) - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } ctx.log.Infof("restore status includes excludes: %+v", ctx.resourceStatusIncludesExcludes) @@ -1353,7 +1360,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso }) if err != nil { errs.Add(namespace, fmt.Errorf("error preparing %s: %v", resourceID, err)) - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } // If async plugin started async operation, add it to the ItemOperations list @@ -1382,12 +1389,12 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso } if executeOutput.SkipRestore { ctx.log.Infof("Skipping restore of %s: %v because a registered plugin discarded it", obj.GroupVersionKind().Kind, name) - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } unstructuredObj, ok := executeOutput.UpdatedItem.(*unstructured.Unstructured) if !ok { errs.Add(namespace, fmt.Errorf("%s: unexpected type %T", resourceID, executeOutput.UpdatedItem)) - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } obj = unstructuredObj @@ -1420,7 +1427,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso } } - w, e, additionalItemExists := ctx.restoreItem(additionalObj, additionalItem.GroupResource, additionalItemNamespace) + w, e, additionalItemExists, _ := ctx.restoreItem(additionalObj, additionalItem.GroupResource, additionalItemNamespace) if additionalItemExists { filteredAdditionalItems = append(filteredAdditionalItems, additionalItem) } @@ -1449,7 +1456,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso pvc := new(v1.PersistentVolumeClaim) if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), pvc); err != nil { errs.Add(namespace, err) - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } if pvc.Spec.VolumeName != "" { @@ -1468,7 +1475,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso ctx.log.Infof("Updating persistent volume claim %s/%s to reference renamed persistent volume (%s -> %s)", namespace, name, pvc.Spec.VolumeName, newName) if err := unstructured.SetNestedField(obj.Object, newName, "spec", "volumeName"); err != nil { errs.Add(namespace, err) - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } } } @@ -1499,7 +1506,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso resourceClient, err = ctx.getResourceClient(newGR, obj, obj.GetNamespace()) if err != nil { errs.AddVeleroError(fmt.Errorf("error getting updated resource client for namespace %q, resource %q: %v", namespace, &groupResource, err)) - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } ctx.log.Infof("Attempting to restore %s: %v", obj.GroupVersionKind().Kind, name) @@ -1528,7 +1535,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso isAlreadyExistsError, err := isAlreadyExistsError(ctx, obj, restoreErr, resourceClient) if err != nil { errs.Add(namespace, err) - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } if restoreErr != nil { @@ -1543,7 +1550,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso if err != nil && isAlreadyExistsError { 
ctx.log.Warnf("Unable to retrieve in-cluster version of %s: %v, object won't be restored by velero or have restore labels, and existing resource policy is not applied", kube.NamespaceAndName(obj), err) warnings.Add(namespace, err) - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } } @@ -1557,7 +1564,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso if err != nil { ctx.log.Infof("Error trying to reset metadata for %s: %v", kube.NamespaceAndName(obj), err) warnings.Add(namespace, err) - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } // We know the object from the cluster won't have the backup/restore name @@ -1573,20 +1580,20 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso if err != nil { ctx.log.Infof("error merging secrets for ServiceAccount %s: %v", kube.NamespaceAndName(obj), err) warnings.Add(namespace, err) - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } patchBytes, err := generatePatch(fromCluster, desired) if err != nil { ctx.log.Infof("error generating patch for ServiceAccount %s: %v", kube.NamespaceAndName(obj), err) warnings.Add(namespace, err) - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } if patchBytes == nil { // In-cluster and desired state are the same, so move on to // the next item. - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } _, err = resourceClient.Patch(name, patchBytes) @@ -1635,7 +1642,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso warnings.Add(namespace, e) } } - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } //update backup/restore labels on the unchanged resources if existingResourcePolicy is set as update @@ -1651,22 +1658,24 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso } ctx.log.Infof("Restore of %s, %v skipped: it already exists in the cluster and is the same as the backed up version", obj.GroupVersionKind().Kind, name) - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } // Error was something other than an AlreadyExists. 
if restoreErr != nil { ctx.log.Errorf("error restoring %s: %+v", name, restoreErr) errs.Add(namespace, fmt.Errorf("error restoring %s: %v", resourceID, restoreErr)) - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } + itemCreated = true + shouldRestoreStatus := ctx.resourceStatusIncludesExcludes != nil && ctx.resourceStatusIncludesExcludes.ShouldInclude(groupResource.String()) if shouldRestoreStatus && statusFieldErr != nil { err := fmt.Errorf("could not get status to be restored %s: %v", kube.NamespaceAndName(obj), statusFieldErr) ctx.log.Errorf(err.Error()) errs.Add(namespace, err) - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } ctx.log.Debugf("status field for %s: exists: %v, should restore: %v", groupResource, statusFieldExists, shouldRestoreStatus) // if it should restore status, run a UpdateStatus @@ -1674,7 +1683,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso if err := unstructured.SetNestedField(obj.Object, objStatus, "status"); err != nil { ctx.log.Errorf("could not set status field %s: %v", kube.NamespaceAndName(obj), err) errs.Add(namespace, err) - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } obj.SetResourceVersion(createdObj.GetResourceVersion()) updated, err := resourceClient.UpdateStatus(obj, metav1.UpdateOptions{}) @@ -1693,14 +1702,14 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso if err != nil { ctx.log.Errorf("error generating patch for managed fields %s: %v", kube.NamespaceAndName(obj), err) errs.Add(namespace, err) - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } if patchBytes != nil { if _, err = resourceClient.Patch(name, patchBytes); err != nil { ctx.log.Errorf("error patch for managed fields %s: %v", kube.NamespaceAndName(obj), err) if !apierrors.IsNotFound(err) { errs.Add(namespace, err) - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } } else { ctx.log.Infof("the managed fields for %s is patched", kube.NamespaceAndName(obj)) @@ -1711,7 +1720,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso pod := new(v1.Pod) if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), pod); err != nil { errs.Add(namespace, err) - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } // Do not create podvolumerestore when current restore excludes pv/pvc @@ -1737,7 +1746,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso } } - return warnings, errs, itemExists + return warnings, errs, itemExists, itemCreated } func isAlreadyExistsError(ctx *restoreContext, obj *unstructured.Unstructured, err error, client client.Dynamic) (bool, error) { From ebb21303ab09875a3aebed9228e323111fa2c199 Mon Sep 17 00:00:00 2001 From: lou Date: Tue, 7 Nov 2023 19:50:35 +0800 Subject: [PATCH 03/10] add changelog Signed-off-by: lou --- changelogs/unreleased/7069-27149chen | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelogs/unreleased/7069-27149chen diff --git a/changelogs/unreleased/7069-27149chen b/changelogs/unreleased/7069-27149chen new file mode 100644 index 0000000000..243596d4ad --- /dev/null +++ b/changelogs/unreleased/7069-27149chen @@ -0,0 +1 @@ +improve discoveryHelper.Refresh() in restore \ No newline at end of file From 55a465a941bcba72b10b158ff84a740bda2a03a5 Mon Sep 17 00:00:00 2001 
From: danfengl Date: Mon, 13 Nov 2023 12:47:38 +0000 Subject: [PATCH 04/10] Add E2E test for taking CSI snapshot to PV with retain reclaim policy Signed-off-by: danfengl --- test/e2e/backup/backup.go | 36 +++++++- test/e2e/basic/namespace-mapping.go | 2 +- test/e2e/e2e_suite_test.go | 4 + test/e2e/migration/migration.go | 7 +- test/e2e/pv-backup/pv-backup-filter.go | 4 +- .../e2e/resourcepolicies/resource_policies.go | 3 +- test/e2e/upgrade/upgrade.go | 3 +- test/util/csi/common.go | 7 +- test/util/k8s/common.go | 24 ++++-- test/util/k8s/persistentvolumes.go | 16 +++- test/util/kibishii/kibishii_utils.go | 82 +++++++++++++++++-- test/util/velero/velero_utils.go | 59 +++++++++++++ 12 files changed, 216 insertions(+), 31 deletions(-) diff --git a/test/e2e/backup/backup.go b/test/e2e/backup/backup.go index 52dc7ad8fb..923781e258 100644 --- a/test/e2e/backup/backup.go +++ b/test/e2e/backup/backup.go @@ -31,15 +31,33 @@ import ( . "github.com/vmware-tanzu/velero/test/util/velero" ) +type BackupRestoreTestConfig struct { + useVolumeSnapshots bool + kibishiiPatchSubDir string + isRetainPVTest bool +} + func BackupRestoreWithSnapshots() { - BackupRestoreTest(true) + config := BackupRestoreTestConfig{true, "", false} + BackupRestoreTest(config) } func BackupRestoreWithRestic() { - BackupRestoreTest(false) + config := BackupRestoreTestConfig{false, "", false} + BackupRestoreTest(config) +} + +func BackupRestoreRetainedPVWithSnapshots() { + config := BackupRestoreTestConfig{true, "overlays/sc-reclaim-policy/", true} + BackupRestoreTest(config) +} + +func BackupRestoreRetainedPVWithRestic() { + config := BackupRestoreTestConfig{false, "overlays/sc-reclaim-policy/", true} + BackupRestoreTest(config) } -func BackupRestoreTest(useVolumeSnapshots bool) { +func BackupRestoreTest(backupRestoreTestConfig BackupRestoreTestConfig) { var ( backupName, restoreName, kibishiiNamespace string @@ -48,25 +66,34 @@ func BackupRestoreTest(useVolumeSnapshots bool) { veleroCfg VeleroConfig ) provideSnapshotVolumesParmInBackup = false + useVolumeSnapshots := backupRestoreTestConfig.useVolumeSnapshots BeforeEach(func() { veleroCfg = VeleroCfg + + veleroCfg.KibishiiDirectory = veleroCfg.KibishiiDirectory + backupRestoreTestConfig.kibishiiPatchSubDir veleroCfg.UseVolumeSnapshots = useVolumeSnapshots veleroCfg.UseNodeAgent = !useVolumeSnapshots if useVolumeSnapshots && veleroCfg.CloudProvider == "kind" { Skip("Volume snapshots not supported on kind") } + var err error flag.Parse() UUIDgen, err = uuid.NewRandom() kibishiiNamespace = "k-" + UUIDgen.String() Expect(err).To(Succeed()) + DeleteStorageClass(context.Background(), *veleroCfg.ClientToInstallVelero, KibishiiStorageClassName) }) AfterEach(func() { if !veleroCfg.Debug { By("Clean backups after test", func() { DeleteAllBackups(context.Background(), *veleroCfg.ClientToInstallVelero) + if backupRestoreTestConfig.isRetainPVTest { + CleanAllRetainedPV(context.Background(), *veleroCfg.ClientToInstallVelero) + } + DeleteStorageClass(context.Background(), *veleroCfg.ClientToInstallVelero, KibishiiStorageClassName) }) if veleroCfg.InstallVelero { ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*5) @@ -106,6 +133,9 @@ func BackupRestoreTest(useVolumeSnapshots bool) { }) It("should successfully back up and restore to an additional BackupStorageLocation with unique credentials", func() { + if backupRestoreTestConfig.isRetainPVTest { + Skip("It's tested by 1st test case") + } if veleroCfg.AdditionalBSLProvider == "" { Skip("no additional BSL provider given, not 
running multiple BackupStorageLocation with unique credentials tests") } diff --git a/test/e2e/basic/namespace-mapping.go b/test/e2e/basic/namespace-mapping.go index ea2a8f53a7..dbf98c1f92 100644 --- a/test/e2e/basic/namespace-mapping.go +++ b/test/e2e/basic/namespace-mapping.go @@ -102,7 +102,7 @@ func (n *NamespaceMapping) Verify() error { n.kibishiiData.Levels = len(*n.NSIncluded) + index By(fmt.Sprintf("Verify workload %s after restore ", ns), func() { Expect(KibishiiVerifyAfterRestore(n.Client, ns, - n.Ctx, n.kibishiiData)).To(Succeed(), "Fail to verify workload after restore") + n.Ctx, n.kibishiiData, "")).To(Succeed(), "Fail to verify workload after restore") }) } for _, ns := range *n.NSIncluded { diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index b4cb6b22a3..7bf3a2e974 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -102,6 +102,10 @@ var _ = Describe("[Basic][Restic] Velero tests on cluster using the plugin provi var _ = Describe("[Basic][Snapshot] Velero tests on cluster using the plugin provider for object storage and snapshots for volume backups", BackupRestoreWithSnapshots) +var _ = Describe("[Basic][Snapshot][RetainPV] Velero tests on cluster using the plugin provider for object storage and snapshots for volume backups", BackupRestoreRetainedPVWithSnapshots) + +var _ = Describe("[Basic][Restic][RetainPV] Velero tests on cluster using the plugin provider for object storage and snapshots for volume backups", BackupRestoreRetainedPVWithRestic) + var _ = Describe("[Basic][ClusterResource] Backup/restore of cluster resources", ResourcesCheckTest) var _ = Describe("[Scale][LongTime] Backup/restore of 2500 namespaces", MultiNSBackupRestore) diff --git a/test/e2e/migration/migration.go b/test/e2e/migration/migration.go index a1a5e895c9..da808ba92c 100644 --- a/test/e2e/migration/migration.go +++ b/test/e2e/migration/migration.go @@ -273,15 +273,16 @@ func MigrationTest(useVolumeSnapshots bool, veleroCLI2Version VeleroCLI2Version) } By(fmt.Sprintf("Install Velero in cluster-B (%s) to restore workload", veleroCfg.StandbyCluster), func() { + //Ensure workload of "migrationNamespace" existed in cluster-A ns, err := GetNamespace(context.Background(), *veleroCfg.DefaultClient, migrationNamespace) Expect(ns.Name).To(Equal(migrationNamespace)) - Expect(err).NotTo(HaveOccurred()) + Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("get namespace in cluster-B err: %v", err)) + //Ensure cluster-B is the target cluster Expect(KubectlConfigUseContext(context.Background(), veleroCfg.StandbyCluster)).To(Succeed()) _, err = GetNamespace(context.Background(), *veleroCfg.StandbyClient, migrationNamespace) Expect(err).To(HaveOccurred()) strings.Contains(fmt.Sprint(err), "namespaces \""+migrationNamespace+"\" not found") - fmt.Println(err) veleroCfg.ClientToInstallVelero = veleroCfg.StandbyClient @@ -335,7 +336,7 @@ func MigrationTest(useVolumeSnapshots bool, veleroCLI2Version VeleroCLI2Version) By(fmt.Sprintf("Verify workload %s after restore ", migrationNamespace), func() { Expect(KibishiiVerifyAfterRestore(*veleroCfg.StandbyClient, migrationNamespace, - oneHourTimeout, &KibishiiData)).To(Succeed(), "Fail to verify workload after restore") + oneHourTimeout, &KibishiiData, "")).To(Succeed(), "Fail to verify workload after restore") }) // TODO: delete backup created by case self, not all diff --git a/test/e2e/pv-backup/pv-backup-filter.go b/test/e2e/pv-backup/pv-backup-filter.go index d8de42dd2e..556dfeb702 100644 --- 
a/test/e2e/pv-backup/pv-backup-filter.go +++ b/test/e2e/pv-backup/pv-backup-filter.go @@ -180,7 +180,7 @@ func fileContent(namespace, podName, volume string) string { } func fileExist(ctx context.Context, namespace, podName, volume string) error { - c, err := ReadFileFromPodVolume(ctx, namespace, podName, podName, volume, FILE_NAME) + c, _, err := ReadFileFromPodVolume(ctx, namespace, podName, podName, volume, FILE_NAME) if err != nil { return errors.Wrap(err, fmt.Sprintf("Fail to read file %s from volume %s of pod %s in %s ", FILE_NAME, volume, podName, namespace)) @@ -195,7 +195,7 @@ func fileExist(ctx context.Context, namespace, podName, volume string) error { } } func fileNotExist(ctx context.Context, namespace, podName, volume string) error { - _, err := ReadFileFromPodVolume(ctx, namespace, podName, podName, volume, FILE_NAME) + _, _, err := ReadFileFromPodVolume(ctx, namespace, podName, podName, volume, FILE_NAME) if err != nil { return nil } else { diff --git a/test/e2e/resourcepolicies/resource_policies.go b/test/e2e/resourcepolicies/resource_policies.go index 6f98c5ebda..df96bc3d91 100644 --- a/test/e2e/resourcepolicies/resource_policies.go +++ b/test/e2e/resourcepolicies/resource_policies.go @@ -24,7 +24,6 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" - "github.com/pkg/errors" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -164,7 +163,7 @@ func (r *ResourcePoliciesCase) Verify() error { if vol.Name != volName { continue } - content, err := ReadFileFromPodVolume(r.Ctx, ns, pod.Name, "container-busybox", vol.Name, FileName) + content, _, err := ReadFileFromPodVolume(r.Ctx, ns, pod.Name, "container-busybox", vol.Name, FileName) if i%2 == 0 { Expect(err).To(HaveOccurred(), "Expected file not found") // File should not exist } else { diff --git a/test/e2e/upgrade/upgrade.go b/test/e2e/upgrade/upgrade.go index 6fd4c40ed3..c9e9af90bb 100644 --- a/test/e2e/upgrade/upgrade.go +++ b/test/e2e/upgrade/upgrade.go @@ -29,7 +29,6 @@ import ( . "github.com/vmware-tanzu/velero/test" . "github.com/vmware-tanzu/velero/test/util/k8s" . "github.com/vmware-tanzu/velero/test/util/kibishii" - . "github.com/vmware-tanzu/velero/test/util/providers" . "github.com/vmware-tanzu/velero/test/util/velero" ) @@ -256,7 +255,7 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC By(fmt.Sprintf("Verify workload %s after restore ", upgradeNamespace), func() { Expect(KibishiiVerifyAfterRestore(*veleroCfg.ClientToInstallVelero, upgradeNamespace, - oneHourTimeout, DefaultKibishiiData)).To(Succeed(), "Fail to verify workload after restore") + oneHourTimeout, DefaultKibishiiData, "")).To(Succeed(), "Fail to verify workload after restore") }) }) }) diff --git a/test/util/csi/common.go b/test/util/csi/common.go index e96e865b00..932646f0cf 100644 --- a/test/util/csi/common.go +++ b/test/util/csi/common.go @@ -21,14 +21,12 @@ import ( "fmt" "strings" - "github.com/pkg/errors" - snapshotterClientSet "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned" + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - . 
"github.com/vmware-tanzu/velero/test/util/k8s" ) @@ -128,6 +126,7 @@ func GetCsiSnapshotHandleV1(client TestClient, backupName string) ([]string, err } return snapshotHandleList, nil } + func GetVolumeSnapshotContentNameByPod(client TestClient, podName, namespace, backupName string) (string, error) { pvcList, err := GetPvcByPVCName(context.Background(), namespace, podName) if err != nil { diff --git a/test/util/k8s/common.go b/test/util/k8s/common.go index ed579cb77d..da439f24c7 100644 --- a/test/util/k8s/common.go +++ b/test/util/k8s/common.go @@ -104,7 +104,6 @@ func GetPvcByPVCName(ctx context.Context, namespace, pvcName string) ([]string, Args: []string{"{print $1}"}, } cmds = append(cmds, cmd) - return common.GetListByCmdPipes(ctx, cmds) } @@ -279,15 +278,30 @@ func CreateFileToPod(ctx context.Context, namespace, podName, containerName, vol fmt.Printf("Kubectl exec cmd =%v\n", cmd) return cmd.Run() } -func ReadFileFromPodVolume(ctx context.Context, namespace, podName, containerName, volume, filename string) (string, error) { +func FileExistInPV(ctx context.Context, namespace, podName, containerName, volume, filename string) (bool, error) { + stdout, stderr, err := ReadFileFromPodVolume(ctx, namespace, podName, containerName, volume, filename) + + output := fmt.Sprintf("%s:%s", stdout, stderr) + if strings.Contains(output, fmt.Sprintf("/%s/%s: No such file or directory", volume, filename)) { + return false, nil + } else { + if err == nil { + return true, nil + } else { + return false, errors.Wrap(err, fmt.Sprintf("Fail to read file %s from volume %s of pod %s in %s", + filename, volume, podName, namespace)) + } + } +} +func ReadFileFromPodVolume(ctx context.Context, namespace, podName, containerName, volume, filename string) (string, string, error) { arg := []string{"exec", "-n", namespace, "-c", containerName, podName, "--", "cat", fmt.Sprintf("/%s/%s", volume, filename)} cmd := exec.CommandContext(ctx, "kubectl", arg...) 
fmt.Printf("Kubectl exec cmd =%v\n", cmd) stdout, stderr, err := veleroexec.RunCommand(cmd) - fmt.Print(stdout) - fmt.Print(stderr) - return stdout, err + fmt.Printf("stdout: %s\n", stdout) + fmt.Printf("stderr: %s\n", stderr) + return stdout, stderr, err } func RunCommand(cmdName string, arg []string) string { diff --git a/test/util/k8s/persistentvolumes.go b/test/util/k8s/persistentvolumes.go index f4c8005945..441c1bd108 100644 --- a/test/util/k8s/persistentvolumes.go +++ b/test/util/k8s/persistentvolumes.go @@ -22,10 +22,9 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" - "k8s.io/client-go/util/retry" - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/retry" ) func CreatePersistentVolume(client TestClient, name string) (*corev1.PersistentVolume, error) { @@ -93,3 +92,16 @@ func ClearClaimRefForFailedPVs(ctx context.Context, client TestClient) error { return nil } + +func GetAllPVNames(ctx context.Context, client TestClient) ([]string, error) { + var pvNameList []string + pvList, err := client.ClientGo.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to List PV") + } + + for _, pvName := range pvList.Items { + pvNameList = append(pvNameList, pvName.Name) + } + return pvNameList, nil +} diff --git a/test/util/kibishii/kibishii_utils.go b/test/util/kibishii/kibishii_utils.go index f9d2b00d86..de30dccf44 100644 --- a/test/util/kibishii/kibishii_utils.go +++ b/test/util/kibishii/kibishii_utils.go @@ -51,6 +51,7 @@ type KibishiiData struct { var DefaultKibishiiWorkerCounts = 2 var DefaultKibishiiData = &KibishiiData{2, 10, 10, 1024, 1024, 0, DefaultKibishiiWorkerCounts} +var KibishiiPodNameList = []string{"kibishii-deployment-0", "kibishii-deployment-1"} var KibishiiPVCNameList = []string{"kibishii-data-kibishii-deployment-0", "kibishii-data-kibishii-deployment-1"} var KibishiiStorageClassName = "kibishii-storage-class" @@ -107,6 +108,8 @@ func RunKibishiiTests(veleroCfg VeleroConfig, backupName, restoreName, backupLoc } fmt.Printf("VeleroBackupNamespace done %s\n", time.Now().Format("2006-01-02 15:04:05")) + + // Checkpoint for a successful backup if useVolumeSnapshots { if providerName == "vsphere" { // Wait for uploads started by the Velero Plugin for vSphere to complete @@ -165,11 +168,49 @@ func RunKibishiiTests(veleroCfg VeleroConfig, backupName, restoreName, backupLoc } } + // Modify PV data right after backup. 
If PV's reclaim policy is retain, PV will be restored with the original resource config + fileName := "file-" + kibishiiNamespace + fileBaseContent := fileName + fmt.Printf("Re-populate volume %s\n", time.Now().Format("2006-01-02 15:04:05")) + for _, pod := range KibishiiPodNameList { + // To ensure the Kibishii verification result is accurate + ClearKibishiiData(oneHourTimeout, kibishiiNamespace, pod, "kibishii", "data") + + fileContent := fileBaseContent + pod + err := CreateFileToPod(oneHourTimeout, kibishiiNamespace, pod, "kibishii", "data", + fileName, fileContent) + if err != nil { + return errors.Wrapf(err, "failed to create file %s", fileName) + } + } + fmt.Printf("Re-populate volume done %s\n", time.Now().Format("2006-01-02 15:04:05")) + + pvList := []string{} + if strings.Contains(veleroCfg.KibishiiDirectory, "sc-reclaim-policy") { + // Get leftover PV list for PV cleanup + for _, pvc := range KibishiiPVCNameList { + pv, err := GetPvName(oneHourTimeout, client, pvc, kibishiiNamespace) + if err != nil { + return errors.Wrapf(err, "failed to get PV name of PVC %s", pvc) + } + pvList = append(pvList, pv) + } + } + fmt.Printf("Simulating a disaster by removing namespace %s %s\n", kibishiiNamespace, time.Now().Format("2006-01-02 15:04:05")) if err := DeleteNamespace(oneHourTimeout, client, kibishiiNamespace, true); err != nil { return errors.Wrapf(err, "failed to delete namespace %s", kibishiiNamespace) } + if strings.Contains(veleroCfg.KibishiiDirectory, "sc-reclaim-policy") { + // In the scenario of the CSI PV-retain-policy test, to restore the PV of the backed-up resource, we should make sure + // there are no PVs of the same name left, because in the previous test step the PV's reclaim policy is retain, + // so PVs are not deleted although the workload namespace is destroyed.
+ if err := DeletePVs(oneHourTimeout, *veleroCfg.ClientToInstallVelero, pvList); err != nil { + return errors.Wrapf(err, "failed to delete PVs %v", pvList) + } + } + // the snapshots of AWS may be still in pending status when do the restore, wait for a while // to avoid this https://github.com/vmware-tanzu/velero/issues/1799 // TODO remove this after https://github.com/vmware-tanzu/velero/issues/3533 is fixed @@ -191,10 +232,12 @@ func RunKibishiiTests(veleroCfg VeleroConfig, backupName, restoreName, backupLoc return errors.New(fmt.Sprintf("PVR count %d is not as expected %d", len(pvrs), pvCount)) } } + fmt.Printf("KibishiiVerifyAfterRestore %s\n", time.Now().Format("2006-01-02 15:04:05")) - if err := KibishiiVerifyAfterRestore(client, kibishiiNamespace, oneHourTimeout, DefaultKibishiiData); err != nil { + if err := KibishiiVerifyAfterRestore(client, kibishiiNamespace, oneHourTimeout, DefaultKibishiiData, fileName); err != nil { return errors.Wrapf(err, "Error verifying kibishii after restore") } + fmt.Printf("kibishii test completed successfully %s\n", time.Now().Format("2006-01-02 15:04:05")) return nil } @@ -309,6 +352,15 @@ func waitForKibishiiPods(ctx context.Context, client TestClient, kibishiiNamespa return WaitForPods(ctx, client, kibishiiNamespace, []string{"jump-pad", "etcd0", "etcd1", "etcd2", "kibishii-deployment-0", "kibishii-deployment-1"}) } +func KibishiiGenerateData(oneHourTimeout context.Context, kibishiiNamespace string, kibishiiData *KibishiiData) error { + fmt.Printf("generateData %s\n", time.Now().Format("2006-01-02 15:04:05")) + if err := generateData(oneHourTimeout, kibishiiNamespace, kibishiiData); err != nil { + return errors.Wrap(err, "Failed to generate data") + } + fmt.Printf("generateData done %s\n", time.Now().Format("2006-01-02 15:04:05")) + return nil +} + func KibishiiPrepareBeforeBackup(oneHourTimeout context.Context, client TestClient, providerName, kibishiiNamespace, registryCredentialFile, veleroFeatures, kibishiiDirectory string, useVolumeSnapshots bool, kibishiiData *KibishiiData) error { @@ -338,16 +390,12 @@ func KibishiiPrepareBeforeBackup(oneHourTimeout context.Context, client TestClie if kibishiiData == nil { kibishiiData = DefaultKibishiiData } - fmt.Printf("generateData %s\n", time.Now().Format("2006-01-02 15:04:05")) - if err := generateData(oneHourTimeout, kibishiiNamespace, kibishiiData); err != nil { - return errors.Wrap(err, "Failed to generate data") - } - fmt.Printf("generateData done %s\n", time.Now().Format("2006-01-02 15:04:05")) + if err := KibishiiGenerateData(oneHourTimeout, kibishiiNamespace, kibishiiData); err != nil { + return err + } return nil } func KibishiiVerifyAfterRestore(client TestClient, kibishiiNamespace string, oneHourTimeout context.Context, - kibishiiData *KibishiiData) error { + kibishiiData *KibishiiData, incrementalFileName string) error { if kibishiiData == nil { kibishiiData = DefaultKibishiiData } @@ -357,6 +405,18 @@ func KibishiiVerifyAfterRestore(client TestClient, kibishiiNamespace string, one if err := waitForKibishiiPods(oneHourTimeout, client, kibishiiNamespace); err != nil { return errors.Wrapf(err, "Failed to wait for ready status of kibishii pods in %s", kibishiiNamespace) } + if incrementalFileName != "" { + for _, pod := range KibishiiPodNameList { + exist, err := FileExistInPV(oneHourTimeout, kibishiiNamespace, pod, "kibishii", "data", incrementalFileName) + if err != nil { + return errors.Wrapf(err, "fail to get file %s", incrementalFileName) + } + + if exist { + return errors.New("Unexpected incremental data exists") + }
+ } + } // TODO - check that namespace exists fmt.Printf("running kibishii verify\n") @@ -365,3 +425,11 @@ func KibishiiVerifyAfterRestore(client TestClient, kibishiiNamespace string, one } return nil } + +func ClearKibishiiData(ctx context.Context, namespace, podName, containerName, dir string) error { + arg := []string{"exec", "-n", namespace, "-c", containerName, podName, + "--", "/bin/sh", "-c", "rm -rf /" + dir + "/*"} + cmd := exec.CommandContext(ctx, "kubectl", arg...) + fmt.Printf("Kubectl exec cmd =%v\n", cmd) + return cmd.Run() +} diff --git a/test/util/velero/velero_utils.go b/test/util/velero/velero_utils.go index a106cf5b50..fd0d919e4f 100644 --- a/test/util/velero/velero_utils.go +++ b/test/util/velero/velero_utils.go @@ -1561,3 +1561,62 @@ func InstallTestStorageClasses(path string) error { } return InstallStorageClass(ctx, tmpFile.Name()) } + +func GetPvName(ctx context.Context, client TestClient, pvcName, namespace string) (string, error) { + + pvcList, err := GetPvcByPVCName(context.Background(), namespace, pvcName) + if err != nil { + return "", err + } + + if len(pvcList) != 1 { + return "", errors.New(fmt.Sprintf("Only 1 PVC named %s should be found under namespace %s, but got %d", pvcName, namespace, len(pvcList))) + } + + pvList, err := GetPvByPvc(context.Background(), namespace, pvcList[0]) + if err != nil { + return "", err + } + if len(pvList) != 1 { + return "", errors.New(fmt.Sprintf("Only 1 PV of PVC %s should be found under namespace %s, but got %d", pvcList[0], namespace, len(pvList))) + } + + return pvList[0], nil + +} +func DeletePVs(ctx context.Context, client TestClient, pvList []string) error { + for _, pv := range pvList { + args := []string{"delete", "pv", pv, "--timeout=0s"} + fmt.Println(args) + err := exec.CommandContext(ctx, "kubectl", args...).Run() + if err != nil { + return errors.New(fmt.Sprintf("failed to delete PV %s", pv)) + } + } + return nil +} + +func CleanAllRetainedPV(ctx context.Context, client TestClient) { + + pvNameList, err := GetAllPVNames(ctx, client) + if err != nil { + fmt.Printf("fail to list PVs: %v\n", err) + } + for _, pv := range pvNameList { + args := []string{"patch", "pv", pv, "-p", "{\"spec\":{\"persistentVolumeReclaimPolicy\":\"Delete\"}}"} + fmt.Println(args) + cmd := exec.CommandContext(ctx, "kubectl", args...) + stdout, errMsg, err := veleroexec.RunCommand(cmd) + if err != nil { + fmt.Printf("fail to patch PV %s reclaim policy to delete: stdout: %s, stderr: %s", pv, stdout, errMsg) + } + + args = []string{"delete", "pv", pv, "--timeout=60s"} + fmt.Println(args) + cmd = exec.CommandContext(ctx, "kubectl", args...)
+ stdout, errMsg, err = veleroexec.RunCommand(cmd) + if err != nil { + fmt.Printf("fail to delete PV %s reclaim policy to delete: stdout: %s, stderr: %s", pv, stdout, errMsg) + } + } +} From 507157f8126ecc7965fd9a3ea88b0e66335c1d0b Mon Sep 17 00:00:00 2001 From: Ming Date: Tue, 7 Nov 2023 06:25:13 +0000 Subject: [PATCH 05/10] Add perf test namespace mapping when restore Signed-off-by: Ming --- pkg/cmd/cli/install/install.go | 7 ++++++ pkg/install/deployment.go | 11 +++++++++ pkg/install/resources.go | 2 ++ test/e2e/e2e_suite_test.go | 2 ++ test/perf/Makefile | 24 +++++++++++++++++- test/perf/backup/backup.go | 2 +- test/perf/basic/basic.go | 18 +++++++++++--- test/perf/e2e_suite_test.go | 14 +++++++++++ test/perf/metrics/pod.go | 42 ++++++++++++++++--------------- test/perf/metrics/time.go | 39 +++++++++++++++++++---------- test/perf/restore/restore.go | 20 +++++++++++++-- test/perf/test/test.go | 24 +++++++++--------- test/types.go | 15 +++++------- test/util/k8s/namespace.go | 39 +++++++++++++++++++++++++++++ test/util/metrics/pod.go | 20 ++++++++++++--- test/util/velero/install.go | 45 ++++++++++++++++++++++++++++++++++ 16 files changed, 261 insertions(+), 63 deletions(-) diff --git a/pkg/cmd/cli/install/install.go b/pkg/cmd/cli/install/install.go index 9b8d835f24..fc5784082d 100644 --- a/pkg/cmd/cli/install/install.go +++ b/pkg/cmd/cli/install/install.go @@ -73,6 +73,7 @@ type Options struct { UseVolumeSnapshots bool DefaultRepoMaintenanceFrequency time.Duration GarbageCollectionFrequency time.Duration + PodVolumeOperationTimeout time.Duration Plugins flag.StringArray NoDefaultBackupLocation bool CRDsOnly bool @@ -116,6 +117,7 @@ func (o *Options) BindFlags(flags *pflag.FlagSet) { flags.BoolVar(&o.Wait, "wait", o.Wait, "Wait for Velero deployment to be ready. Optional.") flags.DurationVar(&o.DefaultRepoMaintenanceFrequency, "default-repo-maintain-frequency", o.DefaultRepoMaintenanceFrequency, "How often 'maintain' is run for backup repositories by default. Optional.") flags.DurationVar(&o.GarbageCollectionFrequency, "garbage-collection-frequency", o.GarbageCollectionFrequency, "How often the garbage collection runs for expired backups.(default 1h)") + flags.DurationVar(&o.PodVolumeOperationTimeout, "pod-volume-operation-timeout", o.PodVolumeOperationTimeout, "How long to wait for pod volume operations to complete before timing out(default 4h). Optional.") flags.Var(&o.Plugins, "plugins", "Plugin container images to install into the Velero Deployment") flags.BoolVar(&o.CRDsOnly, "crds-only", o.CRDsOnly, "Only generate CustomResourceDefinition resources. Useful for updating CRDs for an existing Velero install.") flags.StringVar(&o.CACertFile, "cacert", o.CACertFile, "File containing a certificate bundle to use when verifying TLS connections to the object store. 
Optional.") @@ -209,6 +211,7 @@ func (o *Options) AsVeleroOptions() (*install.VeleroOptions, error) { VSLConfig: o.VolumeSnapshotConfig.Data(), DefaultRepoMaintenanceFrequency: o.DefaultRepoMaintenanceFrequency, GarbageCollectionFrequency: o.GarbageCollectionFrequency, + PodVolumeOperationTimeout: o.PodVolumeOperationTimeout, Plugins: o.Plugins, NoDefaultBackupLocation: o.NoDefaultBackupLocation, CACertData: caCertData, @@ -426,5 +429,9 @@ func (o *Options) Validate(c *cobra.Command, args []string, f client.Factory) er return errors.New("--garbage-collection-frequency must be non-negative") } + if o.PodVolumeOperationTimeout < 0 { + return errors.New("--pod-volume-operation-timeout must be non-negative") + } + return nil } diff --git a/pkg/install/deployment.go b/pkg/install/deployment.go index 5ea680dc16..7c1bd7a81f 100644 --- a/pkg/install/deployment.go +++ b/pkg/install/deployment.go @@ -41,6 +41,7 @@ type podTemplateConfig struct { withSecret bool defaultRepoMaintenanceFrequency time.Duration garbageCollectionFrequency time.Duration + podVolumeOperationTimeout time.Duration plugins []string features []string defaultVolumesToFsBackup bool @@ -115,6 +116,12 @@ func WithGarbageCollectionFrequency(val time.Duration) podTemplateOption { } } +func WithPodVolumeOperationTimeout(val time.Duration) podTemplateOption { + return func(c *podTemplateConfig) { + c.podVolumeOperationTimeout = val + } +} + func WithPlugins(plugins []string) podTemplateOption { return func(c *podTemplateConfig) { c.plugins = plugins @@ -212,6 +219,10 @@ func Deployment(namespace string, opts ...podTemplateOption) *appsv1.Deployment args = append(args, fmt.Sprintf("--garbage-collection-frequency=%v", c.garbageCollectionFrequency)) } + if c.podVolumeOperationTimeout > 0 { + args = append(args, fmt.Sprintf("--fs-backup-timeout=%v", c.podVolumeOperationTimeout)) + } + deployment := &appsv1.Deployment{ ObjectMeta: objectMeta(namespace, "velero"), TypeMeta: metav1.TypeMeta{ diff --git a/pkg/install/resources.go b/pkg/install/resources.go index 21aa83ff65..2e9e1bc3e2 100644 --- a/pkg/install/resources.go +++ b/pkg/install/resources.go @@ -246,6 +246,7 @@ type VeleroOptions struct { VSLConfig map[string]string DefaultRepoMaintenanceFrequency time.Duration GarbageCollectionFrequency time.Duration + PodVolumeOperationTimeout time.Duration Plugins []string NoDefaultBackupLocation bool CACertData []byte @@ -335,6 +336,7 @@ func AllResources(o *VeleroOptions) *unstructured.UnstructuredList { WithDefaultRepoMaintenanceFrequency(o.DefaultRepoMaintenanceFrequency), WithServiceAccountName(serviceAccountName), WithGarbageCollectionFrequency(o.GarbageCollectionFrequency), + WithPodVolumeOperationTimeout(o.PodVolumeOperationTimeout), WithUploaderType(o.UploaderType), } diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index b4cb6b22a3..76ef04100d 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -28,6 +28,7 @@ import ( "github.com/onsi/ginkgo/reporters" . "github.com/onsi/gomega" + "github.com/vmware-tanzu/velero/pkg/cmd/cli/install" . "github.com/vmware-tanzu/velero/test" . "github.com/vmware-tanzu/velero/test/e2e/backup" . "github.com/vmware-tanzu/velero/test/e2e/backups" @@ -49,6 +50,7 @@ import ( ) func init() { + VeleroCfg.Options = &install.Options{} flag.StringVar(&VeleroCfg.CloudProvider, "cloud-provider", "", "cloud that Velero will be installed into. Required.") flag.StringVar(&VeleroCfg.ObjectStoreProvider, "object-store-provider", "", "provider of object store plugin. 
Required if cloud-provider is kind, otherwise ignored.") flag.StringVar(&VeleroCfg.BSLBucket, "bucket", "", "name of the object storage bucket where backups from e2e tests should be stored. Required.") diff --git a/test/perf/Makefile b/test/perf/Makefile index f30ee5b995..843ccab87e 100644 --- a/test/perf/Makefile +++ b/test/perf/Makefile @@ -76,6 +76,17 @@ NFS_SERVER_PATH ?= UPLOADER_TYPE ?= TEST_CASE_DESCRIBE ?= 'velero performance test' BACKUP_FOR_RESTORE ?= +Delete_Cluster_Resource ?= false +Debug_Velero_Pod_Restart ?= false +NODE_AGENT_POD_CPU_LIMIT ?= 4 +NODE_AGENT_POD_MEM_LIMIT ?= 4Gi +NODE_AGENT_POD_CPU_REQUEST ?= 2 +NODE_AGENT_POD_MEM_REQUEST ?= 2Gi +VELERO_POD_CPU_LIMIT ?= 4 +VELERO_POD_MEM_LIMIT ?= 4Gi +VELERO_POD_CPU_REQUEST ?= 2 +VELERO_POD_MEM_REQUEST ?= 2Gi +POD_VOLUME_OPERATION_TIMEOUT ?= 6h .PHONY:ginkgo ginkgo: # Make sure ginkgo is in $GOPATH/bin @@ -110,7 +121,18 @@ run: ginkgo -uploader-type=$(UPLOADER_TYPE) \ -nfs-server-path=$(NFS_SERVER_PATH) \ -test-case-describe=$(TEST_CASE_DESCRIBE) \ - -backup-for-restore=$(BACKUP_FOR_RESTORE) + -backup-for-restore=$(BACKUP_FOR_RESTORE) \ + -delete-cluster-resource=$(Delete_Cluster_Resource) \ + -debug-velero-pod-restart=$(Debug_Velero_Pod_Restart) \ + -node-agent-pod-cpu-limit=$(NODE_AGENT_POD_CPU_LIMIT) \ + -node-agent-pod-mem-limit=$(NODE_AGENT_POD_MEM_LIMIT) \ + -node-agent-pod-cpu-request=$(NODE_AGENT_POD_CPU_REQUEST) \ + -node-agent-pod-mem-request=$(NODE_AGENT_POD_MEM_REQUEST) \ + -velero-pod-cpu-limit=$(VELERO_POD_CPU_LIMIT) \ + -velero-pod-mem-limit=$(VELERO_POD_MEM_LIMIT) \ + -velero-pod-cpu-request=$(VELERO_POD_CPU_REQUEST) \ + -velero-pod-mem-request=$(VELERO_POD_MEM_REQUEST) \ + -pod-volume-operation-timeout=$(POD_VOLUME_OPERATION_TIMEOUT) build: ginkgo mkdir -p $(OUTPUT_DIR) diff --git a/test/perf/backup/backup.go b/test/perf/backup/backup.go index 7f9f35de08..3a3c059a56 100644 --- a/test/perf/backup/backup.go +++ b/test/perf/backup/backup.go @@ -32,7 +32,7 @@ type BackupTest struct { func (b *BackupTest) Init() error { b.TestCase.Init() - b.Ctx, b.CtxCancel = context.WithTimeout(context.Background(), 1*time.Hour) + b.Ctx, b.CtxCancel = context.WithTimeout(context.Background(), 6*time.Hour) b.CaseBaseName = "backup" b.BackupName = "backup-" + b.CaseBaseName + "-" + b.UUIDgen diff --git a/test/perf/basic/basic.go b/test/perf/basic/basic.go index 80c6b02185..76bf605a68 100644 --- a/test/perf/basic/basic.go +++ b/test/perf/basic/basic.go @@ -18,12 +18,14 @@ package basic import ( "context" - "fmt" "strings" "time" + "github.com/pkg/errors" + . "github.com/vmware-tanzu/velero/test" . 
"github.com/vmware-tanzu/velero/test/perf/test" + "github.com/vmware-tanzu/velero/test/util/k8s" ) type BasicTest struct { @@ -32,7 +34,7 @@ type BasicTest struct { func (b *BasicTest) Init() error { b.TestCase.Init() - b.Ctx, b.CtxCancel = context.WithTimeout(context.Background(), 1*time.Hour) + b.Ctx, b.CtxCancel = context.WithTimeout(context.Background(), 6*time.Hour) b.CaseBaseName = "backuprestore" b.BackupName = "backup-" + b.CaseBaseName + "-" + b.UUIDgen b.RestoreName = "restore-" + b.CaseBaseName + "-" + b.UUIDgen @@ -49,10 +51,20 @@ func (b *BasicTest) Init() error { "--from-backup", b.BackupName, "--wait", } + if !VeleroCfg.DeleteClusterResource { + joinedNsMapping, err := k8s.GetMappingNamespaces(b.Ctx, b.Client, *b.NSExcluded) + if err != nil { + return errors.Wrapf(err, "failed to get mapping namespaces in init") + } + + b.RestoreArgs = append(b.RestoreArgs, "--namespace-mappings") + b.RestoreArgs = append(b.RestoreArgs, joinedNsMapping) + } + b.TestMsg = &TestMSG{ Desc: "Do backup and restore resources for performance test", FailedMSG: "Failed to backup and restore resources", - Text: fmt.Sprintf("Should backup and restore resources success"), + Text: "Should backup and restore resources success", } return nil } diff --git a/test/perf/e2e_suite_test.go b/test/perf/e2e_suite_test.go index 4d3275dec1..57599ec364 100644 --- a/test/perf/e2e_suite_test.go +++ b/test/perf/e2e_suite_test.go @@ -21,12 +21,14 @@ import ( "flag" "fmt" "testing" + "time" . "github.com/onsi/ginkgo" "github.com/onsi/ginkgo/reporters" . "github.com/onsi/gomega" "github.com/pkg/errors" + "github.com/vmware-tanzu/velero/pkg/cmd/cli/install" . "github.com/vmware-tanzu/velero/test" "github.com/vmware-tanzu/velero/test/perf/backup" @@ -39,6 +41,7 @@ import ( ) func init() { + VeleroCfg.Options = &install.Options{} flag.StringVar(&VeleroCfg.CloudProvider, "cloud-provider", "", "cloud that Velero will be installed into. Required.") flag.StringVar(&VeleroCfg.ObjectStoreProvider, "object-store-provider", "", "provider of object store plugin. Required if cloud-provider is kind, otherwise ignored.") flag.StringVar(&VeleroCfg.BSLBucket, "bucket", "", "name of the object storage bucket where backups from e2e tests should be stored. Required.") @@ -56,6 +59,15 @@ func init() { flag.BoolVar(&VeleroCfg.InstallVelero, "install-velero", true, "install/uninstall velero during the test. Optional.") flag.BoolVar(&VeleroCfg.UseNodeAgent, "use-node-agent", true, "whether deploy node agent daemonset velero during the test. Optional.") flag.StringVar(&VeleroCfg.RegistryCredentialFile, "registry-credential-file", "", "file containing credential for the image registry, follows the same format rules as the ~/.docker/config.json file. Optional.") + flag.StringVar(&VeleroCfg.NodeAgentPodCPULimit, "node-agent-pod-cpu-limit", "4", "CPU limit for node agent pod. Optional.") + flag.StringVar(&VeleroCfg.NodeAgentPodMemLimit, "node-agent-pod-mem-limit", "4Gi", "Memory limit for node agent pod. Optional.") + flag.StringVar(&VeleroCfg.NodeAgentPodCPURequest, "node-agent-pod-cpu-request", "2", "CPU request for node agent pod. Optional.") + flag.StringVar(&VeleroCfg.NodeAgentPodMemRequest, "node-agent-pod-mem-request", "2Gi", "Memory request for node agent pod. Optional.") + flag.StringVar(&VeleroCfg.VeleroPodCPULimit, "velero-pod-cpu-limit", "4", "CPU limit for velero pod. Optional.") + flag.StringVar(&VeleroCfg.VeleroPodMemLimit, "velero-pod-mem-limit", "4Gi", "Memory limit for velero pod. 
Optional.") + flag.StringVar(&VeleroCfg.VeleroPodCPURequest, "velero-pod-cpu-request", "2", "CPU request for velero pod. Optional.") + flag.StringVar(&VeleroCfg.VeleroPodMemRequest, "velero-pod-mem-request", "2Gi", "Memory request for velero pod. Optional.") + flag.DurationVar(&VeleroCfg.PodVolumeOperationTimeout, "pod-volume-operation-timeout", 360*time.Minute, "Timeout for pod volume operations. Optional.") //vmware-tanzu-experiments flag.StringVar(&VeleroCfg.Features, "features", "", "Comma-separated list of features to enable for this Velero process.") flag.StringVar(&VeleroCfg.DefaultCluster, "default-cluster-context", "", "Default cluster context for migration test.") @@ -65,6 +77,8 @@ func init() { flag.StringVar(&VeleroCfg.NFSServerPath, "nfs-server-path", "", "the path of nfs server") flag.StringVar(&VeleroCfg.TestCaseDescribe, "test-case-describe", "velero performance test", "the description for the current test") flag.StringVar(&VeleroCfg.BackupForRestore, "backup-for-restore", "", "the name of backup for restore") + flag.BoolVar(&VeleroCfg.DeleteClusterResource, "delete-cluster-resource", false, "delete cluster resource after test") + flag.BoolVar(&VeleroCfg.DebugVeleroPodRestart, "debug-velero-pod-restart", false, "Switch for debugging velero pod restart.") } func initConfig() error { diff --git a/test/perf/metrics/pod.go b/test/perf/metrics/pod.go index f341fe918a..56572f6728 100644 --- a/test/perf/metrics/pod.go +++ b/test/perf/metrics/pod.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "strings" + "time" "github.com/pkg/errors" @@ -29,6 +30,7 @@ import ( ) const PodResourceDesc = "Resource consumption" +const PodMetricsTimeout = 5 * time.Minute type PodMetrics struct { Client *metricsclientset.Clientset @@ -39,31 +41,31 @@ type PodMetrics struct { } func (p *PodMetrics) Update() error { - cpu, mem, err := metrics.GetPodUsageMetrics(p.Ctx, p.Client, p.PodName, p.Namespace) + cpu, mem, err := metrics.GetPodUsageMetrics(p.Ctx, p.Client, p.PodName, p.Namespace, PodMetricsTimeout) if err != nil { return errors.WithStack(err) - } else { - keyMaxCPU := p.PodName + ":MaxCPU" - curCPU := cpu.MilliValue() - if curCPU > p.Metrics[keyMaxCPU] { - p.Metrics[keyMaxCPU] = curCPU - } + } + keyMaxCPU := p.PodName + ":MaxCPU" + curCPU := cpu.MilliValue() + if curCPU > p.Metrics[keyMaxCPU] { + p.Metrics[keyMaxCPU] = curCPU + } - keyMaxMem := p.PodName + ":MaxMemory" - curMem := mem.MilliValue() - if curMem > p.Metrics[keyMaxMem] { - p.Metrics[keyMaxMem] = curMem - } + keyMaxMem := p.PodName + ":MaxMemory" + curMem := mem.MilliValue() + if curMem > p.Metrics[keyMaxMem] { + p.Metrics[keyMaxMem] = curMem + } - keyAvgCPU := p.PodName + ":AverageCPU" - preAvgCPU := p.Metrics[keyAvgCPU] - p.Metrics[keyAvgCPU] = (preAvgCPU*p.count + curCPU) / (p.count + 1) + keyAvgCPU := p.PodName + ":AverageCPU" + preAvgCPU := p.Metrics[keyAvgCPU] + p.Metrics[keyAvgCPU] = (preAvgCPU*p.count + curCPU) / (p.count + 1) + + keyAvgMem := p.PodName + ":AverageMemory" + preAvgMem := p.Metrics[keyAvgMem] + p.Metrics[keyAvgMem] = (preAvgMem*p.count + curMem) / (p.count + 1) + p.count++ - keyAvgMem := p.PodName + ":AverageMemory" - preAvgMem := p.Metrics[keyAvgMem] - p.Metrics[keyAvgMem] = (preAvgMem*p.count + curMem) / (p.count + 1) - p.count++ - } return nil } diff --git a/test/perf/metrics/time.go b/test/perf/metrics/time.go index 3334cbb297..aa760389d2 100644 --- a/test/perf/metrics/time.go +++ b/test/perf/metrics/time.go @@ -16,40 +16,53 @@ limitations under the License. 
package metrics -import "time" +import ( + "fmt" + "time" +) const TimeCaseDesc = "Time cost" +type TimeSpan struct { + Start time.Time + End time.Time +} + type TimeMetrics struct { Name string - TimeInfo map[string]time.Time // metric name : start timestamp - Metrics map[string]float64 // metric name : time duration + TimeInfo map[string]TimeSpan // metric name : start timestamp } func (t *TimeMetrics) GetMetrics() map[string]string { tmpMetrics := make(map[string]string) - for k, v := range t.Metrics { - duration := time.Duration(v) * time.Second - tmpMetrics[k] = duration.String() + for k, v := range t.TimeInfo { + duration := v.End.Sub(v.Start) + if duration < time.Second { + // For those too shoter time difference we should ignored + // as it may not really execute the logic + continue + } + tmpMetrics[k] = duration.String() + fmt.Sprintf(" (%s - %s)", v.Start.Format(time.RFC3339), v.End.Format(time.RFC3339)) } return tmpMetrics } func (t *TimeMetrics) Start(name string) { - t.TimeInfo[name] = time.Now() + t.TimeInfo[name] = TimeSpan{ + Start: time.Now(), + } } func (t *TimeMetrics) End(name string) { - t.Metrics[name] = time.Now().Sub(t.TimeInfo[name]).Seconds() - if t.Metrics[name] < 1 { - // For those too shoter time difference we should ignored - // as it may not really execute the logic - delete(t.Metrics, name) + if _, ok := t.TimeInfo[name]; !ok { + return } + timeSpan := t.TimeInfo[name] + timeSpan.End = time.Now() + t.TimeInfo[name] = timeSpan } func (t *TimeMetrics) Update() error { - t.Metrics[t.Name] = time.Now().Sub(t.TimeInfo[t.Name]).Seconds() return nil } diff --git a/test/perf/restore/restore.go b/test/perf/restore/restore.go index 025ef49865..f07d5df4d6 100644 --- a/test/perf/restore/restore.go +++ b/test/perf/restore/restore.go @@ -25,6 +25,7 @@ import ( . "github.com/vmware-tanzu/velero/test" . "github.com/vmware-tanzu/velero/test/perf/test" + "github.com/vmware-tanzu/velero/test/util/k8s" . 
"github.com/vmware-tanzu/velero/test/util/velero" ) @@ -34,7 +35,7 @@ type RestoreTest struct { func (r *RestoreTest) Init() error { r.TestCase.Init() - r.Ctx, r.CtxCancel = context.WithTimeout(context.Background(), 1*time.Hour) + r.Ctx, r.CtxCancel = context.WithTimeout(context.Background(), 6*time.Hour) r.CaseBaseName = "restore" r.RestoreName = "restore-" + r.CaseBaseName + "-" + r.UUIDgen @@ -43,7 +44,7 @@ func (r *RestoreTest) Init() error { FailedMSG: "Failed to restore resources", Text: fmt.Sprintf("Should restore resources success"), } - return r.clearUpResourcesBeforRestore() + return nil } func (r *RestoreTest) clearUpResourcesBeforRestore() error { @@ -52,6 +53,11 @@ func (r *RestoreTest) clearUpResourcesBeforRestore() error { } func (r *RestoreTest) Restore() error { + // we need to clear up all resources before do the restore test + err := r.clearUpResourcesBeforRestore() + if err != nil { + return errors.Wrapf(err, "failed to clear up resources before do the restore test") + } var backupName string if VeleroCfg.BackupForRestore != "" { backupName = VeleroCfg.BackupForRestore @@ -71,6 +77,16 @@ func (r *RestoreTest) Restore() error { "--from-backup", r.BackupName, "--wait", } + if !VeleroCfg.DeleteClusterResource { + joinedNsMapping, err := k8s.GetMappingNamespaces(r.Ctx, r.Client, *r.NSExcluded) + if err != nil { + return errors.Wrapf(err, "failed to get mapping namespaces in init") + } + + r.RestoreArgs = append(r.RestoreArgs, "--namespace-mappings") + r.RestoreArgs = append(r.RestoreArgs, joinedNsMapping) + } + return r.TestCase.Restore() } func (r *RestoreTest) Destroy() error { diff --git a/test/perf/test/test.go b/test/perf/test/test.go index 9aed01bb27..c7f80e3fea 100644 --- a/test/perf/test/test.go +++ b/test/perf/test/test.go @@ -97,14 +97,15 @@ func TestFunc(test VeleroBackupRestoreTest) func() { } func (t *TestCase) Init() error { - t.Ctx, t.CtxCancel = context.WithTimeout(context.Background(), 1*time.Hour) + t.Ctx, t.CtxCancel = context.WithTimeout(context.Background(), 6*time.Hour) t.NSExcluded = &[]string{"kube-system", "velero", "default", "kube-public", "kube-node-lease"} t.UUIDgen = t.GenerateUUID() t.Client = *VeleroCfg.DefaultClient t.timer = &metrics.TimeMetrics{ - Name: "Total time cost", - TimeInfo: map[string]time.Time{"Total time cost": time.Now()}, - Metrics: make(map[string]float64), + Name: "Total time cost", + TimeInfo: map[string]metrics.TimeSpan{"Total time cost": { + Start: time.Now(), + }}, } return nil } @@ -131,10 +132,12 @@ func (t *TestCase) Backup() error { } func (t *TestCase) Destroy() error { - By(fmt.Sprintf("Start to destroy namespace %s......", t.CaseBaseName), func() { - Expect(CleanupNamespacesFiterdByExcludes(t.GetTestCase().Ctx, t.Client, *t.NSExcluded)).To(Succeed(), "Could cleanup retrieve namespaces") - Expect(ClearClaimRefForFailedPVs(t.Ctx, t.Client)).To(Succeed(), "Failed to make PV status become to available") - }) + if VeleroCfg.DeleteClusterResource { + By(fmt.Sprintf("Start to destroy namespace %s......", t.CaseBaseName), func() { + Expect(CleanupNamespacesFiterdByExcludes(t.GetTestCase().Ctx, t.Client, *t.NSExcluded)).To(Succeed(), "Could cleanup retrieve namespaces") + Expect(ClearClaimRefForFailedPVs(t.Ctx, t.Client)).To(Succeed(), "Failed to make PV status become to available") + }) + } return nil } @@ -160,7 +163,7 @@ func (t *TestCase) Verify() error { } func (t *TestCase) Clean() error { - if !VeleroCfg.Debug { + if !VeleroCfg.Debug || VeleroCfg.DeleteClusterResource { By("Clean backups and restore after test", 
func() { if len(t.BackupArgs) != 0 { if err := VeleroBackupDelete(t.Ctx, VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, t.BackupName); err != nil { @@ -269,8 +272,7 @@ func (t *TestCase) MonitorMetircs(ctx context.Context, collectors *metrics.Metri timeMetrics := &metrics.TimeMetrics{ Name: t.CaseBaseName, - TimeInfo: make(map[string]time.Time), - Metrics: make(map[string]float64), + TimeInfo: make(map[string]metrics.TimeSpan), } collectors.RegisterOneTimeMetric(timeMetrics) diff --git a/test/types.go b/test/types.go index 327139f35a..360c904735 100644 --- a/test/types.go +++ b/test/types.go @@ -21,6 +21,7 @@ import ( "github.com/google/uuid" + "github.com/vmware-tanzu/velero/pkg/cmd/cli/install" . "github.com/vmware-tanzu/velero/test/util/k8s" ) @@ -40,6 +41,7 @@ var ReportData *Report type VeleroConfig struct { VeleroCfgInPerf + *install.Options VeleroCLI string VeleroImage string VeleroVersion string @@ -66,7 +68,6 @@ type VeleroConfig struct { AddBSLPlugins string InstallVelero bool KibishiiDirectory string - Features string Debug bool GCFrequency string DefaultCluster string @@ -74,12 +75,7 @@ type VeleroConfig struct { ClientToInstallVelero *TestClient DefaultClient *TestClient StandbyClient *TestClient - UploaderType string - UseNodeAgent bool - UseRestic bool ProvideSnapshotsVolumeParam bool - DefaultVolumesToFsBackup bool - UseVolumeSnapshots bool VeleroServerDebugMode bool SnapshotMoveData bool DataMoverPlugin string @@ -90,9 +86,10 @@ type VeleroConfig struct { } type VeleroCfgInPerf struct { - NFSServerPath string - TestCaseDescribe string - BackupForRestore string + NFSServerPath string + TestCaseDescribe string + BackupForRestore string + DeleteClusterResource bool } type SnapshotCheckPoint struct { diff --git a/test/util/k8s/namespace.go b/test/util/k8s/namespace.go index e056dc9905..3c76867560 100644 --- a/test/util/k8s/namespace.go +++ b/test/util/k8s/namespace.go @@ -194,3 +194,42 @@ func NamespaceShouldNotExist(ctx context.Context, client TestClient, namespace s } return nil } + +func GetBackupNamespaces(ctx context.Context, client TestClient, excludeNS []string) ([]string, error) { + namespaces, err := client.ClientGo.CoreV1().Namespaces().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, errors.Wrap(err, "Could not retrieve namespaces") + } + var backupNamespaces []string + for _, checkNamespace := range namespaces.Items { + isExclude := false + for k := range excludeNS { + if checkNamespace.Name == excludeNS[k] { + isExclude = true + } + } + if !isExclude { + backupNamespaces = append(backupNamespaces, checkNamespace.Name) + } + } + return backupNamespaces, nil +} + +func GetMappingNamespaces(ctx context.Context, client TestClient, excludeNS []string) (string, error) { + ns, err := GetBackupNamespaces(ctx, client, excludeNS) + if err != nil { + return "", errors.Wrap(err, "Could not retrieve namespaces") + } else if len(ns) == 0 { + return "", errors.Wrap(err, "Get empty namespaces in backup") + } + + nsMapping := []string{} + for _, n := range ns { + nsMapping = append(nsMapping, n+":mapping-"+n) + } + joinedNsMapping := strings.Join(nsMapping, ",") + if len(joinedNsMapping) > 0 { + joinedNsMapping = joinedNsMapping[:len(joinedNsMapping)-1] + } + return joinedNsMapping, nil +} diff --git a/test/util/metrics/pod.go b/test/util/metrics/pod.go index 331211bb0e..d31f6a481a 100644 --- a/test/util/metrics/pod.go +++ b/test/util/metrics/pod.go @@ -18,21 +18,35 @@ package metrics import ( "context" + "time" corev1 "k8s.io/api/core/v1" 
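
For context on the GetMappingNamespaces helper added in the namespace.go hunk above, here is a hedged, standalone sketch of how the `ns:mapping-ns` pairs end up as a single `--namespace-mappings` argument. Only the namespace slice is hard-coded; everything else follows the shape of the helper. Note that strings.Join inserts the separator only between elements, so the sketch needs no trimming of a trailing character.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Namespaces that survived the exclude filter (hard-coded for the sketch;
	// GetBackupNamespaces derives them from the live cluster).
	backupNamespaces := []string{"app-1", "app-2", "app-3"}

	// Build "<ns>:mapping-<ns>" pairs, the source:target format that
	// `velero restore create --namespace-mappings` expects.
	pairs := make([]string, 0, len(backupNamespaces))
	for _, ns := range backupNamespaces {
		pairs = append(pairs, ns+":mapping-"+ns)
	}

	// No trailing comma to strip: strings.Join only separates elements.
	joined := strings.Join(pairs, ",")

	// Prints: app-1:mapping-app-1,app-2:mapping-app-2,app-3:mapping-app-3
	fmt.Println(joined)
}
```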
"k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/metrics/pkg/apis/metrics/v1beta1" metricsclientset "k8s.io/metrics/pkg/client/clientset/versioned" ) -func GetPodUsageMetrics(ctx context.Context, metricsClient *metricsclientset.Clientset, podName, namespace string) (cpuUsage, memoryUsage resource.Quantity, err error) { +func GetPodUsageMetrics(ctx context.Context, metricsClient *metricsclientset.Clientset, podName, namespace string, podMetricsTimeout time.Duration) (cpuUsage, memoryUsage resource.Quantity, err error) { + ctx, cancel := context.WithTimeout(context.Background(), podMetricsTimeout) + defer cancel() + var podMetrics *v1beta1.PodMetrics - podMetrics, err = metricsClient.MetricsV1beta1().PodMetricses(namespace).Get(ctx, podName, metav1.GetOptions{}) + err = wait.PollImmediateUntil(time.Second, func() (bool, error) { + var err error + podMetrics, err = metricsClient.MetricsV1beta1().PodMetricses(namespace).Get(ctx, podName, metav1.GetOptions{}) + if err != nil { + return false, nil + } + return true, nil + }, ctx.Done()) + if err != nil { return + } else if podMetrics == nil { + return cpuUsage, memoryUsage, nil } - // Variables to store the max and sum of CPU and memory usage // For velero pod we only return the main container for _, container := range podMetrics.Containers { diff --git a/test/util/velero/install.go b/test/util/velero/install.go index b0bbcf7ff1..9427e19eed 100644 --- a/test/util/velero/install.go +++ b/test/util/velero/install.go @@ -120,6 +120,15 @@ func VeleroInstall(ctx context.Context, veleroCfg *VeleroConfig, isStandbyCluste veleroInstallOptions.UploaderType = veleroCfg.UploaderType GCFrequency, _ := time.ParseDuration(veleroCfg.GCFrequency) veleroInstallOptions.GarbageCollectionFrequency = GCFrequency + veleroInstallOptions.PodVolumeOperationTimeout = veleroCfg.PodVolumeOperationTimeout + veleroInstallOptions.NodeAgentPodCPULimit = veleroCfg.NodeAgentPodCPULimit + veleroInstallOptions.NodeAgentPodCPURequest = veleroCfg.NodeAgentPodCPURequest + veleroInstallOptions.NodeAgentPodMemLimit = veleroCfg.NodeAgentPodMemLimit + veleroInstallOptions.NodeAgentPodMemRequest = veleroCfg.NodeAgentPodMemRequest + veleroInstallOptions.VeleroPodCPULimit = veleroCfg.VeleroPodCPULimit + veleroInstallOptions.VeleroPodCPURequest = veleroCfg.VeleroPodCPURequest + veleroInstallOptions.VeleroPodMemLimit = veleroCfg.VeleroPodMemLimit + veleroInstallOptions.VeleroPodMemRequest = veleroCfg.VeleroPodMemRequest err = installVeleroServer(ctx, veleroCfg.VeleroCLI, veleroCfg.CloudProvider, &installOptions{ Options: veleroInstallOptions, @@ -251,6 +260,42 @@ func installVeleroServer(ctx context.Context, cli, cloudProvider string, options args = append(args, fmt.Sprintf("--garbage-collection-frequency=%v", options.GarbageCollectionFrequency)) } + if options.PodVolumeOperationTimeout > 0 { + args = append(args, fmt.Sprintf("--pod-volume-operation-timeout=%v", options.PodVolumeOperationTimeout)) + } + + if options.NodeAgentPodCPULimit != "" { + args = append(args, fmt.Sprintf("--node-agent-pod-cpu-limit=%v", options.NodeAgentPodCPULimit)) + } + + if options.NodeAgentPodCPURequest != "" { + args = append(args, fmt.Sprintf("--node-agent-pod-mem-request=%v", options.NodeAgentPodCPURequest)) + } + + if options.NodeAgentPodMemLimit != "" { + args = append(args, fmt.Sprintf("--node-agent-pod-mem-limit=%v", options.NodeAgentPodMemLimit)) + } + + if options.NodeAgentPodMemRequest != "" { + args = append(args, 
fmt.Sprintf("--node-agent-pod-mem-request=%v", options.NodeAgentPodMemRequest)) + } + + if options.VeleroPodCPULimit != "" { + args = append(args, fmt.Sprintf("--velero-pod-cpu-limit=%v", options.VeleroPodCPULimit)) + } + + if options.VeleroPodCPURequest != "" { + args = append(args, fmt.Sprintf("--velero-pod-cpu-request=%v", options.VeleroPodCPURequest)) + } + + if options.VeleroPodMemLimit != "" { + args = append(args, fmt.Sprintf("--velero-pod-mem-limit=%v", options.VeleroPodMemLimit)) + } + + if options.VeleroPodMemRequest != "" { + args = append(args, fmt.Sprintf("--velero-pod-mem-request=%v", options.VeleroPodMemRequest)) + } + if len(options.UploaderType) > 0 { args = append(args, fmt.Sprintf("--uploader-type=%v", options.UploaderType)) } From 179faf3e333c48519933a27ec70155eb48e35294 Mon Sep 17 00:00:00 2001 From: lou Date: Mon, 27 Nov 2023 17:39:37 +0800 Subject: [PATCH 06/10] update after review Signed-off-by: lou --- pkg/restore/restore.go | 123 ++++++++++++++++++++--------------------- 1 file changed, 61 insertions(+), 62 deletions(-) diff --git a/pkg/restore/restore.go b/pkg/restore/restore.go index 0241014f20..a64e8639a8 100644 --- a/pkg/restore/restore.go +++ b/pkg/restore/restore.go @@ -502,7 +502,7 @@ func (ctx *restoreContext) execute() (results.Result, results.Result) { }() // totalItems: previously discovered items, i: iteration counter. - totalItems, processedItems, createdItems, existingNamespaces := 0, 0, 0, sets.NewString() + totalItems, processedItems, existingNamespaces := 0, 0, sets.NewString() // First restore CRDs. This is needed so that they are available in the cluster // when getOrderedResourceCollection is called again on the whole backup and @@ -525,22 +525,29 @@ func (ctx *restoreContext) execute() (results.Result, results.Result) { var w, e results.Result // Restore this resource, the update channel is set to nil, to avoid misleading value of "totalItems" // more details see #5990 - processedItems, createdItems, w, e = ctx.processSelectedResource( + processedItems, w, e = ctx.processSelectedResource( selectedResource, totalItems, processedItems, - createdItems, existingNamespaces, nil, ) warnings.Merge(&w) errs.Merge(&e) } + + var createdOrUpdatedCRDs bool + for _, restoredItem := range ctx.restoredItems { + if restoredItem.action == itemRestoreResultCreated || restoredItem.action == itemRestoreResultUpdated { + createdOrUpdatedCRDs = true + break + } + } // If we just restored custom resource definitions (CRDs), refresh - // discovery because the restored CRDs may have created new APIs that + // discovery because the restored CRDs may have created or updated new APIs that // didn't previously exist in the cluster, and we want to be able to // resolve & restore instances of them in subsequent loop iterations. 
- if createdItems > 0 { + if createdOrUpdatedCRDs { if err := ctx.discoveryHelper.Refresh(); err != nil { warnings.Add("", errors.Wrap(err, "refresh discovery after restoring CRDs")) } @@ -586,7 +593,6 @@ func (ctx *restoreContext) execute() (results.Result, results.Result) { // reset processedItems and totalItems before processing full resource list processedItems = 0 totalItems = 0 - createdItems = 0 for _, selectedResource := range selectedResourceCollection { totalItems += selectedResource.totalItems } @@ -594,11 +600,10 @@ func (ctx *restoreContext) execute() (results.Result, results.Result) { for _, selectedResource := range selectedResourceCollection { var w, e results.Result // Restore this resource - processedItems, createdItems, w, e = ctx.processSelectedResource( + processedItems, w, e = ctx.processSelectedResource( selectedResource, totalItems, processedItems, - createdItems, existingNamespaces, update, ) @@ -682,10 +687,9 @@ func (ctx *restoreContext) processSelectedResource( selectedResource restoreableResource, totalItems int, processedItems int, - createdItems int, existingNamespaces sets.String, update chan progressUpdate, -) (int, int, results.Result, results.Result) { +) (int, results.Result, results.Result) { warnings, errs := results.Result{}, results.Result{} groupResource := schema.ParseGroupResource(selectedResource.resource) @@ -741,15 +745,11 @@ func (ctx *restoreContext) processSelectedResource( continue } - w, e, _, created := ctx.restoreItem(obj, groupResource, selectedItem.targetNamespace) + w, e, _ := ctx.restoreItem(obj, groupResource, selectedItem.targetNamespace) warnings.Merge(&w) errs.Merge(&e) processedItems++ - if created { - createdItems++ - } - // totalItems keeps the count of items previously known. There // may be additional items restored by plugins. We want to include // the additional items by looking at restoredItems at the same @@ -771,7 +771,7 @@ func (ctx *restoreContext) processSelectedResource( } } - return processedItems, createdItems, warnings, errs + return processedItems, warnings, errs } // getNamespace returns a namespace API object that we should attempt to @@ -1091,9 +1091,10 @@ func (ctx *restoreContext) getResource(groupResource schema.GroupResource, obj * return u, nil } -// itemExists bool is used to determine whether to include this item in the "wait for additional items" list -// itemCreated indicates whether the item was created by this restore -func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupResource schema.GroupResource, namespace string) (warnings, errs results.Result, itemExists, itemCreated bool) { +func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupResource schema.GroupResource, namespace string) (results.Result, results.Result, bool) { + warnings, errs := results.Result{}, results.Result{} + // itemExists bool is used to determine whether to include this item in the "wait for additional items" list + itemExists := false resourceID := getResourceID(groupResource, namespace, obj.GetName()) // Check if group/resource should be restored. We need to do this here since @@ -1105,7 +1106,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso "name": obj.GetName(), "groupResource": groupResource.String(), }).Info("Not restoring item because resource is excluded") - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } // Check if namespace/cluster-scoped resource should be restored. 
We need @@ -1121,7 +1122,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso "name": obj.GetName(), "groupResource": groupResource.String(), }).Info("Not restoring item because namespace is excluded") - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } // If the namespace scoped resource should be restored, ensure that the @@ -1131,7 +1132,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso _, nsCreated, err := kube.EnsureNamespaceExistsAndIsReady(nsToEnsure, ctx.namespaceClient, ctx.resourceTerminatingTimeout) if err != nil { errs.AddVeleroError(err) - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } // Add the newly created namespace to the list of restored items. if nsCreated { @@ -1149,7 +1150,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso "name": obj.GetName(), "groupResource": groupResource.String(), }).Info("Not restoring item because it's cluster-scoped") - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } } @@ -1160,11 +1161,11 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso complete, err := isCompleted(obj, groupResource) if err != nil { errs.Add(namespace, fmt.Errorf("error checking completion of %q: %v", resourceID, err)) - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } if complete { ctx.log.Infof("%s is complete - skipping", kube.NamespaceAndName(obj)) - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } name := obj.GetName() @@ -1178,7 +1179,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso if prevRestoredItemStatus, exists := ctx.restoredItems[itemKey]; exists { ctx.log.Infof("Skipping %s because it's already been restored.", resourceID) itemExists = prevRestoredItemStatus.itemExists - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } ctx.restoredItems[itemKey] = restoredItemStatus{itemExists: itemExists} defer func() { @@ -1202,13 +1203,13 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso // to the interface. 
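
The hunks in this stretch of the patch mechanically propagate one signature change: restoreItem now returns only (warnings, errs, itemExists), and the dropped itemCreated flag is no longer threaded back through every caller, since creation is already recorded in ctx.restoredItems. A small sketch of the new calling convention, with simplified stand-in types and the real argument types elided:

```go
package sketch

// Result is a stand-in for results.Result; the real restoreItem takes an
// *unstructured.Unstructured and a schema.GroupResource.
type Result struct{ errs []error }

// New shape after this patch: no itemCreated return value.
func restoreItem(obj, groupResource, namespace string) (warnings, errs Result, itemExists bool) {
	// ... restore logic elided ...
	return warnings, errs, itemExists
}

// Callers that restore additional (dependent) items keep only those that
// actually exist afterwards, mirroring the filteredAdditionalItems hunk
// further below in this patch.
func restoreAdditionalItems(items []string) []string {
	kept := make([]string, 0, len(items))
	for _, it := range items {
		_, _, exists := restoreItem(it, "", "")
		if exists {
			kept = append(kept, it)
		}
	}
	return kept
}
```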
if groupResource == kuberesource.Pods && obj.GetAnnotations()[v1.MirrorPodAnnotationKey] != "" { ctx.log.Infof("Not restoring pod because it's a mirror pod") - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } resourceClient, err := ctx.getResourceClient(groupResource, obj, namespace) if err != nil { errs.AddVeleroError(fmt.Errorf("error getting resource client for namespace %q, resource %q: %v", namespace, &groupResource, err)) - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } if groupResource == kuberesource.PersistentVolumes { @@ -1218,7 +1219,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso shouldRenamePV, err := shouldRenamePV(ctx, obj, resourceClient) if err != nil { errs.Add(namespace, err) - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } // Check to see if the claimRef.namespace field needs to be remapped, @@ -1226,7 +1227,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso _, err = remapClaimRefNS(ctx, obj) if err != nil { errs.Add(namespace, err) - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } var shouldRestoreSnapshot bool @@ -1236,7 +1237,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso shouldRestoreSnapshot, err = ctx.shouldRestore(name, resourceClient) if err != nil { errs.Add(namespace, errors.Wrapf(err, "error waiting on in-cluster persistentvolume %s", name)) - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } } else { // If we're renaming the PV, we're going to give it a new random name, @@ -1256,7 +1257,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso updatedObj, err := ctx.pvRestorer.executePVAction(obj) if err != nil { errs.Add(namespace, fmt.Errorf("error executing PVAction for %s: %v", resourceID, err)) - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } obj = updatedObj @@ -1273,7 +1274,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso pvName, err = ctx.pvRenamer(oldName) if err != nil { errs.Add(namespace, errors.Wrapf(err, "error renaming PV")) - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } } else { // VolumeSnapshotter could have modified the PV name through @@ -1300,7 +1301,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso // Return early because we don't want to restore the PV itself, we // want to dynamically re-provision it. - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists case hasDeleteReclaimPolicy(obj.Object): ctx.log.Infof("Dynamically re-provisioning persistent volume because it doesn't have a snapshot and its reclaim policy is Delete.") @@ -1308,7 +1309,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso // Return early because we don't want to restore the PV itself, we // want to dynamically re-provision it. 
- return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists default: ctx.log.Infof("Restoring persistent volume as-is because it doesn't have a snapshot and its reclaim policy is not Delete.") @@ -1317,7 +1318,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso _, err = remapClaimRefNS(ctx, obj) if err != nil { errs.Add(namespace, err) - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } obj = resetVolumeBindingInfo(obj) // We call the pvRestorer here to clear out the PV's claimRef.UID, @@ -1325,7 +1326,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso updatedObj, err := ctx.pvRestorer.executePVAction(obj) if err != nil { errs.Add(namespace, fmt.Errorf("error executing PVAction for %s: %v", resourceID, err)) - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } obj = updatedObj } @@ -1335,7 +1336,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso // Clear out non-core metadata fields and status. if obj, err = resetMetadataAndStatus(obj); err != nil { errs.Add(namespace, err) - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } ctx.log.Infof("restore status includes excludes: %+v", ctx.resourceStatusIncludesExcludes) @@ -1360,7 +1361,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso }) if err != nil { errs.Add(namespace, fmt.Errorf("error preparing %s: %v", resourceID, err)) - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } // If async plugin started async operation, add it to the ItemOperations list @@ -1389,12 +1390,12 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso } if executeOutput.SkipRestore { ctx.log.Infof("Skipping restore of %s: %v because a registered plugin discarded it", obj.GroupVersionKind().Kind, name) - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } unstructuredObj, ok := executeOutput.UpdatedItem.(*unstructured.Unstructured) if !ok { errs.Add(namespace, fmt.Errorf("%s: unexpected type %T", resourceID, executeOutput.UpdatedItem)) - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } obj = unstructuredObj @@ -1427,7 +1428,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso } } - w, e, additionalItemExists, _ := ctx.restoreItem(additionalObj, additionalItem.GroupResource, additionalItemNamespace) + w, e, additionalItemExists := ctx.restoreItem(additionalObj, additionalItem.GroupResource, additionalItemNamespace) if additionalItemExists { filteredAdditionalItems = append(filteredAdditionalItems, additionalItem) } @@ -1456,7 +1457,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso pvc := new(v1.PersistentVolumeClaim) if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), pvc); err != nil { errs.Add(namespace, err) - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } if pvc.Spec.VolumeName != "" { @@ -1475,7 +1476,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso ctx.log.Infof("Updating persistent volume claim %s/%s to reference renamed persistent volume (%s -> %s)", namespace, name, pvc.Spec.VolumeName, newName) if err := unstructured.SetNestedField(obj.Object, newName, "spec", "volumeName"); err != nil 
{ errs.Add(namespace, err) - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } } } @@ -1506,7 +1507,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso resourceClient, err = ctx.getResourceClient(newGR, obj, obj.GetNamespace()) if err != nil { errs.AddVeleroError(fmt.Errorf("error getting updated resource client for namespace %q, resource %q: %v", namespace, &groupResource, err)) - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } ctx.log.Infof("Attempting to restore %s: %v", obj.GroupVersionKind().Kind, name) @@ -1535,7 +1536,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso isAlreadyExistsError, err := isAlreadyExistsError(ctx, obj, restoreErr, resourceClient) if err != nil { errs.Add(namespace, err) - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } if restoreErr != nil { @@ -1550,7 +1551,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso if err != nil && isAlreadyExistsError { ctx.log.Warnf("Unable to retrieve in-cluster version of %s: %v, object won't be restored by velero or have restore labels, and existing resource policy is not applied", kube.NamespaceAndName(obj), err) warnings.Add(namespace, err) - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } } @@ -1564,7 +1565,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso if err != nil { ctx.log.Infof("Error trying to reset metadata for %s: %v", kube.NamespaceAndName(obj), err) warnings.Add(namespace, err) - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } // We know the object from the cluster won't have the backup/restore name @@ -1580,20 +1581,20 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso if err != nil { ctx.log.Infof("error merging secrets for ServiceAccount %s: %v", kube.NamespaceAndName(obj), err) warnings.Add(namespace, err) - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } patchBytes, err := generatePatch(fromCluster, desired) if err != nil { ctx.log.Infof("error generating patch for ServiceAccount %s: %v", kube.NamespaceAndName(obj), err) warnings.Add(namespace, err) - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } if patchBytes == nil { // In-cluster and desired state are the same, so move on to // the next item. - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } _, err = resourceClient.Patch(name, patchBytes) @@ -1642,7 +1643,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso warnings.Add(namespace, e) } } - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } //update backup/restore labels on the unchanged resources if existingResourcePolicy is set as update @@ -1658,24 +1659,22 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso } ctx.log.Infof("Restore of %s, %v skipped: it already exists in the cluster and is the same as the backed up version", obj.GroupVersionKind().Kind, name) - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } // Error was something other than an AlreadyExists. 
if restoreErr != nil { ctx.log.Errorf("error restoring %s: %+v", name, restoreErr) errs.Add(namespace, fmt.Errorf("error restoring %s: %v", resourceID, restoreErr)) - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } - itemCreated = true - shouldRestoreStatus := ctx.resourceStatusIncludesExcludes != nil && ctx.resourceStatusIncludesExcludes.ShouldInclude(groupResource.String()) if shouldRestoreStatus && statusFieldErr != nil { err := fmt.Errorf("could not get status to be restored %s: %v", kube.NamespaceAndName(obj), statusFieldErr) ctx.log.Errorf(err.Error()) errs.Add(namespace, err) - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } ctx.log.Debugf("status field for %s: exists: %v, should restore: %v", groupResource, statusFieldExists, shouldRestoreStatus) // if it should restore status, run a UpdateStatus @@ -1683,7 +1682,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso if err := unstructured.SetNestedField(obj.Object, objStatus, "status"); err != nil { ctx.log.Errorf("could not set status field %s: %v", kube.NamespaceAndName(obj), err) errs.Add(namespace, err) - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } obj.SetResourceVersion(createdObj.GetResourceVersion()) updated, err := resourceClient.UpdateStatus(obj, metav1.UpdateOptions{}) @@ -1702,14 +1701,14 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso if err != nil { ctx.log.Errorf("error generating patch for managed fields %s: %v", kube.NamespaceAndName(obj), err) errs.Add(namespace, err) - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } if patchBytes != nil { if _, err = resourceClient.Patch(name, patchBytes); err != nil { ctx.log.Errorf("error patch for managed fields %s: %v", kube.NamespaceAndName(obj), err) if !apierrors.IsNotFound(err) { errs.Add(namespace, err) - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } } else { ctx.log.Infof("the managed fields for %s is patched", kube.NamespaceAndName(obj)) @@ -1720,7 +1719,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso pod := new(v1.Pod) if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), pod); err != nil { errs.Add(namespace, err) - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } // Do not create podvolumerestore when current restore excludes pv/pvc @@ -1746,7 +1745,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso } } - return warnings, errs, itemExists, itemCreated + return warnings, errs, itemExists } func isAlreadyExistsError(ctx *restoreContext, obj *unstructured.Unstructured, err error, client client.Dynamic) (bool, error) { From 9ccb5a14bbe514166bfa498968aed8808b93b8a6 Mon Sep 17 00:00:00 2001 From: yanggang Date: Mon, 27 Nov 2023 11:13:52 +0000 Subject: [PATCH 07/10] Fix test code wrong code for VeleroInstall Signed-off-by: yanggang --- test/util/velero/install.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/util/velero/install.go b/test/util/velero/install.go index 9427e19eed..2c45b4c2dd 100644 --- a/test/util/velero/install.go +++ b/test/util/velero/install.go @@ -269,7 +269,7 @@ func installVeleroServer(ctx context.Context, cli, cloudProvider string, options } if options.NodeAgentPodCPURequest != "" { - args = append(args, 
fmt.Sprintf("--node-agent-pod-mem-request=%v", options.NodeAgentPodCPURequest)) + args = append(args, fmt.Sprintf("--node-agent-pod-cpu-request=%v", options.NodeAgentPodCPURequest)) } if options.NodeAgentPodMemLimit != "" { From 402a61481d387c0ba4ca85bfc1bbd7688d10df0a Mon Sep 17 00:00:00 2001 From: Yang Gang Date: Mon, 27 Nov 2023 18:03:01 +0000 Subject: [PATCH 08/10] [docs] Fix all typos in plugins typo. (#7129) Signed-off-by: yanggang --- site/content/docs/main/custom-plugins.md | 2 +- site/content/docs/v1.0.0/plugins.md | 2 +- site/content/docs/v1.1.0/plugins.md | 2 +- site/content/docs/v1.10/custom-plugins.md | 2 +- site/content/docs/v1.11/custom-plugins.md | 2 +- site/content/docs/v1.2.0/custom-plugins.md | 2 +- site/content/docs/v1.3.0/custom-plugins.md | 2 +- site/content/docs/v1.3.1/custom-plugins.md | 2 +- site/content/docs/v1.3.2/custom-plugins.md | 2 +- site/content/docs/v1.4/custom-plugins.md | 2 +- site/content/docs/v1.5/custom-plugins.md | 2 +- site/content/docs/v1.6/custom-plugins.md | 2 +- site/content/docs/v1.7/custom-plugins.md | 2 +- site/content/docs/v1.8/custom-plugins.md | 2 +- site/content/docs/v1.9/custom-plugins.md | 2 +- 15 files changed, 15 insertions(+), 15 deletions(-) diff --git a/site/content/docs/main/custom-plugins.md b/site/content/docs/main/custom-plugins.md index a4c13d0f48..703de3c494 100644 --- a/site/content/docs/main/custom-plugins.md +++ b/site/content/docs/main/custom-plugins.md @@ -37,7 +37,7 @@ When naming your plugin, keep in mind that the full name needs to conform to the - have two parts, prefix + name, separated by '/' - none of the above parts can be empty - the prefix is a valid DNS subdomain name -- a plugin with the same prefix + name cannot not already exist +- a plugin with the same prefix + name cannot already exist ### Some examples: diff --git a/site/content/docs/v1.0.0/plugins.md b/site/content/docs/v1.0.0/plugins.md index e9024fcce9..0005bff8e7 100644 --- a/site/content/docs/v1.0.0/plugins.md +++ b/site/content/docs/v1.0.0/plugins.md @@ -15,7 +15,7 @@ When naming your plugin, keep in mind that the name needs to conform to these ru - have two parts separated by '/' - none of the above parts can be empty - the prefix is a valid DNS subdomain name -- a plugin with the same name cannot not already exist +- a plugin with the same name cannot already exist ### Some examples: diff --git a/site/content/docs/v1.1.0/plugins.md b/site/content/docs/v1.1.0/plugins.md index 617b711e5a..0b9d409972 100644 --- a/site/content/docs/v1.1.0/plugins.md +++ b/site/content/docs/v1.1.0/plugins.md @@ -15,7 +15,7 @@ When naming your plugin, keep in mind that the name needs to conform to these ru - have two parts separated by '/' - none of the above parts can be empty - the prefix is a valid DNS subdomain name -- a plugin with the same name cannot not already exist +- a plugin with the same name cannot already exist ### Some examples: diff --git a/site/content/docs/v1.10/custom-plugins.md b/site/content/docs/v1.10/custom-plugins.md index 5fe168d75f..c26698dc01 100644 --- a/site/content/docs/v1.10/custom-plugins.md +++ b/site/content/docs/v1.10/custom-plugins.md @@ -37,7 +37,7 @@ When naming your plugin, keep in mind that the full name needs to conform to the - have two parts, prefix + name, separated by '/' - none of the above parts can be empty - the prefix is a valid DNS subdomain name -- a plugin with the same prefix + name cannot not already exist +- a plugin with the same prefix + name cannot already exist ### Some examples: diff --git 
a/site/content/docs/v1.11/custom-plugins.md b/site/content/docs/v1.11/custom-plugins.md index 6024fecc15..fce6282e80 100644 --- a/site/content/docs/v1.11/custom-plugins.md +++ b/site/content/docs/v1.11/custom-plugins.md @@ -37,7 +37,7 @@ When naming your plugin, keep in mind that the full name needs to conform to the - have two parts, prefix + name, separated by '/' - none of the above parts can be empty - the prefix is a valid DNS subdomain name -- a plugin with the same prefix + name cannot not already exist +- a plugin with the same prefix + name cannot already exist ### Some examples: diff --git a/site/content/docs/v1.2.0/custom-plugins.md b/site/content/docs/v1.2.0/custom-plugins.md index b7d42019ff..96c1bb04f7 100644 --- a/site/content/docs/v1.2.0/custom-plugins.md +++ b/site/content/docs/v1.2.0/custom-plugins.md @@ -15,7 +15,7 @@ When naming your plugin, keep in mind that the name needs to conform to these ru - have two parts separated by '/' - none of the above parts can be empty - the prefix is a valid DNS subdomain name -- a plugin with the same name cannot not already exist +- a plugin with the same name cannot already exist ### Some examples: diff --git a/site/content/docs/v1.3.0/custom-plugins.md b/site/content/docs/v1.3.0/custom-plugins.md index 0451bbd633..10752a8b60 100644 --- a/site/content/docs/v1.3.0/custom-plugins.md +++ b/site/content/docs/v1.3.0/custom-plugins.md @@ -15,7 +15,7 @@ When naming your plugin, keep in mind that the name needs to conform to these ru - have two parts separated by '/' - none of the above parts can be empty - the prefix is a valid DNS subdomain name -- a plugin with the same name cannot not already exist +- a plugin with the same name cannot already exist ### Some examples: diff --git a/site/content/docs/v1.3.1/custom-plugins.md b/site/content/docs/v1.3.1/custom-plugins.md index b9c21f31f5..894a187fab 100644 --- a/site/content/docs/v1.3.1/custom-plugins.md +++ b/site/content/docs/v1.3.1/custom-plugins.md @@ -15,7 +15,7 @@ When naming your plugin, keep in mind that the name needs to conform to these ru - have two parts separated by '/' - none of the above parts can be empty - the prefix is a valid DNS subdomain name -- a plugin with the same name cannot not already exist +- a plugin with the same name cannot already exist ### Some examples: diff --git a/site/content/docs/v1.3.2/custom-plugins.md b/site/content/docs/v1.3.2/custom-plugins.md index baa5b26d0b..9a5b9fe618 100644 --- a/site/content/docs/v1.3.2/custom-plugins.md +++ b/site/content/docs/v1.3.2/custom-plugins.md @@ -15,7 +15,7 @@ When naming your plugin, keep in mind that the name needs to conform to these ru - have two parts separated by '/' - none of the above parts can be empty - the prefix is a valid DNS subdomain name -- a plugin with the same name cannot not already exist +- a plugin with the same name cannot already exist ### Some examples: diff --git a/site/content/docs/v1.4/custom-plugins.md b/site/content/docs/v1.4/custom-plugins.md index 075d9a77af..6bbf5863d7 100644 --- a/site/content/docs/v1.4/custom-plugins.md +++ b/site/content/docs/v1.4/custom-plugins.md @@ -15,7 +15,7 @@ When naming your plugin, keep in mind that the name needs to conform to these ru - have two parts separated by '/' - none of the above parts can be empty - the prefix is a valid DNS subdomain name -- a plugin with the same name cannot not already exist +- a plugin with the same name cannot already exist ### Some examples: diff --git a/site/content/docs/v1.5/custom-plugins.md 
b/site/content/docs/v1.5/custom-plugins.md index 36aa6eb07d..989aeec418 100644 --- a/site/content/docs/v1.5/custom-plugins.md +++ b/site/content/docs/v1.5/custom-plugins.md @@ -37,7 +37,7 @@ When naming your plugin, keep in mind that the full name needs to conform to the - have two parts, prefix + name, separated by '/' - none of the above parts can be empty - the prefix is a valid DNS subdomain name -- a plugin with the same prefix + name cannot not already exist +- a plugin with the same prefix + name cannot already exist ### Some examples: diff --git a/site/content/docs/v1.6/custom-plugins.md b/site/content/docs/v1.6/custom-plugins.md index 7968ff4ad1..167584eb0c 100644 --- a/site/content/docs/v1.6/custom-plugins.md +++ b/site/content/docs/v1.6/custom-plugins.md @@ -37,7 +37,7 @@ When naming your plugin, keep in mind that the full name needs to conform to the - have two parts, prefix + name, separated by '/' - none of the above parts can be empty - the prefix is a valid DNS subdomain name -- a plugin with the same prefix + name cannot not already exist +- a plugin with the same prefix + name cannot already exist ### Some examples: diff --git a/site/content/docs/v1.7/custom-plugins.md b/site/content/docs/v1.7/custom-plugins.md index dbb82d5a30..38bbb246e7 100644 --- a/site/content/docs/v1.7/custom-plugins.md +++ b/site/content/docs/v1.7/custom-plugins.md @@ -37,7 +37,7 @@ When naming your plugin, keep in mind that the full name needs to conform to the - have two parts, prefix + name, separated by '/' - none of the above parts can be empty - the prefix is a valid DNS subdomain name -- a plugin with the same prefix + name cannot not already exist +- a plugin with the same prefix + name cannot already exist ### Some examples: diff --git a/site/content/docs/v1.8/custom-plugins.md b/site/content/docs/v1.8/custom-plugins.md index c4dbc03e14..e84ee777f8 100644 --- a/site/content/docs/v1.8/custom-plugins.md +++ b/site/content/docs/v1.8/custom-plugins.md @@ -37,7 +37,7 @@ When naming your plugin, keep in mind that the full name needs to conform to the - have two parts, prefix + name, separated by '/' - none of the above parts can be empty - the prefix is a valid DNS subdomain name -- a plugin with the same prefix + name cannot not already exist +- a plugin with the same prefix + name cannot already exist ### Some examples: diff --git a/site/content/docs/v1.9/custom-plugins.md b/site/content/docs/v1.9/custom-plugins.md index 8f92ec17ba..403f60d4ad 100644 --- a/site/content/docs/v1.9/custom-plugins.md +++ b/site/content/docs/v1.9/custom-plugins.md @@ -37,7 +37,7 @@ When naming your plugin, keep in mind that the full name needs to conform to the - have two parts, prefix + name, separated by '/' - none of the above parts can be empty - the prefix is a valid DNS subdomain name -- a plugin with the same prefix + name cannot not already exist +- a plugin with the same prefix + name cannot already exist ### Some examples: From 98a56eb5c7cf62181c4ba510d69c82a40ef4349f Mon Sep 17 00:00:00 2001 From: Ming Qiu Date: Wed, 15 Nov 2023 08:52:57 +0000 Subject: [PATCH 09/10] Node agent restart enhancement Signed-off-by: Ming Qiu --- changelogs/unreleased/7130-qiuming-best | 1 + pkg/cmd/cli/nodeagent/server.go | 55 ++------ pkg/controller/data_download_controller.go | 82 +++++++++++- .../data_download_controller_test.go | 114 +++++++++++++++- pkg/controller/data_upload_controller.go | 79 ++++++++++- pkg/controller/data_upload_controller_test.go | 126 +++++++++++++++++- 6 files changed, 401 insertions(+), 56 
deletions(-) create mode 100644 changelogs/unreleased/7130-qiuming-best diff --git a/changelogs/unreleased/7130-qiuming-best b/changelogs/unreleased/7130-qiuming-best new file mode 100644 index 0000000000..f6f6c6f74f --- /dev/null +++ b/changelogs/unreleased/7130-qiuming-best @@ -0,0 +1 @@ +Node agent restart enhancement diff --git a/pkg/cmd/cli/nodeagent/server.go b/pkg/cmd/cli/nodeagent/server.go index 835b899c36..53c45fb810 100644 --- a/pkg/cmd/cli/nodeagent/server.go +++ b/pkg/cmd/cli/nodeagent/server.go @@ -285,13 +285,13 @@ func (s *nodeAgentServer) run() { } dataUploadReconciler := controller.NewDataUploadReconciler(s.mgr.GetClient(), s.kubeClient, s.csiSnapshotClient.SnapshotV1(), s.dataPathMgr, repoEnsurer, clock.RealClock{}, credentialGetter, s.nodeName, s.fileSystem, s.config.dataMoverPrepareTimeout, s.logger, s.metrics) - s.markDataUploadsCancel(dataUploadReconciler) + s.attemptDataUploadResume(dataUploadReconciler) if err = dataUploadReconciler.SetupWithManager(s.mgr); err != nil { s.logger.WithError(err).Fatal("Unable to create the data upload controller") } dataDownloadReconciler := controller.NewDataDownloadReconciler(s.mgr.GetClient(), s.kubeClient, s.dataPathMgr, repoEnsurer, credentialGetter, s.nodeName, s.config.dataMoverPrepareTimeout, s.logger, s.metrics) - s.markDataDownloadsCancel(dataDownloadReconciler) + s.attemptDataDownloadResume(dataDownloadReconciler) if err = dataDownloadReconciler.SetupWithManager(s.mgr); err != nil { s.logger.WithError(err).Fatal("Unable to create the data download controller") } @@ -365,65 +365,28 @@ func (s *nodeAgentServer) markInProgressCRsFailed() { s.markInProgressPVRsFailed(client) } -func (s *nodeAgentServer) markDataUploadsCancel(r *controller.DataUploadReconciler) { +func (s *nodeAgentServer) attemptDataUploadResume(r *controller.DataUploadReconciler) { // the function is called before starting the controller manager, the embedded client isn't ready to use, so create a new one here client, err := ctrlclient.New(s.mgr.GetConfig(), ctrlclient.Options{Scheme: s.mgr.GetScheme()}) if err != nil { s.logger.WithError(errors.WithStack(err)).Error("failed to create client") return } - if dataUploads, err := r.FindDataUploads(s.ctx, client, s.namespace); err != nil { - s.logger.WithError(errors.WithStack(err)).Error("failed to find data uploads") - } else { - for i := range dataUploads { - du := dataUploads[i] - if du.Status.Phase == velerov2alpha1api.DataUploadPhaseAccepted || - du.Status.Phase == velerov2alpha1api.DataUploadPhasePrepared || - du.Status.Phase == velerov2alpha1api.DataUploadPhaseInProgress { - err = controller.UpdateDataUploadWithRetry(s.ctx, client, types.NamespacedName{Namespace: du.Namespace, Name: du.Name}, s.logger.WithField("dataupload", du.Name), - func(dataUpload *velerov2alpha1api.DataUpload) { - dataUpload.Spec.Cancel = true - dataUpload.Status.Message = fmt.Sprintf("found a dataupload with status %q during the node-agent starting, mark it as cancel", du.Status.Phase) - }) - - if err != nil { - s.logger.WithError(errors.WithStack(err)).Errorf("failed to mark dataupload %q cancel", du.GetName()) - continue - } - s.logger.WithField("dataupload", du.GetName()).Warn(du.Status.Message) - } - } + if err := r.AttemptDataUploadResume(s.ctx, client, s.logger.WithField("node", s.nodeName), s.namespace); err != nil { + s.logger.WithError(errors.WithStack(err)).Error("failed to attempt data upload resume") } } -func (s *nodeAgentServer) markDataDownloadsCancel(r *controller.DataDownloadReconciler) { +func (s *nodeAgentServer) 
attemptDataDownloadResume(r *controller.DataDownloadReconciler) { // the function is called before starting the controller manager, the embedded client isn't ready to use, so create a new one here client, err := ctrlclient.New(s.mgr.GetConfig(), ctrlclient.Options{Scheme: s.mgr.GetScheme()}) if err != nil { s.logger.WithError(errors.WithStack(err)).Error("failed to create client") return } - if dataDownloads, err := r.FindDataDownloads(s.ctx, client, s.namespace); err != nil { - s.logger.WithError(errors.WithStack(err)).Error("failed to find data downloads") - } else { - for i := range dataDownloads { - dd := dataDownloads[i] - if dd.Status.Phase == velerov2alpha1api.DataDownloadPhaseAccepted || - dd.Status.Phase == velerov2alpha1api.DataDownloadPhasePrepared || - dd.Status.Phase == velerov2alpha1api.DataDownloadPhaseInProgress { - err = controller.UpdateDataDownloadWithRetry(s.ctx, client, types.NamespacedName{Namespace: dd.Namespace, Name: dd.Name}, s.logger.WithField("datadownload", dd.Name), - func(dataDownload *velerov2alpha1api.DataDownload) { - dataDownload.Spec.Cancel = true - dataDownload.Status.Message = fmt.Sprintf("found a datadownload with status %q during the node-agent starting, mark it as cancel", dd.Status.Phase) - }) - - if err != nil { - s.logger.WithError(errors.WithStack(err)).Errorf("failed to mark datadownload %q cancel", dd.GetName()) - continue - } - s.logger.WithField("datadownload", dd.GetName()).Warn(dd.Status.Message) - } - } + + if err := r.AttemptDataDownloadResume(s.ctx, client, s.logger.WithField("node", s.nodeName), s.namespace); err != nil { + s.logger.WithError(errors.WithStack(err)).Error("failed to attempt data download resume") } } diff --git a/pkg/controller/data_download_controller.go b/pkg/controller/data_download_controller.go index bf4299ea4f..065c48a5f4 100644 --- a/pkg/controller/data_download_controller.go +++ b/pkg/controller/data_download_controller.go @@ -140,7 +140,7 @@ func (r *DataDownloadReconciler) Reconcile(ctx context.Context, req ctrl.Request // to help clear up resources instead of clear them directly in case of some conflict with Expose action if err := UpdateDataDownloadWithRetry(ctx, r.client, req.NamespacedName, log, func(dataDownload *velerov2alpha1api.DataDownload) { dataDownload.Spec.Cancel = true - dataDownload.Status.Message = fmt.Sprintf("found a dataupload %s/%s is being deleted, mark it as cancel", dd.Namespace, dd.Name) + dataDownload.Status.Message = fmt.Sprintf("found a datadownload %s/%s is being deleted, mark it as cancel", dd.Namespace, dd.Name) }); err != nil { log.Errorf("failed to set cancel flag with error %s for %s/%s", err.Error(), dd.Namespace, dd.Name) return ctrl.Result{}, err @@ -192,7 +192,6 @@ func (r *DataDownloadReconciler) Reconcile(ctx context.Context, req ctrl.Request return r.errorOut(ctx, dd, err, "error to expose snapshot", log) } } - log.Info("Restore is exposed") // we need to get CR again for it may canceled by datadownload controller on other @@ -205,7 +204,6 @@ func (r *DataDownloadReconciler) Reconcile(ctx context.Context, req ctrl.Request } return ctrl.Result{}, errors.Wrap(err, "getting datadownload") } - // we need to clean up resources as resources created in Expose it may later than cancel action or prepare time // and need to clean up resources again if isDataDownloadInFinalState(dd) { @@ -267,7 +265,6 @@ func (r *DataDownloadReconciler) Reconcile(ctx context.Context, req ctrl.Request return r.errorOut(ctx, dd, err, "error to create data path", log) } } - // Update status to 
InProgress
 original := dd.DeepCopy()
 dd.Status.Phase = velerov2alpha1api.DataDownloadPhaseInProgress
@@ -576,6 +573,51 @@ func (r *DataDownloadReconciler) FindDataDownloads(ctx context.Context, cli clie
 return dataDownloads, nil
 }
+func (r *DataDownloadReconciler) findAcceptDataDownloadsByNodeLabel(ctx context.Context, cli client.Client, ns string) ([]velerov2alpha1api.DataDownload, error) {
+ dataDownloads := &velerov2alpha1api.DataDownloadList{}
+ if err := cli.List(ctx, dataDownloads, &client.ListOptions{Namespace: ns}); err != nil {
+ r.logger.WithError(errors.WithStack(err)).Error("failed to list datadownloads")
+ return nil, errors.Wrapf(err, "failed to list datadownloads")
+ }
+
+ var result []velerov2alpha1api.DataDownload
+ for _, dd := range dataDownloads.Items {
+ if dd.Status.Phase != velerov2alpha1api.DataDownloadPhaseAccepted {
+ continue
+ }
+ if dd.Labels[acceptNodeLabelKey] == r.nodeName {
+ result = append(result, dd)
+ }
+ }
+ return result, nil
+}
+
+// CancelAcceptedDataDownload will cancel the accepted data download
+func (r *DataDownloadReconciler) CancelAcceptedDataDownload(ctx context.Context, cli client.Client, ns string) {
+ r.logger.Infof("Canceling accepted data downloads for node %s", r.nodeName)
+ dataDownloads, err := r.findAcceptDataDownloadsByNodeLabel(ctx, cli, ns)
+ if err != nil {
+ r.logger.WithError(err).Error("failed to find data downloads")
+ return
+ }
+
+ for _, dd := range dataDownloads {
+ if dd.Spec.Cancel {
+ continue
+ }
+ err = UpdateDataDownloadWithRetry(ctx, cli, types.NamespacedName{Namespace: dd.Namespace, Name: dd.Name},
+ r.logger.WithField("datadownload", dd.Name), func(dataDownload *velerov2alpha1api.DataDownload) {
+ dataDownload.Spec.Cancel = true
+ dataDownload.Status.Message = fmt.Sprintf("found a datadownload with status %q during the node-agent starting, mark it as cancel", dd.Status.Phase)
+ })
+
+ r.logger.Warn(dd.Status.Message)
+ if err != nil {
+ r.logger.WithError(err).Errorf("failed to set cancel flag with error %s", err.Error())
+ }
+ }
+}
+
 func (r *DataDownloadReconciler) prepareDataDownload(ssb *velerov2alpha1api.DataDownload) {
 ssb.Status.Phase = velerov2alpha1api.DataDownloadPhasePrepared
 ssb.Status.Node = r.nodeName
@@ -749,3 +791,35 @@ func UpdateDataDownloadWithRetry(ctx context.Context, client client.Client, name
 return true, nil
 })
 }
+
+func (r *DataDownloadReconciler) AttemptDataDownloadResume(ctx context.Context, cli client.Client, logger *logrus.Entry, ns string) error {
+ if dataDownloads, err := r.FindDataDownloads(ctx, cli, ns); err != nil {
+ return errors.Wrapf(err, "failed to find data downloads")
+ } else {
+ for i := range dataDownloads {
+ dd := dataDownloads[i]
+ if dd.Status.Phase == velerov2alpha1api.DataDownloadPhasePrepared {
+ // keep doing nothing, let the controller re-download the data
+ // the Prepared CR could still be handled by the datadownload controller after the node-agent restarts
+ logger.WithField("datadownload", dd.GetName()).Debug("find a datadownload with status prepared")
+ } else if dd.Status.Phase == velerov2alpha1api.DataDownloadPhaseInProgress {
+ err = UpdateDataDownloadWithRetry(ctx, cli, types.NamespacedName{Namespace: dd.Namespace, Name: dd.Name}, logger.WithField("datadownload", dd.Name),
+ func(dataDownload *velerov2alpha1api.DataDownload) {
+ dataDownload.Spec.Cancel = true
+ dataDownload.Status.Message = fmt.Sprintf("found a datadownload with status %q during the node-agent starting, mark it as cancel", dd.Status.Phase)
+ })
+
+ if err != nil {
+ logger.WithError(errors.WithStack(err)).Errorf("failed to mark datadownload %q into canceled", dd.GetName())
+ continue
+ }
+ logger.WithField("datadownload", dd.GetName()).Debug("mark datadownload into canceled")
+ }
+ }
+ }
+
+ // If the data download is in Accepted status, the exposed PVC may not be created
+ // so we need to mark the data download as canceled because it may not be recoverable
+ r.CancelAcceptedDataDownload(ctx, cli, ns)
+ return nil
+}
diff --git a/pkg/controller/data_download_controller_test.go b/pkg/controller/data_download_controller_test.go
index de9fa7516a..afdadf61d2 100644
--- a/pkg/controller/data_download_controller_test.go
+++ b/pkg/controller/data_download_controller_test.go
@@ -69,7 +69,7 @@ func dataDownloadBuilder() *builder.DataDownloadBuilder {
 }
 func initDataDownloadReconciler(objects []runtime.Object, needError ...bool) (*DataDownloadReconciler, error) {
- var errs []error = make([]error, 5)
+ var errs []error = make([]error, 6)
 for k, isError := range needError {
 if k == 0 && isError {
 errs[0] = fmt.Errorf("Get error")
@@ -81,6 +81,8 @@ func initDataDownloadReconciler(objects []runtime.Object, needError ...bool) (*D
 errs[3] = fmt.Errorf("Patch error")
 } else if k == 4 && isError {
 errs[4] = apierrors.NewConflict(velerov2alpha1api.Resource("datadownload"), dataDownloadName, errors.New("conflict"))
+ } else if k == 5 && isError {
+ errs[5] = fmt.Errorf("List error")
 }
 }
 return initDataDownloadReconcilerWithError(objects, errs...)
@@ -116,6 +118,8 @@ func initDataDownloadReconcilerWithError(objects []runtime.Object, needError ...
 fakeClient.patchError = needError[3]
 } else if k == 4 {
 fakeClient.updateConflict = needError[4]
+ } else if k == 5 {
+ fakeClient.listError = needError[5]
 }
 }
@@ -939,3 +943,111 @@ func TestFindDataDownloads(t *testing.T) {
 })
 }
 }
+
+func TestAttemptDataDownloadResume(t *testing.T) {
+ tests := []struct {
+ name string
+ dataUploads []velerov2alpha1api.DataDownload
+ du *velerov2alpha1api.DataDownload
+ pod *corev1.Pod
+ needErrs []bool
+ acceptedDataDownloads []string
+ prepareddDataDownloads []string
+ cancelledDataDownloads []string
+ expectedError bool
+ }{
+ // Test case 1: Process Accepted DataDownload
+ {
+ name: "AcceptedDataDownload",
+ pod: builder.ForPod(velerov1api.DefaultNamespace, dataDownloadName).Volumes(&corev1.Volume{Name: dataDownloadName}).NodeName("node-1").Labels(map[string]string{
+ velerov1api.DataDownloadLabel: dataDownloadName,
+ }).Result(),
+ du: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseAccepted).Result(),
+ acceptedDataDownloads: []string{dataDownloadName},
+ expectedError: false,
+ },
+ // Test case 2: Cancel an Accepted DataDownload
+ {
+ name: "CancelAcceptedDataDownload",
+ du: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseAccepted).Result(),
+ },
+ // Test case 3: Process Accepted Prepared DataDownload
+ {
+ name: "PreparedDataDownload",
+ pod: builder.ForPod(velerov1api.DefaultNamespace, dataDownloadName).Volumes(&corev1.Volume{Name: dataDownloadName}).NodeName("node-1").Labels(map[string]string{
+ velerov1api.DataDownloadLabel: dataDownloadName,
+ }).Result(),
+ du: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhasePrepared).Result(),
+ prepareddDataDownloads: []string{dataDownloadName},
+ },
+ // Test case 4: Process Accepted InProgress DataDownload
+ {
+ name: "InProgressDataDownload",
+ pod: builder.ForPod(velerov1api.DefaultNamespace, dataDownloadName).Volumes(&corev1.Volume{Name: 
dataDownloadName}).NodeName("node-1").Labels(map[string]string{ + velerov1api.DataDownloadLabel: dataDownloadName, + }).Result(), + du: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhasePrepared).Result(), + prepareddDataDownloads: []string{dataDownloadName}, + }, + // Test case 5: get resume error + { + name: "ResumeError", + pod: builder.ForPod(velerov1api.DefaultNamespace, dataDownloadName).Volumes(&corev1.Volume{Name: dataDownloadName}).NodeName("node-1").Labels(map[string]string{ + velerov1api.DataDownloadLabel: dataDownloadName, + }).Result(), + needErrs: []bool{false, false, false, false, false, true}, + du: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhasePrepared).Result(), + expectedError: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.TODO() + r, err := initDataDownloadReconciler(nil, test.needErrs...) + r.nodeName = "node-1" + require.NoError(t, err) + defer func() { + r.client.Delete(ctx, test.du, &kbclient.DeleteOptions{}) + if test.pod != nil { + r.client.Delete(ctx, test.pod, &kbclient.DeleteOptions{}) + } + }() + + assert.NoError(t, r.client.Create(ctx, test.du)) + if test.pod != nil { + assert.NoError(t, r.client.Create(ctx, test.pod)) + } + // Run the test + err = r.AttemptDataDownloadResume(ctx, r.client, r.logger.WithField("name", test.name), test.du.Namespace) + + if test.expectedError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + + // Verify DataDownload marked as Cancelled + for _, duName := range test.cancelledDataDownloads { + dataUpload := &velerov2alpha1api.DataDownload{} + err := r.client.Get(context.Background(), types.NamespacedName{Namespace: "velero", Name: duName}, dataUpload) + require.NoError(t, err) + assert.Equal(t, velerov2alpha1api.DataDownloadPhaseCanceled, dataUpload.Status.Phase) + } + // Verify DataDownload marked as Accepted + for _, duName := range test.acceptedDataDownloads { + dataUpload := &velerov2alpha1api.DataDownload{} + err := r.client.Get(context.Background(), types.NamespacedName{Namespace: "velero", Name: duName}, dataUpload) + require.NoError(t, err) + assert.Equal(t, velerov2alpha1api.DataDownloadPhaseAccepted, dataUpload.Status.Phase) + } + // Verify DataDownload marked as Prepared + for _, duName := range test.prepareddDataDownloads { + dataUpload := &velerov2alpha1api.DataDownload{} + err := r.client.Get(context.Background(), types.NamespacedName{Namespace: "velero", Name: duName}, dataUpload) + require.NoError(t, err) + assert.Equal(t, velerov2alpha1api.DataDownloadPhasePrepared, dataUpload.Status.Phase) + } + } + }) + } +} diff --git a/pkg/controller/data_upload_controller.go b/pkg/controller/data_upload_controller.go index 9465528e33..524d8a0570 100644 --- a/pkg/controller/data_upload_controller.go +++ b/pkg/controller/data_upload_controller.go @@ -274,7 +274,6 @@ func (r *DataUploadReconciler) Reconcile(ctx context.Context, req ctrl.Request) return r.errorOut(ctx, du, err, "error to create data path", log) } } - // Update status to InProgress original := du.DeepCopy() du.Status.Phase = velerov2alpha1api.DataUploadPhaseInProgress @@ -581,7 +580,7 @@ func (r *DataUploadReconciler) findDataUploadForPod(podObj client.Object) []reco return []reconcile.Request{requests} } -func (r *DataUploadReconciler) FindDataUploads(ctx context.Context, cli client.Client, ns string) ([]velerov2alpha1api.DataUpload, error) { +func (r *DataUploadReconciler) FindDataUploadsByPod(ctx context.Context, cli client.Client, ns string) 
([]velerov2alpha1api.DataUpload, error) { pods := &corev1.PodList{} var dataUploads []velerov2alpha1api.DataUpload if err := cli.List(ctx, pods, &client.ListOptions{Namespace: ns}); err != nil { @@ -605,6 +604,51 @@ func (r *DataUploadReconciler) FindDataUploads(ctx context.Context, cli client.C return dataUploads, nil } +func (r *DataUploadReconciler) findAcceptDataUploadsByNodeLabel(ctx context.Context, cli client.Client, ns string) ([]velerov2alpha1api.DataUpload, error) { + dataUploads := &velerov2alpha1api.DataUploadList{} + if err := cli.List(ctx, dataUploads, &client.ListOptions{Namespace: ns}); err != nil { + r.logger.WithError(errors.WithStack(err)).Error("failed to list datauploads") + return nil, errors.Wrapf(err, "failed to list datauploads") + } + + var result []velerov2alpha1api.DataUpload + for _, du := range dataUploads.Items { + if du.Status.Phase != velerov2alpha1api.DataUploadPhaseAccepted { + continue + } + if du.Labels[acceptNodeLabelKey] == r.nodeName { + result = append(result, du) + } + } + return result, nil +} + +func (r *DataUploadReconciler) CancelAcceptedDataupload(ctx context.Context, cli client.Client, ns string) { + r.logger.Infof("Reset accepted dataupload for node %s", r.nodeName) + dataUploads, err := r.findAcceptDataUploadsByNodeLabel(ctx, cli, ns) + if err != nil { + r.logger.WithError(err).Error("failed to find dataupload") + return + } + + for _, du := range dataUploads { + if du.Spec.Cancel { + continue + } + err = UpdateDataUploadWithRetry(ctx, cli, types.NamespacedName{Namespace: du.Namespace, Name: du.Name}, r.logger.WithField("dataupload", du.Name), + func(dataUpload *velerov2alpha1api.DataUpload) { + dataUpload.Spec.Cancel = true + dataUpload.Status.Message = fmt.Sprintf("found a dataupload with status %q during the node-agent starting, mark it as cancel", du.Status.Phase) + }) + + r.logger.WithField("dataupload", du.GetName()).Warn(du.Status.Message) + if err != nil { + r.logger.WithError(errors.WithStack(err)).Errorf("failed to mark dataupload %q cancel", du.GetName()) + continue + } + } +} + func (r *DataUploadReconciler) prepareDataUpload(du *velerov2alpha1api.DataUpload) { du.Status.Phase = velerov2alpha1api.DataUploadPhasePrepared du.Status.Node = r.nodeName @@ -833,3 +877,34 @@ func UpdateDataUploadWithRetry(ctx context.Context, client client.Client, namesp return true, nil }) } + +func (r *DataUploadReconciler) AttemptDataUploadResume(ctx context.Context, cli client.Client, logger *logrus.Entry, ns string) error { + if dataUploads, err := r.FindDataUploadsByPod(ctx, cli, ns); err != nil { + return errors.Wrap(err, "failed to find data uploads") + } else { + for _, du := range dataUploads { + if du.Status.Phase == velerov2alpha1api.DataUploadPhasePrepared { + // keep doing nothing let controller re-download the data + // the Prepared CR could be still handled by dataupload controller after node-agent restart + logger.WithField("dataupload", du.GetName()).Debug("find a dataupload with status prepared") + } else if du.Status.Phase == velerov2alpha1api.DataUploadPhaseInProgress { + err = UpdateDataUploadWithRetry(ctx, cli, types.NamespacedName{Namespace: du.Namespace, Name: du.Name}, logger.WithField("dataupload", du.Name), + func(dataUpload *velerov2alpha1api.DataUpload) { + dataUpload.Spec.Cancel = true + dataUpload.Status.Message = fmt.Sprintf("found a dataupload with status %q during the node-agent starting, mark it as cancel", du.Status.Phase) + }) + + if err != nil { + logger.WithError(errors.WithStack(err)).Errorf("failed to mark 
dataupload %q into canceled", du.GetName()) + continue + } + logger.WithField("dataupload", du.GetName()).Debug("mark dataupload into canceled") + } + } + } + + //If the data upload is in Accepted status, the volume snapshot may be deleted and the exposed pod may not be created + // so we need to mark the data upload as canceled for it may not be recoverable + r.CancelAcceptedDataupload(ctx, cli, ns) + return nil +} diff --git a/pkg/controller/data_upload_controller_test.go b/pkg/controller/data_upload_controller_test.go index b61cd07b32..05ee974308 100644 --- a/pkg/controller/data_upload_controller_test.go +++ b/pkg/controller/data_upload_controller_test.go @@ -68,6 +68,7 @@ type FakeClient struct { updateError error patchError error updateConflict error + listError error } func (c *FakeClient) Get(ctx context.Context, key kbclient.ObjectKey, obj kbclient.Object) error { @@ -106,8 +107,16 @@ func (c *FakeClient) Patch(ctx context.Context, obj kbclient.Object, patch kbcli return c.Client.Patch(ctx, obj, patch, opts...) } +func (c *FakeClient) List(ctx context.Context, list kbclient.ObjectList, opts ...kbclient.ListOption) error { + if c.listError != nil { + return c.listError + } + + return c.Client.List(ctx, list, opts...) +} + func initDataUploaderReconciler(needError ...bool) (*DataUploadReconciler, error) { - var errs []error = make([]error, 5) + var errs []error = make([]error, 6) for k, isError := range needError { if k == 0 && isError { errs[0] = fmt.Errorf("Get error") @@ -118,7 +127,9 @@ func initDataUploaderReconciler(needError ...bool) (*DataUploadReconciler, error } else if k == 3 && isError { errs[3] = fmt.Errorf("Patch error") } else if k == 4 && isError { - errs[4] = apierrors.NewConflict(velerov2alpha1api.Resource("datadownload"), dataDownloadName, errors.New("conflict")) + errs[4] = apierrors.NewConflict(velerov2alpha1api.Resource("dataupload"), dataUploadName, errors.New("conflict")) + } else if k == 5 && isError { + errs[5] = fmt.Errorf("List error") } } @@ -198,6 +209,8 @@ func initDataUploaderReconcilerWithError(needError ...error) (*DataUploadReconci fakeClient.patchError = needError[3] } else if k == 4 { fakeClient.updateConflict = needError[4] + } else if k == 5 { + fakeClient.listError = needError[5] } } @@ -983,7 +996,7 @@ func TestFindDataUploads(t *testing.T) { require.NoError(t, err) err = r.client.Create(ctx, &test.pod) require.NoError(t, err) - uploads, err := r.FindDataUploads(context.Background(), r.client, "velero") + uploads, err := r.FindDataUploadsByPod(context.Background(), r.client, "velero") if test.expectedError { assert.Error(t, err) @@ -994,3 +1007,110 @@ func TestFindDataUploads(t *testing.T) { }) } } +func TestAttemptDataUploadResume(t *testing.T) { + tests := []struct { + name string + dataUploads []velerov2alpha1api.DataUpload + du *velerov2alpha1api.DataUpload + pod *corev1.Pod + needErrs []bool + acceptedDataUploads []string + prepareddDataUploads []string + cancelledDataUploads []string + expectedError bool + }{ + // Test case 1: Process Accepted DataUpload + { + name: "AcceptedDataUpload", + pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1.Volume{Name: "dataupload-1"}).NodeName("node-1").Labels(map[string]string{ + velerov1api.DataUploadLabel: dataUploadName, + }).Result(), + du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).Result(), + acceptedDataUploads: []string{dataUploadName}, + expectedError: false, + }, + // Test case 2: Cancel an Accepted DataUpload + { + name: 
"CancelAcceptedDataUpload", + du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).Result(), + }, + // Test case 3: Process Accepted Prepared DataUpload + { + name: "PreparedDataUpload", + pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1.Volume{Name: "dataupload-1"}).NodeName("node-1").Labels(map[string]string{ + velerov1api.DataUploadLabel: dataUploadName, + }).Result(), + du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhasePrepared).Result(), + prepareddDataUploads: []string{dataUploadName}, + }, + // Test case 4: Process Accepted InProgress DataUpload + { + name: "InProgressDataUpload", + pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1.Volume{Name: "dataupload-1"}).NodeName("node-1").Labels(map[string]string{ + velerov1api.DataUploadLabel: dataUploadName, + }).Result(), + du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhasePrepared).Result(), + prepareddDataUploads: []string{dataUploadName}, + }, + // Test case 5: get resume error + { + name: "ResumeError", + pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1.Volume{Name: "dataupload-1"}).NodeName("node-1").Labels(map[string]string{ + velerov1api.DataUploadLabel: dataUploadName, + }).Result(), + needErrs: []bool{false, false, false, false, false, true}, + du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhasePrepared).Result(), + expectedError: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.TODO() + r, err := initDataUploaderReconciler(test.needErrs...) + r.nodeName = "node-1" + require.NoError(t, err) + defer func() { + r.client.Delete(ctx, test.du, &kbclient.DeleteOptions{}) + if test.pod != nil { + r.client.Delete(ctx, test.pod, &kbclient.DeleteOptions{}) + } + }() + + assert.NoError(t, r.client.Create(ctx, test.du)) + if test.pod != nil { + assert.NoError(t, r.client.Create(ctx, test.pod)) + } + // Run the test + err = r.AttemptDataUploadResume(ctx, r.client, r.logger.WithField("name", test.name), test.du.Namespace) + + if test.expectedError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + + // Verify DataUploads marked as Cancelled + for _, duName := range test.cancelledDataUploads { + dataUpload := &velerov2alpha1api.DataUpload{} + err := r.client.Get(context.Background(), types.NamespacedName{Namespace: "velero", Name: duName}, dataUpload) + require.NoError(t, err) + assert.Equal(t, velerov2alpha1api.DataUploadPhaseCanceled, dataUpload.Status.Phase) + } + // Verify DataUploads marked as Accepted + for _, duName := range test.acceptedDataUploads { + dataUpload := &velerov2alpha1api.DataUpload{} + err := r.client.Get(context.Background(), types.NamespacedName{Namespace: "velero", Name: duName}, dataUpload) + require.NoError(t, err) + assert.Equal(t, velerov2alpha1api.DataUploadPhaseAccepted, dataUpload.Status.Phase) + } + // Verify DataUploads marked as Prepared + for _, duName := range test.prepareddDataUploads { + dataUpload := &velerov2alpha1api.DataUpload{} + err := r.client.Get(context.Background(), types.NamespacedName{Namespace: "velero", Name: duName}, dataUpload) + require.NoError(t, err) + assert.Equal(t, velerov2alpha1api.DataUploadPhasePrepared, dataUpload.Status.Phase) + } + } + }) + } +} From 5d1a632be4683b76254b33929a16326a64617c31 Mon Sep 17 00:00:00 2001 From: allenxu404 Date: Fri, 24 Nov 2023 12:03:25 +0800 Subject: [PATCH 10/10] Add hook status to backup/restore CR Signed-off-by: allenxu404 --- 
changelogs/unreleased/7117-allenxu404 | 1 + config/crd/v1/bases/velero.io_backups.yaml | 16 + config/crd/v1/bases/velero.io_restores.yaml | 16 + config/crd/v1/crds/crds.go | 4 +- internal/hook/hook_tracker.go | 137 +++++ internal/hook/hook_tracker_test.go | 88 +++ internal/hook/item_hook_handler.go | 55 +- internal/hook/item_hook_handler_test.go | 519 +++++++++++++++++- internal/hook/wait_exec_hook_handler.go | 8 + internal/hook/wait_exec_hook_handler_test.go | 275 +++++++++- pkg/apis/velero/v1/backup_types.go | 18 + pkg/apis/velero/v1/restore_types.go | 5 + pkg/apis/velero/v1/zz_generated.deepcopy.go | 25 + pkg/backup/backup.go | 11 +- pkg/backup/item_backupper.go | 7 +- pkg/cmd/util/output/backup_describer.go | 6 + .../output/backup_structured_describer.go | 5 + pkg/cmd/util/output/restore_describer.go | 6 + pkg/restore/restore.go | 23 +- 19 files changed, 1173 insertions(+), 52 deletions(-) create mode 100644 changelogs/unreleased/7117-allenxu404 create mode 100644 internal/hook/hook_tracker.go create mode 100644 internal/hook/hook_tracker_test.go diff --git a/changelogs/unreleased/7117-allenxu404 b/changelogs/unreleased/7117-allenxu404 new file mode 100644 index 0000000000..2cfc179b2f --- /dev/null +++ b/changelogs/unreleased/7117-allenxu404 @@ -0,0 +1 @@ +Add hooks status to backup/restore CR \ No newline at end of file diff --git a/config/crd/v1/bases/velero.io_backups.yaml b/config/crd/v1/bases/velero.io_backups.yaml index 9099eb13fe..3c46f833f8 100644 --- a/config/crd/v1/bases/velero.io_backups.yaml +++ b/config/crd/v1/bases/velero.io_backups.yaml @@ -544,6 +544,22 @@ spec: description: FormatVersion is the backup format version, including major, minor, and patch version. type: string + hookStatus: + description: HookStatus contains information about the status of the + hooks. + nullable: true + properties: + hooksAttempted: + description: HooksAttempted is the total number of attempted hooks + Specifically, HooksAttempted represents the number of hooks + that failed to execute and the number of hooks that executed + successfully. + type: integer + hooksFailed: + description: HooksFailed is the total number of hooks which ended + with an error + type: integer + type: object phase: description: Phase is the current state of the Backup. enum: diff --git a/config/crd/v1/bases/velero.io_restores.yaml b/config/crd/v1/bases/velero.io_restores.yaml index 81b71ed357..19a1193966 100644 --- a/config/crd/v1/bases/velero.io_restores.yaml +++ b/config/crd/v1/bases/velero.io_restores.yaml @@ -440,6 +440,22 @@ spec: description: FailureReason is an error that caused the entire restore to fail. type: string + hookStatus: + description: HookStatus contains information about the status of the + hooks. + nullable: true + properties: + hooksAttempted: + description: HooksAttempted is the total number of attempted hooks + Specifically, HooksAttempted represents the number of hooks + that failed to execute and the number of hooks that executed + successfully. 
+ type: integer + hooksFailed: + description: HooksFailed is the total number of hooks which ended + with an error + type: integer + type: object phase: description: Phase is the current state of the Restore enum: diff --git a/config/crd/v1/crds/crds.go b/config/crd/v1/crds/crds.go index fff600dc19..65391cd542 100644 --- a/config/crd/v1/crds/crds.go +++ b/config/crd/v1/crds/crds.go @@ -30,13 +30,13 @@ import ( var rawCRDs = [][]byte{ []byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xb4VAo\xe46\x0f\xbdϯ \xf6;\xec\xe5\xb3g\xb7\xbd\x14\xbem\xd3\x16\b\x9a\x04A\x12\xe4Nۜ\x19mdI\x95\xa8I\xa7E\xff{A\xc9\xcexl'\xb3Y\xa0\xbaY\xa2\x1e\xc9G>ZEQ\xacЩG\xf2AYS\x01:E\x7f2\x19\xf9\n\xe5\xd3O\xa1Tv\xbd\xff\xbczR\xa6\xad\xe0\"\x06\xb6\xdd\x1d\x05\x1b}C\xbf\xd0F\x19\xc5ʚUG\x8c-2V+\x004\xc62\xcav\x90O\x80\xc6\x1a\xf6Vk\xf2ŖL\xf9\x14k\xaa\xa3\xd2-\xf9\x04>\xb8\xde\x7f*?\xffP~Z\x01\x18쨂\x1a\x9b\xa7\xe8<9\x1b\x14[\xaf(\x94{\xd2\xe4m\xa9\xec*8j\x04}\xebmt\x15\x1c\x0f\xf2\xed\xdes\x8e\xfa\xe7\x04t7\x00\x1dґV\x81\x7f_<\xbeR\x81\x93\x89\xd3ѣ^\n$\x1d\ae\xb6Q\xa3\x9f\x19\x88\x83\xd0XG\x15\xdcH,\x0e\x1bjW\x00}\xa6)\xb6\x02\xb0m\x13w\xa8o\xbd2L\xfe\xc2\xea\xd8\r\x9c\x15\xf05Xs\x8b\xbc\xab\xa0\x1c\xd8-\x1bO\x89\xd8\a\xd5Q`\xec\\\xb2\x1d\b\xfb\xb2\xa5\xfe\x9b\x0f\xe2\xbcE\xa69\x980W\x1ec}88:A9\x12\x01\xa3\xb3\x8c\x18\xd8+\xb3]\x1d\x8d\xf7\x9f3\x15͎:\xacz[\xeb\xc8|\xb9\xbd|\xfc\xf1\xfed\x1b\xc0y\xebȳ\x1aʓר\xfdF\xbb\x00-\x85\xc6+ǩ9>\n`\xb6\x82V\xfa\x8e\x02\xf0\x8e\x06N\xa9\xedc\x00\xbb\x01ީ\x00\x9e\x9c\xa7@&w\xe2\t0\x88\x11\x1a\xb0\xf5Wj\xb8\x84{\xf2\x02\x03ag\xa3n\xa5]\xf7\xe4\x19<5vk\xd4_/\xd8\x01\xd8&\xa7\x1a\x99\xfa\x1e9\xaeTC\x83\x1a\xf6\xa8#\xfd\x1fд\xd0\xe1\x01<\x89\x17\x88f\x84\x97LB\t\xd7\xd6\x13(\xb3\xb1\x15\xec\x98]\xa8\xd6\xeb\xad\xe2Av\x8d\xed\xbah\x14\x1f\xd6IA\xaa\x8el}X\xb7\xb4'\xbd\x0ej[\xa0ov\x8a\xa9\xe1\xe8i\x8dN\x15)t\x93\xa4Wv\xed\xff|/\xd4\xf0\xf1$\xd6Y-\xf3Jby\xa3\x02\xa2\x16P\x01\xb0\xbf\x9a\xb38\x12-[\xc2\xceݯ\xf7\x0f0\xb8NŘ\xb2\x9fx?^\f\xc7\x12\ba\xcal\xc8\xe7\"n\xbc\xed\x12&\x99\xd6Ye8}4Z\x91\x99\xd2\x1fb\xdd)\x96\xba\xff\x11)\xb0Ԫ\x84\x8b4\x8b\xa0&\x88N\xd4Жpi\xe0\x02;\xd2\x17\x18\xe8?/\x800\x1d\n!\xf6\xdbJ0\x1e\xa3S\xe3\xcc\xda\xe8`\x18\x81\xaf\xd4k:\xd6\xee\x1d5R>aP\xae\xaa\x8dj\x926`c=\xe0̾<\x81^\x96\xae\xac<\xfc\xee\xd9z\xdcҕ͘S\xa3\xc5\xd8&w\x86\xe0d\xb2d\x19Ӳ\xe1\f\x1b\x80w\xc8#\xfd2*\xf32\x06\x16\xf3y\xa3\b\xa9\x10(r6h\x1a\xfa-u\x94i\x0egr\xba^\xb8\")\xed\xec3\xd8\r\x93\x19\x83\xf6\xb1.dR\x13\xf8h\xde\x15\xec\xe90?\x13\xe6݉1(\xd3J\x1b\xf4\xd3T\x9c\f\xd4K]ɴ\xe0O\xff\x9b\xe3E&vsw\x05ʊ\xe7V\xd8N\xa0,\xc12y\x10\xec\x1f5lM\x8c\xc4A95\xe0\x05\xa0y\x980\xa0\x04\xe5\xe4Dy\x057H\x92\x82\x9e\x89\x02;\n\xa9D\v\x1e6\xd1\x1b\xf2\xabT@\x98\xd8\xcb7\xe4hL\xa9\u07fczu`&,\x9aL\x16E%\x989\xbfB\xf9g\xbb\xcaH\xa5_\xe5p\x02\xfeJ\xb3Ú\xaa\xec\xc8\fd\x96\x91\xafh\xc9ֈ\xba\xc0\x85\xb3)\xf2\x7f\t\x02\xa0_vp5g+\x8c\xda(&\x0e\xad\x1fP\xea'8`\x17\x80\x93/\xd7\xd5͢!\xb4}e\xa9\xf3\xf9\xfd\xfdC[\xf6\x98\xeeS\x1f\xe9\xde\x12Ȇ\x05\x96`L\xecA9&\xee\x95,\x10&\x88\xdcI\x1f\x8a.g 
\xfa\xe4\xd7ծ`\xc6\xf2\xfd\xef\x15h+\xe4rCnQ\x93\x90\x1d\x90\xaa̭dn\xc8V\x90[Z\x00\xbf\xa5\x1a\x9e\x9d\x01\x96\xd2zm\t\x9bƂ\xb6\x12\xec7vTk\xfd\x10t\xd9\b\xbf\x9cB\xb8/!\xeb,\x18ۋ\xedY\x86˂\xec\xa5j\xf4\x85SW\x9b\x0e\xc8\xf8\x92\xb5O\xa6ٽ\xa0\xa5>J\xf3\xc0\n\x90\x95\xe9\xb7\xe8!t{\xbf\xedu\b\xc8x\xd4P\xadT\x1ar\xbbΞ(3\x16\xbd\x01Lb\x01\x91/\xa8a\x02<\xd44\x95&\xa6R\x02W\xe9g\xa0\xf9\xf9A\xfe\xa6\x81\xe4\x15\nk\xa6\x00\xa7|Cv\xb0\x97\n\"p\x15\xd8\xfe\xb61(e\t\xa3\x11%Y\x99\ry8\x82%#\xad\xb8\xf1r\xcf4y\xfd\x13)\x98\xa8\fl\x06\xd0F\x18\x8cD\xa1\x86\x16\xf2\x04j\x86^﨡\xbf\xdav=2\xd9\xfe\x04\x01ؙ\xee<\xc9vg\xfbcdV\x9e\xabd\xbboAd\x9a\xacVD*\xb2r&pu\x83\xa0\xadQ5k&ZcD >1\xceø\xcbf\xee\b\xe8x\xa7\x1f\xe4\a\xed\x84t\x8e\x10#\xddZty:\x829\x82\"\xa5\f\xc6'\x82\xf7\x9eq \xfa\xac\r\x14\x9e*A\xe5\a\"\xe2r\xe0܃Ж\xa8\x1e\xe7\xe1\t\x927\x98\xc8\xdb\x1e\xb2\xed\xa1\xbdS\x9e:\r\xef\xfa\xd4\xf1\x8d\xdbF\xb8!\x94<\xc2\xd9y,T\x10\xcb\x1cj\a\x1a\x89t\x86\xc4\xc1\xfd\f\x14\xb2G8#\x18\xbfA1\xdb;U\x14\xdc\xf3\b\xe7\x94f=\x02Z\x9c\x98\xf6\x1b/\x96\x92\xf6\x05\x12\x02\xf3\xd9\xe9\xc4#\xb8\xd9\x14t\xd1\xfc\xe4H\xba\"\tO\xa0\xfd\x05Ӭ\xd9\xd6ڨCƾԎEv\x15\x1cY\x998Qk\xe60\x95 \xf7\xf5v\xd3\x17\xcaY^\x0f\xe4\xe4~+ƽ\xe1\xee\xf3Q\x9a\xad\xb8!\xef\xbf2\xedw\xfc\xdeI\xd0\x1f\xa5\xc17\xcfBN\x87\xf8\x05\xc4t\x1dqy\t\xa7\xb6-\x1d\xda\xfbV\t\xc2\ud7ad\x8b\xf0j\xf60M\xb6\xc2\xc6-\x9e\x1e\xb8\v醛\xb6\x0fݧ\xa84nL\t)\xd6.\xf5\x12\x1b\xc9\x11;\x11\xa4T\x1d\x8e\fQ\xab\a\x1d\xc9\xf5ğ\akI\\\x7f\xb7\xaf\xcai\x06y\xd8W\xc1\xdd@j\xe0\xc02R\x80:L\x19\x8e\xf6SZ\xfd\x9e\x86B\xa2\xd6u\xcfB\tK3\xed\xe1\xf1\xaa;\x9a\xfc\xee>k\xbbr\x13Z\x05f\xcf6\x1d\xd9\x04\x1co:?#4\xb1\xe8\x7f\xccR\x97\xe69\x96iP~\xb7@\xe3/\xe0\xc5\xd0\xf6;Ĝ\x85,(nN\xfc\xb75s(\xd0\xffCJ\xcaT\xc2\x1a~\x8b\xe5\x18\x1c:}}\x16\xab=\x8c\x1d\x81ib\xf9{\xa2|\xb8\xbd\x1c\x99\x9c\xb4\xba\x05\xb83\xe4r?\xf0Xn\xc8\xd3QjgSqSd\x16$\xd3d\xf5\bg\xbf\x19\xd7\xd6\x03\xab\xadX9\x03\xbfX\xdd\xd4ނ\x14\xfcLV\xd8w\xf5-NP\xa2$&6\xfb\xba~\xac\xcbO\xd6\x05-\xd7^z\x8d,X6\xda\x0f\xcbeR]l\x1b\x83\x06\x0f\xc2v\xackD\xac{<5\xdb$\xf9-\xa5\x8e\xec|\x8f\xa0r'\xb5q\x19Ɏ;\xbb$\xfbE\x9c\xec\xf9\xac\x17\xa1{W\xa5#U\xa8\xbf\xb0겗\xa8\xb5\xdc\xd6Ӛ\xd9\xed\x19\xf8L\x9a\x03j\x03\xb2U\xb3\xf2\x9d\x1e^\xb9=\v\x1c\x84f\xe8\x94\xcc\xc2-\x95\xcc@Gw\x8b\x9b'A\xcb\xcf$\x17\xeb\xc4\"u\x81\x8f+n\x98Nf\x86'ݑ\xb5DZ\x18\x02\xbc\xff\xda\xcazZ\xa5a\xff\x9e\x13\xbe\xa5x\x11\\\xebEA\xfbU\v\xd9f\xc6J\xc6bO\x87m\xa1v\xac]\xd4VЯ\xac\xa8\nB\vK\xfa\xd4pi\xef*\xce:\x1c\xaf\xeb\xce\x10.\x9a\x11#\xed\xa2*9\x98\xd4\x15\xe9*\xcc\xec2\xd1,\x87\xda0{)\x90\x82P\xb2\xa7\x8c\x8f\x94\xbb\f\x9fE\xb4]\x12\xa3xeq\xbd\xe0#m\xf05\x92\"!\x81\x9b\xe8dNk\xebR\xa5\xbb\x8aw\n\xd2ܳ\xb9dvp\xcfJ\xc5$\xd6\xe9]\xd9C\xf3\"F\xc5\xf9\x87\x8b6x~\xb8h3\xcf\x0f\x17m\xf4\xf9\xe1\xa2\xcd??\\4\xff\xfcp\xd1\xc2\xf3\xc3E\xfb\xe1\xa2M5\x9b\xd2\xd6s\x18\xb9\xd3q#?\xceb\x91\xb0\xad=\x85\xe2\x04|_\x85\xe1\xeb\xbcS+3\xb7\xf1^\x91:\xfe\xe4\xdap\xdd2%u\xa9\xa6] 
A\xbc\xdda\x9f\x99\xe2\xcdo\xa8\x97\x0f\x83^T/\xbf\x9d\xec|\xa5zy\x8fa\xdf\xeb\xbeR\xb5|\x98\xff\xb2j\xf9\x1b_\xaaQ\x00\r\xe9y\xb7\x17\x9f\x8f\r\xd9\x1bm\x00\xf8\x0f\xae\xbf\x1dԇ]\xc6\xf8g\xaf\xb6\x1fa~ba\xfc\xea/\xab\xef\x8fҋi;J\xcd\x01\x99\"\x93\n\xc7|m\\\xd9.\xee\xea\x16\xd2}\x9f¹T\x1aS+\xe6\xa7\xe85\xd42-\x82}\xaf\x8b\xd9@\xf1\xa9\xf4\xb6\"\xedD\xe76\xd2e\xeeLgd>\x98\x04\xd0g\x91\x1d\x95\x14\xb2\xd2>o`\xa1\xbf\xc5\xf4\x85\xdf\n\xc52\xb0D\x05\xfb\x9a\x1ce\x15\xa9؞\xa0\xddL\xfd\xdex՞ߣ\x06CO\xaf7\xdd_\x8c\xf45|䉙c\x04ϧ#\b\xdc]\x17\x87vA~Xp\xfe\xc0y_\x90\x88TD0>f\xb0\xeaS\xf9\x1d\xd3\xf4\xa9tI\xa2Ŗ\x7f:\xc1\x91V\xe5wqm_\xb7vo\xc4\t\\\xba\x99\x9d~\x84!\xbdzo\xba\xdcnI\xcd^\xbf\"o\x14\xe8|\xa5^Jnj\xa6*\xef\x82Z\xbc\xc4:\xeco\xdezO\xa9\xb6\xbb\xa8\xc6n\xb6T9\xb1\xb2\xae[37\rrA=]\x12q\xe6k\xe7\x16W\xcc\xf9\n\xb5\xc9y$\xd7\xc9E*\xe0&\x01\x8fV\xc7Mս\xcd佇5q\xe9\xd5n\x93\xa0\xb1\x12n\xbe\xc6\xedz\x95\xec\u05c8\xb2\xc7U\xcdl\x9d\xdal\x14>\x8d\xdfl%ڒ\xfa\xb3Y\x8a]XkVג\x8d\x8c\xbb\xb4¬[A6\x024\xa5\xael\xa4nl\x04\xe2d5Yj\xb5\xd8\b\xec\x19\xb3;)%\x93?.\xa9\x12\x8b_\xa2Bf\xad!\xff\xa3\xe4\xefR2H\xd5q.\xe7\x02\x9aO\xbd\xe6\x96\xf3\xc1ǚvVc~*3\xc7\xe5\xcejQq\xc3J\x8eۋ'\x96Gcvs\x84s}1\xc4\xef\x12\x8fk\xba\xcbLȧϵ0oz.7\xd5\xe4\t8'4&\x8a\x83\x99g\xee\x1e\xa0L\xae\xc1Z\b\xbb<\xfd\x95\x17\xfe\xba\xa0\x1b'\xefx\"5\xb6\x03c\x8ePX(\xe3מ\x8c\xaa\xf2iw\xd2y\xbe\xf8\xee\xef\x15\xa83\xc1\xebYj\xffb\xe6<\x94[\x96\xda\xc6BAQxm\xe3n\x9f\xea\xb9\xd9\xcd\xf2$o\x853xQ\xb0=\x1c\x11\x8e\xd5\x10\xbc\xe6\xb5U\x866j\x18i\x1aO\xc4ʺw\xe4\xf79O5\xf50\xd1\xf3\x06\x1a\xcbC\x8dY#\xff,\xe1\xc6\xe5\x01\xc7\x04\xc8\xd4\xc3Ai\x1bⳇ\x81\x9e+\xf0\x98\v=\x92}\xae\xb4\xc3>\xcfq\xc8g\xc1\xe1\x9e\x05!Ȳ $\x99L)\x87x\x9e%\x14y\xc6`\xe49\u0091\xcb\x02\x92\x19\x90\xbd\xc39)\xc7n\x92\x8a=\x92\xf7;S\x8a5\xe6\xb7$\xa7\x8f\xd3$\x1c\xa3Iج\x9c\xc34\xe1\xb8̲c2\t4|\xa6P噂\x95\xe7\bW\x9e7`\x99\rYf%g\xe6\xe7e\xc7[.N\xdeK\x95\x83\x9a\xdc\xebH\x15\xcdI\xa1\xec\xc5\x17\xdd1{\x99\xffp\xa7\x9cm\xd5qec\t\xeb\xfa\xd4{F~f\xc2\xef\xa3Z!l\xd9\xfd\xce\x06L\xe3\x88\xc4\xf3\xff\x8d\x97\xe7o\x1bu\xbb6\x1aJ\xaap\x87uwv\xa5\x15zC\xde\xd3\xec\u0603~\x8c\xc6\x15{\xa9\njȪ\xde\xf2z\xe5\x80ۿW\x1bB>\xc8zӾ}\x93\x8cfE\xc9\xcf6n\x88\xc0\\\xb5A\\&\x10Q\xe1\v\xe3\xdfIβ\x88\xa7\x15\xbd\\\xc85\x1e\\\t\x81W\x1ee\xed\xad\xef\xd26\x8c;Z\xe8\x94u\xafW\xdcK\xce\xe5\xd3\xc2p\x9c\x96\xec?\xf0\x96\xe6\xf9\x1c\xceۻ-6\r\x92\x82\xb7;\xd7\x15B5\xd2;\xb0\x16\xb3\x99\xce؊\xdf\xee;\x10#\x95v\xf5\x9f(\xad\xb5\xc5fc\xb7.\xb9\xaa?\xabi\xee\xb6\x0e\xbb\r\n\v\x15g\"\xb1\xd6\xc3\x1c\x99\xca\xd7%U\xe6\xec\xea\nnj\x1c\xc6\xf38\xc1nNe[F\xcd\xcb\xf0\xba\xdf(mí\xbf\xb8\x99w.\xbb[\xa1}\x8a^\x82\xc7\xf8Q\xbe\xd9C|W\xc4c\xdc\x05Y#\xa5\"\xaf\xa3EIW\xcbbi\x7f\xb5\xed\xaf\xf2\x04\xef\xa2٬\x0ey\xee{\xcd#\xe5D\x01\xa2\xbb\xdcu\xeavP\xbc\x9c\xf32]\x14\xaf\x0f\nC\xfb\xeb;\x13\xe7\xe2[G\xa6\x12n.\rpu\x01\xfe\x95wݭ\x1b\xc39\xf0\x0f\x8c\x83vh%(ѻa\xafZ\xa7V\xc5\xce9j{\xfbc=\xc0\x88\xf5q\xd3\u009co\t\xca:F.;\\\xe9 \x96\xe3\x13o8\u0084\x81C$\xdf<\xa1EO\x9d\x8b\xbf\x83Hϩ\x94/\xf1^\xad|`kQ9W1\xaaQ\xc6ാ}\x80\x99r<^~\xdd[2ǜ\xff\xb1\xdb\xe1\xf1F\xf4\xf9\xfb\xe1\xdd\xc5\xe9\xfek\x10^\x90+\x85W\\\xfaK\xd5\xf1Jȋ\xae\x88\xdf\xd5uP\xc6/\xa2\x83\xeb8B\x047\xb7Q\xbb\x97\xc4f_\x84\v\"\x0f\x8bw`\xba탧D\x96\xd1\xc1\xb3\xc0\xd7\xc6iC\x8b\xb9\x9b\xeeo\x87=\xf03$*oU\xd3\xd5\u05f5?Qݰ9\xa6f\x1bp\xae'\x86\f\x16\x1a\xe4\x04N 
\x88\xb5S\x8e\xbe\xe1S9\xfd>\x11\xa8m(\xfe\x18\x85\xd3\xf5A\xf3\x87\xc8\xdf\x7f^\xe5\x01\xdd-u\x02\xf5RO\xc0\xac/\xe0\x8f\x10a(\x99.\x14\x7fc}YXG\x81&\xb9jQ]\x9bi\xd6\xd5\xf3\xc9J\xeb\xf6~;\xd6sT\x82C\x83\x18\xff\x06\x1f\xba\xf8F%5\x9cY\xaa\x8a\x1a\xcelNAu\xd4Qdr\x8d\x82\xba\xfa4q\xad\xceސ\x8d\x8d\x9c\a\x80\xc7\xc1\xc2\xe7\x0f\xdcy\xb0\x02\xb4\xa6\x87p5\xf6\x93u\x98\x0f \x00\x93G\x91\xd9\xf8t|s~\xa8{1\xb4\xdb7\xa4\x99\xa9\xa8\x1f Tg\xb6Z\xbd\x8c)`.\x0f\xee\xc3\x15,|\xb0(D\x12\vi\xf2\xb5d*%\xf2x_7\xb4\xb4A\xa7\x0e\x19\xd1|`\n8;0\xeb\xb6[&\x1d\xa8\xda\xd1\x03\xac3\xc99\xa0\xae\x1d\xe2\xf5\x9c\x8b՟\xd2\xfa\fT\xcfN\xedC\xbb\xad\xdf_r\xdcv\xf7*RW\xa8\x8c\xdf\x1b2LA\xf3\x01\xaf\x01B\x12\a^\x14i8*D?u5Ĵ\xdd6,0\xafW}\x16\xd2\x7f\xf9\xea\xc6Ǯ\xf1\xe4JA\x7f\x97\xea\x86\x14L\xd8\x7f\xa8\xc8\xdd\x06P\xe8\xbc\b\x7f\xbc\xf1|\x06\xef;ۦ>,\xdb\xf2#!,\x88\xb1\xc8:~@rM>\xc20\x10tg\x1e!\xc7-\xcf\xd8\xf7\xbdl\x93\xad\xb8S\xf2\xa0@\x0fW՚\xfc\x8d2\xc3\xc4\xe1\x83Tw\xbc:0\xd1\xf8\x1b\x8b\x1a\xdfQe\x18\xe5\xfc\xec\xf0\x89!\xca\x04\xe5\xec\x1f1\xee\xb4\x7f\x9c\aT\xab\xdb\xc8o\th\x8c\xfd\xf0\x0e\xac\xa9\x1d\r7\xe2\x82\xe0\xe9:'\v\xbeY\xb3EÄ\x93]<_\xb9\x93\x95\xe9(\xbfFy\xc6\xe2]\x0flC>J\x03a\xe7\x9fuaZs\x01ڬa\xbf\x97ʸ\x1d\xa1\xf5\x9a\xb0\xbd\x0f_b)}\xca8F\xb1\xee\x03a\x84\x99\xa6ԳYo\x98IR\xa86\xf0Fゞ]\xbe\x97f\x99\r\xf8\xe1\x956\x94G4\xf27\xc5\xf8\x18'\xda\xf5\x02\xf9o)i\xf0m\xbb\xfd0\xaeGp\x8erx\x16\xdaY\xa3\xa8m&\x98\a\x04A\x9e\x143\xc6Z\x80vi\x171V\xe7sN\xb4Ղ\x17\x05\xf8\xc4y\v\xdb\xf1\xad\xe6n\x02\xa9n<\xe6l\xf8\xc9\xe1\xf7\xb0vH\x82\xd1ą\xdfR\xf7}-+\xb3#\x15\a+TJV\x87c\x90\xcb\x11[>\x027\xaf\x00\x93!\xa8!t(\xab1\x95\x12\xad\xad\xb7\xfa|_\x83.\xcd\x1eG1\xf5\xa5\x03\xe1#\x95\xaf\xfcE\xf7뽒\xc5\xda\xf3\x02\xaban\xfcv\x98b\xd2\x06\xec\xe6\x18%9q\xdf\xc0\xf27J\xa3\x18\x94%\bB\xb5\xc7'\xe1\"\x90\x8b\xf36\xdaPeR\xe3\xa0\xfbN\xe3\x99\x10\b!\xc7\xf1\xbd\xf7\xdb}\xeeB\x94[\xff\t\xb8\x1a\xf0\r\xd1L\x84\xefc\xba\xcdD'\n\xdaFF\n0\x19\x1a-t\x1a\xc44\x9d\b\xa6\x8b\xfe\x1f\x1b\xbc\x9cj\x9b\xf8>\xc5\v\xfe\xd2k\xde;\xf4\x86\x1fC\xab\x9bx\xcf5B\x8f?\xb1\xbd\xab\xbd\xca,\xd6\x7f\xfe??\xccvJ\xf2\xb2^N:X\xe8;՞\xd2̧\xcf\xee8X\xcfG\x03t}\xb7\x97\x8b\x9c\xf4\xd3ea\xe75c\xce\xf0\xe9\xd6\xebDb\xa7ˢ\xcdg\v5\xaf;\xbb'\x8a\x9f\x8b\x9c[c\x7f\xf3\xcd\"\xb1\xa6\x87\x10\x896#Ө\xe3\xcf\xd9h\xb3\x15l\x06\x1cG\xbe\xee\xd4\v@\xaf\x14nF\xed\xc0\xe0%*м\xb5\xb6\xfdH\xfe\xcd\xff\x06\x00\x00\xff\xffw\xe0\xabZYz\x00\x00"), + []byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xec}O\x93\x1b+\x92\xf8ݟ\x82\xd0\xef\xe0\x99\x89\x96\xfc\xfc\xdbˆo\x9e\xb6\xbd\xa3x\xef\xd9\x1d\xee~\x9e\xcb^PUJ\xe25\x055@\xa9\xad\xd9\xd8\xef\xbeA\x02\xf5\x97\xaa\xa2d\xf5[φ\xb9\xd8]\x82$\xc9L\x92\xcc$\x81\xf5z\xfd\x82\x96\xec\v(ͤxCh\xc9\xe0\xab\x01a\xffқ\xc7\x7f\xd7\x1b&_\x9d^\xbfxd\"\x7fCn+md\xf1\x19\xb4\xacT\x06\xef`\xcf\x043L\x8a\x17\x05\x18\x9aSC\u07fc \x84\n!\r\xb5\x9f\xb5\xfd\x93\x90L\n\xa3$\xe7\xa0\xd6\a\x10\x9b\xc7j\a\xbb\x8a\xf1\x1c\x14\x02\x0f]\x9f~ڼ\xfe\xff\x9b\x9f^\x10\"h\x01oȎf\x8fU\xa97'\xe0\xa0\xe4\x86\xc9\x17\xba\x84̂<(Y\x95oH\xf3\x83k\xe2\xbbs\xa8\xfe\x15[\xe3\aδ\xf9\xb9\xf5\xf1\x17\xa6\r\xfeP\xf2JQ^\xf7\x84\xdf4\x13\x87\x8aS\x15\xbe\xbe Dg\xb2\x847\xe4\xa3\xed\xa2\xa4\x19\xe4/\b\xf1Xc\x97k\x8f\xf0鵃\x90\x1d\xa1\xa0\x0e\x17Bd\t\xe2\xed\xdd\xf6˿\xddw>\x13\x92\x83\xce\x14+\r\x8e\xdd!F\x98&\x94|\xc1a\x11\xe5\xa9L̑\x1a\xa2\xa0T\xa0A\x18M\xcc\x11HFKS) 
rO~\xaev\xa0\x04\x18\xd05hB2^i\x03\x8ahC\r\x10j\b%\xa5d\xc2\x10&\x88a\x05\x90?\xbd\xbd\xdb\x12\xb9\xfb\x1d2\xa3\t\x159\xa1ZˌQ\x0399I^\x15\xe0\xda\xfeySC-\x95,A\x19\x16\xe8\xecJKxZ_{\xc3{i)\xe0j\x91\xdcJ\r\xb8ax*B\xee\x89f\xc7c\x8eL7\xc3E9\xea\x00&\xb6\x12\x15\x1e\xf9\r\xb9\ae\xc1\x10}\x94\x15ϭ\xb0\x9d@Y\x82e\xf2 \xd8?kؚ\x18\x89\x9drj\xc0\v@S\x980\xa0\x04\xe5\xe4Dy\x057H\x92\x82\x9e\x89\x02\xdb\v\xa9D\v\x1eV\xd1\x1b\xf2\xabT@\x98\xd8\xcb7\xe4hL\xa9\u07fczu`&L\x9aL\x16E%\x989\xbfB\xf9g\xbb\xcaH\xa5_\xe5p\x02\xfeJ\xb3Ú\xaa\xec\xc8\fd\x96\x91\xafh\xc9ֈ\xba\xc0\x89\xb3)\xf2\xff\x17\x04@\xbf\xec\xe0j\xceV\x18\xb5QL\x1cZ?\xa0\xd4Op\xc0N\x00'_\xae\xa9\x1bECh\xfb\xc9R\xe7\xf3\xfb\xfb\x87\xb6\xec1ݧ>ҽ%\x90\r\v,\xc1\x98\u0603rL\xdc+Y L\x10\xb9\x93>\x14]\xce@\xf4ɯ\xab]\xc1\x8c\xe5\xfb?*\xd0V\xc8\xe5\x86ܢ&!; U\x99[\xc9ܐ\xad \xb7\xb4\x00~K5<;\x03,\xa5\xf5\xda\x126\x8d\x05m%د\xec\xa8\xd6\xfa!\xe8\xb2\x11~9\x85p_B֙0\xb6\x15۳\f\xa7\x05\xd9K\xd5\xe8\v\xa7\xae6\x1d\x90\xf1)kK\xa6ٽ\xa0\xa5>J\xf3\xc0\n\x90\x95\xe9\xd7\xe8!t{\xbf\xed5\b\xc8x\xd4P\xadT\x1ar;Ϟ(3\x16\xbd\x01Lb\x01\x91/\xa8a\x02<\xd44\x95&\xa6R\x02g\xe9g\xa0\xf9\xf9A\xfe\xa6\x81\xe4\x15\nk\xa6\x00\x87|Cv\xb0\x97\n\"p\x15\xd8\xf6\xb62(e\t\xa3\x11%Y\x99\ry8\x82%#\xad\xb8\xf1r\xcf4y\xfd\x13)\x98\xa8\fl\x06\xd0F\x18\x8cD\xa1\x86\x16\xf2\x04j\x86^﨡\xbf\xdaz=2\xd9\xf6\x04\x01ؑ\xee<\xc9vg\xfbcdT\x9e\xabd\xbboAd\x9a\xacVD*\xb2rK\xe0\xea\x06A\xdbEլ\x99h\xf5\x11\x81\xf8\xc48\x0f\xfd.\x1b\xb9#\xa0\xe3\x9d~\x90\x1f\xb4\x13\xd29B\x8c4k\xd1\xe5\xe9\b\xe6\b\x8a\x942,>\x11\xbc\xf7\x8c\x03\xd1gm\xa0\xf0T\t*?\x10\x11\xa7\x03\xe7\x1e\x84\xb6D\xf58\x0f\xc7)*\xce\xe9\x8e\xc3\x1bbT5\xecΑa'%\a\xda_~\xfat\xf8\fڰl\x86\n\xab>\x19\\\xab\b\x11\x94\xff\x01\xc7\x16\x93\x88f\x96\x19\xfa\b\x84\x06j\xd8e\x91\xf3\x16\x11;\x14 \xff)\xc8;\xab\xb33\xabI\x87\xd8\x12\xaf\xb3\x19p\\'\x84$\\\x8a\x03(כ]\x0f\x83\xe4(\xb0\xb2\x95\x13\xab*\x15p\xab\xf3ɾ\xb2ZtHgB\xec,\x1e\x95\x01&\xb4\x01\x9aoV\xd7d\x10|\xcdx\x95C~댠{k\xbe\xe5\xc1h\x1dh\xc2\x1e\xa3\xdeO6\xf6+(g\x19\xda^\xde\xccZ\xa3\x85\x18cV\xb3\x90\x9eKpF\xaae\x9cǰY![\xd3\\\x83\xb1UV\x7fY\xddX~F\x80v{\xed\xf6\xa1\tUPS \xae\xf9\" \xa1(\xcdy\xc8=f\xa0\x88\x10lRM$\xb2\x8e*E\xcf#\x8c\xab-\xed\xcbX7ּ\xc7<\x11\xaa\xfd\xc1\xec\xeb\xf7\xbb\x90\x81\x11\x88L\x7f\xaf\f\\\xcc2\x8d\xde\"e²\xca:n\x1dNit\x86\"ñ4\xb3\xb6\"\x13\x0e\x1e\xfa9\rc\xbe\x17\xba,\x95\xe41ѭ%Ƌ\xa4\xf5\x10i\xd4*\xfa\x8e\x89r\x94\xf2q\x8e\x10\x7f\xb3u\x1a_\x83d\x18\x80 
;8\xd2\x13\x93\xca\x0f\xbd\xb1\x03\xe0+d\x95\x89\xceejH\xce\xf6{P\x16Ny\xa4\x1a\xb4s7\xc7\t2n>\x93\x96r\x88\xfe\xd8\x1bG\xc3H+\xa98\xf21ԭ!\xd0_\xd1B\xb1\x88Z\v\x17WΜ\x9dX^Q\x8e\x8b(\x15\x99\x1b\x0f\xad\xf1\x8a-\xc6\x13L\x1e\xe0\xec\x96耹\xe5D\xc7\x1d\x91\x02\xac\tZX\x1flX5\xb6ȸ26\xec\x1d\xb5v\x86t\"\xaa*\x0e\xdaw\xe5\f\xbbF\a܌\x82\xae9\xe2\xfcwNw\xc0\x89\x06\x0e\x99\x91*N\x8e9&\xbb\x92\xa2\xd7F\xa8\x18\xd1p]\x87\xa0\x19\xd8\x04H\x82NՑeGg\xa6Y\tB8$\x97\xa0q\x96Ӳ\xe4\x91\x15\xa0)\x93\x9c\xf7\x9dLM\xf4\xa6\xccL\xf9>\xbc\xd8\xe4oJ\x82nlʌ\x96\xecR\xb6\x16\ab\xe4\xe4\xb0\xffo\x126\xa8\xfd\v\x84v;hz]\xa1\xb5$e\xa0\xd1`B\xcb\xe5\x860\x13\xbe\xceA\xb4NN\xd3\xff\xbf0c\x96K\xfc\xb6\xdf\xf2\xaa\x12?ɕ9\x88\x96+u\xf7\xff\x82L\xc1\xc5\xe2ޯ\x15\xc9\f\xf9\xa5\xddꆰ}͐\xfc\x86\xec\x197\xa0z\x9c\xf9\xa6\xf9r\rb\xa4\xacw\xb6\x14\xd4d\xc7\xf7_\xad奛\xbd\x9cD\xba\xf4\x1b;\xfb5\xd8\xf3݅y\x06.\xc1\xa00SP\xb8`\xf3\x03R\xb3\xf9\x82\x16\xd5ۏ\xefbѬnI\x90\xbc\xc1@\xde\xf6\x90mw\xed\x8d\xf2\xd4axӧ\xf6o\xdc6\xc2\r\xa1\xe4\x11\xce\xceb\xa1\x82X\xe6P\xdbш\xa73$\x0e\xeeg\xa0\x90=\xc2\x19\xc1\xf8\r\x8a\xd9֩\xa2\xe0\xca#\x9cS\xaa\xf5\bhqb\xdao\xbcXJ\xda\x0fH\b\x8cg\xa7\x13\x8f\xe0fS\xd0E\xf3\x83#\xe9\x8a$\x94@\xfb\v\x86Y\xb3\xad\xb5Q\x87\x8c}\xa9\x1d\x8b\xec,8\xb22q\xa0v\x99\xc3P\x82\xdc\xd7\xdbM_(gyݑ\x93\xfb\xad\x18\xb7\x86\xbb\xe5\xa34[qC\xde\x7fe\xda\xef\xf8\xbd\x93\xa0?J\x83_\x9e\x85\x9c\x0e\xf1\v\x88\xe9\x1a\xe2\xf4\x12Nm[:\xb4\xf7\xad\x12\x84ە\xad\xf3\xf0j\xf60M\xb6\xc2\xfa-\x9e\x1e\xb8\v麛^\x1f\xba\xa5\xa84nL\t)\xd6.\xf4\x12\xeb\xc9\x11;\x11\xa4T\x1d\x8e\fQ\xab;\x1d\x89\xf5\xc4˃]I\\{\xb7\xaf\xcai\x06y\xd8W\xc1\xdd@j\xe0\xc02R\x80:L-\x1c\xedRZ\xfd\x9e\x86B\xa2\xd6ue\xa1\x84\xa5-\xed\xa1x\xd5\x1d\r~w\xcb\xda\xce܄Z\x81ٳUG6\x01ǫΏ\b\x97X\xb4?f\xa9K\xf3\x1c\xd34(\xbf[\xa0\xf1\x17\xf0b\xb8\xf6;\xc4\xdc\nYPܜ\xf8/\xbb̡@\xff7))S\ts\xf8-\xa6cp\xe8\xb4\xf5Q\xacv7\xb6\a\xa6\x89\xe5\xef\x89\xf2\xe1\xf6rdp\xd2\xea\x16\xe0n!\x97\xfb\x81\xc5rC\x9e\x8eR\xbb5\x157EfA2MV\x8fp\xf6\x9bqm=\xb0ڊ\x95[\xe0\x17\xab\x9b\xdaZ\x90\x82\x9f\xc9\nۮ\xbe\xc5\bJ\x94\xc4\xc4j_\u05cfu\xfaɺ\xa0\xe5\xdaK\xaf\x91\x05\xcbF\xdba\xbaL\xaa\x89m}\xd0`A؆u\x8e\x885\x8f\xa7F\x9b$\xbf\xa5ԑ\x9d\xef\x11T\xee\xa46.\"\xd91g\x97D\xbf\x88\x93=\x1f\xf5\"t\xef\xb2t\xa4\n\xf9\x17V]\xf6\x02\xb5\x96\xdbzZ3\xbb=\x03\x1fIs@\xadC\xb6jf\xbe\xd3\xc3+\xb7g\x81\x9d\xd0\f\x8d\x92Y\xb8\xa5\x92\x19\xe8\xe8nqS\x12\xb4\xfcLp\xb1\x0e,R\xe7\xf8\xb8\xe4\x86\xe9`f(醬%\xd2B\x17\xe0\xfd\xd7V\xd4\xd3*\r\xfb\xf7\x9c\xf0-ŋ\xe0\\/\n\xda\xcf\xe2IB\xf1ֵ\f\xd3\xc4\x03r.\x85:T\xa8\"\xd2-O/H\xdf\xc3\xf2^0\xb1\xc5\x0e\xc8뫛\x03\xb5r\x8d\xe5r\xc4J\x8f\xe4\xbemC\xf4\xfa\x83\x18I戕Rb\xc4_A\x87s\xc3\xf8\xb850\x13A\ni\xdaa\b\v\xb7\x94\xf9KM\xf6Li\xd3F4U(\xe2\xb9\"\xb1\xb2\xd4\xe3\x12\uf57a\xc8\xe1\xfa\xe4Z\xb6\x02`G\xf9\x14r\xa1F\x93'b\x057\x93\x80\xb0=a\x86\x80\xc8d%0lc\xa7:v\xe1X\xe0\x14t2\xc9\xd2\x14\x84- 
\xaa\"\x8d\x00k\x94:&&\xe3;\xed\xea\x1f(\x8b\xed@\x0f\xcbB\xb6\x99\xb1\x94\xb1X\xe9\xb0-䎵\x93\xda\n\xfa\x95\x15UAhaI\x9f\xea.\xed]\xc6Y\x87\xe3u\xde\x19\xc2\xc5e\xc4H;\xa9J\x0e&uF\xba\f3;M4ˡ^\x98\xbd\x14HA(\xd9S\xc6G\xd2]\x86e\x11m\x97\xf8(^Y\\\xcf\xf9H\xeb|\x8d\xa4H\b\xe0&\x1a\x99\xd3ںT\xe9\xa6❂4\xf3l.\x98\x1d̳R1\x89yzW\xb6м\x88Qq\xfea\xa2\r\xca\x0f\x13m\xa6\xfc0\xd1F\xcb\x0f\x13m\xbe\xfc0\xd1|\xf9a\xa2\x85\xf2\xc3D\xfba\xa2MU\x9b\xd2\xd6s\x18\xb9\xd3q#?\xceb\x91\xb0\xad=\x85\xe2\x04|\x9f\x85\xe1\xf3\xbcS33\xb7\xf1V\x91<\xfe\xe4\xdcp\xddZJ\xeaTM;A\x82x\xbb\xc3>3ɛߐ/\x1f:\xbd(_~;\xd9\xf8J\xf9\xf2\x1eþ\xd5}\xa5l\xf90\xfee\xd9\xf27>U\xa3\x00\x1a\xc2\xf3n/>\x1f\xeb\xb2\xd7\xdb\x00\xf0\x1f\x9c\x7f;\xc8\x0f\xbb\x8c\xf1Ϟm?\xc2\xfc\xc4\xc4\xf8\xd5_V\xdf\x1f\xa5\x17\xd3v\x94\x9a\x032E\x06\x15\x8e\xf9Z\xbf\xb2\x9d\xdc\xd5M\xa4\xfb>\x85s\xa94\xa6f\xccO\xd1k\xa8eZ\x04\xfb^'\xb3\x81\xe2S\xe9\u05ca\xb4\x13\x9d\xdbH\x93\xb93\x9d\x91\xf1`\x10@\x9fEvTR\xc8J\xfb\xb8\x81\x85\xfe\x16\xc3\x17~+\x14\xd3\xc0\x12\x15\xeckr\x94U$c{\x82v3\xf9{\xe3Y{~\x8f\x1a\f=\xbd\xdet\x7f1\xd2\xe7\xf0\x91'f\x8e\x11<\x9f\x8e pw]\x1c\xda\t\xf9a\xc2\xf9\x03\xe7}A\"R\x11\xc1\xf8\u0602U\x9f\xca\xef,M\x9fJ\x17$Z\xbc\xf2O\a8Ҳ\xfc.\xce\xed\xeb\xe6\xee\x8d\x18\x81K7\xb3ӏ0\xa4g\xefM\xa7\xdb-\xc9\xd9\xebg\xe4\x8d\x02\x9d\xcf\xd4K\x89M\xcdd\xe5]\x90\x8b\x97\x98\x87\xfd\xcd[\xef)\xd9v\x17\xe5\xd8ͦ*'f\xd6us\xe6\xa6A.ȧK\"\xce|\xee\xdc\xe2\x8c9\x9f\xa169\x8e\xe4<\xb9H\x06\xdc$\xe0\xd1츩\xbc\xb7\x99\xb8\xf70'.=\xdbm\x124f\xc2\xcd\xe7\xb8]/\x93\xfd\x1a^\xf6\xb8\xaa\x99\xcdS\x9b\xf5§\xf1\x9b\xcdD[\x92\x7f6K\xb1\vs\xcd\xea\\\xb2\x91~\x97f\x98u3\xc8F\x80\xa6䕍䍍@\x9c\xcc&K\xcd\x16\x1b\x81=\xb3\xecNJ\xc9\xe4\x8fK\xb2\xc4◨\x90\xd9Ր\xffQ\xf2w)\x19\xa4\xea\x18\x97s\x0eͧ^u\xcb\xf9`cM\x1b\xab1;\x95\x99\xe3rc\xb5\xa8\xb8a%\xc7\xed\xc5\x13ˣ>\xbb9¹\xbe\x18\xe2w\x89\xc75\xdde&\xe4\xd3\xe7Z\x987=\x93\x9bj\xf2\x04\x9c\x13\x1a\x13\xc5\xc1\xc83w\x0fP&\xd7`W\b;=\xfd\x95\x17\xfe\xba\xa0\x1b'\xefx\"5\xb6\x03c\x8ePX(\xe3מ\x8c\xaa\xf2is\xd2Y\xbe\xf8\xed\x1f\x15\xa83\xc1\xebYj\xfbb\xe6<\x94\x9b\x96\xda\xfaBAQxm\xe3n\x9f\xea\x99\xd9\xcd\xf4$o\x85[\xf0\xa2`{8\"\x1c\xab!x\xcdk\xab\f\xad\xd70R5\x1e\x88\x95u\xeb\xc8\xefs\x96j\xeaa\xa2\xe7u4\x96\xbb\x1a\xb3\x8b\xfc\xb3\xb8\x1b\x97;\x1c\x13 S\x0f\a\xa5m\x88\xcf\x1e\x06z.\xc7c\xce\xf5H\xb6\xb9\xd2\x0e\xfb<\xc7!\x9f\x05\x87{\x16\xb8 
˜\x90d2\xa5\x1c\xe2y\x16W\xe4\x19\x9d\x91\xe7pG.sHf@\xf6\x0e\xe7\xa4\x1c\xbbIJ\xf6H\xde\xefLI֘ߒ\x9c>N\x93p\x8c&a\xb3r\x0eӄ\xe32ˎ\xc9$\xd0\xf0\x99\\\x95grV\x9e\xc3]y^\x87e\xd6e\x99\x95\x9c\x99\x9f\x97\x1do\xb98x/U\x0ejr\xaf#U4'\x85\xb2\xe7_t\xfb\xecE\xfeÝr\xb6Vǔ\x8d\x05\xac\xebS\xef\x19\xf9\x99\t\xbf\x8fj\x85\xb0\xb5\xeew6`\x1aC$\x1e\xffo\xac<\x7fۨ۵\xd1PR\x85;\xac\xbb\xb3K\xad\xd0\x1b\xf2\x9ef\xc7\x1e\xf4cԯ\xd8KUPCV\xf5\x96\xd7+\a\xdc\xfe\xbd\xda\x10\xf2A֛\xf6\xed\x9bd4+J~\xb6~C\x04\xe6\xaa\r\xe22\x81\x88\n_\xe8\xffNr\x96E,\xad\xe8\xe5B\xae\xf2\xe0J\b\xbc\xf2(ko}\x97\xb6b\xdc\xd0B\xa3\xac{\xbd\xe2^r.\x9f\x16\xba\xe3\xb4d\xff\x81\xb74\xcf\xc7p\xde\xdem\xb1j\x90\x14\xbcݹ\xce\x10\xaa\x91ށ]1\x9b\xe1\x8c\xcd\xf8\xed\xbe\x031\x92iW\xff\x89\xd2Z\xaf\xd8l\xec\xd6%\x97\xf5g5\xcd\xdd\xd6a\xb7Aa\xa1\xe2L$\xe6z\x98#S\xf9\xba\xa4ʜ]^\xc1M\x8d\xc3x\x1c'\xac\x9bSі\xd1\xe5ex\xddo\x94\xb6\xe1\xd6_\xdc\xcc;\x97ݭ\xd0>E/\xc1c\xfc(\xdf\xec!\xbe+\xe21n\x82\xac\x91R\x91\xcfѤ\xa4\xabE\xb1\xb4\xbf\xda\xf6Wy\x82w\xd1hV\x87<\xf7\xbd\xea\x91t\xa2\x00\xd1]\xee:u;(^\xcey\x99.\x8a\xe7\a\x85\xae\xfd\xf5\x9d\x89c\xf1\xb5#C\t7\x97\x06\xb8:\x1e\xb5\xb1\xd3\xeb\xee\v\xbaV\xb5\n\xf3Ǝw\x9eB\xe8\xaa\x7f\xc1\xdd_\xaf\x9f#\xa5\x8dT\xf4\x00\xbfHw\xf5\xf2\x1c\r\xba\xb5;\xf7n{\x93'\xe4,\x86\xd9\x10s\x05\xfc%\xd0=`M*\xf2\xe0\x1a\\\x8b\xe5\xc2[}\x8d\xe13\x83yx\xf8\xc5\r\xc0\xb0\x026\xef*\xb7\x97o\xb5\x9d\x06K\xcd00\xd7hg\xff{\x8c\xac\x17\x04\xef\x93m\U00067177\x02Lwƴ\xb7E\xd8W%\x974\au+Ş\x1df\x06\xf2[\xa7ro\x9d\xcc\xf0\xa3\x1f\\\xbd\xfa\x04\xf8W\xdeu\xb7f\f\xe7\xc0?0\x0eڡ\x95\xa0D\uf1adj\x9dZ\x15;g\xa8\xed\xed\x8fu\a#\xab\x8f\x1b\x16\xc6|KP\xd60r\xd1\xe1J\a\xb1\x1c\x1fx\xc3\x11&\f\x1c\"\xf1\xe6\t-z\xea\\\xfc\x1dDzN\xa5|\x89\xb7j\xc5\x03[\x93ʙ\x8aQ\x8d2\x06\xa7\xf5\xf6\x01F\xca\xf1x\xf9uo\xc9\x1c3\xfe\xc7n\x87\xc7\x1b\xd1\xe7\xef\x87w\x17\xa7\xfb\xd7 \xbc W\n\xaf\xb8\xf4\x97\xaa㕐\x17]\x11\xbf\xab\xf3x\xea,!\xfd\xd6\x18(\xca\xe8\x05\xd2\x11\xf4F\xda\xd6\x06\x894\x947\xa2\x1b[\x02\xea&\x98a4\x99Z\xe4\xa6\xec\x04㦄66\xd6[\x9f\x93~\xc9X\xeb\xb6\xe9c\xd5U\x96\x81\xd6\xfb\x8a\xf3s\x9d\x0f\xbfd\xe01k\xe0J\xa4\xf8@\x19\xbf\x88\x0e\xae\xe1\b\x11\xdc\xd8F\u05fd$6\xfb$\\\x10y\x98\xbc\x83\xa5\xdb\x16<%\xb2\x8c\x0e\x9e\x05>7N\x1bZ\xcc\xddt\x7f;l\x81ϐ\xa8\xbc\x95MW_\xd7\xfeDu\xc3昚m\xc0\xb9\x96\xe82Xh\x90\x138\x81 
v\x9dr\xf4\rO\xe5\xf4\xdbD\xa0\xb6\xa1\xf8c\x14N\xd7\a\xcd\x1f<\x7f\xff\xbc\xca\x03\x9a[\xea\x04\ua95e\x80Y_\xc0\x1f!\xc2P2\x9d+\xfe\xc6ڲ\xb0\x8e\x02M2բ\xba6Ӭ\xab瓕\xd6\xed\xfdv\xac\xe5\xa8\x04\x87\n1\xfe\r\x1e\xba\xf8F%5\x1cY\xaa\x8a\x1a\x8elNAu\xd4Qdp\x8d\x82\xba\xfa0q\xae\xceސ\x8d\x95\x9c\x05\x80\xc7\xc1\xc2\xf3\a\xee\xb4\xeb\xfa\xfd%\xc7mw\xaf\"u\x89\xca\xf8ސa\n\x9a\a\xbc\x06\bI\xecx\x91\xa7\xe1\xa8\x10}\xeaj\x88i\xbbn\x98`^\xaf\xfa(\xa4\x7f\xf9\xea\xc6\xfb\xae\xf1\xe0JA\x7f\x97\xea\x86\x14L\xd8\x7f\xa8\xc8\xdd\x06Ph\xbc\b\xff\xa3\x94\x8f\xf7\x11\xabr\x80\xfc\xdf\xea\x8aMt\x9e\t\x876\x1e\xad\xdb\xc9\xca\xef\xdb\xd6\x16f|/\x0f\xaf9\xbf\xb2\xe7\x840'\x14zt8\v\xf4\xb8\xeb`\xc4m\xba\x0f\xaf/q~\xbe\xe9C\xee\xbd\xd0\xd6\xc0\x9e\x82\x88\x92\xeb\x17\xf1\xf6\xb5P~\xaf\xa4\a\xc4U\x9f\xb8\xe3\x9e\xf4\xec\xc8K\xbc7O\xe31\x93/N\xe0i;\xcf!߲\xd4FP\xf7\xf6\x9b\x9b\xd5\xd7u<\xf1\xc2\xff\x19ɿ\xb3u\xea\xb3\xe2-7\n\xc2z0\x16X\x8a\x9f\x0f^\x93\x8f0\x8c\x83\xb8#\xbf\x90\xe3\x8e\x7f\xecy;[e+\xee\x94<(\xd0C\xc1Y\x93\xbfSf\x988|\x90\xea\x8eW\a&\x1as{Q\xe5;\xaa\f\xb3\xa2\xec\xf0\x89!\xca\x04\xe5\xec\x9f1\xe5\xd4\xfeq\x1ePmmD~K@c\xec\x87w`-\xcdQo;\xaa\aKO\xd79Y\xf0\xd5\xe6t`X\xfb\x1b\xdb!\x16\xee\xf1\xc06\xe4\xa34\x10\x12_X\x17\xa6\xb5\x96@\x9b5\xec\xf7R\x19\xb7!\xba^\x13\xb6\xf7\xde{\x04\xaeU\x1c\x18\xc4q\xef\xe3\x11f\x9aL\xe7f\xb9\xc1@\xaa\xc2U\x13/\xf4.\xe8\xd9mw\xd0,\xab\xacE\xf4J\x1b\xca#\x06\xc97)j\f\x93\xd8\xf9\x02\xf9o)\xbb@\xdbv\xfdaX\v\xc19\xca\xe1U\x00\xce\x18\x8b\x9a\xa6\x04\xc3\xe0 ȓb\xc6X\x03\xa8\x9d\xd9H\x8c5y8'\xda\x1a\x01\x17ŷ\x88Sp\xdb\xf1L\x8bn\xfc\xb4\xae<\xa6\x1f\xfd\xe0\xf09\xb8\x1d\x92`4n\xe73J|[\xcb\xca\xecH\xc5\xc1\n\x95\x92\xd5\xe1\x18\xe4rĔ\x1d\x81\x9bW\x80\xb1@\xd4\x10:d\x95\x99J\x89\xd6\xces}\xbc\xb5A\x97f\x8f\xa3\x98\xfa̙\xf0F\xeb+\xff\xce\xc3z\xafd\xb1\xf6\xbc\xc0d\xb0\x1b\xbf\x1b\xac\x98\xb4ք9FIN\xdc\x13p\xfeBu\x14\x83\xb2\x04A\xa8\xf6\xf8$܃s\xf1\xea\xa1\rU&5\fpߩ<\x13\x01@\xc8q|\xef\xfdn\xb7\xbb\x0f\xe8ֿ\x80X\x03\xbe!\x9a\x89\xf0<\xac\xdbKw\xa2\xa0\x89\x14\xf8\x8a\x9cT\xf1<\xbf\x81K\xdfq\xe0\xbb\xe8\xff\xb1\xbe\xfb\xa9^\x13ߧ8\x81_z\xd5{g>\xf1-\xc0\xba\x8aw\xdc\"\xf4\xf8\x13ۻ\xd4\xc3\xccb\xfd\xe7\xff\xf5\xb3\x9c\xa7$'\xe3\xe5\xa4\x7f\x81\xaeC\xed(̼\xfcw\xc7\xc1Z>\x1a\xa0뺼\\䣞.\x8b\xba\\3\xe4\x12^.\xbeN \xe2tY\xb0\xe5\xd9\"-\xd7\x1d\xdd\x13\xc5\xd7R\xe7\xe6\xd8\xdf}\xb5H\xa8\xc5C\x88\x04[\"è\xc3/\xb3\xc1\x96V\xac%\xe08\xf2\xb8Y/\xfer\xa5hKt\x1d\x18|D\x05\x9a\xb7\xe6\xb6\xef\xc9\x7f\xf9\x9f\x00\x00\x00\xff\xffqN\x18=X}\x00\x00"), 
[]byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xc4YKs\xe3\xb8\x11\xbe\xebWt\xed\x1e|YQ3\x9bKJ\x97\x94FN\xaa\xa6\xe2\x89]#ǹ\xe4\xb0\x10Д\xb0\x06\x01\x06\x0fi\x94T\xfe{\xaa\x01\xf0!\x92\xb2\xe4\xa9$\x8b\x8bM\xb2\xd1\xe8\xfe\xfa\r\xcd\xe7\xf3\x19\xab\xe5\vZ'\x8d^\x02\xab%~\xf3\xa8\xe9\xc9\x15\xaf\xbfw\x854\x8b\xc3\xc7٫\xd4b\t\xeb༩\xbe\xa23\xc1r\xbc\xc7Rj\xe9\xa5ѳ\n=\x13̳\xe5\f\x80im<\xa3\u05ce\x1e\x01\xb8\xd1\xde\x1a\xa5\xd0\xcew\xa8\x8bװ\xc5m\x90J\xa0\x8d̛\xa3\x0f\x1f\x8a\x8f?\x17\x1ff\x00\x9aU\xb8\x84-㯡v\xdeX\xb6CexbY\x1cP\xa15\x8543W#\xa7\x13vքz\t݇\xc4!\x9f\x9e$\xff\x14\x99m\x12\xb3\x87\xcc,~W\xd2\xf9?_\xa6y\x90\xceG\xbaZ\x05\xcb\xd4%\xb1\"\x89\xdb\x1b\xeb\xff\xd2\x1d=\x87\xadS\xe9\x8bԻ\xa0\x98\xbd\xb0}\x06ษq\tqw\xcd8\x8a\x19@\x86&r\x9b\x03\x13\"\x82\xcdԓ\x95ڣ]\x1b\x15*ݞ%\xd0q+k\x1f\xc1L\xba@V\x06\x1am\xc0y\xe6\x83\x03\x17\xf8\x1e\x98\x83ՁIŶ\n\x17\x7fլ\xf9?\xf2\x03\xf8\xd5\x19\xfd\xc4\xfc~\tE\xdaU\xd4{暯\xc9FO\xbd7\xfeD\n8o\xa5\xdeM\x89\xf4\xc0\x9c\x7faJ\x8a(ɳ\xac\x10\xa4\x03\xbfGP\xccy\xf0\xf4\x82\x9e\x12B@\x10!4\b\xc1\x91\xb9|\x0e\xc0!q\x89\x18MK\xaaFg\x9d\x89M\xa2\xc0ˀK\x92\x9f\xded\xe9{l\x1b\xff.\xb8Ŗ\xa5\xf3\xac\xaa\xcf\xf8\xaevx\x89\xd9\x19\x14\xf7X\xb2\xa0|_U\xb2\x92\xea\xfb\xe5\xb9Z5\xf2B\xa4]g'ޟ\xbdK\xa7n\x8dQ\xc8\x12\x97Du\xf8\x98\xbc\x90\xef\xb1b\xcbLljԫ\xa7\xcf/\xbfۜ\xbd\x86)G\x1a\x04\x05\x19\x8e\xf5l\xb3G\x8b\xf0\x12\xe3/\xd9\xcde\xd5Z\x9e\x00f\xfb+r\xdf\x19\xb1\xb6\xa6F\xebe\x13,i\xf5rQ\xef\xed@\xa6;\x12;Q\x81\xa0$\x84ɏr\xbc\xa0Ț\x82)\xc1\xef\xa5\x03\x8b\xb5E\x87\xda\xf7\xe1m\x05+\x81\xe9,^\x01\x1b\xb4Ćb9(A\xb9\xeb\x80փEnvZ\xfe\xb3\xe5\xed\xc0\x9b\xec\xbc\x1e\x9d\x1f\xf0\x8c\xf1\xa9\x99\"W\r\xf8\x130-\xa0b'\xb0H\xa7@\xd0=~\x91\xc4\x15\xf0\x85\xfc]\xea\xd2,a\xef}햋\xc5N\xfa&\asSUAK\x7fZ\xc4t*\xb7\xc1\x1b\xeb\x16\x02\x0f\xa8\x16N\xee\xe6\xcc\xf2\xbd\xf4\xc8}\xb0\xb8`\xb5\x9cG\xd1uJ\x9a\x95\xf8\xd1\xe6\xac\xed\xee\xced\x1dEmZ1k\xbea\x01ʘ\xc9\v\xd2֤E\a4\xbd\"t\xbe\xfeq\xf3\f\xcd\xd1\xd1\x18C\xf4#\xee\xddFי\x80\x00\x93\xbaD\x9b\x8cXZSE\x9e\xa8Em\xa4\xf6\xf1\x81+\x89z\b\xbf\v\xdbJz\xb2\xfb?\x02:O\xb6*`\x1d\v\x13l\x11B\x1d㾀\xcf\x1a֬B\xb5f\x0e\xff\xe7\x06 
\xa4ݜ\x80\xbd\xcd\x04\xfd\x9a:$N\xa8\xf5>4\xb5\xf0\x82\xbd&\xa3xS#?\x8b\x1f\x81NZ\xf2p\xcf<Ƹ\x18\xe0\x9aC\xfcr1m\xd6tp\xd3b\x9c\xa3s_\x8c\xc0ᗁȫ\x96\xf0L\xc6\x1am%],\x8bP\x1a;\xac\x18\xac\xcd\xc0\xfd\xd5d\xaab\xf4\ru\xa8Ƃ\xcc\xe1+2\xf1\xa8\xd5\xe9§\xbfY\xe9\xc7\a]0$\xad$\xe2\xe6\xa4\xf9\x13Zi\xc4\x15\xe5?\r\xc8[\b\xf6\xe6\betk\xedՉr\x90;i>ζ\xcdZ=}n2o\n\xa0\x1co\x19\xab\x02V9rM\t\x1f@HG\r\x80\x8bL\xc7`\xe9\xa0b\x83\xb0\x04oû\xd4\xe7F\x97r7V\xba\xdf\xd3\\\xf2\x98+\xac\aȭ\xe3I\x94\x9a\xc8;jk\x0eR\xa0\x9dS|\xc8R\xf2,I\xb0\xa9r\x95\x12\x95pcM/DYTŢ\xa0\xa8f\xea\x8a\r\xd7-a쀙\xd4Ƀ;\x061\xd9\xd8*\x97T\xedQ\x8b\xb6\x1b9\x93\xc6Ĭ\xe5P\xc0Q\xfa}J\x87j*\xee\xe0\xcdأ\xf5\x8a\xa7\xa9\xd7\x03ٟ\xf7H\x94\xa9\x80\"8\xe4\x16}\xf46T\xe4>\xe4J\x05\xc0\x97\xe0bB\x1d\xe6\x89f\xc5F\xad\xd9\xfd\x8a\xa71\xd0p\u0378\xb9\x85\xb9.\xf2\x1d\xb5\u038d\xc0\x16K\xb4\xa8\xfddR\xa7\x01\xc4j\xf4\x18\xf3\xba0\xdcQJ\xe7X{\xb70\a\xb4\a\x89\xc7\xc5\xd1\xd8W\xa9ws\x02|\x9e#h\x11NJŏ\xf1\xcf\x05\x95\x9f\x1f\xef\x1f\x97\xb0\x12\x02\x8cߣ%\xab\x95A5\x8e\xd6\xebo~\x8a5\xf6'\bR\xfc\xe1\xee{p1u\x8a\x9c\x1b\xb0\xd9D\xef?Q\xa3\x16\x85\"\x886\xc9*\xc6\x02UJ2v\x95\xad\x99r͔#Nu\x98\xfdE\x89\x89*\xc8TF}\xc5q2}#\xcc\x00\xbe\xcd;C\xcd+V\xcf\x135\xf3\xa6\x92|6\xd46\xb6\xc1W\"\xb2i\xbb\xa5\x16\x92S\xdbv\x1eI\xcd8\"κ\xf3\t\x18\x86\xfd\xfa\xa5\xfc1\rSR7W\xcf+\x12?\xf6i\xbb!.%\xb3\\\x11\x1dzj\xb7\x1ch\xa4\x8a\xc9\xec\x18\xe7\x98B\xb8њb\xd7\x1b`mb\xbcsÊ\xf0\xce|\xb2\r\xfc\x15'\x80\x1f\xa9\xf2)\x126\x18\xa7m$Kp\x18S\xf551\xe0zDp\xb6F{\x8b,\xeb\x15\x11\xb6E\x95\xc1z\x05۠\x85\xc2F\xa2\xe3\x1e5\xcd\x13\xb2\x14?\xfcf3\x93b\xce\xd3\b\x84\xe2+\x1e\xe4\xf8Nh\x8c\xee\xc3hG\x13\xf8m8\xd0\xc3/\xcdh\xbd\xb0\x99\xec\x97\t0J\xa9\xa8s\x9c\xc8\x13]\xc70\xbe\xbd\xfc\xb4y\xb8s\xb1\xe1G\xed\xa7\x9a\xc4#Z\x8c\xf3\x15\n\xea\xf9M\xbe\xc5\bΣ\x9dp\x80\xd6z\xd1栌\xde\r\x02'\xad|\xa7A\xfd\\r(cA\xa0\xa7Ҥw\xc0\xf7Lﰻ\xb3\xca\xf2\xbf-)\xb9\xcf\xc0g:\x0f\x91\xfa\x92{\xdcd\xd1g9\xd5ԏ\xee\x8b;\xe2\xe9\xbb\xe2F\xfaƲ\x17\x87\xa2+\xb8\x8f\xe8\x9b*M\xa0\xce}w\x7fܭ\xef\x1f\x86Ǘ\xd37 \xf1ޛ\xf37nA\xe0\xc8\\w\x87\xfe\xdb\xe1PQ\xb7z\xb5\x05\xfe\x92\xa8\xd2ec\xde\x02lk\x82\x7f+2\xef\xa6\x1c:\xff8\xf0\x1e\x19\xe3O\x1eך\f\xa2i,\u0083\xa5\xc1\xb3\xbbC\x8bIa\xaa\xb6\xdc~\x19\xb5\x1a\xfc2\xd3\xff6\xfe\xdd\xe6\x06\xbd&k\xed\xe8e\xaa\x97=\xbbf\x90\xfbo¶\xbdW^¿\xfe=\xfbO\x00\x00\x00\xff\xff\x80.\x12\xd3P\x1c\x00\x00"), []byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xb4\x96M\x93\xdb6\x0f\xc7\xef\xfe\x14\x98y\x0e\xb9<\x92\xb3\xed\xa5\xa3[\xb3\xc9a\xa7mƳ\x9bɝ&a\x8bY\x8ad\x01\xd0[\xb7\xd3\xef\xde!)\xf9E\xb67\xdbCy\x13\t\x02\x7f\xfe@\x80j\x9af\xa1\xa2\xfd\x8a\xc46\xf8\x0eT\xb4\xf8\x87\xa0\xcf_\xdc>\xffĭ\r\xcb\xdd\xdd\xe2\xd9z\xd3\xc1}b\t\xc3#rH\xa4\xf1#n\xac\xb7b\x83_\f(\xca(Q\xdd\x02@y\x1fD\xe5iΟ\x00:x\xa1\xe0\x1cR\xb3E\xdf>\xa75\xae\x93u\x06\xa98\x9fB\xef\u07b7w?\xb4\xef\x17\x00^\r\u0601A\x87\x82k\xa5\x9fS$\xfc=!\v\xb7;tH\xa1\xb5a\xc1\x11u\xf6\xbf\xa5\x90b\aDž\xba\x7f\x8c]u\x7f,\xae>\x14W\x8f\xd5UYu\x96\xe5\x97[\x16\xbf\xda\xd1*\xbaD\xca]\x17T\f\xd8\xfamr\x8a\xae\x9a,\x00X\x87\x88\x1d|β\xa2\xd2h\x16\x00㱋\xcc\x06\x941\x05\xa4r+\xb2^\x90\xee\x83K\xc3\x04\xb0\x01\x83\xac\xc9F)\xa0\xbe\xf4X\x8e\ba\x03\xd2#\xd4p 
[Generated payload elided: the hunk above this point replaces several very long gzip-compressed []byte("\x1f\x8b...") literals, machine-generated data (apparently the bundled CRD manifests) that is regenerated as part of this series. The raw compressed bytes are not human-readable and are omitted from this excerpt.]
diff --git a/internal/hook/hook_tracker.go b/internal/hook/hook_tracker.go new file mode 100644 index 0000000000..f4e2bb817e --- /dev/null +++ b/internal/hook/hook_tracker.go @@ -0,0 +1,137 @@
+/*
+Copyright 2020 the Velero contributors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package hook
+
+import "sync"
+
+const (
+ HookSourceAnnotation = "annotation"
+ HookSourceSpec = "spec"
+)
+
+// hookTrackerKey identifies a backup/restore hook
+type hookTrackerKey struct {
+ // PodNamespace indicates the namespace of the pod where hooks are executed.
+ // For hooks specified in the backup/restore spec, this field is the namespace of an applicable pod.
+ // For hooks specified in pod annotation, this field is the namespace of the pod where hooks are annotated.
+ podNamespace string
+ // PodName indicates the pod where hooks are executed.
+ // For hooks specified in the backup/restore spec, this field is an applicable pod name.
+ // For hooks specified in pod annotation, this field is the pod where hooks are annotated.
+ podName string
+ // HookPhase is set only for backup hooks; for restore hooks, this field is empty.
+ hookPhase hookPhase
+ // HookName is set only for hooks specified in the backup/restore spec.
+ // For hooks specified in pod annotation, this field is empty or "". + hookName string + // HookSource indicates where hooks come from. + hookSource string + // Container indicates the container hooks use. + // For hooks specified in the backup/restore spec, the container might be the same under different hookName. + container string +} + +// hookTrackerVal records the execution status of a specific hook. +// hookTrackerVal is extensible to accommodate additional fields as needs develop. +type hookTrackerVal struct { + // HookFailed indicates if hook failed to execute. + hookFailed bool + // hookExecuted indicates if hook already execute. + hookExecuted bool +} + +// HookTracker tracks all hooks' execution status +type HookTracker struct { + lock *sync.RWMutex + tracker map[hookTrackerKey]hookTrackerVal +} + +// NewHookTracker creates a hookTracker. +func NewHookTracker() *HookTracker { + return &HookTracker{ + lock: &sync.RWMutex{}, + tracker: make(map[hookTrackerKey]hookTrackerVal), + } +} + +// Add adds a hook to the tracker +func (ht *HookTracker) Add(podNamespace, podName, container, source, hookName string, hookPhase hookPhase) { + ht.lock.Lock() + defer ht.lock.Unlock() + + key := hookTrackerKey{ + podNamespace: podNamespace, + podName: podName, + hookSource: source, + container: container, + hookPhase: hookPhase, + hookName: hookName, + } + + if _, ok := ht.tracker[key]; !ok { + ht.tracker[key] = hookTrackerVal{ + hookFailed: false, + hookExecuted: false, + } + } +} + +// Record records the hook's execution status +func (ht *HookTracker) Record(podNamespace, podName, container, source, hookName string, hookPhase hookPhase, hookFailed bool) { + ht.lock.Lock() + defer ht.lock.Unlock() + + key := hookTrackerKey{ + podNamespace: podNamespace, + podName: podName, + hookSource: source, + container: container, + hookPhase: hookPhase, + hookName: hookName, + } + + if _, ok := ht.tracker[key]; ok { + ht.tracker[key] = hookTrackerVal{ + hookFailed: hookFailed, + hookExecuted: true, + } + } +} + +// Stat calculates the number of attempted hooks and failed hooks +func (ht *HookTracker) Stat() (hookAttemptedCnt int, hookFailed int) { + ht.lock.RLock() + defer ht.lock.RUnlock() + + for _, hookInfo := range ht.tracker { + if hookInfo.hookExecuted { + hookAttemptedCnt++ + if hookInfo.hookFailed { + hookFailed++ + } + } + } + return +} + +// GetTracker gets the tracker inside HookTracker +func (ht *HookTracker) GetTracker() map[hookTrackerKey]hookTrackerVal { + ht.lock.RLock() + defer ht.lock.RUnlock() + + return ht.tracker +} diff --git a/internal/hook/hook_tracker_test.go b/internal/hook/hook_tracker_test.go new file mode 100644 index 0000000000..d104cc91d9 --- /dev/null +++ b/internal/hook/hook_tracker_test.go @@ -0,0 +1,88 @@ +/* +Copyright 2020 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package hook + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewHookTracker(t *testing.T) { + tracker := NewHookTracker() + + assert.NotNil(t, tracker) + assert.Empty(t, tracker.tracker) +} + +func TestHookTracker_Add(t *testing.T) { + tracker := NewHookTracker() + + tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", PhasePre) + + key := hookTrackerKey{ + podNamespace: "ns1", + podName: "pod1", + container: "container1", + hookPhase: PhasePre, + hookSource: HookSourceAnnotation, + hookName: "h1", + } + + _, ok := tracker.tracker[key] + assert.True(t, ok) +} + +func TestHookTracker_Record(t *testing.T) { + tracker := NewHookTracker() + tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", PhasePre) + tracker.Record("ns1", "pod1", "container1", HookSourceAnnotation, "h1", PhasePre, true) + + key := hookTrackerKey{ + podNamespace: "ns1", + podName: "pod1", + container: "container1", + hookPhase: PhasePre, + hookSource: HookSourceAnnotation, + hookName: "h1", + } + + info := tracker.tracker[key] + assert.True(t, info.hookFailed) +} + +func TestHookTracker_Stat(t *testing.T) { + tracker := NewHookTracker() + + tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", PhasePre) + tracker.Add("ns2", "pod2", "container1", HookSourceAnnotation, "h2", PhasePre) + tracker.Record("ns1", "pod1", "container1", HookSourceAnnotation, "h1", PhasePre, true) + + attempted, failed := tracker.Stat() + assert.Equal(t, 1, attempted) + assert.Equal(t, 1, failed) +} + +func TestHookTracker_Get(t *testing.T) { + tracker := NewHookTracker() + tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", PhasePre) + + tr := tracker.GetTracker() + assert.NotNil(t, tr) + + t.Logf("tracker :%+v", tr) +} diff --git a/internal/hook/item_hook_handler.go b/internal/hook/item_hook_handler.go index 38c982c550..9075bc50fa 100644 --- a/internal/hook/item_hook_handler.go +++ b/internal/hook/item_hook_handler.go @@ -82,6 +82,7 @@ type ItemHookHandler interface { obj runtime.Unstructured, resourceHooks []ResourceHook, phase hookPhase, + hookTracker *HookTracker, ) error } @@ -200,6 +201,7 @@ func (h *DefaultItemHookHandler) HandleHooks( obj runtime.Unstructured, resourceHooks []ResourceHook, phase hookPhase, + hookTracker *HookTracker, ) error { // We only support hooks on pods right now if groupResource != kuberesource.Pods { @@ -221,15 +223,21 @@ func (h *DefaultItemHookHandler) HandleHooks( hookFromAnnotations = getPodExecHookFromAnnotations(metadata.GetAnnotations(), "", log) } if hookFromAnnotations != nil { + hookTracker.Add(namespace, name, hookFromAnnotations.Container, HookSourceAnnotation, "", phase) + hookLog := log.WithFields( logrus.Fields{ - "hookSource": "annotation", + "hookSource": HookSourceAnnotation, "hookType": "exec", "hookPhase": phase, }, ) + + hookTracker.Record(namespace, name, hookFromAnnotations.Container, HookSourceAnnotation, "", phase, false) if err := h.PodCommandExecutor.ExecutePodCommand(hookLog, obj.UnstructuredContent(), namespace, name, "", hookFromAnnotations); err != nil { hookLog.WithError(err).Error("Error executing hook") + hookTracker.Record(namespace, name, hookFromAnnotations.Container, HookSourceAnnotation, "", phase, true) + if hookFromAnnotations.OnError == velerov1api.HookErrorModeFail { return err } @@ -240,6 +248,8 @@ func (h *DefaultItemHookHandler) HandleHooks( labels := labels.Set(metadata.GetLabels()) // Otherwise, check for hooks defined in the backup spec. 
+ // modeFailError records the error from the hook with "Fail" error mode + var modeFailError error for _, resourceHook := range resourceHooks { if !resourceHook.Selector.applicableTo(groupResource, namespace, labels) { continue @@ -251,21 +261,30 @@ func (h *DefaultItemHookHandler) HandleHooks( } else { hooks = resourceHook.Post } + for _, hook := range hooks { if groupResource == kuberesource.Pods { if hook.Exec != nil { - hookLog := log.WithFields( - logrus.Fields{ - "hookSource": "backupSpec", - "hookType": "exec", - "hookPhase": phase, - }, - ) - err := h.PodCommandExecutor.ExecutePodCommand(hookLog, obj.UnstructuredContent(), namespace, name, resourceHook.Name, hook.Exec) - if err != nil { - hookLog.WithError(err).Error("Error executing hook") - if hook.Exec.OnError == velerov1api.HookErrorModeFail { - return err + hookTracker.Add(namespace, name, hook.Exec.Container, HookSourceSpec, resourceHook.Name, phase) + // The remaining hooks will only be executed if modeFailError is nil. + // Otherwise, execution will stop and only hook collection will occur. + if modeFailError == nil { + hookLog := log.WithFields( + logrus.Fields{ + "hookSource": HookSourceSpec, + "hookType": "exec", + "hookPhase": phase, + }, + ) + + hookTracker.Record(namespace, name, hook.Exec.Container, HookSourceSpec, resourceHook.Name, phase, false) + err := h.PodCommandExecutor.ExecutePodCommand(hookLog, obj.UnstructuredContent(), namespace, name, resourceHook.Name, hook.Exec) + if err != nil { + hookLog.WithError(err).Error("Error executing hook") + hookTracker.Record(namespace, name, hook.Exec.Container, HookSourceSpec, resourceHook.Name, phase, true) + if hook.Exec.OnError == velerov1api.HookErrorModeFail { + modeFailError = err + } } } } @@ -273,7 +292,7 @@ func (h *DefaultItemHookHandler) HandleHooks( } } - return nil + return modeFailError } // NoOpItemHookHandler is the an itemHookHandler for the Finalize controller where hooks don't run @@ -285,6 +304,7 @@ func (h *NoOpItemHookHandler) HandleHooks( obj runtime.Unstructured, resourceHooks []ResourceHook, phase hookPhase, + hookTracker *HookTracker, ) error { return nil } @@ -514,6 +534,7 @@ func GroupRestoreExecHooks( resourceRestoreHooks []ResourceRestoreHook, pod *corev1api.Pod, log logrus.FieldLogger, + hookTrack *HookTracker, ) (map[string][]PodExecRestoreHook, error) { byContainer := map[string][]PodExecRestoreHook{} @@ -530,10 +551,11 @@ func GroupRestoreExecHooks( if hookFromAnnotation.Container == "" { hookFromAnnotation.Container = pod.Spec.Containers[0].Name } + hookTrack.Add(metadata.GetNamespace(), metadata.GetName(), hookFromAnnotation.Container, HookSourceAnnotation, "", hookPhase("")) byContainer[hookFromAnnotation.Container] = []PodExecRestoreHook{ { HookName: "", - HookSource: "annotation", + HookSource: HookSourceAnnotation, Hook: *hookFromAnnotation, }, } @@ -554,7 +576,7 @@ func GroupRestoreExecHooks( named := PodExecRestoreHook{ HookName: rrh.Name, Hook: *rh.Exec, - HookSource: "backupSpec", + HookSource: HookSourceSpec, } // default to false if attr WaitForReady not set if named.Hook.WaitForReady == nil { @@ -564,6 +586,7 @@ func GroupRestoreExecHooks( if named.Hook.Container == "" { named.Hook.Container = pod.Spec.Containers[0].Name } + hookTrack.Add(metadata.GetNamespace(), metadata.GetName(), named.Hook.Container, HookSourceSpec, rrh.Name, hookPhase("")) byContainer[named.Hook.Container] = append(byContainer[named.Hook.Container], named) } } diff --git a/internal/hook/item_hook_handler_test.go b/internal/hook/item_hook_handler_test.go 
index 0912b1bdd2..f8efb5f089 100644 --- a/internal/hook/item_hook_handler_test.go +++ b/internal/hook/item_hook_handler_test.go @@ -108,6 +108,7 @@ func TestHandleHooksSkips(t *testing.T) { }, } + hookTracker := NewHookTracker() for _, test := range tests { t.Run(test.name, func(t *testing.T) { podCommandExecutor := &velerotest.MockPodCommandExecutor{} @@ -118,7 +119,7 @@ func TestHandleHooksSkips(t *testing.T) { } groupResource := schema.ParseGroupResource(test.groupResource) - err := h.HandleHooks(velerotest.NewLogger(), groupResource, test.item, test.hooks, PhasePre) + err := h.HandleHooks(velerotest.NewLogger(), groupResource, test.item, test.hooks, PhasePre, hookTracker) assert.NoError(t, err) }) } @@ -485,7 +486,8 @@ func TestHandleHooks(t *testing.T) { } groupResource := schema.ParseGroupResource(test.groupResource) - err := h.HandleHooks(velerotest.NewLogger(), groupResource, test.item, test.hooks, test.phase) + hookTracker := NewHookTracker() + err := h.HandleHooks(velerotest.NewLogger(), groupResource, test.item, test.hooks, test.phase, hookTracker) if test.expectedError != nil { assert.EqualError(t, err, test.expectedError.Error()) @@ -861,7 +863,7 @@ func TestGroupRestoreExecHooks(t *testing.T) { "container1": { { HookName: "", - HookSource: "annotation", + HookSource: HookSourceAnnotation, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/foo"}, @@ -892,7 +894,7 @@ func TestGroupRestoreExecHooks(t *testing.T) { "container1": { { HookName: "", - HookSource: "annotation", + HookSource: HookSourceAnnotation, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/foo"}, @@ -933,7 +935,7 @@ func TestGroupRestoreExecHooks(t *testing.T) { "container1": { { HookName: "hook1", - HookSource: "backupSpec", + HookSource: HookSourceSpec, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/foo"}, @@ -973,7 +975,7 @@ func TestGroupRestoreExecHooks(t *testing.T) { "container1": { { HookName: "hook1", - HookSource: "backupSpec", + HookSource: HookSourceSpec, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/foo"}, @@ -1021,7 +1023,7 @@ func TestGroupRestoreExecHooks(t *testing.T) { "container1": { { HookName: "", - HookSource: "annotation", + HookSource: HookSourceAnnotation, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/foo"}, @@ -1140,7 +1142,7 @@ func TestGroupRestoreExecHooks(t *testing.T) { "container1": { { HookName: "hook1", - HookSource: "backupSpec", + HookSource: HookSourceSpec, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/foo"}, @@ -1152,7 +1154,7 @@ func TestGroupRestoreExecHooks(t *testing.T) { }, { HookName: "hook1", - HookSource: "backupSpec", + HookSource: HookSourceSpec, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/bar"}, @@ -1164,7 +1166,7 @@ func TestGroupRestoreExecHooks(t *testing.T) { }, { HookName: "hook2", - HookSource: "backupSpec", + HookSource: HookSourceSpec, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/aaa"}, @@ -1178,7 +1180,7 @@ func TestGroupRestoreExecHooks(t *testing.T) { "container2": { { HookName: "hook1", - HookSource: "backupSpec", + HookSource: HookSourceSpec, Hook: velerov1api.ExecRestoreHook{ Container: "container2", Command: []string{"/usr/bin/baz"}, @@ -1192,9 +1194,11 @@ func TestGroupRestoreExecHooks(t *testing.T) { }, }, } + + hookTracker 
:= NewHookTracker() for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - actual, err := GroupRestoreExecHooks(tc.resourceRestoreHooks, tc.pod, velerotest.NewLogger()) + actual, err := GroupRestoreExecHooks(tc.resourceRestoreHooks, tc.pod, velerotest.NewLogger(), hookTracker) assert.Nil(t, err) assert.Equal(t, tc.expected, actual) }) @@ -1983,3 +1987,494 @@ func TestValidateContainer(t *testing.T) { // noCommand string should return expected error as result. assert.Equal(t, expectedError, ValidateContainer([]byte(noCommand))) } + +func TestBackupHookTracker(t *testing.T) { + type podWithHook struct { + item runtime.Unstructured + hooks []ResourceHook + hookErrorsByContainer map[string]error + expectedPodHook *velerov1api.ExecHook + expectedPodHookError error + expectedError error + } + test1 := []struct { + name string + phase hookPhase + groupResource string + pods []podWithHook + hookTracker *HookTracker + expectedHookAttempted int + expectedHookFailed int + }{ + { + name: "a pod with spec hooks, no error", + phase: PhasePre, + groupResource: "pods", + hookTracker: NewHookTracker(), + expectedHookAttempted: 2, + expectedHookFailed: 0, + pods: []podWithHook{ + { + item: velerotest.UnstructuredOrDie(` + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "namespace": "ns", + "name": "name" + } + }`), + hooks: []ResourceHook{ + { + Name: "hook1", + Pre: []velerov1api.BackupResourceHook{ + { + Exec: &velerov1api.ExecHook{ + Container: "1a", + Command: []string{"pre-1a"}, + }, + }, + { + Exec: &velerov1api.ExecHook{ + Container: "1b", + Command: []string{"pre-1b"}, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "a pod with spec hooks and same container under different hook name, no error", + phase: PhasePre, + groupResource: "pods", + hookTracker: NewHookTracker(), + expectedHookAttempted: 4, + expectedHookFailed: 0, + pods: []podWithHook{ + { + item: velerotest.UnstructuredOrDie(` + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "namespace": "ns", + "name": "name" + } + }`), + hooks: []ResourceHook{ + { + Name: "hook1", + Pre: []velerov1api.BackupResourceHook{ + { + Exec: &velerov1api.ExecHook{ + Container: "1a", + Command: []string{"pre-1a"}, + }, + }, + { + Exec: &velerov1api.ExecHook{ + Container: "1b", + Command: []string{"pre-1b"}, + }, + }, + }, + }, + { + Name: "hook2", + Pre: []velerov1api.BackupResourceHook{ + { + Exec: &velerov1api.ExecHook{ + Container: "1a", + Command: []string{"2a"}, + }, + }, + { + Exec: &velerov1api.ExecHook{ + Container: "2b", + Command: []string{"2b"}, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "a pod with spec hooks, on error=fail", + phase: PhasePre, + groupResource: "pods", + hookTracker: NewHookTracker(), + expectedHookAttempted: 3, + expectedHookFailed: 2, + pods: []podWithHook{ + { + item: velerotest.UnstructuredOrDie(` + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "namespace": "ns", + "name": "name" + } + }`), + hooks: []ResourceHook{ + { + Name: "hook1", + Pre: []velerov1api.BackupResourceHook{ + { + Exec: &velerov1api.ExecHook{ + Container: "1a", + Command: []string{"1a"}, + OnError: velerov1api.HookErrorModeContinue, + }, + }, + { + Exec: &velerov1api.ExecHook{ + Container: "1b", + Command: []string{"1b"}, + }, + }, + }, + }, + { + Name: "hook2", + Pre: []velerov1api.BackupResourceHook{ + { + Exec: &velerov1api.ExecHook{ + Container: "2", + Command: []string{"2"}, + OnError: velerov1api.HookErrorModeFail, + }, + }, + }, + }, + { + Name: "hook3", + Pre: []velerov1api.BackupResourceHook{ 
+ { + Exec: &velerov1api.ExecHook{ + Container: "3", + Command: []string{"3"}, + }, + }, + }, + }, + }, + hookErrorsByContainer: map[string]error{ + "1a": errors.New("1a error, but continue"), + "2": errors.New("2 error, fail"), + }, + }, + }, + }, + { + name: "a pod with annotation and spec hooks", + phase: PhasePre, + groupResource: "pods", + hookTracker: NewHookTracker(), + expectedHookAttempted: 1, + expectedHookFailed: 0, + pods: []podWithHook{ + { + item: velerotest.UnstructuredOrDie(` + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "namespace": "ns", + "name": "name", + "annotations": { + "hook.backup.velero.io/container": "c", + "hook.backup.velero.io/command": "/bin/ls" + } + } + }`), + expectedPodHook: &velerov1api.ExecHook{ + Container: "c", + Command: []string{"/bin/ls"}, + }, + hooks: []ResourceHook{ + { + Name: "hook1", + Pre: []velerov1api.BackupResourceHook{ + { + Exec: &velerov1api.ExecHook{ + Container: "1a", + Command: []string{"1a"}, + OnError: velerov1api.HookErrorModeContinue, + }, + }, + { + Exec: &velerov1api.ExecHook{ + Container: "1b", + Command: []string{"1b"}, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "a pod with annotation, on error=fail", + phase: PhasePre, + groupResource: "pods", + hookTracker: NewHookTracker(), + expectedHookAttempted: 1, + expectedHookFailed: 1, + pods: []podWithHook{ + { + item: velerotest.UnstructuredOrDie(` + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "namespace": "ns", + "name": "name", + "annotations": { + "hook.backup.velero.io/container": "c", + "hook.backup.velero.io/command": "/bin/ls", + "hook.backup.velero.io/on-error": "Fail" + } + } + }`), + expectedPodHook: &velerov1api.ExecHook{ + Container: "c", + Command: []string{"/bin/ls"}, + OnError: velerov1api.HookErrorModeFail, + }, + expectedPodHookError: errors.New("pod hook error"), + }, + }, + }, + { + name: "two pods, one with annotation, the other with spec", + phase: PhasePre, + groupResource: "pods", + hookTracker: NewHookTracker(), + expectedHookAttempted: 3, + expectedHookFailed: 1, + pods: []podWithHook{ + { + item: velerotest.UnstructuredOrDie(` + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "namespace": "ns", + "name": "name", + "annotations": { + "hook.backup.velero.io/container": "c", + "hook.backup.velero.io/command": "/bin/ls", + "hook.backup.velero.io/on-error": "Fail" + } + } + }`), + expectedPodHook: &velerov1api.ExecHook{ + Container: "c", + Command: []string{"/bin/ls"}, + OnError: velerov1api.HookErrorModeFail, + }, + expectedPodHookError: errors.New("pod hook error"), + }, + { + item: velerotest.UnstructuredOrDie(` + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "namespace": "ns", + "name": "name" + } + }`), + hooks: []ResourceHook{ + { + Name: "hook1", + Pre: []velerov1api.BackupResourceHook{ + { + Exec: &velerov1api.ExecHook{ + Container: "1a", + Command: []string{"pre-1a"}, + }, + }, + { + Exec: &velerov1api.ExecHook{ + Container: "1b", + Command: []string{"pre-1b"}, + }, + }, + }, + }, + }, + }, + }, + }, + } + + for _, test := range test1 { + t.Run(test.name, func(t *testing.T) { + podCommandExecutor := &velerotest.MockPodCommandExecutor{} + defer podCommandExecutor.AssertExpectations(t) + + h := &DefaultItemHookHandler{ + PodCommandExecutor: podCommandExecutor, + } + + groupResource := schema.ParseGroupResource(test.groupResource) + hookTracker := test.hookTracker + + for _, pod := range test.pods { + if pod.expectedPodHook != nil { + podCommandExecutor.On("ExecutePodCommand", 
mock.Anything, pod.item.UnstructuredContent(), "ns", "name", "", pod.expectedPodHook).Return(pod.expectedPodHookError) + } else { + hookLoop: + for _, resourceHook := range pod.hooks { + for _, hook := range resourceHook.Pre { + hookError := pod.hookErrorsByContainer[hook.Exec.Container] + podCommandExecutor.On("ExecutePodCommand", mock.Anything, pod.item.UnstructuredContent(), "ns", "name", resourceHook.Name, hook.Exec).Return(hookError) + if hookError != nil && hook.Exec.OnError == velerov1api.HookErrorModeFail { + break hookLoop + } + } + for _, hook := range resourceHook.Post { + hookError := pod.hookErrorsByContainer[hook.Exec.Container] + podCommandExecutor.On("ExecutePodCommand", mock.Anything, pod.item.UnstructuredContent(), "ns", "name", resourceHook.Name, hook.Exec).Return(hookError) + if hookError != nil && hook.Exec.OnError == velerov1api.HookErrorModeFail { + break hookLoop + } + } + } + } + h.HandleHooks(velerotest.NewLogger(), groupResource, pod.item, pod.hooks, test.phase, hookTracker) + + } + actualAtemptted, actualFailed := hookTracker.Stat() + assert.Equal(t, test.expectedHookAttempted, actualAtemptted) + assert.Equal(t, test.expectedHookFailed, actualFailed) + }) + } + +} + +func TestRestoreHookTrackerAdd(t *testing.T) { + testCases := []struct { + name string + resourceRestoreHooks []ResourceRestoreHook + pod *corev1api.Pod + hookTracker *HookTracker + expectedCnt int + }{ + { + name: "neither spec hooks nor annotations hooks are set", + resourceRestoreHooks: nil, + pod: builder.ForPod("default", "my-pod").Result(), + hookTracker: NewHookTracker(), + expectedCnt: 0, + }, + { + name: "a hook specified in pod annotation", + resourceRestoreHooks: nil, + pod: builder.ForPod("default", "my-pod"). + ObjectMeta(builder.WithAnnotations( + podRestoreHookCommandAnnotationKey, "/usr/bin/foo", + podRestoreHookContainerAnnotationKey, "container1", + podRestoreHookOnErrorAnnotationKey, string(velerov1api.HookErrorModeContinue), + podRestoreHookTimeoutAnnotationKey, "1s", + podRestoreHookWaitTimeoutAnnotationKey, "1m", + podRestoreHookWaitForReadyAnnotationKey, "true", + )). + Containers(&corev1api.Container{ + Name: "container1", + }). + Result(), + hookTracker: NewHookTracker(), + expectedCnt: 1, + }, + { + name: "two hooks specified in restore spec", + resourceRestoreHooks: []ResourceRestoreHook{ + { + Name: "hook1", + Selector: ResourceHookSelector{}, + RestoreHooks: []velerov1api.RestoreResourceHook{ + { + Exec: &velerov1api.ExecRestoreHook{ + Container: "container1", + Command: []string{"/usr/bin/foo"}, + OnError: velerov1api.HookErrorModeContinue, + ExecTimeout: metav1.Duration{Duration: time.Second}, + WaitTimeout: metav1.Duration{Duration: time.Minute}, + }, + }, + { + Exec: &velerov1api.ExecRestoreHook{ + Container: "container2", + Command: []string{"/usr/bin/foo"}, + OnError: velerov1api.HookErrorModeContinue, + ExecTimeout: metav1.Duration{Duration: time.Second}, + WaitTimeout: metav1.Duration{Duration: time.Minute}, + }, + }, + }, + }, + }, + pod: builder.ForPod("default", "my-pod"). + Containers(&corev1api.Container{ + Name: "container1", + }, &corev1api.Container{ + Name: "container2", + }). 
+ Result(), + hookTracker: NewHookTracker(), + expectedCnt: 2, + }, + { + name: "both spec hooks and annotations hooks are set", + resourceRestoreHooks: []ResourceRestoreHook{ + { + Name: "hook1", + Selector: ResourceHookSelector{}, + RestoreHooks: []velerov1api.RestoreResourceHook{ + { + Exec: &velerov1api.ExecRestoreHook{ + Container: "container1", + Command: []string{"/usr/bin/foo2"}, + OnError: velerov1api.HookErrorModeContinue, + ExecTimeout: metav1.Duration{Duration: time.Second}, + WaitTimeout: metav1.Duration{Duration: time.Minute}, + }, + }, + }, + }, + }, + pod: builder.ForPod("default", "my-pod"). + ObjectMeta(builder.WithAnnotations( + podRestoreHookCommandAnnotationKey, "/usr/bin/foo", + podRestoreHookContainerAnnotationKey, "container1", + podRestoreHookOnErrorAnnotationKey, string(velerov1api.HookErrorModeContinue), + podRestoreHookTimeoutAnnotationKey, "1s", + podRestoreHookWaitTimeoutAnnotationKey, "1m", + podRestoreHookWaitForReadyAnnotationKey, "true", + )). + Containers(&corev1api.Container{ + Name: "container1", + }). + Result(), + hookTracker: NewHookTracker(), + expectedCnt: 1, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + _, _ = GroupRestoreExecHooks(tc.resourceRestoreHooks, tc.pod, velerotest.NewLogger(), tc.hookTracker) + tracker := tc.hookTracker.GetTracker() + assert.Equal(t, tc.expectedCnt, len(tracker)) + }) + } +} diff --git a/internal/hook/wait_exec_hook_handler.go b/internal/hook/wait_exec_hook_handler.go index 04ad967a75..452b8c421c 100644 --- a/internal/hook/wait_exec_hook_handler.go +++ b/internal/hook/wait_exec_hook_handler.go @@ -39,6 +39,7 @@ type WaitExecHookHandler interface { log logrus.FieldLogger, pod *v1.Pod, byContainer map[string][]PodExecRestoreHook, + hookTrack *HookTracker, ) []error } @@ -73,6 +74,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks( log logrus.FieldLogger, pod *v1.Pod, byContainer map[string][]PodExecRestoreHook, + hookTracker *HookTracker, ) []error { if pod == nil { return nil @@ -164,6 +166,8 @@ func (e *DefaultWaitExecHookHandler) HandleHooks( err := fmt.Errorf("hook %s in container %s expired before executing", hook.HookName, hook.Hook.Container) hookLog.Error(err) errors = append(errors, err) + hookTracker.Record(newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, hookPhase(""), true) + if hook.Hook.OnError == velerov1api.HookErrorModeFail { cancel() return @@ -175,10 +179,13 @@ func (e *DefaultWaitExecHookHandler) HandleHooks( OnError: hook.Hook.OnError, Timeout: hook.Hook.ExecTimeout, } + hookTracker.Record(newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, hookPhase(""), false) if err := e.PodCommandExecutor.ExecutePodCommand(hookLog, podMap, pod.Namespace, pod.Name, hook.HookName, eh); err != nil { hookLog.WithError(err).Error("Error executing hook") err = fmt.Errorf("hook %s in container %s failed to execute, err: %v", hook.HookName, hook.Hook.Container, err) errors = append(errors, err) + hookTracker.Record(newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, hookPhase(""), true) + if hook.Hook.OnError == velerov1api.HookErrorModeFail { cancel() return @@ -226,6 +233,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks( "hookPhase": "post", }, ) + hookTracker.Record(pod.Namespace, pod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, hookPhase(""), true) hookLog.Error(err) errors = append(errors, err) } diff --git a/internal/hook/wait_exec_hook_handler_test.go 
b/internal/hook/wait_exec_hook_handler_test.go index 3e809ccfa5..fe632d1138 100644 --- a/internal/hook/wait_exec_hook_handler_test.go +++ b/internal/hook/wait_exec_hook_handler_test.go @@ -98,7 +98,7 @@ func TestWaitExecHandleHooks(t *testing.T) { "container1": { { HookName: "", - HookSource: "annotation", + HookSource: HookSourceAnnotation, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/foo"}, @@ -167,7 +167,7 @@ func TestWaitExecHandleHooks(t *testing.T) { "container1": { { HookName: "", - HookSource: "annotation", + HookSource: HookSourceAnnotation, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/foo"}, @@ -236,7 +236,7 @@ func TestWaitExecHandleHooks(t *testing.T) { "container1": { { HookName: "", - HookSource: "annotation", + HookSource: HookSourceAnnotation, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/foo"}, @@ -305,7 +305,7 @@ func TestWaitExecHandleHooks(t *testing.T) { "container1": { { HookName: "", - HookSource: "annotation", + HookSource: HookSourceAnnotation, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/foo"}, @@ -391,7 +391,7 @@ func TestWaitExecHandleHooks(t *testing.T) { "container1": { { HookName: "my-hook-1", - HookSource: "backupSpec", + HookSource: HookSourceSpec, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/foo"}, @@ -440,7 +440,7 @@ func TestWaitExecHandleHooks(t *testing.T) { "container1": { { HookName: "my-hook-1", - HookSource: "backupSpec", + HookSource: HookSourceSpec, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/foo"}, @@ -471,7 +471,7 @@ func TestWaitExecHandleHooks(t *testing.T) { "container1": { { HookName: "my-hook-1", - HookSource: "backupSpec", + HookSource: HookSourceSpec, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/foo"}, @@ -502,7 +502,7 @@ func TestWaitExecHandleHooks(t *testing.T) { "container1": { { HookName: "my-hook-1", - HookSource: "backupSpec", + HookSource: HookSourceSpec, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/foo"}, @@ -533,7 +533,7 @@ func TestWaitExecHandleHooks(t *testing.T) { "container1": { { HookName: "my-hook-1", - HookSource: "backupSpec", + HookSource: HookSourceSpec, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/foo"}, @@ -574,7 +574,7 @@ func TestWaitExecHandleHooks(t *testing.T) { "container1": { { HookName: "my-hook-1", - HookSource: "backupSpec", + HookSource: HookSourceSpec, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/foo"}, @@ -584,7 +584,7 @@ func TestWaitExecHandleHooks(t *testing.T) { "container2": { { HookName: "my-hook-1", - HookSource: "backupSpec", + HookSource: HookSourceSpec, Hook: velerov1api.ExecRestoreHook{ Container: "container2", Command: []string{"/usr/bin/bar"}, @@ -744,7 +744,8 @@ func TestWaitExecHandleHooks(t *testing.T) { defer ctxCancel() } - errs := h.HandleHooks(ctx, velerotest.NewLogger(), test.initialPod, test.byContainer) + hookTracker := NewHookTracker() + errs := h.HandleHooks(ctx, velerotest.NewLogger(), test.initialPod, test.byContainer, hookTracker) // for i, ee := range test.expectedErrors { require.Len(t, errs, len(test.expectedErrors)) @@ -997,3 +998,253 @@ func TestMaxHookWait(t *testing.T) { }) } } + +func TestRestoreHookTrackerUpdate(t *testing.T) { + type change 
struct { + // delta to wait since last change applied or pod added + wait time.Duration + updated *v1.Pod + } + type expectedExecution struct { + hook *velerov1api.ExecHook + name string + error error + pod *v1.Pod + } + + hookTracker1 := NewHookTracker() + hookTracker1.Add("default", "my-pod", "container1", HookSourceAnnotation, "", hookPhase("")) + + hookTracker2 := NewHookTracker() + hookTracker2.Add("default", "my-pod", "container1", HookSourceSpec, "my-hook-1", hookPhase("")) + + hookTracker3 := NewHookTracker() + hookTracker3.Add("default", "my-pod", "container1", HookSourceSpec, "my-hook-1", hookPhase("")) + hookTracker3.Add("default", "my-pod", "container2", HookSourceSpec, "my-hook-2", hookPhase("")) + + tests1 := []struct { + name string + initialPod *v1.Pod + groupResource string + byContainer map[string][]PodExecRestoreHook + expectedExecutions []expectedExecution + hookTracker *HookTracker + expectedFailed int + }{ + { + name: "a hook executes successfully", + initialPod: builder.ForPod("default", "my-pod"). + ObjectMeta(builder.WithAnnotations( + podRestoreHookCommandAnnotationKey, "/usr/bin/foo", + podRestoreHookContainerAnnotationKey, "container1", + podRestoreHookOnErrorAnnotationKey, string(velerov1api.HookErrorModeContinue), + podRestoreHookTimeoutAnnotationKey, "1s", + podRestoreHookWaitTimeoutAnnotationKey, "1m", + )). + Containers(&v1.Container{ + Name: "container1", + }). + ContainerStatuses(&v1.ContainerStatus{ + Name: "container1", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }). + Result(), + groupResource: "pods", + byContainer: map[string][]PodExecRestoreHook{ + "container1": { + { + HookName: "", + HookSource: HookSourceAnnotation, + Hook: velerov1api.ExecRestoreHook{ + Container: "container1", + Command: []string{"/usr/bin/foo"}, + OnError: velerov1api.HookErrorModeContinue, + ExecTimeout: metav1.Duration{Duration: time.Second}, + WaitTimeout: metav1.Duration{Duration: time.Minute}, + }, + }, + }, + }, + expectedExecutions: []expectedExecution{ + { + name: "", + hook: &velerov1api.ExecHook{ + Container: "container1", + Command: []string{"/usr/bin/foo"}, + OnError: velerov1api.HookErrorModeContinue, + Timeout: metav1.Duration{Duration: time.Second}, + }, + error: nil, + pod: builder.ForPod("default", "my-pod"). + ObjectMeta(builder.WithResourceVersion("1")). + ObjectMeta(builder.WithAnnotations( + podRestoreHookCommandAnnotationKey, "/usr/bin/foo", + podRestoreHookContainerAnnotationKey, "container1", + podRestoreHookOnErrorAnnotationKey, string(velerov1api.HookErrorModeContinue), + podRestoreHookTimeoutAnnotationKey, "1s", + podRestoreHookWaitTimeoutAnnotationKey, "1m", + )). + Containers(&v1.Container{ + Name: "container1", + }). + ContainerStatuses(&v1.ContainerStatus{ + Name: "container1", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }). + Result(), + }, + }, + hookTracker: hookTracker1, + expectedFailed: 0, + }, + { + name: "a hook with OnError mode Fail failed to execute", + groupResource: "pods", + initialPod: builder.ForPod("default", "my-pod"). + Containers(&v1.Container{ + Name: "container1", + }). + ContainerStatuses(&v1.ContainerStatus{ + Name: "container1", + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{}, + }, + }). 
+ Result(), + byContainer: map[string][]PodExecRestoreHook{ + "container1": { + { + HookName: "my-hook-1", + HookSource: HookSourceSpec, + Hook: velerov1api.ExecRestoreHook{ + Container: "container1", + Command: []string{"/usr/bin/foo"}, + OnError: velerov1api.HookErrorModeFail, + WaitTimeout: metav1.Duration{Duration: time.Millisecond}, + }, + }, + }, + }, + hookTracker: hookTracker2, + expectedFailed: 1, + }, + { + name: "a hook with OnError mode Continue failed to execute", + groupResource: "pods", + initialPod: builder.ForPod("default", "my-pod"). + Containers(&v1.Container{ + Name: "container1", + }). + ContainerStatuses(&v1.ContainerStatus{ + Name: "container1", + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{}, + }, + }). + Result(), + byContainer: map[string][]PodExecRestoreHook{ + "container1": { + { + HookName: "my-hook-1", + HookSource: HookSourceSpec, + Hook: velerov1api.ExecRestoreHook{ + Container: "container1", + Command: []string{"/usr/bin/foo"}, + OnError: velerov1api.HookErrorModeContinue, + WaitTimeout: metav1.Duration{Duration: time.Millisecond}, + }, + }, + }, + }, + hookTracker: hookTracker2, + expectedFailed: 1, + }, + { + name: "two hooks with OnError mode Continue failed to execute", + groupResource: "pods", + initialPod: builder.ForPod("default", "my-pod"). + Containers(&v1.Container{ + Name: "container1", + }). + Containers(&v1.Container{ + Name: "container2", + }). + // initially both are waiting + ContainerStatuses(&v1.ContainerStatus{ + Name: "container1", + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{}, + }, + }). + ContainerStatuses(&v1.ContainerStatus{ + Name: "container2", + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{}, + }, + }). + Result(), + byContainer: map[string][]PodExecRestoreHook{ + "container1": { + { + HookName: "my-hook-1", + HookSource: HookSourceSpec, + Hook: velerov1api.ExecRestoreHook{ + Container: "container1", + Command: []string{"/usr/bin/foo"}, + OnError: velerov1api.HookErrorModeContinue, + WaitTimeout: metav1.Duration{Duration: time.Millisecond}, + }, + }, + }, + "container2": { + { + HookName: "my-hook-2", + HookSource: HookSourceSpec, + Hook: velerov1api.ExecRestoreHook{ + Container: "container2", + Command: []string{"/usr/bin/bar"}, + OnError: velerov1api.HookErrorModeContinue, + WaitTimeout: metav1.Duration{Duration: time.Millisecond}, + }, + }, + }, + }, + hookTracker: hookTracker3, + expectedFailed: 2, + }, + } + + for _, test := range tests1 { + t.Run(test.name, func(t *testing.T) { + + source := fcache.NewFakeControllerSource() + go func() { + // This is the state of the pod that will be seen by the AddFunc handler. 
+ source.Add(test.initialPod) + }() + + podCommandExecutor := &velerotest.MockPodCommandExecutor{} + defer podCommandExecutor.AssertExpectations(t) + + h := &DefaultWaitExecHookHandler{ + PodCommandExecutor: podCommandExecutor, + ListWatchFactory: &fakeListWatchFactory{source}, + } + + for _, e := range test.expectedExecutions { + obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(e.pod) + assert.Nil(t, err) + podCommandExecutor.On("ExecutePodCommand", mock.Anything, obj, e.pod.Namespace, e.pod.Name, e.name, e.hook).Return(e.error) + } + + ctx := context.Background() + _ = h.HandleHooks(ctx, velerotest.NewLogger(), test.initialPod, test.byContainer, test.hookTracker) + _, actualFailed := test.hookTracker.Stat() + assert.Equal(t, test.expectedFailed, actualFailed) + }) + } +} diff --git a/pkg/apis/velero/v1/backup_types.go b/pkg/apis/velero/v1/backup_types.go index 628eda3938..8c14e7934b 100644 --- a/pkg/apis/velero/v1/backup_types.go +++ b/pkg/apis/velero/v1/backup_types.go @@ -441,6 +441,11 @@ type BackupStatus struct { // BackupItemAction operations for this backup which ended with an error. // +optional BackupItemOperationsFailed int `json:"backupItemOperationsFailed,omitempty"` + + // HookStatus contains information about the status of the hooks. + // +optional + // +nullable + HookStatus *HookStatus `json:"hookStatus,omitempty"` } // BackupProgress stores information about the progress of a Backup's execution. @@ -458,6 +463,19 @@ type BackupProgress struct { ItemsBackedUp int `json:"itemsBackedUp,omitempty"` } +// HookStatus stores information about the status of the hooks. +type HookStatus struct { + // HooksAttempted is the total number of attempted hooks + // Specifically, HooksAttempted represents the number of hooks that failed to execute + // and the number of hooks that executed successfully. + // +optional + HooksAttempted int `json:"hooksAttempted,omitempty"` + + // HooksFailed is the total number of hooks which ended with an error + // +optional + HooksFailed int `json:"hooksFailed,omitempty"` +} + // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true diff --git a/pkg/apis/velero/v1/restore_types.go b/pkg/apis/velero/v1/restore_types.go index 3438d8ff44..fa529e7c9a 100644 --- a/pkg/apis/velero/v1/restore_types.go +++ b/pkg/apis/velero/v1/restore_types.go @@ -345,6 +345,11 @@ type RestoreStatus struct { // RestoreItemAction operations for this restore which ended with an error. // +optional RestoreItemOperationsFailed int `json:"restoreItemOperationsFailed,omitempty"` + + // HookStatus contains information about the status of the hooks. + // +optional + // +nullable + HookStatus *HookStatus `json:"hookStatus,omitempty"` } // RestoreProgress stores information about the restore's execution progress diff --git a/pkg/apis/velero/v1/zz_generated.deepcopy.go b/pkg/apis/velero/v1/zz_generated.deepcopy.go index 69d7278065..7688fc0252 100644 --- a/pkg/apis/velero/v1/zz_generated.deepcopy.go +++ b/pkg/apis/velero/v1/zz_generated.deepcopy.go @@ -419,6 +419,11 @@ func (in *BackupStatus) DeepCopyInto(out *BackupStatus) { *out = new(BackupProgress) **out = **in } + if in.HookStatus != nil { + in, out := &in.HookStatus, &out.HookStatus + *out = new(HookStatus) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStatus. 
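// A minimal, hypothetical sketch of how a client could read the new HookStatus
// fields once a backup finishes, assuming the upstream Velero API package path
// and a controller-runtime client; printBackupHookStatus and the package name
// are illustrative only, not part of this change.
package hookstatusexample

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/types"
	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)

func printBackupHookStatus(ctx context.Context, c ctrlclient.Client, namespace, name string) error {
	backup := &velerov1api.Backup{}
	if err := c.Get(ctx, types.NamespacedName{Namespace: namespace, Name: name}, backup); err != nil {
		return err
	}
	// HookStatus is marked +optional/+nullable, so it stays nil until hook
	// execution has been tallied at the end of the backup.
	if backup.Status.HookStatus == nil {
		fmt.Println("no hook status recorded yet")
		return nil
	}
	fmt.Printf("hooks attempted: %d, failed: %d\n",
		backup.Status.HookStatus.HooksAttempted, backup.Status.HookStatus.HooksFailed)
	return nil
}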
@@ -802,6 +807,21 @@ func (in *ExecRestoreHook) DeepCopy() *ExecRestoreHook { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HookStatus) DeepCopyInto(out *HookStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HookStatus. +func (in *HookStatus) DeepCopy() *HookStatus { + if in == nil { + return nil + } + out := new(HookStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *InitRestoreHook) DeepCopyInto(out *InitRestoreHook) { *out = *in @@ -1362,6 +1382,11 @@ func (in *RestoreStatus) DeepCopyInto(out *RestoreStatus) { *out = new(RestoreProgress) **out = **in } + if in.HookStatus != nil { + in, out := &in.HookStatus, &out.HookStatus + *out = new(HookStatus) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreStatus. diff --git a/pkg/backup/backup.go b/pkg/backup/backup.go index ed68862365..70fbf01379 100644 --- a/pkg/backup/backup.go +++ b/pkg/backup/backup.go @@ -302,6 +302,7 @@ func (kb *kubernetesBackupper) BackupWithResolvers(log logrus.FieldLogger, itemHookHandler: &hook.DefaultItemHookHandler{ PodCommandExecutor: kb.podCommandExecutor, }, + hookTracker: hook.NewHookTracker(), } // helper struct to send current progress between the main @@ -427,8 +428,15 @@ func (kb *kubernetesBackupper) BackupWithResolvers(log logrus.FieldLogger, updated.Status.Progress.TotalItems = len(backupRequest.BackedUpItems) updated.Status.Progress.ItemsBackedUp = len(backupRequest.BackedUpItems) + // update the hooks execution status + if updated.Status.HookStatus == nil { + updated.Status.HookStatus = &velerov1api.HookStatus{} + } + updated.Status.HookStatus.HooksAttempted, updated.Status.HookStatus.HooksFailed = itemBackupper.hookTracker.Stat() + log.Infof("hookTracker: %+v, hookAttempted: %d, hookFailed: %d", itemBackupper.hookTracker.GetTracker(), updated.Status.HookStatus.HooksAttempted, updated.Status.HookStatus.HooksFailed) + if err := kube.PatchResource(backupRequest.Backup, updated, kb.kbClient); err != nil { - log.WithError(errors.WithStack((err))).Warn("Got error trying to update backup's status.progress") + log.WithError(errors.WithStack((err))).Warn("Got error trying to update backup's status.progress and hook status") } skippedPVSummary, _ := json.Marshal(backupRequest.SkippedPVTracker.Summary()) log.Infof("Summary for skipped PVs: %s", skippedPVSummary) @@ -598,6 +606,7 @@ func (kb *kubernetesBackupper) FinalizeBackup(log logrus.FieldLogger, discoveryHelper: kb.discoveryHelper, itemHookHandler: &hook.NoOpItemHookHandler{}, podVolumeSnapshotTracker: newPVCSnapshotTracker(), + hookTracker: hook.NewHookTracker(), } updateFiles := make(map[string]FileForArchive) backedUpGroupResources := map[schema.GroupResource]bool{} diff --git a/pkg/backup/item_backupper.go b/pkg/backup/item_backupper.go index ae8074521b..01258a4aa3 100644 --- a/pkg/backup/item_backupper.go +++ b/pkg/backup/item_backupper.go @@ -78,6 +78,7 @@ type itemBackupper struct { itemHookHandler hook.ItemHookHandler snapshotLocationVolumeSnapshotters map[string]vsv1.VolumeSnapshotter + hookTracker *hook.HookTracker } type FileForArchive struct { @@ -184,7 +185,7 @@ func (ib *itemBackupper) backupItemInternal(logger logrus.FieldLogger, obj runti ) log.Debug("Executing pre hooks") - if err := 
ib.itemHookHandler.HandleHooks(log, groupResource, obj, ib.backupRequest.ResourceHooks, hook.PhasePre); err != nil { + if err := ib.itemHookHandler.HandleHooks(log, groupResource, obj, ib.backupRequest.ResourceHooks, hook.PhasePre, ib.hookTracker); err != nil { return false, itemFiles, err } if optedOut, podName := ib.podVolumeSnapshotTracker.OptedoutByPod(namespace, name); optedOut { @@ -234,7 +235,7 @@ func (ib *itemBackupper) backupItemInternal(logger logrus.FieldLogger, obj runti // if there was an error running actions, execute post hooks and return log.Debug("Executing post hooks") - if err := ib.itemHookHandler.HandleHooks(log, groupResource, obj, ib.backupRequest.ResourceHooks, hook.PhasePost); err != nil { + if err := ib.itemHookHandler.HandleHooks(log, groupResource, obj, ib.backupRequest.ResourceHooks, hook.PhasePost, ib.hookTracker); err != nil { backupErrs = append(backupErrs, err) } return false, itemFiles, kubeerrs.NewAggregate(backupErrs) @@ -293,7 +294,7 @@ func (ib *itemBackupper) backupItemInternal(logger logrus.FieldLogger, obj runti } log.Debug("Executing post hooks") - if err := ib.itemHookHandler.HandleHooks(log, groupResource, obj, ib.backupRequest.ResourceHooks, hook.PhasePost); err != nil { + if err := ib.itemHookHandler.HandleHooks(log, groupResource, obj, ib.backupRequest.ResourceHooks, hook.PhasePost, ib.hookTracker); err != nil { backupErrs = append(backupErrs, err) } diff --git a/pkg/cmd/util/output/backup_describer.go b/pkg/cmd/util/output/backup_describer.go index b47d732d3b..324f7aee81 100644 --- a/pkg/cmd/util/output/backup_describer.go +++ b/pkg/cmd/util/output/backup_describer.go @@ -392,6 +392,12 @@ func DescribeBackupStatus(ctx context.Context, kbClient kbclient.Client, d *Desc } d.Printf("Velero-Native Snapshots: \n") + + if status.HookStatus != nil { + d.Println() + d.Printf("HooksAttempted:\t%d\n", status.HookStatus.HooksAttempted) + d.Printf("HooksFailed:\t%d\n", status.HookStatus.HooksFailed) + } } func describeBackupItemOperations(ctx context.Context, kbClient kbclient.Client, d *Describer, backup *velerov1api.Backup, details bool, insecureSkipTLSVerify bool, caCertPath string) { diff --git a/pkg/cmd/util/output/backup_structured_describer.go b/pkg/cmd/util/output/backup_structured_describer.go index 4a69fc057c..e7de9f776a 100644 --- a/pkg/cmd/util/output/backup_structured_describer.go +++ b/pkg/cmd/util/output/backup_structured_describer.go @@ -303,6 +303,11 @@ func DescribeBackupStatusInSF(ctx context.Context, kbClient kbclient.Client, d * backupStatusInfo["veleroNativeSnapshotsDetail"] = snapshotDetails return } + + if status.HookStatus != nil { + backupStatusInfo["hooksAttempted"] = status.HookStatus.HooksAttempted + backupStatusInfo["hooksFailed"] = status.HookStatus.HooksFailed + } } func describeBackupResourceListInSF(ctx context.Context, kbClient kbclient.Client, backupStatusInfo map[string]interface{}, backup *velerov1api.Backup, insecureSkipTLSVerify bool, caCertPath string) { diff --git a/pkg/cmd/util/output/restore_describer.go b/pkg/cmd/util/output/restore_describer.go index c64c1a88a5..3aed968c5e 100644 --- a/pkg/cmd/util/output/restore_describer.go +++ b/pkg/cmd/util/output/restore_describer.go @@ -180,6 +180,12 @@ func DescribeRestore(ctx context.Context, kbClient kbclient.Client, restore *vel d.Println() describeRestoreItemOperations(ctx, kbClient, d, restore, details, insecureSkipTLSVerify, caCertFile) + if restore.Status.HookStatus != nil { + d.Println() + d.Printf("HooksAttempted: \t%d\n", 
restore.Status.HookStatus.HooksAttempted) + d.Printf("HooksFailed: \t%d\n", restore.Status.HookStatus.HooksFailed) + } + if details { describeRestoreResourceList(ctx, kbClient, d, restore, insecureSkipTLSVerify, caCertFile) d.Println() diff --git a/pkg/restore/restore.go b/pkg/restore/restore.go index ea0af47c97..7db2f6d4d8 100644 --- a/pkg/restore/restore.go +++ b/pkg/restore/restore.go @@ -325,6 +325,7 @@ func (kr *kubernetesRestorer) RestoreWithResolvers( resourceModifiers: req.ResourceModifiers, disableInformerCache: req.DisableInformerCache, featureVerifier: kr.featureVerifier, + hookTracker: hook.NewHookTracker(), } return restoreCtx.execute() @@ -377,6 +378,7 @@ type restoreContext struct { resourceModifiers *resourcemodifiers.ResourceModifiers disableInformerCache bool featureVerifier features.Verifier + hookTracker *hook.HookTracker } type resourceClientKey struct { @@ -629,11 +631,6 @@ func (ctx *restoreContext) execute() (results.Result, results.Result) { updated.Status.Progress.TotalItems = len(ctx.restoredItems) updated.Status.Progress.ItemsRestored = len(ctx.restoredItems) - err = kube.PatchResource(ctx.restore, updated, ctx.kbClient) - if err != nil { - ctx.log.WithError(errors.WithStack((err))).Warn("Updating restore status.progress") - } - // Wait for all of the pod volume restore goroutines to be done, which is // only possible once all of their errors have been received by the loop // below, then close the podVolumeErrs channel so the loop terminates. @@ -668,6 +665,19 @@ func (ctx *restoreContext) execute() (results.Result, results.Result) { } ctx.log.Info("Done waiting for all post-restore exec hooks to complete") + // update hooks execution status + if updated.Status.HookStatus == nil { + updated.Status.HookStatus = &velerov1api.HookStatus{} + } + updated.Status.HookStatus.HooksAttempted, updated.Status.HookStatus.HooksFailed = ctx.hookTracker.Stat() + ctx.log.Infof("hookTracker: %+v, hookAttempted: %d, hookFailed: %d", ctx.hookTracker.GetTracker(), updated.Status.HookStatus.HooksAttempted, updated.Status.HookStatus.HooksFailed) + + // patch the restore status + err = kube.PatchResource(ctx.restore, updated, ctx.kbClient) + if err != nil { + ctx.log.WithError(errors.WithStack((err))).Warn("Updating restore status") + } + return warnings, errs } @@ -1963,6 +1973,7 @@ func (ctx *restoreContext) waitExec(createdObj *unstructured.Unstructured) { ctx.resourceRestoreHooks, pod, ctx.log, + ctx.hookTracker, ) if err != nil { ctx.log.WithError(err).Errorf("error getting exec hooks for pod %s/%s", pod.Namespace, pod.Name) @@ -1970,7 +1981,7 @@ func (ctx *restoreContext) waitExec(createdObj *unstructured.Unstructured) { return } - if errs := ctx.waitExecHookHandler.HandleHooks(ctx.hooksContext, ctx.log, pod, execHooksByContainer); len(errs) > 0 { + if errs := ctx.waitExecHookHandler.HandleHooks(ctx.hooksContext, ctx.log, pod, execHooksByContainer, ctx.hookTracker); len(errs) > 0 { ctx.log.WithError(kubeerrs.NewAggregate(errs)).Error("unable to successfully execute post-restore hooks") ctx.hooksCancelFunc()