diff --git a/changelogs/unreleased/6958-blackpiglet b/changelogs/unreleased/6958-blackpiglet new file mode 100644 index 0000000000..7b402258fb --- /dev/null +++ b/changelogs/unreleased/6958-blackpiglet @@ -0,0 +1 @@ +Change controller-runtime List option from MatchingFields to ListOptions \ No newline at end of file diff --git a/changelogs/unreleased/7069-27149chen b/changelogs/unreleased/7069-27149chen new file mode 100644 index 0000000000..243596d4ad --- /dev/null +++ b/changelogs/unreleased/7069-27149chen @@ -0,0 +1 @@ +improve discoveryHelper.Refresh() in restore \ No newline at end of file diff --git a/changelogs/unreleased/7117-allenxu404 b/changelogs/unreleased/7117-allenxu404 new file mode 100644 index 0000000000..2cfc179b2f --- /dev/null +++ b/changelogs/unreleased/7117-allenxu404 @@ -0,0 +1 @@ +Add hooks status to backup/restore CR \ No newline at end of file diff --git a/changelogs/unreleased/7130-qiuming-best b/changelogs/unreleased/7130-qiuming-best new file mode 100644 index 0000000000..f6f6c6f74f --- /dev/null +++ b/changelogs/unreleased/7130-qiuming-best @@ -0,0 +1 @@ +Node agent restart enhancement diff --git a/config/crd/v1/bases/velero.io_backups.yaml b/config/crd/v1/bases/velero.io_backups.yaml index 84769dcba9..41ce2ada8b 100644 --- a/config/crd/v1/bases/velero.io_backups.yaml +++ b/config/crd/v1/bases/velero.io_backups.yaml @@ -35,14 +35,6 @@ spec: spec: description: BackupSpec defines the specification for a Velero backup. properties: - backupConfig: - description: BackupConfig defines the configuration for the backup. - properties: - parallelFilesUpload: - description: ParallelFilesUpload is the number of files parallel - uploads to perform when using the uploader. 
- type: integer - type: object csiSnapshotTimeout: description: CSISnapshotTimeout specifies the time used to wait for CSI VolumeSnapshot status turns to ReadyToUse during creation, before @@ -485,6 +477,14 @@ spec: description: TTL is a time.Duration-parseable string describing how long the Backup should be retained for. type: string + uploaderConfig: + description: UploaderConfig specifies the configuration for the uploader. + properties: + parallelFilesUpload: + description: ParallelFilesUpload is the number of files parallel + uploads to perform when using the uploader. + type: integer + type: object volumeSnapshotLocations: description: VolumeSnapshotLocations is a list containing names of VolumeSnapshotLocations associated with this backup. @@ -543,6 +543,22 @@ spec: description: FormatVersion is the backup format version, including major, minor, and patch version. type: string + hookStatus: + description: HookStatus contains information about the status of the + hooks. + nullable: true + properties: + hooksAttempted: + description: HooksAttempted is the total number of attempted hooks + Specifically, HooksAttempted represents the number of hooks + that failed to execute and the number of hooks that executed + successfully. + type: integer + hooksFailed: + description: HooksFailed is the total number of hooks which ended + with an error + type: integer + type: object phase: description: Phase is the current state of the Backup. enum: diff --git a/config/crd/v1/bases/velero.io_restores.yaml b/config/crd/v1/bases/velero.io_restores.yaml index 3802c8aa6f..76344e27ff 100644 --- a/config/crd/v1/bases/velero.io_restores.yaml +++ b/config/crd/v1/bases/velero.io_restores.yaml @@ -387,14 +387,6 @@ spec: - name type: object x-kubernetes-map-type: atomic - restoreConfig: - description: RestoreConfig specifies the configuration for the restore. - properties: - writeSparseFiles: - description: WriteSparseFiles is a flag to indicate whether write - files sparsely or not. 
- type: boolean - type: object restorePVs: description: RestorePVs specifies whether to restore all included PVs from snapshot @@ -426,6 +418,14 @@ spec: restore from the most recent successful backup created from this schedule. type: string + uploaderConfig: + description: UploaderConfig specifies the configuration for the restore. + properties: + writeSparseFiles: + description: WriteSparseFiles is a flag to indicate whether write + files sparsely or not. + type: boolean + type: object required: - backupName type: object @@ -448,6 +448,22 @@ spec: description: FailureReason is an error that caused the entire restore to fail. type: string + hookStatus: + description: HookStatus contains information about the status of the + hooks. + nullable: true + properties: + hooksAttempted: + description: HooksAttempted is the total number of attempted hooks + Specifically, HooksAttempted represents the number of hooks + that failed to execute and the number of hooks that executed + successfully. + type: integer + hooksFailed: + description: HooksFailed is the total number of hooks which ended + with an error + type: integer + type: object phase: description: Phase is the current state of the Restore enum: diff --git a/config/crd/v1/bases/velero.io_schedules.yaml b/config/crd/v1/bases/velero.io_schedules.yaml index 4d2f57cc41..76d8c2884a 100644 --- a/config/crd/v1/bases/velero.io_schedules.yaml +++ b/config/crd/v1/bases/velero.io_schedules.yaml @@ -65,14 +65,6 @@ spec: description: Template is the definition of the Backup to be run on the provided schedule properties: - backupConfig: - description: BackupConfig defines the configuration for the backup. - properties: - parallelFilesUpload: - description: ParallelFilesUpload is the number of files parallel - uploads to perform when using the uploader. 
- type: integer - type: object csiSnapshotTimeout: description: CSISnapshotTimeout specifies the time used to wait for CSI VolumeSnapshot status turns to ReadyToUse during creation, @@ -522,6 +514,15 @@ spec: description: TTL is a time.Duration-parseable string describing how long the Backup should be retained for. type: string + uploaderConfig: + description: UploaderConfig specifies the configuration for the + uploader. + properties: + parallelFilesUpload: + description: ParallelFilesUpload is the number of files parallel + uploads to perform when using the uploader. + type: integer + type: object volumeSnapshotLocations: description: VolumeSnapshotLocations is a list containing names of VolumeSnapshotLocations associated with this backup. diff --git a/config/crd/v1/crds/crds.go b/config/crd/v1/crds/crds.go index 1c1cc08cac..3acbadc8ee 100644 --- a/config/crd/v1/crds/crds.go +++ b/config/crd/v1/crds/crds.go @@ -30,14 +30,14 @@ import ( var rawCRDs = [][]byte{ []byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xb4VAo\xe46\x0f\xbdϯ 
\xf6;\xec\xe5\xb3g\xb7\xbd\x14\xbem\xd3\x16\b\x9a\x04A\x12\xe4Nۜ\x19mdI\x95\xa8I\xa7E\xff{A\xc9\xcexl'\xb3Y\xa0\xbaY\xa2\x1e\xc9G>ZEQ\xacЩG\xf2AYS\x01:E\x7f2\x19\xf9\n\xe5\xd3O\xa1Tv\xbd\xff\xbczR\xa6\xad\xe0\"\x06\xb6\xdd\x1d\x05\x1b}C\xbf\xd0F\x19\xc5ʚUG\x8c-2V+\x004\xc62\xcav\x90O\x80\xc6\x1a\xf6Vk\xf2ŖL\xf9\x14k\xaa\xa3\xd2-\xf9\x04>\xb8\xde\x7f*?\xffP~Z\x01\x18쨂\x1a\x9b\xa7\xe8<9\x1b\x14[\xaf(\x94{\xd2\xe4m\xa9\xec*8j\x04}\xebmt\x15\x1c\x0f\xf2\xed\xdes\x8e\xfa\xe7\x04t7\x00\x1dґV\x81\x7f_<\xbeR\x81\x93\x89\xd3ѣ^\n$\x1d\ae\xb6Q\xa3\x9f\x19\x88\x83\xd0XG\x15\xdcH,\x0e\x1bjW\x00}\xa6)\xb6\x02\xb0m\x13w\xa8o\xbd2L\xfe\xc2\xea\xd8\r\x9c\x15\xf05Xs\x8b\xbc\xab\xa0\x1c\xd8-\x1bO\x89\xd8\a\xd5Q`\xec\\\xb2\x1d\b\xfb\xb2\xa5\xfe\x9b\x0f\xe2\xbcE\xa69\x980W\x1ec}88:A9\x12\x01\xa3\xb3\x8c\x18\xd8+\xb3]\x1d\x8d\xf7\x9f3\x15͎:\xacz[\xeb\xc8|\xb9\xbd|\xfc\xf1\xfed\x1b\xc0y\xebȳ\x1aʓר\xfdF\xbb\x00-\x85\xc6+ǩ9>\n`\xb6\x82V\xfa\x8e\x02\xf0\x8e\x06N\xa9\xedc\x00\xbb\x01ީ\x00\x9e\x9c\xa7@&w\xe2\t0\x88\x11\x1a\xb0\xf5Wj\xb8\x84{\xf2\x02\x03ag\xa3n\xa5]\xf7\xe4\x19<5vk\xd4_/\xd8\x01\xd8&\xa7\x1a\x99\xfa\x1e9\xaeTC\x83\x1a\xf6\xa8#\xfd\x1fд\xd0\xe1\x01<\x89\x17\x88f\x84\x97LB\t\xd7\xd6\x13(\xb3\xb1\x15\xec\x98]\xa8\xd6\xeb\xad\xe2Av\x8d\xed\xbah\x14\x1f\xd6IA\xaa\x8el}X\xb7\xb4'\xbd\x0ej[\xa0ov\x8a\xa9\xe1\xe8i\x8dN\x15)t\x93\xa4Wv\xed\xff|/\xd4\xf0\xf1$\xd6Y-\xf3Jby\xa3\x02\xa2\x16P\x01\xb0\xbf\x9a\xb38\x12-[\xc2\xceݯ\xf7\x0f0\xb8NŘ\xb2\x9fx?^\f\xc7\x12\ba\xcal\xc8\xe7\"n\xbc\xed\x12&\x99\xd6Ye8}4Z\x91\x99\xd2\x1fb\xdd)\x96\xba\xff\x11)\xb0Ԫ\x84\x8b4\x8b\xa0&\x88N\xd4Жpi\xe0\x02;\xd2\x17\x18\xe8?/\x800\x1d\n!\xf6\xdbJ0\x1e\xa3S\xe3\xcc\xda\xe8`\x18\x81\xaf\xd4k:\xd6\xee\x1d5R>aP\xae\xaa\x8dj\x926`c=\xe0̾<\x81^\x96\xae\xac<\xfc\xee\xd9z\xdcҕ͘S\xa3\xc5\xd8&w\x86\xe0d\xb2d\x19Ӳ\xe1\f\x1b\x80w\xc8#\xfd2*\xf32\x06\x16\xf3y\xa3\b\xa9\x10(r6h\x1a\xfa-u\x94i\x0egr\xba^\xb8\")\xed\xec3\xd8\r\x93\x19\x83\xf6\xb1.dR\x13\xf8h\xde\x15\xec\xe90?\x13\xe6݉1(\xd3J\x1b\xf4\xd3T\x9c\f\xd4K]ɴ\xe0O\xff
\x9b\xe3E&vsw\x05\x13\x92\x83\xce\x14+\r\x8e\xdd!F\x98&\x94|\xc1a\x11\xe5\xa9L̑\x1a\xa2\xa0T\xa0A\x18M\xcc\x11HFKS) rO~\xaev\xa0\x04\x18\xd05hB2^i\x03\x8ahC\r\x10j\b%\xa5d\xc2\x10&\x88a\x05\x90?\xbd\xbd\xdb\x12\xb9\xfb\x1d2\xa3\t\x159\xa1ZˌQ\x0399I^\x15\xe0\xda\xfeySC-\x95,A\x19\x16\xe8\xecJKxZ_{\xc3{i)\xe0j\x91\xdcJ\r\xb8ax*B\xee\x89f\xc7c\x8eL7\xc3E9\xea\x00&\xb6\x12\x15\x1e\xf9\r\xb9\ae\xc1\x10}\x94\x15ϭ\xb0\x9d@Y\x82e\xf2 \xd8?jؚ\x18\x89\x9drj\xc0\v@S\x980\xa0\x04\xe5\xe4Dy\x057H\x92\x82\x9e\x89\x02\xdb\v\xa9D\v\x1eV\xd1\x1b\xf2\xabT@\x98\xd8\xcb7\xe4hL\xa9\u07fczu`&L\x9aL\x16E%\x989\xbfB\xf9g\xbb\xcaH\xa5_\xe5p\x02\xfeJ\xb3Ú\xaa\xec\xc8\fd\x96\x91\xafh\xc9ֈ\xba\xc0\x89\xb3)\xf2\xff\x17\x04@\xbf\xec\xe0j\xceV\x18\xb5QL\x1cZ?\xa0\xd4Op\xc0N\x00'_\xae\xa9\x1bECh\xfb\xc9R\xe7\xf3\xfb\xfb\x87\xb6\xec1ݧ>ҽ%\x90\r\v,\xc1\x98\u0603rL\xdc+Y L\x10\xb9\x93>\x14]\xce@\xf4ɯ\xab]\xc1\x8c\xe5\xfb\xdf+\xd0V\xc8\xe5\x86ܢ&!; U\x99[\xc9ܐ\xad \xb7\xb4\x00~K5<;\x03,\xa5\xf5\xda\x126\x8d\x05m%د\xec\xa8\xd6\xfa!\xe8\xb2\x11~9\x85p_B֙0\xb6\x15۳\f\xa7\x05\xd9K\xd5\xe8\v\xa7\xae6\x1d\x90\xf1)k\x8b\xab}+Ş\x1d\xfa\xbfEQqU;\xc8d\xf8\xa9R\r2\xf6k\f\x8fi\\\xf0W\xaa(\xe7\xc0?0\x0e\xfa\xb7\x92K\x9aǪ\xf5P\xbb\x1b\xb6\xb22n\xd1\x10U\xb1\x03eu\xc5\xde\xfeXw\x10\x05JH\x85\x8dQG\x94\xa0\xf6R\x15\xe4\xe9\b\x82T:L\fW\x03\xd4pd\xa4\xe6\xb2\xd5\"\aP\x83\x1a#2`K\xa6ٽ\xa0\xa5>J\xf3\xc0\n\x90\x95\x99a\xc7\xed\xfd\xb6\xd7 H\x85g\v\xea\xf7JCn\a\xf3D\x99\xb1\xac\x89 }{\xbf%_P\xd5\ax\xa8\xf2+ML\xa5\x04\x92\xe23\xd0\xfc\xfc \x7f\xd3@\xf2\n\xb5F\xa6\x00\xd9}Cv\xb0\x97\n\"p\x15\xd8\xf6\xb62(e%T#J\xb22\x1b\xf2p\x04+B\xb4\xe2\xc6+ 
\xa6\xc9\xeb\x9fH\xc1De`Hڑ\x99\x86D\xa1\x86\x16\xf2\x04j\x86^﨡\xbf\xdaz=2\xd9\xf6\x04\x01ؑ\xee<\xc9vg\xfbcdT^\xac\xc9v߂\xc84Y\xad\x88Td\xe5l\x91Ս\x9b\x02\x15\xe3f\xcdD\xab\x8f\b\xc4'\xc6y\xe8w\xd9\xc8\x1d\x01\x1d\xef\xf4\x83\xfc\xa0\xdd\x14\x9d#\xc4H\xb3\x16]\x9e\x8e`\x8e\xa0H)\x83\x15\x10\xc1\xdb\xce'\xa2\xcf\xda@\xe1\xa9\x12\xd6\xde@D\xd4K\x9c{\x10\xda\x12\xd5\xe3<\x1c\xa7\xa88\xa7;\x0eo\x88Qհ;G\x86\x9d\x94\x1ch\xdf\x0e\xe8\xd3\xe13hò\x19*\xac\xfadp\xad\"DP\xfe\a\x1c[L\"\x9aYf\xe8#\x10\x1a\xa8a\xed\x13\xce[D\xecP\x80\xfc\x97 \xef\xec\xe2\x99\xd9%-\xa6\xe4\xdc\xe2ɀ\xa32\x13\x92p)\x0e\xa0\\o\xd60\t\x92\xa3\xc0\xcaVN욥\x80\xdbŗ\xec+\xbb\x9cŔ\x94\x9dţ2\xc0\x846@\xf3\xcd\xea\x9a\f\x82\xaf\x19\xafr\xc8o\x9d5zo\xed\xe8\x8aW\x16\xd7s>\xd2:_#)\x126p\x13\x8d\xccim]\xaatS\xf1NA\x9ay6\xb7\x99\x1d̳R1\x89qzW\xb6м\x88Qq\xfea\xa2\r\xca\x0f\x13m\xa6\xfc0\xd1F\xcb\x0f\x13m\xbe\xfc0\xd1|\xf9a\xa2\x85\xf2\xc3D\xfba\xa2MU\x9b\xd2\xd6s\x18\xb9k\x8a#?\xceb\x91p\xac=\x85\xe2\x04|\x1f\x85\xe1\xe3\xbcS#3\xb7\xf1V\x918\xfe\xe4\xd8p\xddZJ\xeaPM;A\x82x\xbb\x8bN3\xc1\x9b\xdf\x10/\x1f:\xbd(^~;\xd9\xf8J\xf1\xf2\x1eþ\xd5}\xa5h\xf90\xfee\xd1\xf27>T\xa3\x00\x1a\xb6\xe7\xddY|>\xd6e\xaf\xb7\x01\xe0?8\xfev\x10\x1fv\x19\xe3\x9f=\xda~\x84\xf9\x89\x81\U0006bfec\xbe?J/\xa6\xed(5\ad\x8a\f*ܷ\xb6~e;\xb8\xab\x1bH\xf7}\n\xe7RiL\x8d\x98\x9f\xa2\xd7P˴\b\xf6\xbdNf\x03ŧү\x15i7:\xb7\x91&sw:#\xe3\xc1M\x00}\x16\xd9QI!+\xed\xf7\r,\xf4\xb7\xb8}\xe1\x8fB1\f,Q\xc1\xbe&GYE\"\xb6'h7\x13\xbf7\x1e\xb5\xe7Ϩ\xc1\xd0\xd3\xebM\xf7\x17#}\f\x1fyb\xe6\x18\xc1\x13\xef\xea\xd2<\xb7\x06h+ 
?L8\x7f\xf3\xbf/HD*\"\x18\x1f[\xb0\xea\xf4\b\x9d\xa5\xe9S\xe96\x89\x16\xaf\xfc\xd3\x1b\x1ciQ~\x17\xc7\xf6uc\xf7F\x8c\xc0\xa5\x87\xd9\xe9W\x18ң\xf7\xa6\xc3\xed\x96\xc4\xec\xf5#\xf2F\x81\xceG\xea\xa5\xecM\xcdD\xe5]\x10\x8b\x97\x18\x87\xfd\xcdG\xef)\xd1v\x17\xc5\xd8͆*'F\xd6uc\xe6\xa6A.\x88\xa7K\"\xce|\xec\xdc\xe2\x889\x1f\xa169\x8e\xe48\xb9H\x04\xdc$\xe0\xd1踩\xb8\xb7\x99}\xefaL\\z\xb4\xdb$h\x8c\x84\x9b\x8fq\xbb^$\xfb5\xbc\xecqU3\x1b\xa76\xeb\x85O\xe37\x1b\x89\xb6$\xfel\x96b\x17ƚձd#\xfd.\x8d0\xebF\x90\x8d\x00M\x89+\x1b\x89\x1b\x1b\x818\x19M\x96\x1a-6\x02{fٝ\x94\x92\xc9\x1f\x97D\x89ųِ\xd9Ր\xffQ\xf2w)\x19\xa4\xea\x18\x97s\x0eͧ^u\xcb\xf9`cM\x1b\xab1;\x95\x99\xe3rc\xb5\xa8\xb8a%\xc7\xe3\xc5\x13ˣ>\xbb9¹N\f\xf1\xbb\xc4\xeb\x9a.\x99\t\xf9\xf4\xb9\x16\xe6M\xcf䦚<\x01\xe7\x84\xc6Dq0\xf2\xcc%d\xca\xe4\x1a\xec\na\xa7\xa7Oy\xe1\xf36\xdd8y\xc7\x1b\xa9\xb1\x13\x18s\x84\xc2B\x19O{2\xaaʧ\xcdIg\xf9ⷿW\xa0\xce\x04ӳ\xd4\xf6\xc5\xcc}(7-\xb5\xf5\x85\x82\xa2\xf0\xdaƥ\x01\xeb\x99\xd9\xcd\xf4$o\x85[\xf0\xa2`{8\"\x1c\xab!x\xcdk\xab\f\xad\xd70R5\xbe\x11+\xeb֑\xdf\xe7,\xd5\xd4\xcbD\xcf\xebh,w5f\x17\xf9gq7.w8&@\xa6^\x0eJ;\x10\x9f\xbd\f\xf4\\\x8eǜ\xeb\x91ls\xa5]\xf6y\x8eK>\v.\xf7,pA\x969!\xc9dJ\xb9\xc4\xf3,\xae\xc83:#\xcf\xe1\x8e\\\xe6\x90̀\xec]\xceI\xb9v\x93\x14\xec\x91|ޙ\x12\xac1\x7f$9}\x9d&\xe1\x1aM\xc2a\xe5\x1c\xa6\t\xd7e\x96]\x93I\xa0\xe13\xb9*\xcf\xe4\xac<\x87\xbb\xf2\xbc\x0eˬ\xcb2+93?/\xbb\xder\xf1\xe6\xbdT9\xa8ɳ\x8eTќ\x14ʞ\x7f\xd1\xed\xb3\xb7\xf3\x1fr\xca\xd9Z\x1dS6\xb6a]\xdfz\xcf\xc8\xcfL\xf8sT+\x84\xadu\xbfs\x00\xd3\x18\"\xf1\xfd\xff\xc6\xca\xf3i_ݩ\x8d\x86\x92*\xb4og\x92Ѭ(\xf9\xd9\xfa\r\x11\x98\xab6\x88\xcb\x04\"*|\xa1\xff;\xc9Y\x16\xb1\xb4\xa2Ʌ\\\xe5AJ\bLy\x94\xb5\x8f\xbeK[1nh\xa1Q\xd6M\xaf\xb8\x97\x9c˧\x85\xee8-\xd9\x7f`\xba\xec\xf9=\x9c\xb7w[\xac\x1a$\x05\xd3l\xd7\x11B5\xd2;\xb0+f3\x9c\xb1\x19\xbf\xddw 
F\"\xed\xea?QZ\xeb\x15\x9b\x8de]rQ\x7fV\xd3\xdcm\x1dv\x1b\x14\x16*\xceDb\xac\x8792\x95\xafK\xaa\xcc\xd9\xc5\x15\xdc\xd48\x8c\xef\xe3\x84usj\xb7ety\x19\xe6]\x8e\xd26\xa4_\xc6üs\xd9=\n\xedS\xf4\x12<Ư\xf2\xcd^\xe2\xbb\"\x1e\xe3&\xc8\x1a)\x15\xf9\x1c\rJ\xba\xda.\x96\xf6\xa9m\x7f\x95'x\x17\xdd\xcd\xea\x90\xe7\xbeW=\x12N\x14 \xba\xe4\xaeS\xd9A19\xe7e\xba(\x1e\x1f\x14\xba\xf6\xe9;\x13\xc7\xe2kG\x86\x122\x97\x06\xb8:\xbekc\xa7\xd7\xdd\x17t\xadj\x15\xe6\x8d\x1d\xef<\x85\xad\xab~\x82\xbb\xbf^?FJ\x1b\xa9\xe8\x01~\x91.\a\xf6\x1c\r\xba\xb5;\tн\xc9\x13b\x16\xc3l\x88\xb9\x02>\x1bw\x0fX\x13\x8a3\x98\x87\x87_\xdc\x00\f+`\xf3\xce'\u07b6\xdaN\x83\xa5f\x18\x98k\xb4\xb3\xff=F\xd6\v\x82\xf9d[\xfci\xe1\xad\x00Ý1\xecm\x11\xf6\xa7N\"\xe9@\xa29\x11\xfd\x12o\xd5\xda_j1ə\x1eQ\t\x1d\x83\xd3z\xd4\x00w^\xf1\xba\xf2u\xb3.\x8e\x19\x93ci\xdf1\xc3\xf6|\xe2w\x97\x88\xdb?\xf3\xe0\x83\xe6+\x85)\x13}\x92nL1\xf8\r\xb9\xdf;Q'\xfa\xad1\xd6Q\x8ei\xee\bz#m\xeb\x05N\x1aʛ\f\xec1\x95R7\xc1\x88\x95\xc9P\x15g\x80L0n*\xf7zl\xac\xb7>\xc6\xf9\x92\xb1\xd6m\xd3Ǫ\xab,\x03\xad\xf7\x15\xe7\xe7:\xbez\xc9\xc0c\xab˕H\xf1\x812~\x11\x1d\\\xc3\x11\"\xb8\xb1\x8d\xea\xd1$6\xfb\xa0N\x10y\x98\xbc\x83\xa5\xc0\x16\xbcu\xb0\x8c\x0e\x9e\x05>\xd6J\x1bZ\xcceN\xbf\x1d\xb6\xc0\xf7ETފΪ\xd3\x7f?Qݰ9fS6\xe0\\K4A-4\xc8\t\x9c@\x10)0j\x1esq\xba7p\xfam\"P\xdbP|X\xbe{\xb2 ,p\xc1\x93\xf4\xef\xa6<\xe0\xf2\xadN\xa0^\xea\t\x98uB\xf7\b\x11\x86\x92\xe9\\\xbb7\xd66\x82u\x14h\xd2\xd2\x1fյ\x99f]=\x9f\xac\xb4n\xef\xb7c-G%8T\x88\xf1o\xf0p\xc27*\xa9\xe1\xc8RU\xd4pds\n\xaa\xa3\x8e\"\x83k\x14\xd4Շ\x89su6\xe32Vr\x16\x00^/\n\xe9\xf4\xdd\xfd\xa2\x02\xb4\xa6\x87\x90j\xf9\xc9\x1a`\a\x10\x80\x9b\x11\x91\xd1\xf8\xed\xdd\xe6>J7Ѱ;\x87\xa2\x99\xa9\xa8\xef D\xfb\xb5j\xbd\x8c)`.\x0f\xee!\x04\x16^\"\n\x96\xe9B\x9a|-\x99J\xb1d\xdf\xd7\x15-m\xf0\b\x19\x19Ѽ\x1c\x05\x9c\x1d\x985\x03-\x93\x0eT\xed\xe8\x01֙\xe4\x1cP\xd7\x0e\xf1z\xce\xc9\xeao\xfd|\x06\xaag\x87\xf6\xa1]ןW8n\xbb<}\xd4\x05\xbe\xe2CB\x86)h^\xe6\x1a 
$\xb1\xe3E\x96\xab\xa3B\xf4\r\xab!\xa6\xed\xbaa\x82y\xbd\xeaw\xb5\xfc\x93V7\xde\x17\x8a;\xeb\x05\xfd]\xaa\x1bR0a\xff\xa1\"w\a\n\xa1\xf1\"\xfc1\x83\xf6\f\xdew\xb6N}\xf9\xb2eGB\x98\x10c\x9eZ\xfc\xc2ݚ|\x84\xa1c\xe1\xee\xd0A\x8eGh\xb1\x87\xbbl\x95\xad\xb8S\xf2\xa0@\x0fg՚\xfc\x8d2\xc3\xc4\xe1\x83Tw\xbc:0\xd1\xd8\x1b\x8b*\xdfQe\x18\xe5\xfc\xec\xf0\x89!\xca\x04\xe5\xec\x1f1\xee\xb4\x7f\x9c\aT\xab\xdb\xc8o\th\x8c\xfd\xf0\x0e\xecR;\xean\xc4\x05\xc1\xd3uN\x16|\xb5f˟\t'\xbbx_o'+\xd3Q~\x8d\xf2\x8c\br\xe8sC>J\x03\xe1$\x99ua\xda\xe5\x02\xb4Y\xc3~/\x95q'\f\xeb5a{\xef\xbeĶ\x88)\xe3\x18\t\xe3^\xfe\"\xcc4\xa1\x83\xcd|Ý\t\x85j\x033\xe4\x16\xf4\xec\xf6\x0fi\x96Y\xef\x18^iCyD#\x7fS\xa46\xfa\x89v\xbe@\xfe[ʶ\xea\xb6]\x7f\xf8<\x15\x82s\x94û\xb5n5\x8a\xae\xcd\x04\xf7\x95@\x90'Ō\xb1+@;T\x88\x18\xab\xf39'\xdaj\xc1\x8bީ\"\xceZ؎\x1f]v7$\xea\xcacƆ\x1f\x1c\xbe\xaf\xb4C\x12\x8c\f\xcb]$b:\xb4\xb5\xac̎T\x1c\xacP)Y\x1d\x8eA.G\xd6\xf2\x11\xb8y\x05\xf8\xa6\x17j\b\x1d\xc24L\xa5D\xeb(\xa7\xbe/֠K\xb3\xc7QL\xfdQtx}\xf2\x95O\x9c\xbe\xde+Y\xac=/0\xba\xe2\xc6\x1f\xaf(&\xad\xc3n\x8eQ\x92\x13\xf7\xa6\x92\xcfP\x8cbP\x96 \b\xd5\x1e\x9f\x84\xc4\x12\x17??\xa6\rU&\xd5\x0f\xba\xefT\x9eq\x81\x10r\x1c\xdf{\x7f|\xe4\x12l\xdc\xfa'\xc5j\xc07D3\x11\x1e\xbet\x87SN\x14\xb4\xf5\x8c\x14\xe0\xe6Z4pf\xe0\xd3t<\x98.\xfa\x7f\xac\xf3r\xaa\xd7\xc4\xf7)V\xf0\x97^\xf5\xde%*|\\\xab\xae\xe2-\xd7\b=\xfe\xc4\xf6.\x96'\xb3X\xff\xf9\x9f~9\xea\x94de\xbd\x9c4\xb0\xd0v\xaa-\xa5\x99\xa7\xb4\xee8X\xcbG\x03tm\xb7\x97\x8b\x8c\xf4\xd3en\xe75}\xce\xf0&\xebu<\xb1\xd3e\xde泹\x9a\xd7\x1d\xdd\x13\xc5\xe7\a\xe7\xe6\xd8\xdf|\xb5\x88\xaf\xe9!D\xbc\xcd\xc80j\xffs\xd6\xdbl9\x9b\x01Ǒׂz\x0e\xe8\x95\xdc\xcd\xe8:0\xf8\x88\n4o\xcdmߓ\xff\xf2\xbf\x01\x00\x00\xff\xffԋ\n\xaf2z\x00\x00"), + 
[]byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xec}O\x93\x1b+\x92\xf8ݟ\x82\xd0\xef\xe0\x99\x89\x96\xfc\xfc\xdbˆo\x9e\xb6\xbd\xa3x\xef\xd9\x1d\xee~\x9e\xcb^PUJ\xe25\x055@\xa9\xad\xd9\xd8\xef\xbeA\x02\xf5\x97\xaa\xa2d\xf5[φ\xb9\xd8]\x82$\xc9L\x92\xcc$\x81\xf5z\xfd\x82\x96\xec\v(ͤxCh\xc9\xe0\xab\x01a\xffқ\xc7\x7f\xd7\x1b&_\x9d^\xbfxd\"\x7fCn+md\xf1\x19\xb4\xacT\x06\xef`\xcf\x043L\x8a\x17\x05\x18\x9aSC\u07fc \x84\n!\r\xb5\x9f\xb5\xfd\x93\x90L\n\xa3$\xe7\xa0\xd6\a\x10\x9b\xc7j\a\xbb\x8a\xf1\x1c\x14\x02\x0f]\x9f~ڼ\xfe\xff\x9b\x9f^\x10\"h\x01oȎf\x8fU\xa97'\xe0\xa0\xe4\x86\xc9\x17\xba\x84̂<(Y\x95oH\xf3\x83k\xe2\xbbs\xa8\xfe\x15[\xe3\aδ\xf9\xb9\xf5\xf1\x17\xa6\r\xfeP\xf2JQ^\xf7\x84\xdf4\x13\x87\x8aS\x15\xbe\xbe Dg\xb2\x847\xe4\xa3\xed\xa2\xa4\x19\xe4/\b\xf1Xc\x97k\x8f\xf0鵃\x90\x1d\xa1\xa0\x0e\x17Bd\t\xe2\xed\xdd\xf6˿\xddw>\x13\x92\x83\xce\x14+\r\x8e\xdd!F\x98&\x94|\xc1a\x11\xe5\xa9L̑\x1a\xa2\xa0T\xa0A\x18M\xcc\x11HFKS) rO~\xaev\xa0\x04\x18\xd05hB2^i\x03\x8ahC\r\x10j\b%\xa5d\xc2\x10&\x88a\x05\x90?\xbd\xbd\xdb\x12\xb9\xfb\x1d2\xa3\t\x159\xa1ZˌQ\x0399I^\x15\xe0\xda\xfeySC-\x95,A\x19\x16\xe8\xecJKxZ_{\xc3{i)\xe0j\x91\xdcJ\r\xb8ax*B\xee\x89f\xc7c\x8eL7\xc3E9\xea\x00&\xb6\x12\x15\x1e\xf9\r\xb9\ae\xc1\x10}\x94\x15ϭ\xb0\x9d@Y\x82e\xf2 \xd8?kؚ\x18\x89\x9drj\xc0\v@S\x980\xa0\x04\xe5\xe4Dy\x057H\x92\x82\x9e\x89\x02\xdb\v\xa9D\v\x1eV\xd1\x1b\xf2\xabT@\x98\xd8\xcb7\xe4hL\xa9\u07fczu`&L\x9aL\x16E%\x989\xbfB\xf9g\xbb\xcaH\xa5_\xe5p\x02\xfeJ\xb3Ú\xaa\xec\xc8\fd\x96\x91\xafh\xc9ֈ\xba\xc0\x89\xb3)\xf2\xff\x17\x04@\xbf\xec\xe0j\xceV\x18\xb5QL\x1cZ?\xa0\xd4Op\xc0N\x00'_\xae\xa9\x1bECh\xfb\xc9R\xe7\xf3\xfb\xfb\x87\xb6\xec1ݧ>ҽ%\x90\r\v,\xc1\x98\u0603rL\xdc+Y L\x10\xb9\x93>\x14]\xce@\xf4ɯ\xab]\xc1\x8c\xe5\xfb?*\xd0V\xc8\xe5\x86ܢ&!; U\x99[\xc9ܐ\xad 
\xb7\xb4\x00~K5<;\x03,\xa5\xf5\xda\x126\x8d\x05m%د\xec\xa8\xd6\xfa!\xe8\xb2\x11~9\x85p_B֙0\xb6\x15۳\f\xa7\x05\xd9K\xd5\xe8\v\xa7\xae6\x1d\x90\xf1)kK\xa6ٽ\xa0\xa5>J\xf3\xc0\n\x90\x95\xe9\xd7\xe8!t{\xbf\xed5\b\xc8x\xd4P\xadT\x1ar;Ϟ(3\x16\xbd\x01Lb\x01\x91/\xa8a\x02<\xd44\x95&\xa6R\x02g\xe9g\xa0\xf9\xf9A\xfe\xa6\x81\xe4\x15\nk\xa6\x00\x87|Cv\xb0\x97\n\"p\x15\xd8\xf6\xb62(e\t\xa3\x11%Y\x99\ry8\x82%#\xad\xb8\xf1r\xcf4y\xfd\x13)\x98\xa8\fl\x06\xd0F\x18\x8cD\xa1\x86\x16\xf2\x04j\x86^﨡\xbf\xdaz=2\xd9\xf6\x04\x01ؑ\xee<\xc9vg\xfbcdT\x9e\xabd\xbboAd\x9a\xacVD*\xb2rK\xe0\xea\x06A\xdbEլ\x99h\xf5\x11\x81\xf8\xc48\x0f\xfd.\x1b\xb9#\xa0\xe3\x9d~\x90\x1f\xb4\x13\xd29B\x8c4k\xd1\xe5\xe9\b\xe6\b\x8a\x942,>\x11\xbc\xf7\x8c\x03\xd1gm\xa0\xf0T\t*?\x10\x11\xa7\x03\xe7\x1e\x84\xb6D\xf58\x0f\xc7)*\xce\xe9\x8e\xc3\x1bbT5\xecΑa'%\a\xda_~\xfat\xf8\fڰl\x86\n\xab>\x19\\\xab\b\x11\x94\xff\x01\xc7\x16\x93\x88f\x96\x19\xfa\b\x84\x06j\xd8e\x91\xf3\x16\x11;\x14 \xff)\xc8;\xab\xb33\xabI\x87\xd8\x12\xaf\xb3\x19p\\'\x84$\\\x8a\x03(כ]\x0f\x83\xe4(\xb0\xb2\x95\x13\xab*\x15p\xab\xf3ɾ\xb2ZtHgB\xec,\x1e\x95\x01&\xb4\x01\x9aoV\xd7d\x10|\xcdx\x95C~댠{k\xbe\xe5\xc1h\x1dh\xc2\x1e\xa3\xdeO6\xf6+(g\x19\xda^\xde\xccZ\xa3\x85\x18cV\xb3\x90\x9eKpF\xaae\x9cǰY![\xd3\\\x83\xb1UV\x7fY\xddX~F\x80v{\xed\xf6\xa1\tUPS \xae\xf9\" \xa1(\xcdy\xc8=f\xa0\x88\x10lRM$\xb2\x8e*E\xcf#\x8c\xab-\xed\xcbX7ּ\xc7<\x11\xaa\xfd\xc1\xec\xeb\xf7\xbb\x90\x81\x11\x88L\x7f\xaf\f\\\xcc2\x8d\xde\"e²\xca:n\x1dNit\x86\"ñ4\xb3\xb6\"\x13\x0e\x1e\xfa9\rc\xbe\x17\xba,\x95\xe41ѭ%Ƌ\xa4\xf5\x10i\xd4*\xfa\x8e\x89r\x94\xf2q\x8e\x10\x7f\xb3u\x1a_\x83d\x18\x80 
;8\xd2\x13\x93\xca\x0f\xbd\xb1\x03\xe0+d\x95\x89\xceejH\xce\xf6{P\x16Ny\xa4\x1a\xb4s7\xc7\t2n>\x93\x96r\x88\xfe\xd8\x1bG\xc3H+\xa98\xf21ԭ!\xd0_\xd1B\xb1\x88Z\v\x17WΜ\x9dX^Q\x8e\x8b(\x15\x99\x1b\x0f\xad\xf1\x8a-\xc6\x13L\x1e\xe0\xec\x96耹\xe5D\xc7\x1d\x91\x02\xac\tZX\x1flX5\xb6ȸ26\xec\x1d\xb5v\x86t\"\xaa*\x0e\xdaw\xe5\f\xbbF\a܌\x82\xae9\xe2\xfcwNw\xc0\x89\x06\x0e\x99\x91*N\x8e9&\xbb\x92\xa2\xd7F\xa8\x18\xd1p]\x87\xa0\x19\xd8\x04H\x82NՑeGg\xa6Y\tB8$\x97\xa0q\x96Ӳ\xe4\x91\x15\xa0)\x93\x9c\xf7\x9dLM\xf4\xa6\xccL\xf9>\xbc\xd8\xe4oJ\x82nlʌ\x96\xecR\xb6\x16\ab\xe4\xe4\xb0\xffo\x126\xa8\xfd\v\x84v;hz]\xa1\xb5$e\xa0\xd1`B\xcb\xe5\x860\x13\xbe\xceA\xb4NN\xd3\xff\xbf0c\x96K\xfc\xb6\xdf\xf2\xaa\x12?ɕ9\x88\x96+u\xf7\xff\x82L\xc1\xc5\xe2ޯ\x15\xc9\f\xf9\xa5\xddꆰ}͐\xfc\x86\xec\x197\xa0z\x9c\xf9\xa6\xf9r\rb\xa4\xacw\xb6\x14\xd4d\xc7\xf7_\xad奛\xbd\x9cD\xba\xf4\x1b;\xfb5\xd8\xf3݅y\x06.\xc1\xa00SP\xb8`\xf3\x03R\xb3\xf9\x82\x16\xd5ۏ\xefbѬnI\x90\xbc\xc1@\xde\xf6\x90mw\xed\x8d\xf2\xd4axӧ\xf6o\xdc6\xc2\r\xa1\xe4\x11\xce\xceb\xa1\x82X\xe6P\xdbш\xa73$\x0e\xeeg\xa0\x90=\xc2\x19\xc1\xf8\r\x8a\xd9֩\xa2\xe0\xca#\x9cS\xaa\xf5\bhqb\xdao\xbcXJ\xda\x0fH\b\x8cg\xa7\x13\x8f\xe0fS\xd0E\xf3\x83#\xe9\x8a$\x94@\xfb\v\x86Y\xb3\xad\xb5Q\x87\x8c}\xa9\x1d\x8b\xec,8\xb22q\xa0v\x99\xc3P\x82\xdc\xd7\xdbM_(gyݑ\x93\xfb\xad\x18\xb7\x86\xbb\xe5\xa34[qC\xde\x7fe\xda\xef\xf8\xbd\x93\xa0?J\x83_\x9e\x85\x9c\x0e\xf1\v\x88\xe9\x1a\xe2\xf4\x12Nm[:\xb4\xf7\xad\x12\x84ە\xad\xf3\xf0j\xf60M\xb6\xc2\xfa-\x9e\x1e\xb8\v麛^\x1f\xba\xa5\xa84nL\t)\xd6.\xf4\x12\xeb\xc9\x11;\x11\xa4T\x1d\x8e\fQ\xab;\x1d\x89\xf5\xc4˃]I\\{\xb7\xaf\xcai\x06y\xd8W\xc1\xdd@j\xe0\xc02R\x80:L-\x1c\xedRZ\xfd\x9e\x86B\xa2\xd6ue\xa1\x84\xa5-\xed\xa1x\xd5\x1d\r~w\xcb\xda\xce܄Z\x81ٳUG6\x01ǫΏ\b\x97X\xb4?f\xa9K\xf3\x1c\xd34(\xbf[\xa0\xf1\x17\xf0b\xb8\xf6;\xc4\xdc\nYPܜ\xf8/\xbb̡@\xff7))S\ts\xf8-\xa6cp\xe8\xb4\xf5Q\xacv7\xb6\a\xa6\x89\xe5\xef\x89\xf2\xe1\xf6rdp\xd2\xea\x16\xe0n!\x97\xfb\x81\xc5rC\x9e\x8eR\xbb5\x157EfA2MV\x8fp\xf6\x9bqm=\xb0ڊ\x95[\xe0\x17\xab\x9b\xdaZ\x
90\x82\x9f\xc9\nۮ\xbe\xc5\bJ\x94\xc4\xc4j_\u05cfu\xfaɺ\xa0\xe5\xdaK\xaf\x91\x05\xcbF\xdba\xbaL\xaa\x89m}\xd0`A؆u\x8e\x885\x8f\xa7F\x9b$\xbf\xa5ԑ\x9d\xef\x11T\xee\xa46.\"\xd91g\x97D\xbf\x88\x93=\x1f\xf5\"t\xef\xb2t\xa4\n\xf9\x17V]\xf6\x02\xb5\x96\xdbzZ3\xbb=\x03\x1fIs@\xadC\xb6jf\xbe\xd3\xc3+\xb7g\x81\x9d\xd0\f\x8d\x92Y\xb8\xa5\x92\x19\xe8\xe8nqS\x12\xb4\xfcLp\xb1\x0e,R\xe7\xf8\xb8\xe4\x86\xe9`f(醬%\xd2B\x17\xe0\xfd\xd7V\xd4\xd3*\r\xfb\xf7\x9c\xf0-ŋ\xe0\\/\n\xda\xcf\xe2IB\xf1ֵ\f\xd3\xc4\x03r.\x85:T\xa8\"\xd2-O/H\xdf\xc3\xf2^0\xb1\xc5\x0e\xc8뫛\x03\xb5r\x8d\xe5r\xc4J\x8f\xe4\xbemC\xf4\xfa\x83\x18I戕Rb\xc4_A\x87s\xc3\xf8\xb850\x13A\ni\xdaa\b\v\xb7\x94\xf9KM\xf6Li\xd3F4U(\xe2\xb9\"\xb1\xb2\xd4\xe3\x12\uf57a\xc8\xe1\xfa\xe4Z\xb6\x02`G\xf9\x14r\xa1F\x93'b\x057\x93\x80\xb0=a\x86\x80\xc8d%0lc\xa7:v\xe1X\xe0\x14t2\xc9\xd2\x14\x84- \xaa\"\x8d\x00k\x94:&&\xe3;\xed\xea\x1f(\x8b\xed@\x0f\xcbB\xb6\x99\xb1\x94\xb1X\xe9\xb0-䎵\x93\xda\n\xfa\x95\x15UAhaI\x9f\xea.\xed]\xc6Y\x87\xe3u\xde\x19\xc2\xc5e\xc4H;\xa9J\x0e&uF\xba\f3;M4ˡ^\x98\xbd\x14HA(\xd9S\xc6G\xd2]\x86e\x11m\x97\xf8(^Y\\\xcf\xf9H\xeb|\x8d\xa4H\b\xe0&\x1a\x99\xd3ںT\xe9\xa6❂4\xf3l.\x98\x1d̳R1\x89yzW\xb6м\x88Qq\xfea\xa2\r\xca\x0f\x13m\xa6\xfc0\xd1F\xcb\x0f\x13m\xbe\xfc0\xd1|\xf9a\xa2\x85\xf2\xc3D\xfba\xa2MU\x9b\xd2\xd6s\x18\xb9\xd3q#?\xceb\x91\xb0\xad=\x85\xe2\x04|\x9f\x85\xe1\xf3\xbcS33\xb7\xf1V\x91<\xfe\xe4\xdcp\xddZJ\xeaTM;A\x82x\xbb\xc3>3ɛߐ/\x1f:\xbd(_~;\xd9\xf8J\xf9\xf2\x1eþ\xd5}\xa5l\xf90\xfee\xd9\xf27>U\xa3\x00\x1a\xc2\xf3n/>\x1f\xeb\xb2\xd7\xdb\x00\xf0\x1f\x9c\x7f;\xc8\x0f\xbb\x8c\xf1Ϟm?\xc2\xfc\xc4\xc4\xf8\xd5_V\xdf\x1f\xa5\x17\xd3v\x94\x9a\x032E\x06\x15\x8e\xf9Z\xbf\xb2\x9d\xdc\xd5M\xa4\xfb>\x85s\xa94\xa6f\xccO\xd1k\xa8eZ\x04\xfb^'\xb3\x81\xe2S\xe9\u05ca\xb4\x13\x9d\xdbH\x93\xb93\x9d\x91\xf1`\x10@\x9fEvTR\xc8J\xfb\xb8\x81\x85\xfe\x16\xc3\x17~+\x14\xd3\xc0\x12\x15\xeckr\x94U$c{\x82v3\xf9{\xe3Y{~\x8f\x1a\f=\xbd\xdet\x7f1\xd2\xe7\xf0\x91'f\x8e\x11<\x9f\x8e 
pw]\x1c\xda\t\xf9a\xc2\xf9\x03\xe7}A\"R\x11\xc1\xf8\u0602U\x9f\xca\xef,M\x9fJ\x17$Z\xbc\xf2O\a8Ҳ\xfc.\xce\xed\xeb\xe6\xee\x8d\x18\x81K7\xb3ӏ0\xa4g\xefM\xa7\xdb-\xc9\xd9\xebg\xe4\x8d\x02\x9d\xcf\xd4K\x89M\xcdd\xe5]\x90\x8b\x97\x98\x87\xfd\xcd[\xef)\xd9v\x17\xe5\xd8ͦ*'f\xd6us\xe6\xa6A.ȧK\"\xce|\xee\xdc\xe2\x8c9\x9f\xa169\x8e\xe4<\xb9H\x06\xdc$\xe0\xd1츩\xbc\xb7\x99\xb8\xf70'.=\xdbm\x124f\xc2\xcd\xe7\xb8]/\x93\xfd\x1a^\xf6\xb8\xaa\x99\xcdS\x9b\xf5§\xf1\x9b\xcdD[\x92\x7f6K\xb1\vs\xcd\xea\\\xb2\x91~\x97f\x98u3\xc8F\x80\xa6䕍䍍@\x9c\xcc&K\xcd\x16\x1b\x81=\xb3\xecNJ\xc9\xe4\x8fK\xb2\xc4◨\x90\xd9Ր\xffQ\xf2w)\x19\xa4\xea\x18\x97s\x0eͧ^u\xcb\xf9`cM\x1b\xab1;\x95\x99\xe3rc\xb5\xa8\xb8a%\xc7\xed\xc5\x13ˣ>\xbb9¹\xbe\x18\xe2w\x89\xc75\xdde&\xe4\xd3\xe7Z\x987=\x93\x9bj\xf2\x04\x9c\x13\x1a\x13\xc5\xc1\xc83w\x0fP&\xd7`W\b;=\xfd\x95\x17\xfe\xba\xa0\x1b'\xefx\"5\xb6\x03c\x8ePX(\xe3מ\x8c\xaa\xf2is\xd2Y\xbe\xf8\xed\x1f\x15\xa83\xc1\xebYj\xfbb\xe6<\x94\x9b\x96\xda\xfaBAQxm\xe3n\x9f\xea\x99\xd9\xcd\xf4$o\x85[\xf0\xa2`{8\"\x1c\xab!x\xcdk\xab\f\xad\xd70R5\x1e\x88\x95u\xeb\xc8\xefs\x96j\xeaa\xa2\xe7u4\x96\xbb\x1a\xb3\x8b\xfc\xb3\xb8\x1b\x97;\x1c\x13 S\x0f\a\xa5m\x88\xcf\x1e\x06z.\xc7c\xce\xf5H\xb6\xb9\xd2\x0e\xfb<\xc7!\x9f\x05\x87{\x16\xb8 
˜\x90d2\xa5\x1c\xe2y\x16W\xe4\x19\x9d\x91\xe7pG.sHf@\xf6\x0e\xe7\xa4\x1c\xbbIJ\xf6H\xde\xefLI֘ߒ\x9c>N\x93p\x8c&a\xb3r\x0eӄ\xe32ˎ\xc9$\xd0\xf0\x99\\\x95grV\x9e\xc3]y^\x87e\xd6e\x99\x95\x9c\x99\x9f\x97\x1do\xb98x/U\x0ejr\xaf#U4'\x85\xb2\xe7_t\xfb\xecE\xfeÝr\xb6Vǔ\x8d\x05\xac\xebS\xef\x19\xf9\x99\t\xbf\x8fj\x85\xb0\xb5\xeew6`\x1aC$\x1e\xffo\xac<\x7fۨ۵\xd1PR\x85;\xac\xbb\xb3K\xad\xd0\x1b\xf2\x9ef\xc7\x1e\xf4cԯ\xd8KUPCV\xf5\x96\xd7+\a\xdc\xfe\xbd\xda\x10\xf2A֛\xf6\xed\x9bd4+J~\xb6~C\x04\xe6\xaa\r\xe22\x81\x88\n_\xe8\xffNr\x96E,\xad\xe8\xe5B\xae\xf2\xe0J\b\xbc\xf2(ko}\x97\xb6b\xdc\xd0B\xa3\xac{\xbd\xe2^r.\x9f\x16\xba\xe3\xb4d\xff\x81\xb74\xcf\xc7p\xde\xdem\xb1j\x90\x14\xbcݹ\xce\x10\xaa\x91ށ]1\x9b\xe1\x8c\xcd\xf8\xed\xbe\x031\x92iW\xff\x89\xd2Z\xaf\xd8l\xec\xd6%\x97\xf5g5\xcd\xdd\xd6a\xb7Aa\xa1\xe2L$\xe6z\x98#S\xf9\xba\xa4ʜ]^\xc1M\x8d\xc3x\x1c'\xac\x9bSі\xd1\xe5ex\xddo\x94\xb6\xe1\xd6_\xdc\xcc;\x97ݭ\xd0>E/\xc1c\xfc(\xdf\xec!\xbe+\xe21n\x82\xac\x91R\x91\xcfѤ\xa4\xabE\xb1\xb4\xbf\xda\xf6Wy\x82w\xd1hV\x87<\xf7\xbd\xea\x91t\xa2\x00\xd1]\xee:u;(^\xcey\x99.\x8a\xe7\a\x85\xae\xfd\xf5\x9d\x89c\xf1\xb5#C\t7\x97\x06\xb8:\x1e\xb5\xb1\xd3\xeb\xee\v\xbaV\xb5\n\xf3Ǝw\x9eB\xe8\xaa\x7f\xc1\xdd_\xaf\x9f#\xa5\x8dT\xf4\x00\xbfHw\xf5\xf2\x1c\r\xba\xb5;\xf7n{\x93'\xe4,\x86\xd9\x10s\x05\xfc%\xd0=`M*\xf2\xe0\x1a\\\x8b\xe5\xc2[}\x8d\xe13\x83yx\xf8\xc5\r\xc0\xb0\x026\xef*\xb7\x97o\xb5\x9d\x06K\xcd00\xd7hg\xff{\x8c\xac\x17\x04\xef\x93m\U00067177\x02Lwƴ\xb7E\xd8W%\x974\au+Ş\x1df\x06\xf2[\xa7ro\x9d\xcc\xf0\xa3\x1f\\\xbd\xfa\x04\xf8K\xef\x06\xb4v\n\xe7\xc0?0\x0e\xda\xf5\x9b\xa0%\uf1adj\xa5Y\x15;g\x89\xed\xed\x8fu\a#ˋ\xc3\x1b\x83\xba%(k\xf9\xb8\xf0o\xa5\x83܍\x8f\xac!9\x13\x06\x0e\x91\x80\xf2\x84\x9aP\xc6/\xa2\x83k8B\x047\xb6х-\x89\xcd>\xcb\x16D\x1e&\xef`m\xb6\x05\x8f\x81,\xa3\x83g\x81O~ӆ\x16sW\xd9\xdf\x0e[\xe0;#*o\xa5\xcb\xd5\xf7\xb1?Qݰ9\xa6f\x1bp\xae%\xfa\x04\x16\x1a\xe4\x04N 
\x88]\x88\x1c}\xc3[8\xfd6\x11\xa8m(\xfe\x9c\x84\xd3\xf5A\xf3\a\xd7\u07bf\x9f\xf2\x80\xf6\x94:\x81z\xa9'`\xd67\xecG\x880\x94L\xe7k\xbf\xb1\xc6*\xac\xa3@\x93l\xb1\xa8\xae\xcd4\xeb\xea\xf9d\xa5u{\xbf\x1dk9*\xc1\xa1B\x8c\x7f\x83\x97,\xbeQI\rG\x96\xaa\xa2\x86#\x9bSP\x1du\x14\x19\\\xa3\xa0\xae>L\x9c\xab\xb3W`c%g\x01\xe0y\xaf\xf0\xbe\x81;\xf0U\x80\xd6\xf4\x10\xee\xbe~\xb2\x16\xf1\x01\x04`t(2\x1a\x1foo\x0e\buo~v\x1b\x8343\x15\xf5\x1d\x84\xf4\xcbV\xad\x971\x05\xcc\xe5\xc1\xbdL\xc1\u008bD\xc1UXH\x93\xaf%S)\xae\xc5\xfb\xba\xa2\xa5\r\x1auȈ\xe6\x05)\xe0\xec\xc0\xac]n\x99t\xa0jG\x0f\xb0\xce$瀺v\x88\xd7sNV\x7f\f\xeb3P=;\xb4\x0f\xed\xba~\x03\xc9q\xdb]\x9cH]&2>(d\x98\x82慮\x01B\x12;^\xe4J8*D߲\x1abڮ\x1b&\x98\u05eb>\xcc蟶\xba\xf1\xcei\xbfڠK\xb3\xc7QL}jLx\x84\xf5\x95\x7f\xc8a\xbdW\xb2X{^`\xb6\u05cd\xdf\xeeULZk\xc2\x1c\xa3$'\xee\x8d7\x7fc:\x8aAY\x82 T{|\x12.\xba\xb9x\xf5І*\x93\x1a\x06\xb8\xefT\x9e\x89\x00 \xe48\xbe\xf7~;\xdb]\xf8s\xeb\x9f8\xac\x01\xdf\x10\xcdDx\xff\xd5m\x96;Q\xd0D\n|&N\xaax\"\xdf\xc0\xa5\xef8\xf0]\xf4\xffX\xdf\xfdT\xaf\x89\xefS\x9c\xc0/\xbd\xea\xbdC\x9d\xf8\xd8_]\xc5;n\x11z\xfc\x89\xed]naf\xb1\xfe\xf3\xff\xfaa\xcdS\x92\x93\xf1rҿ@סv\x14f\x9e\xf6\xbb\xe3`-\x1f\r\xd0u]^.\xf2QO\x97E]\xae\x19r\tO\x13_'\x10q\xba,\xd8\xf2l\x91\x96\xeb\x8e\xee\x89\xe2s\xa8ss\xec\xef\xbeZ$\xd4\xe2!D\x82-\x91a\xd4\xe1\x97\xd9`K+\xd6\x12p\x1cy\xbd\xac\x17\x7f\xb9R\xb4%\xba\x0e\f>\xa2\x02\xcd[s\xdb\xf7\xe4\xbf\xfcO\x00\x00\x00\xff\xff8\xa76\x179}\x00\x00"), 
[]byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xc4YKs\xe3\xb8\x11\xbe\xebWt\xed\x1e|YQ3\x9bKJ\x97\x94FN\xaa\xa6\xe2\x89]#ǹ\xe4\xb0\x10Д\xb0\x06\x01\x06\x0fi\x94T\xfe{\xaa\x01\xf0!\x92\xb2\xe4\xa9$\x8b\x8bM\xb2\xd1\xe8\xfe\xfa\r\xcd\xe7\xf3\x19\xab\xe5\vZ'\x8d^\x02\xab%~\xf3\xa8\xe9\xc9\x15\xaf\xbfw\x854\x8b\xc3\xc7٫\xd4b\t\xeb༩\xbe\xa23\xc1r\xbc\xc7Rj\xe9\xa5ѳ\n=\x13̳\xe5\f\x80im<\xa3\u05ce\x1e\x01\xb8\xd1\xde\x1a\xa5\xd0\xcew\xa8\x8bװ\xc5m\x90J\xa0\x8d̛\xa3\x0f\x1f\x8a\x8f?\x17\x1ff\x00\x9aU\xb8\x84-㯡v\xdeX\xb6CexbY\x1cP\xa15\x8543W#\xa7\x13vքz\t݇\xc4!\x9f\x9e$\xff\x14\x99m\x12\xb3\x87\xcc,~W\xd2\xf9?_\xa6y\x90\xceG\xbaZ\x05\xcb\xd4%\xb1\"\x89\xdb\x1b\xeb\xff\xd2\x1d=\x87\xadS\xe9\x8bԻ\xa0\x98\xbd\xb0}\x06ษq\tqw\xcd8\x8a\x19@\x86&r\x9b\x03\x13\"\x82\xcdԓ\x95ڣ]\x1b\x15*ݞ%\xd0q+k\x1f\xc1L\xba@V\x06\x1am\xc0y\xe6\x83\x03\x17\xf8\x1e\x98\x83ՁIŶ\n\x17\x7fլ\xf9?\xf2\x03\xf8\xd5\x19\xfd\xc4\xfc~\tE\xdaU\xd4{暯\xc9FO\xbd7\xfeD\n8o\xa5\xdeM\x89\xf4\xc0\x9c\x7faJ\x8a(ɳ\xac\x10\xa4\x03\xbfGP\xccy\xf0\xf4\x82\x9e\x12B@\x10!4\b\xc1\x91\xb9|\x0e\xc0!q\x89\x18MK\xaaFg\x9d\x89M\xa2\xc0ˀK\x92\x9f\xded\xe9{l\x1b\xff.\xb8Ŗ\xa5\xf3\xac\xaa\xcf\xf8\xaevx\x89\xd9\x19\x14\xf7X\xb2\xa0|_U\xb2\x92\xea\xfb\xe5\xb9Z5\xf2B\xa4]g'ޟ\xbdK\xa7n\x8dQ\xc8\x12\x97Du\xf8\x98\xbc\x90\xef\xb1b\xcbLljԫ\xa7\xcf/\xbfۜ\xbd\x86)G\x1a\x04\x05\x19\x8e\xf5l\xb3G\x8b\xf0\x12\xe3/\xd9\xcde\xd5Z\x9e\x00f\xfb+r\xdf\x19\xb1\xb6\xa6F\xebe\x13,i\xf5rQ\xef\xed@\xa6;\x12;Q\x81\xa0$\x84ɏr\xbc\xa0Ț\x82)\xc1\xef\xa5\x03\x8b\xb5E\x87\xda\xf7\xe1m\x05+\x81\xe9,^\x01\x1b\xb4Ćb9(A\xb9\xeb\x80փEnvZ\xfe\xb3\xe5\xed\xc0\x9b\xec\xbc\x1e\x9d\x1f\xf0\x8c\xf1\xa9\x99\"W\r\xf8\x130-\xa0b'\xb0H\xa7@\xd0=~\x91\xc4\x15\xf0\x85\xfc]\xea\xd2,a\xef}햋\xc5N\xfa&\asSUAK\x7fZ\xc4t*\xb7\xc1\x1b\xeb\x16\x02\x0f\xa8\x16N\xee\xe6\xcc\xf2\xbd\xf4\xc8}\xb0\xb8`\xb5\x9cG\xd1uJ\x9a\x95\xf8\xd1\xe6\xac\xed\xee\xced\x1dEmZ1k\xbea\x01ʘ\xc9\v\xd2֤E\a4\xbd\"t\xbe\xfeq\xf3\f\xcd\xd1\xd1\x18C\xf4#\xee\xddFי\x80\x00\x93\xbaD\x9b\x8cXZSE\x9e\xa8Em\xa4\xf6\xf1\x
81+\x89z\b\xbf\v\xdbJz\xb2\xfb?\x02:O\xb6*`\x1d\v\x13l\x11B\x1d㾀\xcf\x1a֬B\xb5f\x0e\xff\xe7\x06 \xa4ݜ\x80\xbd\xcd\x04\xfd\x9a:$N\xa8\xf5>4\xb5\xf0\x82\xbd&\xa3xS#?\x8b\x1f\x81NZ\xf2p\xcf<Ƹ\x18\xe0\x9aC\xfcr1m\xd6tp\xd3b\x9c\xa3s_\x8c\xc0ᗁȫ\x96\xf0L\xc6\x1am%],\x8bP\x1a;\xac\x18\xac\xcd\xc0\xfd\xd5d\xaab\xf4\ru\xa8Ƃ\xcc\xe1+2\xf1\xa8\xd5\xe9§\xbfY\xe9\xc7\a]0$\xad$\xe2\xe6\xa4\xf9\x13Zi\xc4\x15\xe5?\r\xc8[\b\xf6\xe6\betk\xedՉr\x90;i>ζ\xcdZ=}n2o\n\xa0\x1co\x19\xab\x02V9rM\t\x1f@HG\r\x80\x8bL\xc7`\xe9\xa0b\x83\xb0\x04oû\xd4\xe7F\x97r7V\xba\xdf\xd3\\\xf2\x98+\xac\aȭ\xe3I\x94\x9a\xc8;jk\x0eR\xa0\x9dS|\xc8R\xf2,I\xb0\xa9r\x95\x12\x95pcM/DYTŢ\xa0\xa8f\xea\x8a\r\xd7-a쀙\xd4Ƀ;\x061\xd9\xd8*\x97T\xedQ\x8b\xb6\x1b9\x93\xc6Ĭ\xe5P\xc0Q\xfa}J\x87j*\xee\xe0\xcdأ\xf5\x8a\xa7\xa9\xd7\x03ٟ\xf7H\x94\xa9\x80\"8\xe4\x16}\xf46T\xe4>\xe4J\x05\xc0\x97\xe0bB\x1d\xe6\x89f\xc5F\xad\xd9\xfd\x8a\xa71\xd0p\u0378\xb9\x85\xb9.\xf2\x1d\xb5\u038d\xc0\x16K\xb4\xa8\xfddR\xa7\x01\xc4j\xf4\x18\xf3\xba0\xdcQJ\xe7X{\xb70\a\xb4\a\x89\xc7\xc5\xd1\xd8W\xa9ws\x02|\x9e#h\x11NJŏ\xf1\xcf\x05\x95\x9f\x1f\xef\x1f\x97\xb0\x12\x02\x8cߣ%\xab\x95A5\x8e\xd6\xebo~\x8a5\xf6'\bR\xfc\xe1\xee{p1u\x8a\x9c\x1b\xb0\xd9D\xef?Q\xa3\x16\x85\"\x886\xc9*\xc6\x02UJ2v\x95\xad\x99r͔#Nu\x98\xfdE\x89\x89*\xc8TF}\xc5q2}#\xcc\x00\xbe\xcd;C\xcd+V\xcf\x135\xf3\xa6\x92|6\xd46\xb6\xc1W\"\xb2i\xbb\xa5\x16\x92S\xdbv\x1eI\xcd8\"κ\xf3\t\x18\x86\xfd\xfa\xa5\xfc1\rSR7W\xcf+\x12?\xf6i\xbb!.%\xb3\\\x11\x1dzj\xb7\x1ch\xa4\x8a\xc9\xec\x18\xe7\x98B\xb8њb\xd7\x1b`mb\xbcsÊ\xf0\xce|\xb2\r\xfc\x15'\x80\x1f\xa9\xf2)\x126\x18\xa7m$Kp\x18S\xf551\xe0zDp\xb6F{\x8b,\xeb\x15\x11\xb6E\x95\xc1z\x05۠\x85\xc2F\xa2\xe3\x1e5\xcd\x13\xb2\x14?\xfcf3\x93b\xce\xd3\b\x84\xe2+\x1e\xe4\xf8Nh\x8c\xee\xc3hG\x13\xf8m8\xd0\xc3/\xcdh\xbd\xb0\x99\xec\x97\t0J\xa9\xa8s\x9c\xc8\x13]\xc70\xbe\xbd\xfc\xb4y\xb8s\xb1\xe1G\xed\xa7\x9a\xc4#Z\x8c\xf3\x15\n\xea\xf9M\xbe\xc5\bΣ\x9dp\x80\xd6z\xd1栌\xde\r\x02'\xad|\xa7A\xfd\\r(cA\xa0\xa7Ҥw\xc0\xf7Lﰻ\xb3\xca\xf2\xbf-)\xb9\xcf\xc0g:\x0f\x91\xfa\x92{\
xdcd\xd1g9\xd5ԏ\xee\x8b;\xe2\xe9\xbb\xe2F\xfaƲ\x17\x87\xa2+\xb8\x8f\xe8\x9b*M\xa0\xce}w\x7fܭ\xef\x1f\x86Ǘ\xd37 \xf1ޛ\xf37nA\xe0\xc8\\w\x87\xfe\xdb\xe1PQ\xb7z\xb5\x05\xfe\x92\xa8\xd2ec\xde\x02lk\x82\x7f+2\xef\xa6\x1c:\xff8\xf0\x1e\x19\xe3O\x1eך\f\xa2i,\u0083\xa5\xc1\xb3\xbbC\x8bIa\xaa\xb6\xdc~\x19\xb5\x1a\xfc2\xd3\xff6\xfe\xdd\xe6\x06\xbd&k\xed\xe8e\xaa\x97=\xbbf\x90\xfbo¶\xbdW^¿\xfe=\xfbO\x00\x00\x00\xff\xff\x80.\x12\xd3P\x1c\x00\x00"), []byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xb4\x96M\x93\xdb6\x0f\xc7\xef\xfe\x14\x98y\x0e\xb9<\x92\xb3\xed\xa5\xa3[\xb3\xc9a\xa7mƳ\x9bɝ&a\x8bY\x8ad\x01\xd0[\xb7\xd3\xef\xde!)\xf9E\xb67\xdbCy\x13\t\x02\x7f\xfe@\x80j\x9af\xa1\xa2\xfd\x8a\xc46\xf8\x0eT\xb4\xf8\x87\xa0\xcf_\xdc>\xffĭ\r\xcb\xdd\xdd\xe2\xd9z\xd3\xc1}b\t\xc3#rH\xa4\xf1#n\xac\xb7b\x83_\f(\xca(Q\xdd\x02@y\x1fD\xe5iΟ\x00:x\xa1\xe0\x1cR\xb3E\xdf>\xa75\xae\x93u\x06\xa98\x9fB\xef\u07b7w?\xb4\xef\x17\x00^\r\u0601A\x87\x82k\xa5\x9fS$\xfc=!\v\xb7;tH\xa1\xb5a\xc1\x11u\xf6\xbf\xa5\x90b\aDž\xba\x7f\x8c]u\x7f,\xae>\x14W\x8f\xd5UYu\x96\xe5\x97[\x16\xbf\xda\xd1*\xbaD\xca]\x17T\f\xd8\xfamr\x8a\xae\x9a,\x00X\x87\x88\x1d|β\xa2\xd2h\x16\x00㱋\xcc\x06\x941\x05\xa4r+\xb2^\x90\xee\x83K\xc3\x04\xb0\x01\x83\xac\xc9F)\xa0\xbe\xf4X\x8e\ba\x03\xd2#\xd4p 
\x01\xd68*0e\x1f\xc07\x0e~\xa5\xa4\xef\xa0ͼ\xdaj\x9a\x85\x8c\x06\x15\xf5\x87\xf9\xb4\xec\xb3`\x16\xb2~{K\x02\x8b\x92ē\x88\x12\xd7\x06\x0ft\xc2\xf7\\@\xb1oc\xaf\xf8<\xfaSY\xb8\x15\xb9\xda\xec\xee*i\xdd㠺\xd16D\xf4?\xaf\x1e\xbe\xfe\xf8t6\r\xe7Z\xaf\xa4\x16,\x83\x9a\x94fp\x95\x1a\x04\x8f\x10\b\x86@\x13Un\x0fN#\x85\x88$v\xbaZu\x9c\x14\xcf\xc9\xecL»\xac\xb2Z\x81\xc9U\x83\\\xa0\x8d\x97\x00\xcdx\xb0\n\xd32\x10FBF_\xeb\xe8\xcc1d#\xe5!\xac\xbf\xa1\x96\x16\x9e\x90\xb2\x1b\xe0>$gr\xb1\xed\x90\x04\bu\xd8z\xfb\xe7\xc17\xe7s\xe6\xa0N\xc91?\xd3(\x97\xce+\a;\xe5\x12\xfe\x1f\x9470\xa8=\x10\xe6(\x90\xfc\x89\xbfb\xc2-\xfc\x961Y\xbf\t\x1d\xf4\"\x91\xbb\xe5rkej\x1a:\fC\xf2V\xf6\xcbR\xffv\x9d$\x10/\r\xee\xd0-\xd9n\x1bE\xba\xb7\x82Z\x12\xe1RE\xdb\x14\xe9\xbe4\x8ev0\xff\xa3\xb1\xcd\xf0\xbb3\xad\x17\x17\xa4\x8eR\xe8\xafd \x97yM{\xddZOq\x04\x9d\xa72\x9d\xc7OO_`\n]\x921\xa7_\xb8\x1f7\xf21\x05\x19\x98\xf5\x1b\xa4\x9a\xc4\r\x85\xa1\xf8Dob\xb0^ʇv\x16\xfd\x1c?\xa7\xf5`\x85\xa7+\x99s\xd5\xc2}餹\xa8S4Jд\xf0\xe0\xe1^\r\xe8\xee\x15\xe3\x7f\x9e\x80L\x9a\x9b\f\xf6m)8}\x04\xe6ƕ\xda\xc9\xc2Ծo\xe4\xebJ\xd1>E\xd49\x83\x19b\xdem7V\x97\xf2\x80M x\xe9\xad\ue9e2\x9d\xd1=\x14x{\xb6p\xbd\xa0\xf38\xb6\xc9\xf9\xca\xcd\xc3Cɝ%\x9c\xdd\xc2\x06.z\xee\xeb\\J3\xfc\x97dj'\x1e\xd9\xe8D\x84^N\xfa\xb3\xba\xb6\xe9\xad,\x90(\xd0\xc5\xecLԧbT^ze=\x83\xf2\xfbq#H\xaf\x04^\x90r\x19\xe8\x90r\x9fA\x03&]\xf0\x1b\xb1\x9c\xbe%\x91\x82F\xe6\xf6\xc2\xce\n\x0eW4\xbd\x92\x9d<|rN\xad\x1dv \x94\xf0Ff\x15\x91\xda\xcf\xd6ʛ\xf5\x1d\x04\xabls-\a\x87w\xfa\xbbI(\xb8}\x1a.#5\xf0\x19_\xae\xcc>\xf8\x15\x85-!ϯ|^\\Uz\x87\x9f\x817P\xbaz)/&9\xf7;sB\x91%\x90ڞr\xe5\xb4>\xf4\xef\x0e\xfe\xfa{\xf1O\x00\x00\x00\xff\xff\x045\f\xc6i\n\x00\x00"), 
[]byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xb4VM\x93\xdb6\f\xbd\xfbW`\xa6\x87\xb43\x91\x9c\xb4\x97\x8eo\xad\x93\xc3N\xd24\xb3N\xf7NS\xb0\xc4.E\xb2\x04\xe8\xcd\xf6\xd7w@J\xfe\x94\xbd\xdeCu\x13\t\x82\x8f\x0f\x0f\x8f\xac\xaaj\xa6\x82y\xc0Hƻ\x05\xa8`\xf0;\xa3\x93?\xaa\x1f\x7f\xa5\xda\xf8\xf9\xf6\xfd\xecѸf\x01\xcbD\xec\xfb{$\x9f\xa2\xc6\x0f\xb81ΰ\xf1n\xd6#\xabF\xb1Z\xcc\x00\x94s\x9e\x95\f\x93\xfc\x02h\xef8zk1V-\xba\xfa1\xadq\x9d\x8cm0\xe6\xe4\xe3\xd6\xdbw\xf5\xfb\x9f\xebw3\x00\xa7z\\@㟜\xf5\xaa\x89\xf8OBb\xaa\xb7h1\xfa\xda\xf8\x19\x05Ԓ\xbb\x8d>\x85\x05\xec'\xca\xdaa߂\xf9Ð澤\xc93\xd6\x10\x7f\x9a\x9a\xfdl\x86\x88`ST\xf6\x1cD\x9e$\xe3\xdadU<\x9b\x9e\x01\x90\xf6\x01\x17\xf0E`\x04\xa5\xb1\x99\x01\fG̰\xaa\xe1t\xdb\xf7%\x95\xee\xb0W\x05/\x80\x0f\xe8~\xfbz\xf7\xf0\xcb\xeah\x18\xa0A\xd2\xd1\x04\xceD\x9d`\x06C\xa0`@\x00\xecw\xa0@9P\x91\xcdFi\x86M\xf4=\xac\x95~La\x97\x15\xc0\xaf\xffF\xcd@\xec\xa3j\xf1-P\xd2\x1d(\xc9WB\xc1\xfa\x166\xc6b\xbd[\x14\xa2\x0f\x18ٌ,\x97\xef@C\a\xa3'\xc0\xdf\xc8\xd9J\x144\"\x1e$\xe0\x0eG~\xb0\x19\xe8\x00\xbf\x01\xee\fA\xc4\x10\x91\xd0\x159\x1d%\x06\tRn8A\r+\x8c\x92\x06\xa8\xf3\xc96\xa2\xb9-F\x86\x88ڷ\xce\xfc\xbb\xcbM\u0090lj\x15\x8fr\xd8\x7f\xc61F\xa7,l\x95M\xf8\x16\x94k\xa0W\xcf\x101\xf3\x94\xdcA\xbe\x1cB5\xfc\xe1#\x82q\x1b\xbf\x80\x8e9\xd0b>o\r\x8f\xbd\xa3}\xdf'g\xf8y\x9e\xdb\xc0\xac\x13\xfbH\xf3\x06\xb7h\xe7d\xdaJE\xdd\x19F\xcd)\xe2\\\x05Se\xe8.\xf7O\xdd7?ġ\xdb\xe8\xcd\x11V~\x16\x99\x11G\xe3ڃ\x89\xac\xf9+\x15\x10\xd5\x17\xc1\x94\xa5\xe5\x14{\xa2eHع\xff\xb8\xfa\x06\xe3ֹ\x18\xa7\xec\x17\xe5\xec\x16Ҿ\x04B\x98q\x1b\x8c\xa5\x88Yy\x92\x13]\x13\xbcq\x9c\x7f\xb45\xe8N駴\xee\r\xd3(f\xa9U\r\xcbl(\xb0FH\xa1Q\x8cM\rw\x0e\x96\xaaG\xbbT\x84\xff{\x01\x84i\xaa\x84\xd8\xdbJp腧\xc1\x85\xb5\x83\x89\xd1\xc9.\xd4\xeb\xa4\xd5W\x01\xb5TO\b\x94\x95fctn\r\xd8\xf8\bj\xdf\xf9\x03\x81\xf5Q\xe6\xe9\xce\xcd\xe0Tl\x91OGO\xb0|\xcbA\xb2\xfdS\xa7\x8e\x8d\xe6G\xac\xdbZ\xbc\x82\x06 
\xc5=~\xaa\xcf2^\xc6\x00\x93\xea\x9dD2\x8aXh\x10^\xc5\nĤ\x0e1\x9do-\x1f\xba\xd4OoP\xc1\xef\x19\xf3g\xdf^\x9d_z\xc7\"\xf7\xabA\x0fަ\x1eWN\x05\xea\xfc\v\xb1w\x8c\xfd\x9f\x01c\xb91\xaf\x86\x8e\x17\xef\ue5ba\x12\x98\xec\xc5}\xefQ\xfc\x1e/\x9ft\b\xb8)\xcb\r\x98\x86ț\x0e\xba\\ݽ\x86\xc2\v\xe1\xaf(ҝ\xdb\xf8\xe9\xb8\v\xed=~\xf9\x1a\x7fY\xab\xf2\x10\x18\xb5*K\xca݆\xf0)\xad1:d\xa4\xbd\xcd>\x19\xee&3\x02}S\xe6\x84H\xf9\x01\xa4\xd5\xe9\xd3K\xbe5B\x83\x16\x19\x1bX?\x97\x1b\xe9\x99\x18\xfbs\xdc\x1b\x1f{\xc5\v\x90˻b3!#\x97\xacUk\x8b\v\xe0\x98.\xa9l\xf2\xe0\xa1S4цGg\xfe*1S\xc2\xd85\xe3Ue\xc0\xc5{\xa3\x82/\xf841\xfa5z\x8dDx\xdeF\x17O2\xd9\x04g\x83$/\xac急\xe1\xe1>\x8c\xfc\x17\x00\x00\xff\xff\t\x15i;\xcd\r\x00\x00"), []byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xc4YKs\x1b7\xf2\xbf\xebSt9\a\xfdSe\x0ec\xff\xb7\xb6\xb6x\xb3\xe5͖v\x13Yeʾ\xa4r\x00\a=3\x88f\x00\x04\xc0P\xe2\xa6\xf2ݷ\x1a\x0fr\x1e )\xa9\xd6\u07b9H\x04\x1a\x8d\x1f\x1a\xfd\xc6b\xb1\xb8`Z|Ac\x85\x92+`Z\xe0\xa3CI\xbflq\xff7[\b\xb5ܾ\xb9\xb8\x17\x92\xafષNu\x9fЪޔ\xf8\x01+!\x85\x13J^t\xe8\x18g\x8e\xad.\x00\x98\x94\xca1\x1a\xb6\xf4\x13\xa0T\xd2\x19նh\x165\xca\xe2\xbe\xdf\xe0\xa6\x17-G㙧\xad\xb7?\x14o\xde\x16?\\\x00H\xd6\xe1\n\xb4\xe2[\xd5\xf6\x1dnXy\xdfk[l\xb1E\xa3\n\xa1.\xacƒx\xd7F\xf5z\x05\x87\x89\xb06\xee\x1b0\xdf*\xfeųy\xef\xd9\xf8\x99VX\xf7\xaf\xdc\xecO\xc2:O\xa1\xdbްv\x0e\xc2OZ!\xeb\xbeef6}\x01`K\xa5q\x057\x04C\xb3\x12\xf9\x05@<\xa2\x87\xb5\x00ƹ\x17\x1ako\x8d\x90\x0e\xcd\x15qH\xc2Z\x00G[\x1a\xa1\x9d\x17ʭ\xe2\x10\x00B@\b\xd61\xd7[\xb0}\xd9\x00\xb3p\x83\x0f\xcbkykTm\xd0\x06x\x00\xbfY%o\x99kVP\x04\xf2B7\xccb\x9c\r\xe2]\xfb\x898\xe4v\x04\xda:#d\x9d\x83q':\x84\x87\x06%\xb8FX\b\xa7\x85\af\t\x8eq\xfe\x94\xf9\x8d\xfd<-\xb7\x8euz\x84\xe0\xca ;,\r\x108s\x98\x03\xb0\x97'\xa8\n\\\x83$y\xafXLH!k?\x14n\x02\x9c\x82\rz\x88ȡ\xd7\x19d\x1a\xcbB+^\xc8\xc4t\x04\xebf2zN6D\xff\xdfF5\x02t\xab\xf8\v\xa0\x98\x9d\xb8\x01\ng ,\xb0\xb84\x9c\xe2 
\xe8\xe4\x8e>\xfd}}\aik\x7f\x19S\xe9{\xb9\x1f\x16\xda\xc3\x15\x90\xc0\x84\xacȬ\xe9\x12+\xa3:\xcf\x13%\xd7JH\xe7\x7f\x94\xad@9\x15\xbf\xed7\x9dpt\xef\xbf\xf7h\x1d\xddU\x01W>S \xf7\xd4k\xd2\\^\xc0\xb5\x84+\xd6a{\xc5,~\xf5\v I\xdb\x05\t\xf6iW0Lr\xa6\xc4Aj\x83\x89\x94\xa2\x1c\xb9\xafIޱ\xd6X\xd2\xed\x91\x00i\xa5\xa8D\xf4P\x952\xc0\xa6\xe4ňq\xdep\xe9\xcbz\xa7)\xd1\x04\xd9\xfbܚ\x84M\x0e|jr\x98\x81r\xc6\x14\xa0\x9dz\xd9\xfd\x1a\x83ZY\xe1\x94\xd9\x11\xe3\xe0`\x8b\x19\x87#\xd7@\x9fT\x1cϜ\xe3Fq\xcc\xc1\xa6\xa5\xe0\x1a\x16\xb4\x95\xf2+\xf2G\xbd\x94\xf3]\xe8S\xf2Y\xc0\xb4\xe2gp\xc5\x1d\x19\x18\xacР,19\xaeS\xc9C\x06\xd90\xac\xcf1\x1eW\n8\xe1ճ\x88\xdf\xdd^'O\x9e\x84\x18\xb1\xbb\xf9\xbeg\xe4C_%\xb0\xe5>Н\xdf\xfb\xf2\xba\n\x9by\x9f\xe6\x140\xd0\x02C\x1a\xb8\x0f\x12 \xa4u\xc88\xa8*ˑj\x12 \xc37\x18W\xbc\x0e\x1e,\xba\xcaCh!\xd9\x03#\xdf)8\xfcs\xfd\xf1f\xf9\x8f\x9c\xe8\xf7\xa7\x00V\x96h}\x16\xec\xb0C\xe9^\xef\x13s\x8eV\x18\xe4\x94fc\xd11)*\xb4\xae\x88{\xa0\xb1\xbf\xbc\xfd5/=\x80\x1f\x95\x01|d\x9dn\xf15\x88 \xf1\xbd[NJ#l\x10Ǟ#<\b\u05c8i0\xddK\x80\xd4+\x1e\xfb\xc1\x1fױ{\x04\x15\x8f\xdb#\xb4\xe2\x1eW\xf0ʧ5\a\x98\x7f\x90\xed\xfc\xf9\xea\b\xd7\xff\v\xa6\xfd\x8a\x88^\x05p\xfb8<4\xba\x03\xc8`yF\xd45\x1e\xb2\xaa\xe9\xe7\x83\n\xb9\xea\xefA\x19\x92\x80T\x03\x16\x9e1\xdd^p\x94\xc8g\xa0\x7fy\xfb\xebQ\xc4cy\x81\x90\x1c\x1f\xe1-\x88X\xdahſ/\xe0\xcek\xc7N:\xf6H;\x95\x8d\xb2xL\xb2J\xb6\xbb\x90\xe7n\x11\xac\xa2B\t\xdbv\x11\xf2 
\x0e\x0flGRH\x17G\xfa\xc6@3\xe3Njk\xca~\xee>~\xf8\xb8\n\xc8H\xa1j\xef\x89)jV\x82\xb2\x19JcB,\xf6\xda8\v\xe6\xe9\xb3}P\x1f\xa7\xa0l\x98\xac1\x9c\x17\xa1\xea):\x16\x97/\xb1\xe3yJ\x92\xbeLj2u\x1c\xff\xb3\xe0\xfe\xc4\xc3\xf9\f\xfa\t\x87\x1bV\x19'\x0fw\xdfo\xd0Ht\xe8\xcf\xc7Ui\xe9h%jg\x97j\x8bf+\xf0a\xf9\xa0̽\x90\xf5\x82Ts\x11t\xc0.}\x99\xba\xfc\xce\xffy\xf1Y|E\xfb\xd4\x03\x8d*\xed\xafy*\xda\xc7._t\xa8\x94\xc3>=\x8e]\xaecf5]Kf\xf1Ј\xb2I\xc5I\xf4\xb1G\x8cIP&̃kfr\xf7\xd5U\x99\x04\xda\x1bB\xb4[\xc4^ڂIN\xff[a\x1d\x8d\xbfH\x82\xbdx\x92\xf9~\xbe\xfe\xf0m\x14\xbc\x17/\xb2\xd5#\tx\xf8\x1e\x17\aX\x8b\x8e\xe9E\xa0fNu\xa2\x9cPSVz\xcdI\xf0\x95@s&\x8d\xfb4\"N\x89f&\xbf\xdd\xd3<+\x8ft\xac\xce$n\xc3\xd6\xe1\xa9\xf4\ue93cƍ\x1bV[`\x06\x81A\xc74\xdd\xf3=\xee\x16!!\xd0LP4\xa7\x80\xbd\xef\x8a\x00Ӻ\x15\xd9\xc0\x1d\xc3~LY\xa3$\xa8,g\xb5=v\xf6쭥.\xd0\x1a\x1d\x95\xad\xdfD\x0e\x9f'{>Y&\xf9\xc4a\x89\x1dZ\xcb\xeas\xa6\xf8s\xa0\nM\x8f\xb8\x04\xd8F\xf5n\xdf\xf5\x18ŌK\x1bu\xeay\x8d\x97l?a\xacΌ\xea=\x1b랶\xf5k\x86\x8e\xe0\xf0P\xe9Qm0\x9f7\xbd\xc4'\x00\xf8\x17\xb8s\b\x89&g`{\xefu\xd2\xc2\xe0\x84S\xbe\xc1\x87\xcc\xe8\xec\xe5p8y\x95L&3\xf7\xa3\xb7\x86g\x9d?ntN\x04\x91\f\x1a\xd5&cV\x8e\xb5 \xfbn\x83\x86\xe4\xb0\xd99\xb4cw\x9ekq\xf9\xd2\xf8 \xc6\xc1\xfat\x7f\x81S\xac\xf6K&}+چ\xb0ͅ\xd5-\xdbe\x18\xa7\x83\xf8\xf4\x97\x8c\x8b\\\xc0A\x9f\x93Qk<\x16\xe5O\xb7\xe6<\xa6\x0fJ\x1e)֒=\v\xe9\xfe\xfa\x97\x13ɲ\x90\x0e\xebIp\x88\xf3$\xce\xf7\xb4\xcb\xd7\xd9\xe1D\x12c%ӶQ\xee\xfa\xc3\x19-X\xef\t\x935\x1c\xf2h\xef\xfb|\xa3<\x12EU\xc8]\xd5\u07b7<\xcbT\xc7o\xd6砎\x88\xcfD\xa1\xf8Z\x9e\x8bAk\xd4̐\xa5\xfbg\x95\xab\xe9\xbb\xdfk\xb0\xc2\xf7\x8a)\xf5\f\xb9h\xe8\xe4X\nN\x94Z)\x83\x19\x97\t\xf3\xb02\n\"c\xf8\xdf2~d\xf5d6\xe8\x91\xf3\x01\xef\xf8\xde0\x1c\xe97\xfb\xb7\xb4\x15\xfc\xf1\xe7\xc5\x7f\x02\x00\x00\xff\xff\xb6]>s\xd5\"\x00\x00"), 
[]byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xc4YKs#\xb7\x11\xbe\xf3Wt\xad\x0f\x8a\xabv\x86\xdeM*\x95\xe2mW\x8aSJl\xadj\xa9\u074b\xcb\apМ\x819\x03\xc0x\x90b\\\xfe\xef\xa9\x06\x06\xe4\xbcHJ\xaaȞˮ\x80F\xe3Ç~\xa1\x99eٌi\xf1\x15\x8d\x15J.\x80i\x81\x8f\x0e%\xfde\xf3\xcd?l.\xd4|\xfbn\xb6\x11\x92/\xe0\xda[\xa7\x9a\xcfh\x957\x05\xde\xe0ZHᄒ\xb3\x06\x1d\xe3̱\xc5\f\x80I\xa9\x1c\xa3aK\x7f\x02\x14J:\xa3\xea\x1aMV\xa2\xcc7~\x85+/j\x8e&(O[o\xbf\xcb߽Ͽ\x9b\x01H\xd6\xe0\x02\xb4\xe2[U\xfb\x06\rZ\xa7\f\xda|\x8b5\x1a\x95\v5\xb3\x1a\vR^\x1a\xe5\xf5\x02\x8e\x13qq\xbbq\x04}\xaf\xf8נ\xe7s\xd4\x13\xa6ja\xdd\x7f&\xa7\x7f\x10\xd6\x05\x11]{\xc3\xea\t\x1ca\xd6\nY\xfa\x9a\x99\xf1\xfc\f\xc0\x16J\xe3\x02\xee\b\x8af\x05\xf2\x19@{\xce\x00-\x03\xc6y`\x8e\xd5\xf7FH\x87\xe6\x9aT$\xc62\xe0h\v#\xb4\v\xcc\x1c\xf4\x80Z\x83\xab\x90\xb6\f\xac2!\x85,\xc3P\x84\x00N\xc1\n\xa1E\u00832\x80_\xac\x92\xf7\xccU\vȉ\xb8\\+\x9eˤ\xb3\x95\x89\x9c\xdf\rFݞ\xcea\x9d\x11\xb2<\x85\xec\xff\f\xaa\x87\xe7^\xf1'\"y\xa80\xc8$4^\u05caq4\xb4y\xc5$\xaf\x11\xc8@\xc1\x19&\xed\x1a\xcd\t\x14i\xd9\xc3^\xf7\x91|I\xfa:3\xcfa\xe79TD\xd9\xde\xf6_\xbbC\x97\xf6\xbdW\xbc]\x00\xadQ\x83u\xccy\v\xd6\x17\x150\vw\xb8\x9b\xdf\xca{\xa3J\x83\xd6N\xc0\b\u2e6e\x98\xed\xe3X\x86\x89\xd7űV\xa6an\x01B\xba\xbf\xff\xed4\xb6vQ\xee\x94c\xf5ǽC\xdbC\xfa0\x1c\x8eh\xc9\xd9\xca\xf6\xfa\xff\x14\xb8+\x82t\xa3d\x9f\u05cf\x83\xd1)\xb0\x1d\xa5)\xde\xe6\x85\xc1\x10j\x1fD\x83ֱF\xf7\xb4~(\xfb\xfa8sq No\xdf\xc5PVTذE+\xa94\xca\x0f\xf7\xb7_\xff\xba\xec\r\x03h\xa34\x1a'Rt\x8d_'ytF\xa1\xcf\xec\x15)\x8cR\xc0)k\xa0\x8dN\x11ǐ\xb7\x18\xa2\xb3\b\v\x06\xb5A\x8b2摞b 
!&A\xad~\xc1\xc2\xe5\xb0DCj\xc0V\xca\xd7!\x02m\xd180X\xa8R\x8a\xff\x1et[\xf2=ڴf\x0e\xdb\x10\x7f\xfcB\f\x96\xac\x86-\xab=\xbe\x05&94l\x0f\x06i\x17\xf0\xb2\xa3/\x88\xd8\x1c~$\v\x11r\xad\x16P9\xa7\xedb>/\x85KI\xb3PM\xe3\xa5p\xfby\xc8\x7fb\xe5\x9d2v\xceq\x8b\xf5܊2c\xa6\xa8\x84\xc3\xc2y\x83s\xa6E\x16\xa0ː8\xf3\x86\x7fc\xda4k\xafzXGN\x17\xbf\x90\xeb\xce\xdc\x00%;\x10\x16X\xbb4\x9e\xe2Ht\nٟ\xff\xb9|\x80\xb4u\xb8\x8c!\xfb\x81\xf7\xe3B{\xbc\x02\"L\xc85\x05]\xbaĵQMЉ\x92k%\xa4\v\x7f\x14\xb5@9\xa4\xdf\xfaU#\x1c\xdd\xfb\xaf\x1e\xad\xa3\xbb\xca\xe1:T\x12\x14/\xbd&\xcb\xe59\xdcJ\xb8f\r\xd6\xd7\xcc\xe2\xab_\x001m3\"\xf6iW\xd0-\x82\x86\u0091\xb5\xceD\xaa`N\xdcװ*Yj,\xe8\xfa\x88AZ*֢\b\xbeA\xe1\a\xd8H>艹v]\xfaV\xac\xd8x\xbdtʰ\x12\x7fPQ\xe7Ph\x80\xed\xe3Ԛ\x04Nvr^T\x0e6J\x8e\x94\x02\xd4i\xf1\xaeB\x83\xdd5\x06\xb5\xb2\xc2)\xb3'\xc51[\xe6#\r'.\"\x1cY\xf1\vǠp\x1f\x1c\xc2\xe0\x1a\r\xca\x02S\x848W\xc9L\x9c\xa2\x93\xd0\xc7\x10OS\x0fg\xa2\xe7$\xe0\x0f\xf7\xb7)b&\x86[\xe8n\xbc\xef\x05z\xe8[\v\xacyH(\x97\xf7\xbe\xba]\xc7\xcdB\xecp\n\x18h\x81\xb1\"=\x04c\x10\xd2:d\x1c\xd4zR#\xbd\r\x80\x1c\xcc`\xbb\xe2m\x8c\x14mH:\x86p\xa2\x1e\x18\xc5(\xc1\xe1\xdf\xcbOw\xf3\x7fM1\x7f8\x05\xb0\xa2@kC\xbe\xc6\x06\xa5{{\xc8\xd9\x1c\xad0ȩp\xc1\xbcaR\xacѺ\xbc\xdd\x03\x8d\xfd\xe9\xfd\xcf\xd3\xec\x01|\xaf\f\xe0#kt\x8doAD\xc6\x0f\xe1/ٌ\xb0\x91\x8e\x83F\xd8\tW\x89a\xd2:0@\xd6\xd5\x1e{\x17\x8e\xeb\xd8\x06A\xb5\xc7\xf5\b\xb5\xd8\xe0\x02ބJ\xf0\b\xf37r\xac\xdfߜ\xd0\xfa\x97\xe8@oH\xe8M\x04w\xc8w]\x8f<\x82t\x15s\xe0\x8c(K<\x16\xa2\xc3/\x04o\n\x89߂2ĀT\x1d\x15A1\xdd^\x8cG\xc8G\xa0\x7fz\xff\xf3I\xc4}\xbe@H\x8e\x8f\xf0\x1e\x84\x8c\xdchſ\xcd\xe1!X\xc7^:\xf6H;\x15\x95\xb2x\x8aY%\xeb}\xac\xf6\xb7\bV5\b;\xac\xeb,\xd6\x1b\x1cvlO,\xa4\x8b#{c\xa0\x99qg\xad5U\x19\x0f\x9fn>-\"22\xa82\xc4;\xcaNkAU\x03\x95\v1\xe7\x05k\x1c%\xcd\xf4Y\x1f\xcd\xc7)(*&K\x8c\xe7EX{\xcaB\xf9\xd5K\xfcx\x9c\xfa\xd37Q\x02\f\x03ǟ\x96D\x9fx\xb8P\xa9>\xe1pݷ\xd6\xd9\xc3m\xfc\n\x8dD\x87\xe1|\\\x15\x96\x8eV\xa0vv\xae\xb6h\xb6\x02w\xf3\x9d2\x1b!ˌL3\x8b6`\xe7\xe1\xc9<\xff&\xfc\xf
3Ⳅ\xd7\xf5S\x0f\xd4{\xf4\xbf\xe6\xa9h\x1f;\x7fѡR\xad\xf8\xf4\x8bE\x96f\x82*$*\x82b\xd59]\xd73\xadk*l\xdaR\xea\xd0T,\x94\\\x8bқ\xf0z9Eʤ+t\x1b\x8c\x17n\xefKG4]݅\x16\xa7\xab\xa6.\xaf\xd7\xf8\x1c\xa3E\xe9\x9b1\x94\f6J\v61N\x86;rZ\x9ax3.\xf6\xce\xdc\\\xf4\x8a\v\x1c\xb4\xfd\xb8\x89\xc7e\xebT\xb1\xd8\x0e#\xf4\xa0\v\xae5\x9d\xa5\x9e\xebl\x06\x7f\xf5\xf4r\xe8#̦\x9f\xcc\x03\x19\xad\xf8lHZ7N\r&\x8fQf8\xd1w\xe0\xc1l\xafO\xdc=\u0378\xdb\x10\x9a\x90\xcf\xe97\xc4\xc6g\xcb{L{.\xb5C\xe9\xcd\xf7\xe2\x8eC\xa1\xe8=\xd5\xebY^\xb0\x81\xeb\xf1\x8a\xd0\xde3\xbc\xf5\t\xd1`x\xc6Ǟ\xed\x8eٴ\xc9\xd4}CG_\\\x1aJ\rR\x87<\xbcv\xe81\xb6f\xa2F\x0e\x87\x9f\x9e\xc2\xef\v6\xf4\xb9\xae\xa6\x8a\xfb\xa4\xc8[\xe4!\x98N\x80\x1e\xafK\xadc\xce\x1cf\xa4b$!}]\xb3U\x8d\vpƏ\xa7ϸW\x83ֲ\xf2\x92\x7f\xfd\x18\xa5b#\xa4]\x02l\xa5\xbc;tBZGk\xa9\xb8\xb2\xad\x15<\xaf\x1bS1{\t\xca=\xc9LY\xdc\xc1\xe5ϛ\x1c\x9c\tew\xb8\x9b\x18\x1d\xb5\xf2\xbb\x93\xd7Ʉ&\xe6\xbe\x0f\xd6\xf1,\x02ڍ.qЊA\xa5\xead\xdd\xcaQ\xa6\xf6\xcd\n\r\x11\x11~?H\x8c\xa4\xc01\xd5Z\nO\xd2#\x93G\r)\x16FU\xed#\xbb`2tZm\xccv\\X]\xb3\xfd\x84\xdet\x92Pu\x92\xf9\x92\x1f\x1d-&y!\xb9\xff\x89\xecx\xbe%v\xf8}d\xba&\x9e\xfa\xb5e\xea\x16\xba?\x9d\f\xe6\x0f?\f\xbd\xce\x0eg\x92\xbfu̸\xa7\x86\xbdeO\xf8R\xc4\v\xaa\xa7\xe3]7t\x8d\x03U\x7f\x9b?2FM\x125\x1a\f\xc8yGw\xdbN\xee\x8e\xf8\xd5\xe1ǒ\x05\xfc\xf6\xfb\xec\x7f\x01\x00\x00\xff\xff\xb1\x168S\xd6 \x00\x00"), - 
[]byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xdc=Ms\x1b;rw\xfd\n\x94spR%\xd2\xeb\xe4\x92\xd2M\xf1\xb3\xb3\xcc\xee\x93U\x92\xcb\xef\f\xce4I\xac0\xc0<\x00C\x9aI忧\xd0\xc0`\xbe03\x18J|\xeb\rn\x1a\x02\r\xa0\xbb\xd1߀V\xab\xd5\r-\xd9wP\x9aIqGh\xc9\xe0\x87\x01a\xff\xd2\xeb\x97\x7f\xd7k&?\x1c?\u07bc0\x91ߑO\x956\xb2x\x02-+\x95\xc1/\xb0c\x82\x19&\xc5M\x01\x86\xe6\xd4л\x1bB\xa8\x10\xd2P\xfbY\xdb?\tɤ0Jr\x0ej\xb5\a\xb1~\xa9\xb6\xb0\xad\x18\xcfA!\xf0z\xea\xe3\x9f\xd6\x1f\xffu\xfd\xa7\x1bB\x04-\xe0\x8e(\xd0F*\xd0\xeb#pPr\xcd\xe4\x8d.!\xb30\xf7JV\xe5\x1di~pc\xfc|n\xadOn8~\xe1L\x9b\xbf\xb4\xbf\xfe\x95i\x83\xbf\x94\xbcR\x947\x93\xe1G\xcdľ\xe2T\x85\xcf7\x84\xe8L\x96pG\x1e\xec4%\xcd \xbf!\xc4/\x1d\xa7]\xf9U\x1f?:\x10\xd9\x01\n\xea\xd6C\x88,A\xdc?n\xbe\xff\xdbs\xe73!9\xe8L\xb1\xd2 \x02\xfc\xda\bӄ\x92\xef\xb87\xbb\x00\xc451\aj\x88\x82R\x81\x06a41\a \xb4,9\xcb\x10\xd5\x01\"!r\x17Fi\xb2S\xb2h\xa0mi\xf6R\x95\xc4HB\x89\xa1j\x0f\x86\xfc\xa5ڂ\x12`@\x93\x8cWڀZ\aX\xa5\x92%(\xc3jĺ\xd6b\x97\xd6\xd7\xde^\xde\xdb\xed\xba^$\xb7|\x02n\xc9\x1ee\x90{\f\xd9՚\x03\xd3\xcd\xd6\xfa\xdb\xf1[\xa2\x82\xc8\xed\xdf 3k\xf2\fʂ!\xfa +\x9e[\xf6:\x82\xb2\xc8\xc9\xe4^\xb0\xff\x0e\xb0\xb5ݨ\x9d\x94S\x03\x9e\xdeMc\u0080\x12\x94\x93#\xe5\x15\xdc\x12*rR\xd03Q`g!\x95h\xc1\xc3.zM~E\U0008877c#\acJ}\xf7\xe1Þ\x99\xfa\x98d\xb2(*\xc1\xcc\xf9\x03r<\xdbVF*\xfd!\x87#\xf0\x0f\x9a\xedWTe\af 3\x95\x82\x0f\xb4d+\\\xba\xc0\xa3\xb2.\xf2\x7f\nd{\xdfY\xab9[\xce\xd3F1\xb1o\xfd\x80l>A\x01\xcb\xf0\x8e\x97\xdcP\xb7\x8b\x06\xd1\xf6\x93\xc5\xce\xd3\xe7\xe7om>c\xba\x8f}\xc4{\x8b\xf9\x1a\x12X\x841\xb1\x03刈\xdcfa\x82\xc8KɄ\xc1?2\xce@\xf4ѯ\xabm\xc1\x8c\xa5\xfb\xef\x15h\xcb\xd0rM>\xa1\xec [ 
U\x99S\x03\xf9\x9al\x04\xf9D\v\xe0\x9f\xa8\x86\xab\x13\xc0bZ\xaf,b\xd3H\xd0\x16{\xfd\xce\x0ek\xad\x1fj\xe15B/\x7f\xfa\x9fK\xc8:'\xc6\x0ec;\x7f\xcc\xc9N\xaa\x8ep\xb0C\xd6\x1d\xa0\xf1Ck\x9b;\xfdV\x82\xf5\x7f\xe9-\xe5?BG\xcb?v\x11\x95`\xbfW\x80\"ΝX\x18\x88\x94\x01HR\xaf\x0f\xd9b=\xf8}\x04\xa7\xb6\xc1\x8f\x8cW9\xe4A\xda\x0e\xf6\xd2[\xf1\xe7\xc1\x00\xd4:\x94\t\xcb\xffV\xfc\xdbe\x8b\xe6W+N#+\xa6\n\x88\xe5@&\x1c<\xc2\x04n6\x8aiۘ\x81\"\xb2\xb8\xc9\xdd\x11\"*\xce\xe9\x96\xc3\x1d1\xaa\x82\x11\xccP\xa5\xe8y\x041\xb5\nN\xc5K\xe8\xef\x05\x02g\x19\xb4\x15\x85C\x8dS2T\rWD~r\xac0m\xc5Y\xbd\xcbG\xc9Yv\x9eEMlP}\xdc\xfc\xe1\xab9x\v\azdRE\xb6dO\xa4\xed\xdaR\xa4\x8d0\x95V\x96y \xf9e\x1b\x8e\"\xeb \xe5\xcb\x1c\xed\xffl\xfb4R\x9bdh\xbc\x85\xadxj{%\xba\x05\x02? \xabLd\x99\x84\xe4\x15*\x10\xa9H)\xb5\x19\xa7\xfb\xb8\xec!N\x1c\x8c1-\x99b\x9a\xc1μ\xa8\xac)g7\xda\x11\x9bR\x80]ka)\xd7\xf4U\xb2r}\xfb\xfa\xad\x85\xf18FȖjȉ\xf4\\_q\xd0~\xae\x1c\xc9\xdfȕ\xdbQ\xd0a\xf3\xce\xd2\xe0t\v\x9ch\xe0\x90\x19\xa9\x86\x98L\xc1\xa7k)\xb2r\x04\x8f\x11\xa9\xd9e\xfffc\x13 \x89e\xf3Ӂe\ag\x04X\xdeD8$\x97\xa0QpXC\xf5<\xb6I2G{?ɔ\xe8h\xda̙\xeaË\x89\x93\xa6%\x88ۦ\xcd\bށ`\xf1ߣ\x9a\xb3i\xff?\x11[k\x92\v\x98v3\x18\xfa\xb6L\x8bN\x955\xf67;\x02Eiη\x84\x99\xfa\xeb\x1cD\xcayk\xfe\x7f`\xc2,\xe7\xf8M\x7f\xe4\x9br\xfc$U\xe6 Z\xaa\x84\xe9\xff\x01\x89\x82\xca\xe2\xd9\xeb\x8ad\x82\xfc\xb5=ꖰ] H~Kv\x8c\x1bP=ʼ꼼\x052R\xf4\x9dm\x055\xd9\xe1\xf3\x0fk\xd9\xe8&Δ\x88\x97\xfe`g\x12\xd7>BW1\xcf\xc0%\xe8\xbe2\x05\x85s\x8b\xbf!6\x9b/\xe8O\xdc?\xfc\x02\xf9\x14zH\x1a\xe7\r6r\xdf[l{jo\xe7\xa7nÛ>\xc1gr\x01\x8f[B\xc9\v\x9c\x9d\xc5B\x05\xb1ġ\x06\xedݨ\xf74D\x0eF^\x90\xc9^\xe0\x8c`|(evt*+\xb8\xf6\x02\x11s?\xd6:\b\xb4k\xf2\x0e\xaeä\xfd\x80\x88@\xc7;\x1dy\x04\xc3b\xb5,\x9a\xdf\x1cI\x17$u\xabq\x7f\xc16\x03\xd9Z\xe1C$\xec{\xedHdO\xc1\x81\x95\x89\x1b\xc5\xe8\xa1\x06<-u`\xec;\xe5,\x0f\x139\xbe߈qk\xb8\xdb\x1e\xa4و[\xe7\x91i\xe4\x92_$\xe8\ai\xf0\xcbU\xd0\xe9\x16~\x012\xdd@<^\u0089m\x8b\x87v\x84-\x81\xb9]۸@J 
\x0f\xd3d#\xac\xe3\xe2\xf1\x81\xf1R7ݴ~趢\xd2\x18B\x13R\xacPU\xaec39d'\x82\x94\xaaC\x91\xe1\xd2¤n\xc2D\xb0߬&q\xe3]\x04\x98\xd3\f\xf2\xda\xdbĸ%5\xb0g\x19)@\xed\xa7\x14G\xbb\x95V\xbe\xa7-!Q꺶\x90\xc3\xd2T{ݼ\xe8\xce\xe7\x17\xb3\xb2'7\xa1WM\xecٮ#\xe1\xca\xf1\xae\xf3;B\x15\x8b\xf6\xc7,vi\x9ec\n\x89\xf2\xc7\x05\x12\x7f\x01-\x86\xba\xdf-\xcciȂ\x96\xf6\xfc\xfe\x8fUs\xc8\xd0\xffKJ\xcaT\xc2\x19\xbe\xc74\x11\x87\xceX\x1f\x18kOcg`\x9aX\xfa\x1e)\x1f\x06\xc2#\x9b\x93V\xb6\x00w\x8a\\\xee\x06\x16\xcb-9\x1d\xa4v:uǀ\xc7B6\xdd\xc64y\xf7\x02\xe7w\xb7\x039\xf0n#\xde9\x05\xbfX\xdc\x04kA\n~&\xefp\xec\xbb\xd7\x18A\x89\x9c\x98\xd8\xed\xc7\xea%\x84\xe4V\x05-W\x9e{\x8d,X6:ND\xc3\xe3M\xeb\xb0S;D\xde\xc4ƽy<\xb5\xdb$\xfe-\xa56\x7f\x8e\a\xfaF\xd6\xf3X\x8f\xe8ڴ\x91x٬\xad\xefc_A\x18[\vpg@\xf9\xe0\x9f\x13е\xe7\xf0J\x9fj.\xb8\x17\x02{4\x04d-\x82g\xb8ɥJR\x96\xb8\xc4ڴxYh\xa7\x7f\xfeъMړm\xffno䭭\xe1L\x16\x05\xed'\a\x93\x96\xfaɍ\xacy\xda\x03r\xd4W\xfb\n\xcfs\xba\x99X\xf3\x10\xa6\x05O\xcc\x1c\x98 \xb4\x16\x1b\xa0\xbe\xb9]@\x1at]D\xce\x1aՁ\xa0\xe1\x03j\xaaT\x93J\xe6\xe4t\x00\x05\x1d\xae\x18\x06ʭ\xa5\x99\bRHӎGX\xb8\xa5\xcc\xdfk\xb2cJ\x9b\xf6BS\x19\xaeҩ찐\xc2vw\xdfX\x01\xb22\x17\xd0\xe0s3\xba\x93\xd7-\xe8\x0fVT\x05\xa1\x85\xac\x12\x8c\x02\u05ec~aEH\xbez\n\x9c(3!\x0f\x85\x91\x19#-\x95J\x0e&\x95\xc4[\xd8Yq\x94I\xa1Y\x0e\xaa.\x0ep\x94e\xd2\x1e\xdc\x1de\xbc\x8a\xa5}bm\xa9{+>+u\x91w\xfbՍlE\x1b\x0f\xf2\xd4EP2\n\x0e\xf4\b\x84\xed\b3\x04Df\xe9\x02ʉl\x9c\xc2#\x03Q\x93̖i\x02\xde6\x10U\x91\x86\x80\x15\x9el&&\x83i\xed\xee_(\xe3\xd7 
\x9b\xe5\xbc/R=\x01\xcd/\t\xc0\xfc\xd6\x1aN@\xe8Ja\xe2މ\x97\x13\xe3ik\xb6\x94#\x9cV\";\x00\xca)\xd1\x11\x1fāgB\x1b\xa0\xa9\xbc`\xad\xa6J\b&\xf6i\xb4K\x0eq6͡z+%\a\xda/x\x8a5\x8b\xeb\xcb\xc5\xd0o\xcd\xe8?D\f\x05\n\xa4\x9b\v[\xf0\xa4\xf2\xb2\x88\x1a\x03E\xe9Λ$\xaa\x12m\xeds\x05)\xb4\xc4\a\xf7\xabxK\xe7\x9a\t\x96@\xd8^΅\x99\xb6eiA\\ղ\xb4\x13\x04\xa3\xe2\x92\xf0٦\x03\xc0\x9e\xce\xdaI\xc1\xb5\a\xaeY`en\x81\xd0<\x87\xdc\x05&\xad\xa9\xe2}\x16W^6R\xaa\x10\xdd\xddr31\x89\xb2u\xebx\xa4\x18\x8aUGXU\xe2EȓX\xa1'\xaf\x17\v\x90\xf4\xd0\xe0\x9bNo.\x96D\x7f\xa4\x14\xea\xf2k:O\xd5\xc6\xd3\x15\xa4L2\xdf,\x8a\x86Lq\xc1\x9c\\s\xa5\xcb#?ήbj\xfe\x89\xc1>\xd1\xfc\xc9\xd5\x1c\xa7ֳm\xe2\xa3Z\xc6\xdf\xe9\x00\xe6\x00\xaa.f^a\xddvLN7\xf9\xe8Ə\t\x05n\x96\x7fjS\xd8\x15^\xf6J\xde⎎\xb5\x02n-cӊ\x1bW~\xac\xaa\b\x13%\x15~\xc5-\x83\x94ʉ\xb9z\x89n\r`\xa8W\xa8\x8b\x00e=Id\x87\x8e\x96\xaeҷ\x9d\x8c\xef\x16>`ȯ^\xe9߽<0\xa1\xa6a\xa6\x92a\xbahr\n_C\xb6ic\xac\xe1A\xdf\xcfW\xd3\xfe\\\xe83P|-\xfd9\x185@\xbb\x18\x8c\f镃\xa0\xe4\xb6.;\x16\x16P\x16\x13.\xf6\x14\xfap\xa0\x85x\x9f\xe1I\x945`\x8d\xa1f\x7f\xda|u;\xd3\xe4#9\xc8*RR7\x81\x9d\x99\x02\x8b\xf1\xb2\n\x9fD\x00C\x8f\x1f\xd7\xdd_\x8c\xf4E\x16\x18\xf9\x8a\xec\x0e\x1d\x95&\x9a\xcaDΎ,\xaf(\xef\x1c\xb2\x16[4\xdcC\xa4\"\x82\xf1X~ղU=\xbe\xc3F\xe4k\xe9\xf2,\x8b\xc5Ѵ\x89\x98V\x8bqq\x05F\xb7\xc2bDI-M9\xa4\x97\x9a\xa6\xd7XL\x17E,\xa9\xac\xe8\xd7M\x8c\x02\x9d\xaf\xa7H\xb1\xeegj'.\xa8\x98H\xac\x96{u\x82$\xa5&\xe2\xa2J\x88ق\xb2\xc4\xfa\x87ne\xc34\xc8\x05U\x0fIș\xafpX\\\xd7\xe0\xeb\b&\xf7\x91\\\xcd\x10\xa9S\x98\x04\xd42/*\x19M)\xb5\xe2W\xf1\xa5\xae\xe8M]ß\xbạ\x9a\x01٫\x01O\xa9\xeeNJ\xe3%\xe7lR\xb2l\xf3\x99\xe3\xe9\xaa\xed\x84j\xed\x84l\xd0\xdcJ\x13\xaa\xb2\x97Uc'\xe0\xf0J\xbe֕\xbc\xadk\xf8[\xd7\xf5\xb8f}\xaeYΙ\xf9yY\x15\xf5\xc5I\x86:\x1d\xfd 
sx\x94\xca\xcc9\b\x8f\xfd\xfe\x91\x14`\xcbi\x92<'\xa2\xee\x1a\xcb4X\xdb\xdf\xdb\xfd\x97m*\x9e\xad\xab\xcd\xdf_en\xd76\x97[x\xeau\x1f\\\xa1݁\x02\xe1\x1e\x96\xf8\xaf\xe7\xaf\x0f\x01~\xcc\x1e\xf5Fo\xefM\x03g`\xe4\x1e9>\xfb\xe4\vn\x1c\xb6P\x87\xbfq\x92\x80\x96\xec?\xf1ͮ\xf9\x80\xcc\xfd\xe3\x06\xbb\xd6\xd6\x12\xbe\xf5\x15\x12\xfa!\xf7\xb6\x05\xab=\x02FF\xb9\x7f\xb3\xeb@\x8c\x94\x9d\x86?\t\xbe\x98Tk/6V\x93劐\xec\xa9{ܸխ\xc9\x17k\xba\x893\x91\x8e\xf1\x0eL嫒*sF\xeeзa\r\xe3A\x99Z\x87L\x85NFE\xed\xf0-\xa8(n\xeb'\xa10\x01w.\xbb\xd9\xcc>F/Y\xc7\xf8\xed\x89\xd9{\x13o\xb8\x8equ\xbcBLE>G+ \xde,$\xe5\xc5\xd0')v,\x12\x8f\x8a\xdd\x7fp}{\x02 Ï\x95jނ\x9a|\x86g\xfaL\x9e\x143\xf0\\R\xa5\xe1\v\xe3cz\xbe[D\xd8\x1b\xe2\xb4\xf8\x8eӽ˂\xe7,\xa3\x06\x82\x00\xc6\x19F8~\x87\xe35\xc2\xe2.\xa1)G\xcc\xf5\xe9B\xc9\t\"y\xc4<~\x9f\xd3&O\xa1\xe3\xb4\x1a\xa1\x9c\x87hZd\xa9v\xc1&.aM!#\xba\xdf \x01/\xd9o\x18\x9c\xbe\xdf\xc6\xea\xe5\xe7ƖZ\xb2\xf9q;\xfb\r\xd0\xe1D\xfa%\xb8p#\xc7\x0e\x1e\xee/\xb2\xf2E\xe4\xf6\xd1\x06\x10\xd6\xc0\xc4\x1a\x9c\xe85W4:\x96\xe1Bw\xac\xcc9\xa7\xab\xd3\xf9u\xd64Nlm\xe9\x9f\xd7\n>\x063\xe6s\x8a=\xfc\xbd\u05fdw\xd5\xc5Z\xc6\rDo\xc3F\x90\xf3\xcflW\xff7\x8a-\x87\x7f\x19\xf4\xf8\x83\xaf\xac\x9c\xa8\x12L\xec\xe76\xff\x9b\xef\x16q\a<\x84\x88C\x10\xd9Dp\x11\x169\x04\xf5\"G\x1e\\\x0fN\xc2+\\\x82\xa8:\x19|DF\xce[H\xf63\xf9/\xff\x17\x00\x00\xff\xff\xb2%\xffT#f\x00\x00"), - 
[]byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xec=Ms\x1c\xb9nw\xfd\n\x96r\xd8$\xa5\x19\xaf\x93KJ7=ٛ\xa8\x9e\xd7VYZ\x9fr\xe1tc4\\u\x93\xfdH\xf6ȓW\uffe7\x00\x92\xfd5\xcdn\xf6H\xca~D\xbc\xd8\xea!A\x10\x00A\x00\x04\xc9\xd5ju\xc6+\xf1\r\xb4\x11J^2^\t\xf8nA\xe2_f\xfd\xf8\x1ff-Ի\xfd\xfb\xb3G!\xf3Kv]\x1b\xabʯ`T\xad3\xf8\x00[!\x85\x15J\x9e\x95`y\xce-\xbf\x80v=\xa2\x85\x1b$L\x03J\\\x1e\x81\x8a&\x91\x86\x02m;\xb6\xad\xd1ZZ3\xd4\x04Q\x19\x11\xd2X\xe0\xf9\xfa\xfc\xb5\x98\a߳\xa2\xce!\xbf.jcAߡ\xaf\x9e\x87XEd\xd9\xe91\xf1\xe3$\x00o'\x17\"\x03\xe4C\xe6*\xad($\x10#Rk2\x1f*p\x11\nd\xaaǴ\xb5\x85;\xaa\u0080\xc5*\xe7\xffz\x1eS\xa2(\x01\xfd\xde\xfb\xfd\x18\xc654\xd4\xe8i\xd4\b\xc4F\xcfBY\xd9ø\x1c\t\vet\xed\x9eQ9\v\xd8˵\xe6cJ5\f\xa7\t\xbd\x9c\xce\xde\x18\x88\x01\x83e\xa8\xf6\x1b\xb1x\xd8\xff\xffG&\x9f\xc4VC\x01G.$\xb2\xb3\x10\xc6\xf6\xb89\xf4\\\x1b\xccv\xdc\x12Mѻ\x14\xd2\xc1D\xe56g\xd9\xfeNhv\xcaL\x88\x89~#i^\x9cw<&T\x7f@\x82\xed\x94zL!\xd2\x7fa\xbd6\xa2\xc12\x8a}\xb3\r\xec\xf8^(m\x86a1\xf8\x0eYm\xa3z\x82[\x96\x8b\xed\x164¢Hn\x13\xf8}\x9eߤ\xa7\x19\x7f4\xae\x96\xe9\xc8<\xa2Fl(\xe4\xf8L\xb8J\x888Z\xf1\xb4\xba\xe7b/\xf2\x9a\x17\xb4\xd0s\x99\xb9\xf1\xf1\x06\xbf\xb8\xb74)\x10G\xf8;s\"\x8c\x02\xb9\xd4\v\x87(\th^\x97J\xc7,OW\x8e\xc1\xc4ɰ\xe1\x14ň\xc5\x0eڢkt2\x1d*\u0380m\xf5\xceE\xcb)\x17I,\xf8\x06\nf\xa0\x80̪\tg2E\b\\I՟\x11ʎhҾ\x134\xabDۂ\x0e\xe6Nd;gn\xa2\x94\x11,\x96+0\xa41xU\x15\x91U\xa8-\xb3\x92\xe1;\x9bS\x1amIP\x1fC\xb81EҖD\x1dܖ\x19mܧz#6oD\xef\xa1)\x9f%\xec7G\xcd_^ؑ\xdc\x02\f\x19}du]0a\xc3\xd7\x14\xa8=;\xd0\xfc\xc9\x18w\xdal\xb9\x19\xb6~\xf1\xd9\xf2\"\\k\xd0\xf8\x930\x8d\x16\xab;\xbfV-bاn\xcb\v&\xb6\r\xc3\xf2\v\xb6\x15\x85\x05\xb2\xa5\xe6\x10\xed\x18:\xb3\x9c{I\x02\xa5\xae\xbdXJn\xb3\xdd\xc7f\xff!\xa1ŀVC\x00\xce.\x0f>\f\xf1 
\x01$k\x8c\n\xda:\x13\x1aJ\xb7%wO\xf3\xa3\xfdB\x16\xe0\xd5\xe7\x0f\xb1Hb\xbf$J\xeaѠ\xae\x06\x96N\x17\x05\x1a`\x12\xc8Π\xc8Lk|<\xb7\xf1z\xc18{\x84\x83\xb3\xacF\xc3Cc\x05Y\xcb\x1b\x90\x1ah\x17\x98\xd4\xc8#\x1c\b\x94\xdf\xd6M\x82\xb7DT\\y\x84Cj\xd5\x01Q\x11?\xbf\xc7ᨋ\x1fh\x14)S\xa9-\rQ\xfd\xdcaV\xa5\r\x96-SJ\xa1\x04\x8a\x9f8\xec\x86a\xbd\\\x86G8\xfc`\x1c\xfbp\xd6\xecD\xb5\x80\x02\xa8\xb0)$\xa3\xb6\xcd&\xfe7^\x88\xbc\xe9\x8c\xe6\xc9\x02\x887\xf2\x82}V\x16\xff\xf9\xf8]\x18DQ\xe6\xec\x83\x02\xf3YY\xfa\xf2\xaa$v\x838\x91\xc0\xae1MK\xe9\x96\x05\xa4ˢ\xfe[\x1ch\tE\x11m\xd8&\f\xbb\x91\xe8\x9f9\xfa,a\xd3\x0e\x02r\x0e\xad\xb26\x94\x06 \x95\\\xb9\x90\x96\xefm\x01\xd0.^\x9eUJ\xf78u\xb1\x10\xe2(\x8a\x1e\xbd{\\\xad\xdc/G\t\x14SECU\xf0\f\xf2\xb0\xcbF\xd9\x1a\xdc\u0083\xc8X\t\xfa\x01X\x85\xebF\xbaP-\xd0䮜 \x85\xe9\xa6E(~Y\x98\xd8\xf5\xed\x96\x15\xce\xfaĚ\x81\xcdI\xd5gvfǫ\xa7\x8d\x92\x96w\xb2\x87\x92\xa8\xdf\xcd%\\\xb6\xb2,\xe4ױ\r\xe2\x90t\xe6G\xc9i\xe3\xe9︼\x92x\xff#m5\xe4B\x9b5\xbb\xa2L\xca\x02\xba\xedC\x94\xb0\xd3U\x12H\xc4D\x18\x86r\xb2\xe7\x05\x9a\x0f\xa8\xbc%\x83\xc2\x19\x13j{dA\xa5\xa9\x98\xa7\x9d2n\xcdo6\xc6\xce\x1f\xe1\xe07g\xbbZ\xe2\xfcFF\xa3\xf6\xfd\x82:\xffHi5V\x8b\x92Ł\x9d\xd3o\xe7d\x98-\x99\"'\x18o\v\xa4zA\xd5\xef\xab\xc7z\x03Z\x82\x05\xb3*y\xb5\xf2\xb3\xc1\xaa2\xba\xc7\xe9\n\xe5;.q#\xd0Oo\xb2:\xf0\xff!+\x10\xcd\xff9\n$χJ\x99H\xa6E\x04\xad[e\xac\v\x1e\xf6L\xf5\x91\xe8b\x8a\xe7\xe8#\x8e\x8co-hf\xac\xd2!\xe7\x04U\xf6 
\xb8\x8eRc\xe6\xe5\xc6\xed\x13\xf9H\xa6\x03\x8c\x0e\xeay\xab]\xdczp\xee\xf6\xaa\xf0\xff\xf3032\xb4\bv\xa5U\x06&\x9a\x8dЖ\xc4Ug&\xd8\xdb\x04z\xb9s\xfc\xb6Ij=%\f\x1d\xca23\x1eI{\x82S\xf4\xf1{'f\x8d*\f\xffN\x11\xe5Spd\x94\x84_\x96|\x98\r\x9a\x8c\xee\xb5k\x1d&\xa0\a\xe6\x9c-\xfdP\x93BZfs{\x91\xfc\xbd\x19-\xa5\x907\xd4\x11{\xffj\x86\x0e\v\xcb@,#i\xac\f\xd8\xe1۷\fi>\xa4\xfa\xbe,\xe4\x14*\xda\xe7\xd1\xd0\xe3\xec\xf1.H:\xa7\x18\x1a\xe2R\xd9n\xa0\xc7\xf7\xf4\x83a[\xa1\x8dm\x11^\x00U\x98\x89\xac\xa7\xd1\xe1\x9d\xe0\x9fʏZ\x9f\xec\x9e~q\xad;!ɝz\xf2\x99\xb8K\x9c\xf2@\xfc\x1d\xdf\x03\x13[&,\x03\x99\xa9ZR\xb0\f\xd5\x05v\xb3\x00\xa2c\xa2[L\x12\xd7\xccNcY\x97\xe9\x04Y\x91t\n9\x1bY\xeb6\xf9\x89\x8b\xb4\xc8\x16;\x8d\xadv*\x89r\xac\xf4Sx}6e7\x05\xb6\xe4\xdfEY\x97\x8c\x97Ȗ%>\xe7\xd6\xe5a\x86\xfcl\xc7\xeb'.l\x93H\x8b\xecY\xa6M3UV\x05X\b\x19\x96\x99\x92F\xe4И\x0f\x9e\xff\xa3\x89ű\xc2ٖ\x8b\xa2\xd6\vt\xf4b\xce,\xf5\xf9\xbczzyG.\x1d\x91\x15\x1131`\xbf\xc0\xe0\x9e_?*\xbd\xccd\xbe\xd5\xf0\xf2\xa6i\xa5\x85\xa2\x1c\xd8\x19\xebt\x16&Y\xaf}\xeb\xd4\v/\x97\x87\x98y:\v\x950y3O\x9b\xf2f\x9e\xbe\x99\xa7o\xe6頼\x99\xa7o\xe6\xe9\x9by:^\xde\xcc\xd3Ny3O\x93\u05cf\x14\fW\x14\xb9\x9d\xa8\x90\x84Ub\xfa\xc6\x1c\xda3}\xf9,%\x7f\x16dIv\xf5\xcdxˑ\xb3@\x8bΐ\x98\u03a2פ[\xe3\x94\f\x93\xc9\x1d\xc6L\xb0\xc2_\xe0\xacM@\xe0\xe4\xb367\x93\x00^\xf0\xac\x8d\xc7t\x18;\x7f\xc1\x936\x81\x16\xcb\x0fa\\\xf84\xa6\x12x\xd8\x12r9(y\xacۘ\x15\xdb\xc3c\xb4\xceo\x9cu\x7f\x94\xady\xba\xc8\xfc\x9f\x9c߉\x88\xcd\xd1\xe9\xd4\bTaP\xae\xfe\x18\x9c8\x89\xf6Qj\xbb\xff\xc5F\xd7\x12\xd6)^w\x1fD7ղ\x9f\xf2\xfa\xc7\x11\xecS$9\xf5\xfcM\xcc8\x8f\xeb\xb6\x0e1u\xef\xbc\xc7\uf6d6\x16\xca/\x95_\xc9\xd2Ͻߌ4{\xc6\xc9wn\x0e2\xdbi%Um|\x84\a{\xb8\xcaܝ\r\xa1#\xb3D\x19\xbcg;UG\xcex\xcc\xd05!\xf36\x9eo\xeb38\xc0\xf2\xfd\xfbu\xff\x17\xab|\xf6m\x04\xeb'aw\xee\xe6\x03\x9e\xe7h\xa8w\x8e\xf8\x84\xc9\xeb/\xcf\x19\n^\x04\xa2\xd2L\x8a\xc2Ie\x80\xd0_@\xbfT.\xe4w\xb2\xdd2\x1fxJ\xcf\xd1]\x9a\x99\xdb\xe4R\xce[\xc9\xcf\xc8\xc7]vXj6\xf76\x05i\x96\x92q;\x9eK;\x03uI\x9emjL1!\xa7
6=\x936\x8d<\x8c\xae\xbcJ͟M\xf6BSse_'C61/\xb6\x93\xed:\v\xf2\xc4l\xd8d\x82\xa5e\xbe&\xe7\xbbv\xb2X\xe7\xa95\x91\xe5:\x9e\xbb:\vr,\xb75%c5\t\xd7\xe4<\xd5&\xfbt~g\xe4Y٩/\x7f\x0e\xe6%\xe3\x16ӹ\xa6I\x19\xa6I\xb1\x8dy\x9c\x93rH\x97f\x8e&Qui\x96h\x93\x01:\xd1qRn\xe8q\xde\xe7\xd4Pf3B\xe3ٞS`\xc7\xf2@\x13r<'@v\xb3?\x17\x9b\x01\xb3\xd24[ai\xee\xe6\xf8Ev\xa1̯\xce\xc5o!\xb3\xcf%\x93\xd2=\xa39Ź\xfb2h\x82\xd2\x12\xec\xc41C<\xee*;\xf3\xfc\x04C<\x02\xf2f\xcbʺ\xb0\xa2*:7\xc9\xd9\x1d\x1c\x9a+\x7f~Utp}s h_\xbe6\"\x1f\x03\xd9w)\xb8aOP\x14\xf8\xef\x11\x152woc\xa6V\x80\xabT|#\xd0_u\xe4/}\xbcpa1:\xd5O+`\x89\x90\xa6/\xc0\x9a\\J\xa6\xcdcg\xd5ӷ\xbfՠ\x0f\x8c\xee\xdc\nvPT\xcc\xdaӞ~2\x1b\xf4\t\x83\xf2\xf1Z\xcc\xdd\x1e\xdaWF\xf1\xd9Ш\x00v%\xdd\xc2<ĕ`\xa1\xd6iݩ)e\x8b\xdeS\f\x84T\r\x84H\xfb\x14\xeb{\xc9\xf1\xc7\xd7p\xae^½J2D^\xc3\xc5z-'k\xa9\x9b\xb5$y#\xe9\xf8\xe2k8[KܭE6c\xfa\xf1\xc4\xd7:\x96\xf8\nn\xd7Ɏ\xd7\"ҥ\x1e;\\\xec~%\x8co\xe6\x98ᑍ\x96\x002z\xbcp\xdc\x05K\x80xt\xacp\xd6\tK\x99\aC7\xedه\x04\x93\x13\x99\x16\xed\xa6\xa7&!\xa5mt\xcf\x1f\xfeK<\xf4\x97\xb8\r\x9e\x82}\xe2\xe1\xbe\xe5\x87\xfa\x12\xe9|\xa2{6\xd9u\xe2\xe1\xbdE\x0eډ.\xda$ĩ\xc3z\xd3N\xdat\x00nxH\xef\x04s\"A\xc2\x12\xaa,?h\xf7\xec\xcd\x18\xa5sг\xfbZK\xc4yV\x90\a~T\xbf\xff\xc1\x8eN\xb8\x11\x15ku\xf7\xccb\x1cUͽ#\x19\xfb\xab\x90~\xb7\x1e\x05\xb7c\x93\xf46\xdeZ\x83)\xbe\xaf\xd3Z\xa9\xfe\x06|\xb7cg\xa0\xe2\x9a\xf6\xf17\a\x97\x14d\xd6\xec#\xcfvM\x0f\x11\x90\xd4\xef\x8e\x1b\xb6U\xba䖝7[\xa1\xef\\\a\xf8\xf7\xf9\x9a\xb1\x9fT\x93>ҹW,\x02Ո\xb2*\x0e\xe81\xb1\xf3.\x98\xe7\tNT`\x03>\xb7\xaa\x10Y\xc4B\x1c\xbd\x92\xce58\xbaЇ.\xcd\xcb:Y\x101\x1f\x05\x9b\v\x7fa\xe1\xe0R\xe1\xad*\n\xf5tb\xe8\x82W\xe2?\xe9\xc1\x99\xb4\xd8\xd8\xd5\xed\rU\x0fRE\x8f\xd54\xd9s\x8d\x8cm`Z\xa1\xb7\x03'ӣ\vu${\xb5\xf9s\x02\"=\xf7\x10\xec\f\xaf\xc63\x85Z\xec\xf6\xc6a\xb9&\xc1\xe2\xf2\xc0\x94\xbf\xd0_\xe8|Uq\x1d\xdd\xd4c^\x1e\xccE\x0fð\x8e\xcfE\xb0&\x97\xb5\xe3\xe7+\xba\xa5G\xf3\xf0\x92\x05m\xf6\x1e\xaa\xfe6:Q\xbaC\xcf\xe7\xe04}py\xf6\xc8\xf2+\xe04m2\xad\x88\x8a\x91\x9f\xa2\xe9x/\x1e=
4\xfe\x12\xf9\x9f\xd5\x1e>D\xa3\x88\xfd\xb7\x1d\x06MF\x12\xe8\x02ԩk\xd3۬\xb9\xf8u\xd6/\x90\x11\x17P\xf1\x17_/\x18\x9fo1\xf6\x9c\x86\xbf\xff;\xc0\x9eX\xdbp\xca\xde~#w\xb3Q\x97~\x86{g2\x84\n\aW\xb5F@\xc6\xde\xc3x)jY\xa5\xf9\x03|R\xeeɒ\x14j\xf5[\xf4^\xad\xf1\xc6\\\xc8&\xf6s-\xa6\xcc\xfd؆\x00\xdbC\x06G\xd7\xd1#\xb6'\u07b6om\x910\xb8\xfb\xfbOn@V\x94\xb0\xfe\xe0\x1f\xd6@\xbdk\x00)\x1d\x06\xea\x1am\xe2\xdai\xa7\x9e\xe8>\xf7\xee\xbb\"\x9d\x97\x9b\x80\x0e5P\xda\xe8I\xa3\xd9\xf7\x1e\x84\b\xa4K\x11\xf6o\xe3-;6T\x87\x89S)dj\x1b\x85ōQ\x99 \xb3\x8bb\xe6t\x96\xe0\xf5n\"\x9e2\x9f'\xf4gm\xe0˓\x04\xfd5LTs#c/0\xf4H\xf8\xcbQ\xc3\xe8\xeb\vV\x91\xb17\xa8>f\xe4KO \xe3\xde\xee\b\xc1\x7fa\x9a\xa7u\x8eI73\xff\xe3s\x7f|\xa5Z\x8d\xbff\xb3j\x1e\xd89K\xa0\xac{\x9b\xa4\x0fx\xfc\xed$\xf7\x88I\xc6+[\xeb\xf0\x9cM\xad\xe9Zf\x04\x02\xee\xd6\xe2\xd3^Oj_\xa3\x9b\xe1e\xfb>]\x1b\x0e\x9d}\ro\x84\x7f\xcd\xfbG\xd1\a\x81\x9c#\xe1^\xab[!\xfc\xd3\xd89:\x0f\xe8\x1a뙑\xdeb\x9d\xe6\x1c\x98'45\f\xd7_\xdf\xc5P\x1f?سb\x9f\xe1ؐ_\xb1\x8f\x12\aq\xbc\xbe\xbb\xd3;\x90S\x10u\xec\xe5\xb8\xc9!\xee\x9bVttjD[\xf4\xd5ܠ\xfa \xb1\x93\x9e3i\xaa\xb8cRcl\xfdg\xb1u\x11\xee\f\xc7\xf4/G5\xa2\x8akRi\xc5\x14\xd6\xe8\x94:\xfah@\xef\xe9\xfd\x90 $~\r\xef~\xa97\xedm\xe4\xec\xef\xff8\xfb\xdf\x00\x00\x00\xff\xff\xe7\xb2\xe8\xf4^t\x00\x00"), + 
[]byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xdc=]s\x1b9r\xef\xfe\x15(\xe7\xc1I\x95H\x9f\x93\x97\x94\xde\x14\xaf\x9de\xee\xd6VI\x8e\xf7\x19\x9ci\x928a\x80Y\x00C\x99I忧\xd0\xf8\x98\x0fbf0\x94\xb8\xb7wx\xd3\x10h\x00ݍ\xfe\x06\xb4Z\xad\xdeК}\a\xa5\x99\x14\xb7\x84\xd6\f~\x18\x10\xf6/\xbd~\xfaw\xbdf\xf2\xfd\xf1Û'&\xca[\xf2\xb1\xd1FV\x0f\xa0e\xa3\n\xf8\tvL0äxS\x81\xa1%5\xf4\xf6\r!T\bi\xa8\xfd\xacퟄ\x14R\x18%9\a\xb5ڃX?5[\xd86\x8c\x97\xa0\x10x\x98\xfa\xf8\xa7\xf5\x87\x7f]\xff\xe9\r!\x82VpK\x14h#\x15\xe8\xf5\x118(\xb9f\U0008dba1\xb00\xf7J6\xf5-i\x7fpc\xfc|n\xad\x0fn8~\xe1L\x9b?w\xbf\xfe\x85i\x83\xbfԼQ\x94\xb7\x93\xe1G\xcdľ\xe1T\xc5\xcfo\bх\xac\xe1\x96|\xb1\xd3Դ\x80\xf2\r!~\xe98\xedʯ\xfa\xf8\xc1\x81(\x0ePQ\xb7\x1eBd\r\xe2\xee~\xf3\xfd\xdf\x1e{\x9f\t)A\x17\x8a\xd5\x06\x11\xe0\xd7F\x98&\x94|ǽ\xd9\x05 \xae\x899PC\x14\xd4\n4\b\xa3\x899\x00\xa1u\xcdY\x81\xa8\x8e\x10\t\x91\xbb8J\x93\x9d\x92U\vmK\x8b\xa7\xa6&F\x12J\fU{0\xe4\xcf\xcd\x16\x94\x00\x03\x9a\x14\xbc\xd1\x06\xd4:ª\x95\xacA\x19\x16\x10\xebZ\x87]:_\a{yg\xb7\xebz\x91\xd2\xf2\t\xb8%{\x94A\xe91dWk\x0eL\xb7[\x1bn\xc7o\x89\n\"\xb7\x7f\x85¬\xc9#(\v\x86\xe8\x83lxi\xd9\xeb\b\xca\"\xa7\x90{\xc1\xfe'\xc2\xd6v\xa3vRN\rxz\xb7\x8d\t\x03JPN\x8e\x947pC\xa8(IEOD\x81\x9d\x854\xa2\x03\x0f\xbb\xe85\xf9\x05\xc9#v\xf2\x96\x1c\x8c\xa9\xf5\xed\xfb\xf7{f\xc21)dU5\x82\x99\xd3{\xe4x\xb6m\x8cT\xfa}\tG\xe0\xef5ۯ\xa8*\x0e\xcc@a\x1a\x05\xefi\xcdV\xb8t\x81Ge]\x95\xff\x14\xc9\xf6\xae\xb7Vs\xb2\x9c\xa7\x8dbb\xdf\xf9\x01\xd9|\x82\x02\x96\xe1\x1d/\xb9\xa1n\x17-\xa2\xed'\x8b\x9d\x87O\x8fߺ|\xc6\xf4\x10\xfb\x88\xf7\x0e\xf3\xb5$\xb0\bcb\a\xca\x11\x11\xb9\xcd\xc2\x04Q֒\t\x83\x7f\x14\x9c\x81\x18\xa2_7ۊ\x19K\xf7\xdf\x1aЖ\xa1\xe5\x9a|D\xd9A\xb6@\x9a\xba\xa4\x06\xca5\xd9\b\xf2\x91V\xc0?R\rW'\x80Ŵ^Y\xc4摠+\xf6\x86\x9d\x1d\xd6:?\x04\xe15B/\x7f\xfa\x1fk(z'\xc6\x0ec;\x7f\xcc\xc9N\xaa\x9ep\xb0C\xd6=\xa0\xe9Ck\x9b;\xfdV\x82\r\x7f\x19,\xe5?bG\xcb?v\x11\x8d`\xbf5\x80\"ΝX8\x13)g 
IX\x1f\xb2\xc5\xfa\xec\xf7\x11\x9c\xda\x06?\nޔPFi{\xb6\x97\xc1\x8a?\x9d\r@\xadC\x99\xb0\xfcoſ]\xb6h\x7f\xb5\xe24\xb1b\xaa\x80X\x0ed\xc2\xc1#L\xe0f\x93\x98\xb6\x8d\x19\xa8\x12\x8b\x9b\xdc\x1d!\xa2\xe1\x9cn9\xdc\x12\xa3\x1a\x18\xc1\fU\x8a\x9eF\x10\x13Tp.^b\x7f/\x108+\xa0\xab(\x1cj\x9c\x92\xa1\xea|E\xe4\x0f\x8e\x15\xa6\xad8\v\xbb\xbc\x97\x9c\x15\xa7YԤ\x06\x85\xe3\xe6\x0f_\xe0\xe0-\x1c\xe8\x91I\x95ؒ=\x91\xb6kG\x91\xb6\xc2TZY恔\x97m8\x89\xac\x83\x94Os\xb4\xff\xd9\xf6i\xa56)\xd0x\x8b[\xf1\xd4\xf6Jt\v\x04~@ј\xc42\t)\x1bT R\x91Zj3N\xf7q\xd9C\x9c8\x18cZ2\xc54g;\xf3\xa22P\xcen\xb4'6\xa5\x00\xbb\xd6\xcaR\xae\xed\xabd\xe3\xfa\x0e\xf5[\a\xe3i\x8c\x90-\xd5P\x12鹾\xe1\xa0\xfd\\%\x92\xbf\x95+7\xa3\xa0\xe3来\xc1\xe9\x168\xd1\xc0\xa10R\x9dc2\a\x9f\xae\xe5\xc8\xca\x11<&\xa4f\x9f\xfdۍM\x80$\x96͟\x0f\xac88#\xc0\xf2&\xc2!\xa5\x04\x8d\x82\xc3\x1a\xaa\xa7\xb1M\x929\xda\xfbI\xa6DG\xdbf\xce\xd4\x10^J\x9c\xb4-CܶmF\xf0\x9e\t\x16\xff=\xa99\xdb\xf6\x8f\x89ؠI.`\xda\xcd\xd9\xd0\xd7eZt\xaa\xac\xb1\xbf\xd9\x11\xa8js\xba!̄\xafs\x10)\xe7\x9d\xf9\xff\x8e\t\xb3\x9c\xe37Ñ\xaf\xca\xf1\x93T\x99\x83h\xa9\x12\xa7\xff;$\n*\x8bG\xaf+\xb2\t\xf2\x97\xee\xa8\x1b\xc2v\x91 \xe5\r\xd91n@\r(\xf3\xa2\xf3\xf2\x1a\xc8\xc8\xd1w\xb6U\xd4\x14\x87O?\xace\xa3\xdb8S&^\x86\x83\x9dI\x1c|\x84\xbeb\x9e\x81K\xd0}e\n*\xe7\x16\x7fCl\xb6_П\xb8\xfb\xf2\x13\x94S\xe8!y\x9cw\xb6\x91\xbb\xc1b\xbbS{;?w\x1b\xde\xf4\x89>\x93\vx\xdc\x10J\x9e\xe0\xe4,\x16*\x88%\x0e5h\xef&\xbd\xa7s\xe4`\xe4\x05\x99\xec\tN\bƇRfG粂kO\x900\xf7S\xad\x87@\xbb&\xef\xe0:L\xda\x0f\x88\bt\xbc\xf3\x91G0,\x16d\xd1\xfc\xe6H\xbe 
\t-\xe0\xfe\x82mF\xb2u\u0087H\xd8wڑȞ\x82\x03\xab37\x8a\xd1C\rxZB`\xec;嬌\x139\xbe߈qk\xb8߾H\xb3\x117\xce#\xd3\xc8%?I\xd0_\xa4\xc1/WA\xa7[\xf8\x05\xc8t\x03\xf1x\t'\xb6-\x1e\xba\x11\xb6\f\xe6vm\xe3\x02)\x910֝\xc6\xce\xc04\xb1\xf4=R~\x1e\bOlNZ\xd9\x02\xdc)r\xb9;\xb3Xn\xc8\xf3Aj\xa7Sw\fx*d\xd3oL\x93\xb7Opz{s&\a\xden\xc4[\xa7\xe0\x17\x8b\x9bh-H\xc1O\xe4-\x8e}\xfb\x12#(\x93\x133\xbb\xfdX=Őܪ\xa2\xf5\xcas\xaf\x91\x15+Fljdx\xbcm=v\xea\x86\xc8\xdbظ7\x8f\xa7v\x9bſ\xb5\xd4\xe6\xe7t\xa0od=\xf7aDߦM\xc4\xcbfm}\x1f\xfb\x8a\xc2\xd8Z\x80;\x03\xca\a\xff\x9c\x80\x0e\x9e\xc3\v}\xaa\xb9\xe0^\f\xec\xd1\x18\x90\xb5\b\x9e\xe1&\x97*\xc9Y\xe2\x12k\xd3\xe2e\xa1\x9d\xfe\xe9G'6iO\xb6\xfd\xbb\xbb\x91\u05f6\x86\vYUt\x98\x1c\xccZ\xeaG72\xf0\xb4\a䨯\xf6\r\x9e\xe7|31\xf0\x10\xa6\x05\x9f\x9990Ah\x10\x1b\xa0\x82\xb2Qp\xa0G lG\x98! \nK\x17PNd\xe3\x14\x1e\x19\x88\x9al\xb6\xcc\x13\xf0\xb6\x81h\xaa<\x04\xac\xf0d31\x19L\xebv\xffL\x19\xbf\x06\xd9,\xe7}\x96\xea\x01hyI\x00\xe6\xd7\xcep\x02B7\n\x13\xf7N\xbc<3\x9e\xb7fK9\xc2i#\x8a\x03\xa0\x9c\x12=\xf1A\x1cx&\xb4\x01\x9a\xcb\v\xd6jj\x84`b\x9fG\xbb\xec\x10g\xdb\x1c\xaa\xb7Rr\xa0Â\xa7T\xb3\xb8\xbe\\\f\xfdڎ\xfe]\xc4P\xa4@\xbe\xb9\xb0\x05O*/\x8b\xa81P\xd5\xee\xbcI\xa2\x1a\xd1\xd5>W\x90BK|p\xbf\x8a\xd7t\xae\x99`\x19\x84\x1d\xe4\\\x98\xe9Z\x96\x16\xc4U-K;A4*.\t\x9fmz\x00\xec\xe9\fN\n\xae=r\xcd\x02+s\v\x84\x96%\x94.0iM\x15ﳸ\xf2\xb2\x91R\x85\xe4\ue59b\x89Y\x94\r\xad\xe7\x91b(V\x1daՈ'!\x9f\xc5\n=y\xbdX\x80\xe4\x87\x06_uzs\xb1$\xfa=\xa5P\x9f_\xf3y*\x18OW\x902\xd9|\xb3(\x1a2\xc5\x05sr͕.\x8f\xfc8\xbb\x8a\xa9\xf9'\x06\xfbD\xf3GWs\x9c[϶I\x8f\xea\x18\x7f\xcf\a0\aP\xa1\x98y\x85u\xdb)9\xdd\xe6\xa3[?&\x16\xb8Y\xfe\t\xa6\xb0+\xbc\x1c\x94\xbc\xa5\x1d\x1dk\x05\xdcXƦ\r7\xae\xfcX5\t&\xca*\xfcJ[\x069\x95\x13s\xf5\x12\xfd\x1a\xc0X\xaf\x10\x8a\x00e\x98$\xb1CGKW\xe9\xdbM\xc6\xf7\v\x1f0\xe4\x17V\xfa7/\x0f̨i\x98\xa9d\x98.\x9a\x9c\xc2\xd79\xdbt1\xd6\xf2\xa0\xef\xe7\xabi\xffX\xe83P}\xad\xfd9\x185@\xfb\x18L\f\x19\x94\x83\xa0\xe4\xb6.;\x16\x16P\x96\x12.\xf6\x14\xfap\xa0\x85xW\xe0I\x94\x01\xb0\xc6P\
xb3?m\xbe\xba\x9di\xf2\x81\x1cd\x93(\xa9\x9b\xc0\xceL\x81\xc5xY\x85O\"\x80\xa1\xc7\x0f\xeb\xfe/F\xfa\"\v\x8c|%v\x87\x8eJ\x1bMe\xa2dGV6\x94\xf7\x0eY\x87-Z\xee!R\x11\xc1x*\xbfj\xd9*\x8c\xef\xb1\x11\xf9Z\xbb<\xcbbq4m\"\xe6\xd5b\\\\\x81ѯ\xb0\x18QRKS\x0e\xf9\xa5\xa6\xf95\x16\xd3E\x11K*+\x86u\x13\xa3@\xe7\xeb)r\xac\xfb\x99ډ\v*&2\xab\xe5^\x9c ɩ\x89\xb8\xa8\x12b\xb6\xa0,\xb3\xfe\xa1_\xd90\rrA\xd5C\x16r\xe6+\x1c\x16\xd75\xf8:\x82\xc9}dW3$\xea\x14&\x01\x8f\xd60LU'L\xa3\xd42/*\x1bM9\xb5\xe2W\xf1\xa5\xae\xe8M]ß\xbạ\x9a\x019\xa8\x01ϩ\xee\xceJ\xe3e\xe7lr\xb2l\xf3\x99\xe3\xe9\xaa\xed\x8cj\xed\x8cl\xd0\xdcJ3\xaa\xb2\x97Ucg\xe0\xf0J\xbe֕\xbc\xadk\xf8[\xd7\xf5\xb8f}\xaeYΙ\xf9yY\x15\xf5\xc5I\x86\x90\x8e\xfe\"K\xb8\x97\xca\xcc9\b\xf7\xc3\xfe\x89\x14`\xc7i\x92\xbc$\"tMe\x1a\xac\xed\xef\xed\xfe\xcb6\x95\xce\xd6\x05\xf3\xf7\x17Yڵ\xcd\xe5\x16\x1e\x06\xddϮ\xd0\xee@\x81p\x0fK\xfc\xd7\xe3\xd7/\x11~\xca\x1e\xf5F\xef\xe0M\x03g`\x94\x1e9>\xfb\xe4\vn\x1c\xb6P\x87\xbfr\x92\x80\xd6\xec?\xf1ͮ\xf9\x80\xcc\xdd\xfd\x06\xbb\x06k\t\xdf\xfa\x8a\t\xfd\x98{ۂ\xd5\x1e\x11#\xa3ܿ\xd9\xf5 &\xcaN\xe3\x9f\x04_L\nڋ\x8d\xd5d\xb9\"${\xea\xee7nuk\xf2ٚn\xe2D\xa4c\xbc\x03S媦ʜ\x90;\xf4M\\\xc3xP&萩\xd0ɨ\xa8=\x7f\v*\x89\xdb\xf0$\x14&\xe0Nu?\x9b9\xc4\xe8%\xeb\x18\xbf=1{o\xe2\x15\xd71\xae\x8eW\x88\xa9\xc4\xe7d\x05ī\x85\xa4\xbc\x18\xba\xff>'\xd6\x1eb\xc7iyf=\xd9\x10\xd6I\xe0ǎG\x91\xa6\x05\xad\xf5!\x11\x15z\x99LÇ\xaa\f5M\xe6~\\\xdfޖXq\xe8\b\xa0g\b\"Ju\x9e\xed\x1b\xacɞU\a\b\xd50\x9aЂ\xf1\x9b\x8ec\xfe\xfb\xa4<3\x9f\x05\xb9\xf8A\x10\x87\x9e\x11Q\x81\x91&+\xc6\x02/\xb4x\xb9 \xd99k\xc2e\x14\xb6N\u06dd\x99/J\\\xfc\x96\xc4<\xb2\x12\x88\x1a{F\"穈\xbf)>'D\x92.\x0eP6\x1c2\x1ex{\xect\x9d\x7f\xe2-\x00N\x9dI\xd9\x7f\xe4\xcdⵣ^\xad\xc1\xdb\x7fL\xce#\xddC\x1e)\xf1\xee\x82t\x96\xbd{u\xaa\xb0v\xbcn\x8a\x02\xb4\xde5M\x17ˡR\x9cBu,e-\xd7\x1d\xd8\x0e\fZ\x9d\x164\x94\x04\x8e 
\x88\xe5M\xca8\x94Su\xd3\xdf0^\xa9\x8e\xa0\xde\xe9\b\a+\xb9,_?\x1a\xaaL\\\xfa\xf9Q\xdcIUQsKJj`eG_f\x9a\xa4\x9f\x8dTj>\xb5\x84\xb7\x85\xfc\x01\xc0+>H^\xce\xfd\x1d\x9f\n\xb4\xa6\xfb\xe07=\x83\x02\xb2\aaQ<\xf52^{Mʋ\xceX\xf0g\xb1E\v\xd3P?\x813Qb\xd2-\x01\xd2?\xb7\x8a\xae\xd8~T`1a`\x7f\x96\xee\xf2W\xb4\x1e\x80\xea\xe1\xf3\xbcg\x88\xf8\xdc\xed냕\x0e\a\xee-\x18\xea\xaa\xf3\xf05Wâ{8\xa2\x06\xec̋\x84\xebAʧ,\x1b\xf2\xe7ر\x8d\x950\xe1\xf8\b/\xa6mec\xba֡\x1c\x8b\xac⫉\xafl\v\"\xcc;wSe,\x00x\xfe,c\x1c\x10\xbd1i('\xa2\xa9\xb6\xa0\x90!c\x87\xc3\xc4m\xed\xc7\xf06,秛!\xe4\xc1c\xd1-\xec)\x88Hz/\x03:7xC\xe4j\x00\xc4u\x9fx8\x92\xe0K\xbfAa\x8f=U7\xc5\xd1\x11ǟqQ\xb9\bv\xbdǰ\xeb\x16\xef\x9c\x10\x10i\x1f\x8a\x84\xccq8\x16\x17,}\xc2Z\xab\x0fTϙi\xf7\xb6O\xbc\xba\xdb\xd1I\xd1B{\x189\x92髄+\xf2\x05\x9e\x13_\x1d\xb20\xff\x92\xd6$+\xb2\x11\xf7J\xee\x15\xe8s\xc6Y\xe1\xdd2&\xf6\x9f\xa5\xba\xe7͞\x89X\xfa\xbb\xac\xf3=U\x86YVv\xebI\x8c\xfd\x18tY\xe2\xb7\xf9\xd1#?LȨ\xda\xefy6\x18\xe9\xba\xcd\xc9'/@\xdf\xe9V_\xa4\x02w\x1eښ|\x91\x06B\x8e\x90\xf5\x812M\xb6\xa0\xcd\nv;\xa9\x8c\x8b\x1d\xafV\x84\xed\xbc\x9d\x92\x8aqR\xc6\xd1\xc7qok[\xc7'V\xb5E\x8b\xdbDZ\x14*\x05t\x8e*zr\xe10Z\x14ր\x85\xf7\xdaД!\xfd\"1\x8aN\x95\xe7\xe6\x9cC\xbe\xe9\xf6\x8f\xa1\xa4x\xc0\x11\x9cC\x1d\xde\xe9u\x1a8Y\x1fA\xf0\xfeh\xe7I\x01\xa2\xad6\xbb\xe4\xb8\x13'h6\xe3\x0eb\xff\"T\xec<&\xa7\xfc6z\xaf\b\x8f\xd7@3\x1d\x86Z\x9a\x15\a*\xf6\x96}\x94l\xf6\x87\xc0\x82c\x86\xca\bв\xc1Xs\x8d'U\x87\\\xbbi\x94褊|\xf6\xbdl\x97;\x05\xf4b\x89\xa9ڛ\x00\xad̘м)c\x7fd\xf0\b\xfeS\x81\xec8\x84\xea\x93(\xa6\xaf'8?\x8fM\xdcB\x9cBFr\xbfQ\x02^\xb2\xdf88\x7f\xbf]\xe5ݺ\x12K6?\xee߿\x02:ƌ\x82y\\L\x1b\b\xb8\xbf\xc4\xca\x17\x91\xbbk`\x04S\"\x15M\xb4\xc6\xc52\\螓5\x17\xec\xe9u~\x993\x89\x13[W\xf2\x8f\xeb\x04\x1e\xa3\x19\xf3)\xc7\x1d\xfc>\xe8>\xb8\xebe\x1d\xc3\x16\xa2w\xe1\x12\xc8\xf9g\xb6\v\xff\x8ee\xcb\xe1_\xcez\xfc\xcew\xb6\x9e\xa9\x12L\xec\xe76\xff\xab\xef\x96\xf0\x86=\x84\x84?\x9c\xd8D\xf4\x90\x17\xf9\xc3a\x91#\xffq 
\xfa\xc8/\xf0\x88\x93\xea\xe4\xec#2r\xd9A\xb2\x9f\xc9\x7f\xf9\xff\x00\x00\x00\xff\xff\xbb\x8aQ\x92$i\x00\x00"), + []byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xec=Mw$)rw\xfd\n\x9e|\x18\xdbOU=m_\xfctk\xab{\xecz;ӭ\xd7\xd2\xf6\xc9\x17*3J\xc5(\x13r\x81,uy\xdf\xfew\xbf\b \xbf*\xc9$K\x92gfW\\\xba\x95\x05A\x10\x11\xc4\a\x04\xb0Z\xad.x%\xbe\x816B\xc9k\xc6+\x01\xdf-H\xfcˬ\x1f\xffì\x85zwx\x7f\xf1(d~\xcdnjcU\xf9\x15\x8c\xaau\x06\x1fa'\xa4\xb0Bɋ\x12,Ϲ\xe5\xd7\x17\x8cq)\x95\xe5\xf8\xd9\xe0\x9f\x8ceJZ\xad\x8a\x02\xf4\xea\x01\xe4\xfa\xb1\xde¶\x16E\x0e\x9a\x80\x87\xae\x0f?\xae\xdf\xff\xdb\xfa\xc7\v\xc6$/ᚙl\x0fy]\x80Y\x1f\xa0\x00\xad\xd6B]\x98\n2\x04\xfa\xa0U]]\xb3\xf6\a\xd7\xc8w落\xf3\xed\xe9S!\x8c\xfdS\xef\xf3\xcf\xc2X\xfa\xa9*j͋N\x7f\xf4\xd5\b\xf9P\x17\\\xb7\xdf/\x183\x99\xaa\xe0\x9a}Ʈ*\x9eA~\xc1\x98ǟ\xba^1\x9e\xe7D\x11^\xdcj!-\xe8\x1bU\xd4e\xa0Ċ\xe5`2-*K#\xbe\xb3\xdcֆ\xa9\x1d\xb3{\xe8\xf6\x83\xe5W\xa3\xe4-\xb7\xfbk\xb66To]\xed\xb9\t\xbf:\x129\x00\xfe\x93=\"n\xc6j!\x1f\xc6z\xfb\xc0n\xb4\x92\f\xbeW\x1a\f\xa2\xccrb\xa0|`O{\x90\xcc*\xa6kI\xa8\xfc'\xcf\x1e\xebj\x04\x91\n\xb2\xf5\x00O\x8fI\xff\xe3\x1c.\xf7{`\x057\x96YQ\x02\xe3\xbeC\xf6\xc4\r\xe1\xb0S\x9aٽ0\xf34A =l\x1d:?\x0f?;\x84rn\xc1\xa3\xd3\x01\x15\x84w\x9di \xb9\xbd\x17%\x18\xcb\xcb>\xcc\x0f\x0f\x90\x00\x8cHT\xf1ڐp\xb4\xado\xbb\x9f\x1c\x80\xadR\x05py\xd1V:\xbcw\xb2\x97\xed\xa1\xe4\u05fe\xb2\xaa@~\xb8\xdd|\xfb\xf7\xbb\xdeg6\x90%O)&\f\xe3\xec\x1bM\f\xa6\xfdLev\xcf-Ӏ\x9c\ai\xb1F\xa5a\x15\xa8\x9b7 \x19S\x9aU\xa0\x85\xcaE\x16\xb8B\x8d\xcd^\xd5Eζ\x80\fZ7\r*\xad*\xd0V\x84\xa9\xe7JG\xa3t\xbe\x0e0\xfe\x01\a\xe5j9I\x04C\xc2\xe7'\x14\xe4\x9e\x0en~\b\xd3\xe2OL\xea\x01fX\x89K\xa6\xb6\xbfBf\xd7\xec\x0e4\x82\tXgJ\x1e@#\x052\xf5 
\xc5\xff6\xb0\rJ\xbd%a\xb4\xe0\xf5A[h\x02K^\xb0\x03/j\xb8b\\\xe6\xac\xe4G\xa6\x01{a\xb5\xec\xc0\xa3*f\xcd~Q\x1a\x98\x90;u\xcd\xf6\xd6V\xe6\xfaݻ\aa\x83&\xcdTY\xd6R\xd8\xe3;R\x8ab[[\xa5ͻ\x1c\x0eP\xbc3\xe2a\xc5u\xb6\x17\x162[kx\xc7+\xb1\"\xd4%i\xd3u\x99\xffS\xe0\xa8\xf9\xa1\x87\xeb\xc9|s\x85\x14\xe1\x04\aP#:\x81qM\xdd(ZB\xe3'\xa4\xce\xd7Ow\xf7]a\x12fH}\xa2{G\xc2Z\x16 \xc1\x84܁\x9f\xd1;\xadJ\x82\t2\xaf\x94\x90\x96\xfe\xc8\n\x01rH~SoKa\x91\xef\x7f\xa9\xc1X\xe4՚ݐyA9\xac+\x9c\x81\xf9\x9am$\xbb\xe1%\x147\xdc\xc0\xab3\x00)mVH\xd84\x16t-㰲\xa3Z\xe7\x87`\xde\"\xfc\ns\xfc\xae\x82\xac7e\xb0\x9d؉\x8c&\x06i\xcfF\x05\f4\xa8+㳖~!55\xfc:\xc0\xc3\xe9\xb2\xd0+\x18\xb4\x1fvO\x1cn\xcd\x18ʕ\x83\x86:E\xaa!wǴ`\x87\x12\x1e\xca\f&}\xad\x97j\xdfN`2\xaf\xea\xd6\x11\x1cO\xb8J?AY\xa1ژA\xf1\xdeWC\x14\x91>y\xe35\x05\xc3\x1fԬ\xf2ڕ\x9d(7\xean\x0fȷ\x83Ƚ\xf6:\xe1*\x9b\xe4,\x96̈;\xc9+\xb3W\x16m\x9c\xaa\xedX\xad\xc1\x00n\xee6\x83F\x1d\xce#VdÉ\xd1V\xb1'.N9\xed\n\xca\xe5\xcd݆}C\x97\b\x02L\xe6,9\xb3\xb5\x96\xa4\x8e\xbf\x02Ϗ\xf7\xea\xcf\x06X^\x93V\nv\xf9*\x02x\v;\x9c\xf4\x1a\x10\x066\x00\xadq\x0e\x18BM\xd5vM\x0eG\x0e;^\x17\xd6+9a\xd8\xfb\x1fY)dm\xe1\x94\xefl\x9a\xf7D$ny\xa9\x0e\xa0\x13h\xf8\x91[\xfe\v\xd6\x1d\x90\x0ea0\x02\xe2\xd9Od\xdc\x1e#\x03\xc5&['\xa9l\xb3\xeb@\x15\x86]^\xe2<\xbbt.\xf1啫[\x8b®\x84\xa4~\"0]\xefO\xa2(B\xff\xe7Q\xc3\x11\xd7\xf1\xd6ܫ\x9f\x8c\x13\xeb\x14\xe2D\x9a\x8e(\x98J\xe5\xec@\xf5b2&\n`\xe6h,\x94\x9eR\x1dυ\x88Kڱ(<\x18öǀ\xfb\xf8\xb8e]\x14|[\xc05\xb3\xba\x1e\xefvJ\x91\x8d\xd1\xe6+\x18+\xb2\x04\xca\\\x0eI\xe3Z\x8e\x10F\xd3\x0f\x11\xa2\f(\x80.\x0f\x7fD\xb7\xdbS\b}\xa7\xa2\xe8\x10w\x9e*\x8c\xfd\x8fd\x1f\xd1\xdcgh\x84\xaf\xbdq\x17P\x90C!\x15+\x94|\x00\xedzD\xc7)H\x98\x06\x94\xb8<\x02\x15-\xad\x86\x02]\x06\xb6\xab\xd1\b\xaf\x19j\x82\xa8\x8c\bi,\xf0|}\xf9Z̃\xefYQ\xe7\x90\xdf\x14\xb5\xb1\xa0\xef0\x04\xccC\b<\xaae\aL\xfc4\t\xc0\xbb_\x85\xc8\x00\xf9\x90\xb9J+\x8a4cDj=\xb1c\x05.\xf0E\xa6zL[\x17\xab\xa3*\fX\xacr\xf9\xaf\x971%\x8a\x12\xd0\xef\xbdߏa\\CC\x8d\x9eF\x8d@l\xf4,\x94\x95=\x8eˑ\xb0PF\x888\xabr\x
16\xb0\x97k\xcdǔj\x18N\x13џ\xcf\xde\x18\x88\x01\x83e\xa8\xf6\x1b\xb1x\xd8\xff?\"\x93\xcfb\xab\xa1u,.$\xb2\xb3\x10\xc6\xf6\xb89\f\x88\x1a\xcc0vF\x9ab\xd0\"\xa4\x83\x89ʭü\xdf3\xcdΙ\t1\xd1o$͋\xf3\x9eDŽ\xea\x0fH\xb0\xbdR\x8f)D\xfao\xac\xd7\x06\xca,\xa3%U\xb6\x85=?\b\xa5\xcdp\xb5\x05\xbeCVۨ\x9e\xe0\x96\xe5b\xb7\x03\x8d\xb0h\x81\xb0YO\x9c\"\xd6t\x98\xc0:\n(Za0\xae\x96\xe9\xc8<\xa2Fl(\x14\x8eE\xa12B\x1c\xbdx\xb2\xee\xb98\x88\xbc\xe6\x05\x19z.37>\xde\xe0\x17sOf\x04\xe2\x04\x7f\xe7N\x84Q \x97zQ\xb6\x92\x80\xeeu\xa9t\xcc\xf3t\xe5\x14L\x9c\f[N\xc1q,$m\x8b\xae\v0\x1e\x15\xe7\xc0\xb6z\xe7\xaa\xe5\x94[\xa0*\xf8\x16\nf\xa0\x80\xcc*\x1d'O\x8a\x10\xb8\x92\xaa?#\x94\x1dѤ\xfd hV\x89\xb6\x05\x03̽\xc8\xf6\xce\xddD)#X,W`Hc\xf0\xaa*\"V\xa8-\xb3\x92\xe1;\x9bS\x1amIP\x1fC\xb81EҖD\x1dܖ\x19mܧz#6oD\xef\xa1)\x9f%웓\xe6//\xecHn\x01\x86\x9c>\U000bab98\xb0\xe1k\nԞ\x1fh\xfe\xce\x18w\xdel\xd9\f[\xbf\xf8ly\x11\xae5h\xfc\x9d0\x8d\x8c՝\xb7U\x8b\x18\xf6s\xb7\xe5\x15\x13\xbb\x86a\xf9\x15ۉ\xc2\x02\xf9Rs\x88v\x1c\x9dYν$\x81Rm/\x96\x92\xdbl\xff\xa9Y\xd6Nh1\xa0\xd5\x10\x80\xf3\xcbC\fC\f<\x9d.\n4\xc0$\x90\x9dA\x91\x9b\xd6\xc4xn?\xef\x8aq\xf6\bG\xe7Y\x8d.\x0f\x8d\x15d-o@j\xa0\xcdER#\x8fp$P~\xb70\t\xde\x12Qq\xe5\x11\x8e\xa9U\aDE\xfc\xfc>\x85\xa3.~\xa0Q\xa4L\xa5\xb64D\xf5s\x87Y\x956X\xb6L)\x85\x12(~\xe6\xb0\x1b\x86\xf5\xb6\xc8\x1f\xe1\xf8\x83q\xec\xc3Y\xb3\x17\xd5\x02\n\xa0¦%\x19\xb5k\xf6\x86\xbf\xf1B\xe4Mg4O\x16@\xdc\xc8+\xf6YY\xfc\xe7\xd3wa\x10E\x99\xb3\x8f\n\xccge\xe9˫\x92\xd8\r\xe2L\x02\xbb\xc64-\xa53\vH\x97E\xfd\xb78\x90\tE\x11m\xd8&\f\xdbH\x8c\xcf\x1c}\x96\xb0i\x0f\x019\x87VY\x1b\xda]\x96J\xaeܒ\x96\xefm\x01\xd0.^\x9eUJ\xf78u\xb5\x10\xe2(\x8a\x1e\xbd{\xb4V\ue5d3}\xf9\xa9\xa2\xa1*x\x06y\xd8e\xa3$\x00n\xe1Ad\xac\x04\xfd\x00\xacB\xbb\x91.T\v4\xb9+gHa\xbak\x11\x8a7\v#{\xdace\x85\xb3>\xb1f`sR\xf5Ȏ\xfft\xf5\xb4Q\x92y'\x7f(\x89\xfa\xdd\x14\xb5e\x96e!\xbfN}\x10\x87\xa4s?JN\x1bO\x7fE\xf3J\xe2\xfd\xb74kȅ6k\xf6\x81\x12\xf4\n\xe8\xb6\x0f\xab\x84\x9d\xae\x92@\"&\xc20\x94\x93\x03/\xd0}@\xe5-\x19\x14ΙP\xbb\x13\x0f*M\xc5<\x
ed\x95q6\xbf\xd9\x18\xbb|\x84\xa3ߜ\xedj\x89ˍ\x8c\xae\xda\xf7\v\xea\xfc\x13\xa5\xd5x-J\x16GvI\xbf]\x92c\xb6d\x8a\x9c\xe1\xbc-\x90\xea\x05U\xbf\xaf\x1e\xeb-h\t\x16̪\xe4\xd5\xca\xcf\x06\xab\xca\xe8\x1e\xa7+\x94F\xb7$\x8c\xc08=x<ظI6C\xf7\x7f\x8e\x02\xc9\xf3\xa1R&\x92i\x11A\xebV\x19\xeb\x16\x0f{\xae\xfa\xc8\xeabJ\xe4\xe8W\x1c\x19\xdfY\xd0\xccX\xa5Cb\x17\xaa\xec\xc1\xe2:J\x8d\x99\x97\x1b\xb7O\xe4W2\x1d`\fP/[\xed\xe2\xec\xc1\xa5۫\xc2\xff\xcf\xc3\xcc\xc8\xd1\"ؕV\x19\x98h6B[\x12\xad\xce\xccbo\xb3\xd0\xcb]\xe0\xb7KR\xeb)\xcbС,s㑴g\x04E\x9f\xbew֬Q\x85\xe1\xdf)\xa2|\x0e\x8e\x8cr\xbb˒\x0f\x93\f\x93ѽq\xad\xc3\x04\xf4\xc0\\\xb0\xa5\x1fjRH\xcb|n/\x92\xbf7\xa7\xa5\x14rC\x1d\xb1\xf7\xaf\xe6\xe8\xb0`\x06b\x19Ice\xc0\x0e߾eH\xf3!5\xf6e!UM\xd1>\x8f\x86\x1egOwA\xd29\xc5\xd0\x11\x97\xcav\x17z|O?\x18\xb6\x13\xda\xd8\x16\xe1\x05P\x85\x99\xc8z\x1a\x1d\xde\x19\xf1\xa9\xfc\xa4\xf5\xd9\xe1\xe9\x17\u05fa\xb3$\xb9WO>\xc1sIP\x1e\x88\xbf\xe7\a`bDŽe 3UKZ,Cu\x81\xdd,\x80\xe8\x98\xe8\x8cI\xa2\xcd\xec4\x96u\x99N\x90\x15I\xa7\x90\xb3+k\xdd&?q\x91\xb6\xb2\xc5\xcec\xab\x9dJ\xa2\x1c+\xfd\xccP\x9fM\xd9\xcd\xe4-\xf9wQ\xd6%\xe3%\xb2eI̹sy\x98!\xed\xd7\xf1\xfa\x89\v\xebOS\xb8M\xd9e\xda4SeU\x80\x85\x90a\x99)iD\x0e\x8d\xfb\xe0\xf9?\x9a\xaf\x1a+\x9c\xed\xb8(j\xbd@G/\xe6\xccҘϫ\xa7\x97\x0f\xe4\xd2\x11Y\x111\x13\x17\xec\x178\xdc\xf3\xf6\xa3\xd2\xcb\\\xe6[\r/\xef\x9aVZ(ʁ\x9d\xf1Nga\x92\xf7\xda\xf7N\xbd\xf0ry\x8c\xb9\xa7\xb3P\t\x937\xf7\xb4)o\xee\xe9\x9b{\xfa\xe6\x9e\x0eʛ{\xfa枾\xb9\xa7\xe3\xe5\xcd=\xed\x947\xf74\xd9~\xa4`\xb8\xa2\x95ۉ\nIX%\xa6o̡=ӗ\xcfR\xf2gA\x96dWo\xc6[\x8e\x9c\x05Zt\x86\xc4t\x8c^\x93n\x8dS2L&w\xa64\xc1\v\x7f\x81\xb36\x01\x81\xb3\xcf\xdal&\x01\xbc\xe0Y\x1b\x8f\xe9p\xed\xfc\x05O\xda\x04Z,?\x84q\xe5ӘJ\xe0aK\xc8\xe5\xa0\xe4\xb1nc^l\x0f\x8f\xd1:\xbfq\xd6\xfdI\xb6\xe6\xf9\"\xf3\xffr~'\"6'\xa7S#P\x85A\xb9\xfacp\xe2,\xdaG\xa9\xed\xfe\x17\x1b]KX\xa7x\xdd5\x03\xddT\xcb~\xca\xeb\x1fG\xb0ϑ\xe4\xd4\xf371\xe7<\xae\xdb:\xc4Խ\xf3\x1e\xbfoZZ(\xbfTޒ\xa5\x9f{ߌ4{\xc6\xc9wn\x8e2\xdbk%Um\xfc\n\x0f\xf6\xf0!sW\x01\x84\x8e\xcc\
x12e\xf0\x9e\xedU\x1d9\xe31Cׄ\xcc\xdbx\xbe\xad\xcf\xe0\x00\xcb\x0f\xef\xd7\xfd_\xac\xf2ٷ\x11\xac\x9f\x84ݻ\xfb\x18x\x9e\xa3\xa3\xde9\xe2\x13&\xaf\xbf\x93e(x\x11\x88J3)\n'\x95\x01B߀~\xa9ܒ\xdf\xd9~\xcb\xfc\xc2Sz\x8e\xee\xd2\xcc\xdc&\x97r\xdeK~F>\xee\xb2\xc3R\xb3\xb9\xb7)H\xb3\x94\x8c\xdb\xf1\\\xda\x19\xa8K\xf2lS\xd7\x14\x13rj\xd33i\xd3\xc8\xc3\xe8&\xa5\xd4\xfc\xd9\xe4(45W\xf6u2d\x13\xf3b;ٮ\xb3 \xcf̆M&XZ\xe6kr\xbek'\x8bu\x9eZ\x13Y\xae㹫\xb3 \xc7r[S2V\x93pM\xceSm\xb2O\xe7wF\x9e\x95\x9d\xfa\xf2\xe7`^r\xddb:\xd74)\xc34imc\x1e\xe7\xa4\x1cҥ\x99\xa3IT]\x9a%\xdad\x80Nt\x9c\x94\x1bz\x9a\xf795\x94ٌ\xd0x\xb6\xe7\x14ر<Є\x1c\xcf\t\x90\xdd\xec\xcf\xc5n\xc0\xac4\xcdVX\x9a\xbb9~?Z(\xf3ֹ\xf8-d\xf6\xb9dR\xba\xe74\xa7\x04w_\x06MPZ\x82\x9f8\xe6\x88\xc7Ce瞟\xe1\x88G@nv\xac\xac\v+\xaa\xa2sA\x99\xddñ\xb9\xf2\xe7WE\a\u05f7G\x82\xf6\xe5k#\xf21\x90\xfd\x90\x82\x1b\xf6\x04E\x81\xff\x9eP!s\xd7\x01fj\x05h\xa5\xe2\x1b\x81\xfe\xaa#\x7f\x97\xe0\x95[\x16\xa3S\xfdd\x01K\x844}\x01֤)\x99v\x8f\x9dWO\xdf\xfeR\x83>2\xbas+\xf8AQ1kO{\xfa\xc9l0&\f\xca\xc7k1w)e_\x19\xc5gC\xa3\x02\xd8\a\xe9\f\xf3\x10W\x82\x85Z\xa7\r\xa7\xa6\x94-FO1\x10R5\x10\"\xedS\xbc\xef%\xc7\x1f_#\xb8z\x89\xf0*\xc9\x11y\x8d\x10뵂\xac\xa5a֒䍤㋯\x11l-\t\xb7\x16\xf9\x8c\xe9\xc7\x13_\xebX\xe2+\x84]g\a^\x8bH\x97z\xecpq\xf8\x950\xbe\x99c\x86'>Z\x02\xc8\xe8\xf1\xc2\xf1\x10,\x01\xe2ɱ\xc2\xd9 ,e\x1e\fôg\x1f\x12LNdZ\xb4\x9b\x9e\x9a\x84\x94\xb6\xd1=\x7f\xf8/\xf1\xd0_\xe26x\n\xf6\x89\x87\xfb\x96\x1f\xeaK\xa4\xf3\x99\xe1\xd9d\u05c9\x87\xf7\x16\x05hg\x86h\x93\x10\xa7\x0e\xebM\ai\xd3\vp\xc3Czg\xb8\x13\t\x12\x96Pe\xf9A\xbbgo\xc6(\x9d\x83\x9e\xdd\xd7Z\"γ\x82<\x88\xa3\xfa\xfd\x0fvt\u008d\xa8X\xab\xbbg\x16\xe3\xa8j\xee\x1d\xc9؟\x84\xf4\xbb\xf5(\xb8\x1d\x9f\xa4\xb7\xf1\xd6:L\xf1}\x9d\xd6K\xf5\x17\xab\xbb\x1d;\x03\x15״\x8f\xbf=\xba\xa4 
\xb3f\x9fx\xb6oz\x88\x80\xa4~\xf7ܰ\x9d\xd2%\xb7\xec\xb2\xd9\n}\xe7:\xc0\xbf/\u05cc\xfd\xa4\x9a\xf4\x91νb\x11\xa8F\x94UqĈ\x89]v\xc1ti^\xd6ɂ\x88\xc5(\xd8\\\xf8\v\v\a\x97\n\xefTQ\xa8\xa73\x97.x%\xfe\x8b\xde1I[\x1b\xfbp\xbb\xa1\xeaA\xaa\xe8\r\x94&{\xae\x91\xb1-L+\xf4v\xe0\xe4zt\xa1\x8ed\xaf6\x7fN@\xa4W\x04\x82\x9f\xe1\xd5x\xa6P\x8b\xddn\x1c\x96k\x12,.\x8fL\xf9{\xe2\x85\xceW\x15\xd7\xd1M=\xe6\xe5\xc1\\\xf50\fv|n\x05kҬ\x9d\xbe\x8a\xd0-=\x9a\x87\a\x12h\xb3\xf7X\xf5\xb7щ\xd2\x1dz>\a\xa7\xe9\x83˳G\x96_\x01\xa7i\x97iET\x8c\xfc\x14M\xc7{\xf1\xd5C\xe3/\x91\xffE\x1d\xe0ct\x15\xb1\xffd\xc0\xa0\xc9H\x02]\x80:umz\x9b5\x17\xbf\xce\xfa\x052\xe2\x02*\xfe\xe2\xeb\x05\xe3\xf3-\xc6^i\xf0\xf7\x7f\a\xd8\x13\xb6\r\xa7\xec\xed7\n7\x1bu\xe9g\xb8\x0f&\xc3R\xe1\xe0\xaa\xd6\b\xc8\xd83\v/E-\xab4\x7f\x80\x9f\x95{\t#\x85Z\xfd\x16\xbd\xc7P\xbc3\x17\xb2\x89\xfd\\\x8b)s?\xb6!\xc0\xf6\x90\xc1\xc9u\xf4\x88홷\xed[[$\f\xee\xfe\xfeg7 +JX\x7f\xac]\x86\t\xea]\x03H\xe90P\xd7h\x1b\xd7N{\xf5D\xf7\xb9w\x9f\xab\xe8<\b\x04t\xa8\x81\xd2F\xcf\x1aM]\x15\x8a\xe7\xa0o\x94܉\x87\x84\x81\xfd\xb9\xd7``\xd93\xfa\xe8\a\x1b\xeccd`\xa1\xe7so\xc9E\x7f\xac(\xa0\xf8I\x14`\x1cV\x89Z\xfc\xf6\xb4e\xa3\xd4\xebr\xeb\xbc\xcf\x1d\xfe\xd8t2a*\xdd8h\xc1\xbe\x02\x8d^\x9e[گM\x90\xde鑲\x86IBZx\x88d\xef̨\xefC\xef]\x8f0\x03Rtַ\xf1\x96\x1dW\xb83\x17\xa72\x01\xd5.\n\x8b\x1b\xa32A\xde3m}Б\x90\u05fbPz*\n\x9a\xa0cm\xe0˓\x04\xfd5\xe8[\xb3\x91\xb1\x874\xfaS\xe2\xa4a\xf4\x11\r\xab\xc8g\x1fT\x1f\x8bդ'\x90qO\xb0\x84=\x1ca\x9a\x87wNI7\xa3\xc6\xe3*|\xdc\xe1X\x8d\xbfu\xb3j\x9e߹H\xa0\xac{b\xa6\x0fx\xfce%\xf7\x16M\xc6+[\xeb\xa0OjM\xb7k#\x10p\x97O\x9f\xf7\xb6R\xfbV\xdd\f/\xdb\xd7\xeb\xdaU\xedٷ\xf2F\xf8\u05fc\x8e\x14}.\xc8Ń\xee-\xbb\x15\xc2?\x8f\x9d\xa3\xf3\x80n#\x9f\x19\xe9-\xd6i\x8e\xf3yBS\xc3p\x8b\xf9]\f\xf5\xf1\xf3Y+\xf6\x19N\xe3\xb1\x15\xfb$q\x10\xa7n\x9a;\x84\x059\xad\x85\x8f\xbd+79\xc4CӊN\xc0\x8dh\x8b\xbe\x9a\x1bT\x1f\xe4\xe7ҫ4M\x15w\xdam\x8c\xad\xff,vn\xa3\"\xc31\xfd\xcbI\x8d\xa8\xe2\x9aTZ1\x855:\xa5N>\x1a\xd0\az\x06&\b\x89wź_\xeam{\xa9<\xfb\xeb\xdf.\xfe
/\x00\x00\xff\xffs@N\x8d|t\x00\x00"), []byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xb4V\xcfo\xeb6\f\xbe\xe7\xaf \xb0û\xcc\xce\xebv\x19r\x1b\xba\x1d\x8am\x0fE\xf3л\"\xd3\tWY\xf2H*]\xf6\xd7\x0f\x92\xec&\xb1\x9d\xb5\x1b0\xdd\"\xf1\xc7Ǐ\xe4\xe7TU\xb52==#\v\x05\xbf\x01\xd3\x13\xfe\xa9\xe8\xd3/\xa9_~\x90\x9a\xc2\xfax\xb7z!\xdfl\xe0>\x8a\x86\xee\t%D\xb6\xf8\x13\xb6\xe4I)\xf8U\x87j\x1a\xa3f\xb3\x020\xde\a5\xe9Z\xd2O\x00\x1b\xbcrp\x0e\xb9ڣ\xaf_\xe2\x0ew\x91\\\x83\x9c\x83\x8f\xa9\x8f\x9f\xeb\xbb\xef\xea\xcf+\x00o:܀ \xa775\x1a\x85\U0004f222R\x1f\xd1!\x87\x9a\xc2Jz\xb4)\xfe\x9eC\xec7p~(\xfeC\xee\x82{\x9bCms\xa8\xa7\x12*\xbf:\x12\xfd\xe5\x96ů4X\xf5.\xb2qˀ\xb2\x81\x1c\x02\xeb\x97s\xd2\nD\xb8\xbc\x90\xdfGgx\xd1y\x05 6\xf4\xb8\x81\xec\xdb\x1b\x8b\xcd\n` $Ǫ\x06.\x8ew%\x9c=`gJ\x12\x80У\xff\xf1\xf1\xe1\xf9\xfb\xed\xd55@\x83b\x99zʹ.T\x06$``@\x01\x1a\xc0X\x8b\"`#3z\x85\x82\x12ȷ\x81\xbb\xdcɷ\xd0\x00f\x17\xa2\x82\x1e\x10\x9e3\xe5Ce\xf5\x9bIϡGV\x1a\xd9\x18\xdc\xceCvq;\xc1\xfa)\x95S\xac\xa0IӅ\x923\r\x94`30\x00\xa1\x05=\x90\x00c\xcf(\xe8u\x8a2\xf3ӂ\xf1\x10v\xbf\xa3\xd5z\xe0AR\xb3\xa2k\xd2P\x1e\x91\x15\x18m\xd8{\xfa\xeb-\xb6$BRRgt\x9c\x93\xf3!\xaf\xc8\xde88\x1a\x17\xf1[0\xbe\x81Μ\x801e\x81\xe8/\xe2e\x13\xa9\xe1\xb7\xc0\x98\xc9\xdc\xc0A\xb5\x97\xcdz\xbd'\x1d\x97ˆ\xae\x8b\x9e\xf4\xb4\xce{B\xbb\xa8\x81e\xdd\xe0\x11\xddZh_\x19\xb6\aR\xb4\x1a\x19צ\xa7*C\xf7y\xc1\xea\xae\xf9\x86\x87u\x94OWX\xf5\x94&K\x94\xc9\xef/\x1e\xf2B\xfcC\a\xd2:\x94\xf9(\xae\xa5\x8a3\xd1\xe9*\xb1\xf3\xf4\xf3\xf6+\x8c\xa9s3\xa6\xecg\xdeώrnA\"\x8c|\x8b\\\x9a\xd8r\xe8rL\xf4M\x1fȗ鲎\xd0O闸\xebHe\x9c\xddԫ\x1a\xee\xb3\xe2\xc0\x0e!\xf6\x8dQljx\xf0po:t\xf7F\xf0\x7fo@bZ\xaaD\xec\xc7Zp)\x96S\xe3\xc2\xda\xc5\xc3(s7\xfa\xb5\xb0\xdd\xdb\x1em\xea`\"1ySK6\xaf\a\xb4\x81\xc1,\xb9\xd4\x1fB\x92=\xfe%\x96AI\n\x9a\x89\xbe\xa4\xfd|\x1fͲ\x9c䗃\x11\x9c^N0=&\x9bi~G-ړuXB\x145\xc1\xf7\xa1\xa4\x83>v\xf3\x9c\x15|\xc1ׅ\xdbG\x0eIY\xb3\xae_\x9f\x1b\xb3\x01\xe5{\xb3'?+wZY\xb1\xca߰K\xa9\xbe\x10\xe8!\x10p\xf4>\xed\xedL!3\x90\xa9\x92\xcflH\xb1[@\xb3\x88\xe7\xc1\x
b7!\x7f\xf0MJl\xb4\xec\x13\x0e\xcd\x1e\xf2\x14\\\v\x01o\xf7\xba\x9c\xb9x}\x88\xd0r\xf2\x97\xf4\xbf9'\xb9!\xc6\xc5\xdcUF\xb5\xf8\x902.1\xbe\xbc_\x03\xca\xe8\x9c\xd99܀r\x9c{\x17_\xc3lNө\x19G\xed+u(j\xba\xfe\xbd\x01\x9a9\xa4=y=\xa0\xbf\xb5\r\xf0j\xa6*\x7f\x95\x19v\xa7[\xae\xf7o\xff\x01\xe7+UFw\x03I\xbb+\xa5\x05\xce>D\xcab\xf7\xcaH/\xfe\xf3\x98\x11\xb2\xbd\xb4\x1d5\xe3j5\xc6?\"\xf3\x1anBXl\xf6\xec2\x87o.\xca\x13\rl\xf6c\xc1\x7f\a\x00\x00\xff\xff\xb1J-\xe7\xa6\v\x00\x00"), []byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xb4VA\x93\xdb6\x0f\xbd\xfbW`&\x87\xbdDr\xf2}\x97\x8e/\x9d̦\x87L\x93f'N\xf7N\x8b\x90\x8d\x9a\"U\x10\xd4\xc6\xfd\xf5\x1d\x90\xd2\xdak\xcb\xc9n\xa7\xd5\xc5c\n\x04\x1f\xde\xc3\x03UU\xd5\xc2\xf4t\x8f\x1c)\xf8\x15\x98\x9e\xf0\x9b\xa0\xd7\x7f\xb1\xde\xff\x14k\n\xcb\xe1\xedbOޮ\xe06E\t\xdd\x17\x8c!q\x83\xef\xb1%OB\xc1/:\x14c\x8d\x98\xd5\x02\xc0x\x1f\xc4\xe8rԿ\x00M\xf0\xc2\xc19\xe4j\x8b\xbeާ\rn\x129\x8b\x9c\x93OG\x0fo\xea\xb7\xff\xab\xdf,\x00\xbc\xe9p\x05Cp\xa9\xc3\xe8M\x1fwA\\hJ\xcez@\x87\x1cj\n\x8b\xd8c\xa3Gl9\xa4~\x05\xc7\x17%\xc5x|\x81~\x9f\xb3\xad\xc7l\x1f\xc7l9\xc0Q\x94_\xbf\x13\xf4\x91\xa2\xe4\xc0\xde%6\xee*\xb2\x1c\x13w\x81\xe5\xb7\xe3\xe9\x15\fѕ7\xe4\xb7\xc9\x19\xbe\xb6\x7f\x01\x10\x9b\xd0\xe3\n\xf2\xf6\xde4h\x17\x00#?9]5Q\xf3\xb6dlvؙr\x0e@\xe8ѿ\xbb\xfbp\xff\xff\xf5\x93e\x00\x8b\xb1a\xea%\xb3<_\"P\x04\x03\x13\x12x\xd8!#\xdcg>!J`\x8c#\xe8Ǥ\x00\x13\xfeX?.\xf6\x1czd\xa1\xa9\xf8\xf2\x9c\xf4\xd7\xc9\xea\x19\xae\x1b\x85^\xa2\xc0jca\x04\xd9\xe1T>ڱZ\b-Ȏ\"0\xf6\x8c\x11\xbd\x1c\x85<>\xa1\x05\xe3!l\xfe\xc0FjX#k\x1a\xd5&9\xab\xfd8 
\v06a\xeb\xe9\xaf\xc7\xdc\x11$\xe4C\x9d\x11\x1c5?>\xe4\x05\xd9\x1b\a\x83q\t_\x83\xf1\x16:s\x00F=\x05\x92?ɗCb\r\x9f\x02#\x90o\xc3\nv\"}\\-\x97[\x92\xc9WM\xe8\xba\xe4I\x0e\xcbl\x11\xda$\t\x1c\x97\x16\at\xcbH\xdb\xcap\xb3#\xc1F\x12\xe3\xd2\xf4Te\xe8\xbe\xf8\xa0\xb3\xafxtb\xbcy\x82U\x0e\xdaEQ\x98\xfc\xf6\xe4E6\xc2w\x14P\x0f\x94F([K\x15G\xa2uI\xd9\xf9\xf2\xcb\xfa+LGg1\xce\xd9ϼ\x1f7ƣ\x04J\x18\xf9\x16\xb9\x88\xd8r\xe8rN\xf4\xb6\x0f\xe4%\xffi\x1c\xa1?\xa7?\xa6MG\xa2\xba\xff\x990\x8ajU\xc3m\x1e6\xb0AH\xbd5\x82\xb6\x86\x0f\x1enM\x87\xee\xd6D\xfc\xcf\x05P\xa6c\xa5\xc4>O\x82\xd39y\x1e\\X;5\xd88ޮ\xe85\xef\xe4u\x8f\xcd\x13\x03i\x16jitv\x1b\xf8\x8cW3\xf9|>_\xfd$|\xde\xe0P\x86|K\xdb\xf3U\x00cm\xbe\"\x8c\xbb\xbb\xba\xf7;\x84\xcd\xd4}\x9bO\xd2Fm\x03+\xa2\x81,r5\xd59\"I<\x16L\xe8l\xac/R^\xe1<\x97\xc2hUc\xe3.\x81>E\xf2\x18\x98\xef8C\xbeP~L\x90[\x8f\xbbq\xc6zAo\xf3P\xbf@\x13r\x0fG\xb4\xf0@\xb2+\xe6p\xa7\x97\xd4\xf3T\xd0g\x8f\x87\xb9\xe53\xec_w\xa8\x91e\x9c\"Dl\x18EqDtj^uf\r\xf0)\xc5l/3\x9b\x11tD\x90\x9dv\xef\xf1pI4\xfcH\xdc\xf1\xbe\xff1\xe4\x1b\xbd\x17'\xc0\x8c-2z\x99\xb5\xb8~b\xb0G\xc1\xecr\x1b\x9a\xa8\x06o\xb0\x97\xb8\f\x03\xf2@\xf8\xb0|\b\xbc'\xbf\xad\x94\xf0\xaa4B\\\xe6\xef\x86\xe5\xab\xfcs\xa5䯟\xdf\x7f^\xc1;k!\xc8\x0eYUk\x93\x9b\x1a\xed\xe4\xb6{\x9d'\xeekHd\x7f\xbe\xf9'\xbc\x84\xbe8\xe7\x19ܬs\xf7\x1f\xf4\xe6Π\x94\xa2uQ%0\xe8\xdcT\xb1\xbbQ\xcd2\x1f\xe6\x1aq´\t\xc1\xa1\xb9l=\x9d\xbe\xc4h/!Uz\xc2Kl\x06\xf0\xad:\nUu\xa6\xafJ\xb4\x91\xd0Qs\x16=\xf9\xfc\a\x96\xbc\x1b\xc3t<(\aӶ\xa9m\xcaWL\xfe\xa61[\xbc6\x16f\x14\x99/\xbcz<\xe0Y\x03]\x8c\xa4\xf8\U000917b7\x8d\x91\x9bq\xac7\x89\xb5\xfdǜ3\x9f?\xff\xceX\xefw&\xcex\xf3\x19\xa8\xeft\xe7$\x83\xa3\x16\x9bC\xe3\xb0$\x84\xd0\xce\xf4ދ \xeb\x83>us\x8d\xf8n0\xe4\xcc\xc6\xe1̻߽\xb9\xfa\xf6\xaa\xf8\xb3z^,F\xfdƱ+\x10N%\xf7\xd8e\xe3\xca\xdf\x01\x00\x00\xff\xff\xec\xa0\xe0\xa1k\r\x00\x00"), } diff --git a/design/velero-uploader-configuration.md b/design/velero-uploader-configuration.md index 3bd75cb90f..b565ba7efa 100644 --- 
a/design/velero-uploader-configuration.md +++ b/design/velero-uploader-configuration.md @@ -25,7 +25,7 @@ type UploaderConfig struct { } ``` -### Integration with Backup & Restore CRD +### Integration with Backup CRD The Velero CLI will support an uploader configuration-related flag, allowing users to set the value when creating backups or restores. This value will be stored in the `UploaderConfig` field within the `Backup` CRD and `Restore` CRD: ```go @@ -122,44 +122,5 @@ Roughly, the process is as follows: 3. Each respective controller within the CRs calls the uploader, and the ParallelFilesUpload from UploaderConfig in CRs is passed to the uploader. 4. When the uploader subsequently calls the Kopia API, it can use the ParallelFilesUpload to set the MaxParallelFileReads parameter, and if the uploader calls the Restic command it would output one warning log for Restic does not support this feature. -### Sparse Option For Kopia & Restic Restore -In many system files, there are numerous zero bytes or empty blocks that still occupy physical storage space. Sparse backup employs a more intelligent approach by only backing up the actual data-containing portions. For those empty blocks or zero bytes, it merely records their presence without actually storing them. This can significantly reduce the storage space required for backups, especially in situations where there is a substantial amount of empty data in large file systems. - -Below are the key steps that should be added to support this new feature. -#### Velero CLI -The Velero CLI will support a `--write-sparse-files` flag, allowing users to set the `WriteSparseFiles` value when creating restores with Restic or Kopia uploader. 
- -#### UploaderConfig -below the sub-option `WriteSparseFiles` is added into UploaderConfig: - -```go -type UploaderConfig struct { - // +optional - WriteSparseFiles bool `json:"writeSparseFiles,omitempty"` -} -``` - -### Enable Sparse in Restic -For Restic, it could be enabled by pass the flag `--sparse` in creating restore: - -```bash -restic restore create --sparse $snapshotID -``` - -### Enable Sparse in Kopia -For Kopia, it could be enabled this feature by the `WriteSparseFiles` field in the [FilesystemOutput](https://pkg.go.dev/github.com/kopia/kopia@v0.13.0/snapshot/restore#FilesystemOutput). - -```golang -fsOutput := &restore.FilesystemOutput{ - WriteSparseFiles: veleroCfg.WriteSparseFiles, - } -``` - -Roughly, the process is as follows: -1. Users pass the WriteSparseFiles parameter and its value through the Velero CLI. This parameter and its value are stored as a sub-option within UploaderConfig and then placed into the Restore CR. -2. When users perform file system restores, UploaderConfig is passed to the PodVolumeRestore CR. When users use the Data-mover for restores, it is passed to the DataDownload CR. -3. Each respective controller within the CRs calls the uploader, and the WriteSparseFiles from UploaderConfig in CRs is passed to the uploader. -4. When the uploader subsequently calls the Kopia API, it can use the WriteSparseFiles to set the WriteSparseFiles parameter, and if the uploader calls the Restic command it would append `--sparse` flag within the restore command. - ## Alternatives Considered To enhance extensibility further, the option of storing `UploaderConfig` in a Kubernetes ConfigMap can be explored, this approach would allow the addition and modification of configuration options without the need to modify the CRD. 
\ No newline at end of file diff --git a/internal/hook/hook_tracker.go b/internal/hook/hook_tracker.go new file mode 100644 index 0000000000..f4e2bb817e --- /dev/null +++ b/internal/hook/hook_tracker.go @@ -0,0 +1,137 @@ +/* +Copyright 2020 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package hook + +import "sync" + +const ( + HookSourceAnnotation = "annotation" + HookSourceSpec = "spec" +) + +// hookTrackerKey identifies a backup/restore hook +type hookTrackerKey struct { + // PodNamespace indicates the namespace of pod where hooks are executed. + // For hooks specified in the backup/restore spec, this field is the namespace of an applicable pod. + // For hooks specified in pod annotation, this field is the namespace of pod where hooks are annotated. + podNamespace string + // PodName indicates the pod where hooks are executed. + // For hooks specified in the backup/restore spec, this field is an applicable pod name. + // For hooks specified in pod annotation, this field is the pod where hooks are annotated. + podName string + // HookPhase is only for backup hooks, for restore hooks, this field is empty. + hookPhase hookPhase + // HookName is only for hooks specified in the backup/restore spec. + // For hooks specified in pod annotation, this field is empty or "". + hookName string + // HookSource indicates where hooks come from. + hookSource string + // Container indicates the container hooks use. 
+ // For hooks specified in the backup/restore spec, the container might be the same under different hookName. + container string +} + +// hookTrackerVal records the execution status of a specific hook. +// hookTrackerVal is extensible to accommodate additional fields as needs develop. +type hookTrackerVal struct { + // HookFailed indicates if hook failed to execute. + hookFailed bool + // hookExecuted indicates if hook already execute. + hookExecuted bool +} + +// HookTracker tracks all hooks' execution status +type HookTracker struct { + lock *sync.RWMutex + tracker map[hookTrackerKey]hookTrackerVal +} + +// NewHookTracker creates a hookTracker. +func NewHookTracker() *HookTracker { + return &HookTracker{ + lock: &sync.RWMutex{}, + tracker: make(map[hookTrackerKey]hookTrackerVal), + } +} + +// Add adds a hook to the tracker +func (ht *HookTracker) Add(podNamespace, podName, container, source, hookName string, hookPhase hookPhase) { + ht.lock.Lock() + defer ht.lock.Unlock() + + key := hookTrackerKey{ + podNamespace: podNamespace, + podName: podName, + hookSource: source, + container: container, + hookPhase: hookPhase, + hookName: hookName, + } + + if _, ok := ht.tracker[key]; !ok { + ht.tracker[key] = hookTrackerVal{ + hookFailed: false, + hookExecuted: false, + } + } +} + +// Record records the hook's execution status +func (ht *HookTracker) Record(podNamespace, podName, container, source, hookName string, hookPhase hookPhase, hookFailed bool) { + ht.lock.Lock() + defer ht.lock.Unlock() + + key := hookTrackerKey{ + podNamespace: podNamespace, + podName: podName, + hookSource: source, + container: container, + hookPhase: hookPhase, + hookName: hookName, + } + + if _, ok := ht.tracker[key]; ok { + ht.tracker[key] = hookTrackerVal{ + hookFailed: hookFailed, + hookExecuted: true, + } + } +} + +// Stat calculates the number of attempted hooks and failed hooks +func (ht *HookTracker) Stat() (hookAttemptedCnt int, hookFailed int) { + ht.lock.RLock() + defer 
ht.lock.RUnlock() + + for _, hookInfo := range ht.tracker { + if hookInfo.hookExecuted { + hookAttemptedCnt++ + if hookInfo.hookFailed { + hookFailed++ + } + } + } + return +} + +// GetTracker gets the tracker inside HookTracker +func (ht *HookTracker) GetTracker() map[hookTrackerKey]hookTrackerVal { + ht.lock.RLock() + defer ht.lock.RUnlock() + + return ht.tracker +} diff --git a/internal/hook/hook_tracker_test.go b/internal/hook/hook_tracker_test.go new file mode 100644 index 0000000000..d104cc91d9 --- /dev/null +++ b/internal/hook/hook_tracker_test.go @@ -0,0 +1,88 @@ +/* +Copyright 2020 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package hook + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewHookTracker(t *testing.T) { + tracker := NewHookTracker() + + assert.NotNil(t, tracker) + assert.Empty(t, tracker.tracker) +} + +func TestHookTracker_Add(t *testing.T) { + tracker := NewHookTracker() + + tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", PhasePre) + + key := hookTrackerKey{ + podNamespace: "ns1", + podName: "pod1", + container: "container1", + hookPhase: PhasePre, + hookSource: HookSourceAnnotation, + hookName: "h1", + } + + _, ok := tracker.tracker[key] + assert.True(t, ok) +} + +func TestHookTracker_Record(t *testing.T) { + tracker := NewHookTracker() + tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", PhasePre) + tracker.Record("ns1", "pod1", "container1", HookSourceAnnotation, "h1", PhasePre, true) + + key := hookTrackerKey{ + podNamespace: "ns1", + podName: "pod1", + container: "container1", + hookPhase: PhasePre, + hookSource: HookSourceAnnotation, + hookName: "h1", + } + + info := tracker.tracker[key] + assert.True(t, info.hookFailed) +} + +func TestHookTracker_Stat(t *testing.T) { + tracker := NewHookTracker() + + tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", PhasePre) + tracker.Add("ns2", "pod2", "container1", HookSourceAnnotation, "h2", PhasePre) + tracker.Record("ns1", "pod1", "container1", HookSourceAnnotation, "h1", PhasePre, true) + + attempted, failed := tracker.Stat() + assert.Equal(t, 1, attempted) + assert.Equal(t, 1, failed) +} + +func TestHookTracker_Get(t *testing.T) { + tracker := NewHookTracker() + tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", PhasePre) + + tr := tracker.GetTracker() + assert.NotNil(t, tr) + + t.Logf("tracker :%+v", tr) +} diff --git a/internal/hook/item_hook_handler.go b/internal/hook/item_hook_handler.go index 38c982c550..9075bc50fa 100644 --- a/internal/hook/item_hook_handler.go +++ b/internal/hook/item_hook_handler.go @@ 
-82,6 +82,7 @@ type ItemHookHandler interface { obj runtime.Unstructured, resourceHooks []ResourceHook, phase hookPhase, + hookTracker *HookTracker, ) error } @@ -200,6 +201,7 @@ func (h *DefaultItemHookHandler) HandleHooks( obj runtime.Unstructured, resourceHooks []ResourceHook, phase hookPhase, + hookTracker *HookTracker, ) error { // We only support hooks on pods right now if groupResource != kuberesource.Pods { @@ -221,15 +223,21 @@ func (h *DefaultItemHookHandler) HandleHooks( hookFromAnnotations = getPodExecHookFromAnnotations(metadata.GetAnnotations(), "", log) } if hookFromAnnotations != nil { + hookTracker.Add(namespace, name, hookFromAnnotations.Container, HookSourceAnnotation, "", phase) + hookLog := log.WithFields( logrus.Fields{ - "hookSource": "annotation", + "hookSource": HookSourceAnnotation, "hookType": "exec", "hookPhase": phase, }, ) + + hookTracker.Record(namespace, name, hookFromAnnotations.Container, HookSourceAnnotation, "", phase, false) if err := h.PodCommandExecutor.ExecutePodCommand(hookLog, obj.UnstructuredContent(), namespace, name, "", hookFromAnnotations); err != nil { hookLog.WithError(err).Error("Error executing hook") + hookTracker.Record(namespace, name, hookFromAnnotations.Container, HookSourceAnnotation, "", phase, true) + if hookFromAnnotations.OnError == velerov1api.HookErrorModeFail { return err } @@ -240,6 +248,8 @@ func (h *DefaultItemHookHandler) HandleHooks( labels := labels.Set(metadata.GetLabels()) // Otherwise, check for hooks defined in the backup spec. 
+ // modeFailError records the error from the hook with "Fail" error mode + var modeFailError error for _, resourceHook := range resourceHooks { if !resourceHook.Selector.applicableTo(groupResource, namespace, labels) { continue @@ -251,21 +261,30 @@ func (h *DefaultItemHookHandler) HandleHooks( } else { hooks = resourceHook.Post } + for _, hook := range hooks { if groupResource == kuberesource.Pods { if hook.Exec != nil { - hookLog := log.WithFields( - logrus.Fields{ - "hookSource": "backupSpec", - "hookType": "exec", - "hookPhase": phase, - }, - ) - err := h.PodCommandExecutor.ExecutePodCommand(hookLog, obj.UnstructuredContent(), namespace, name, resourceHook.Name, hook.Exec) - if err != nil { - hookLog.WithError(err).Error("Error executing hook") - if hook.Exec.OnError == velerov1api.HookErrorModeFail { - return err + hookTracker.Add(namespace, name, hook.Exec.Container, HookSourceSpec, resourceHook.Name, phase) + // The remaining hooks will only be executed if modeFailError is nil. + // Otherwise, execution will stop and only hook collection will occur. 
+ if modeFailError == nil { + hookLog := log.WithFields( + logrus.Fields{ + "hookSource": HookSourceSpec, + "hookType": "exec", + "hookPhase": phase, + }, + ) + + hookTracker.Record(namespace, name, hook.Exec.Container, HookSourceSpec, resourceHook.Name, phase, false) + err := h.PodCommandExecutor.ExecutePodCommand(hookLog, obj.UnstructuredContent(), namespace, name, resourceHook.Name, hook.Exec) + if err != nil { + hookLog.WithError(err).Error("Error executing hook") + hookTracker.Record(namespace, name, hook.Exec.Container, HookSourceSpec, resourceHook.Name, phase, true) + if hook.Exec.OnError == velerov1api.HookErrorModeFail { + modeFailError = err + } } } } @@ -273,7 +292,7 @@ func (h *DefaultItemHookHandler) HandleHooks( } } - return nil + return modeFailError } // NoOpItemHookHandler is the an itemHookHandler for the Finalize controller where hooks don't run @@ -285,6 +304,7 @@ func (h *NoOpItemHookHandler) HandleHooks( obj runtime.Unstructured, resourceHooks []ResourceHook, phase hookPhase, + hookTracker *HookTracker, ) error { return nil } @@ -514,6 +534,7 @@ func GroupRestoreExecHooks( resourceRestoreHooks []ResourceRestoreHook, pod *corev1api.Pod, log logrus.FieldLogger, + hookTrack *HookTracker, ) (map[string][]PodExecRestoreHook, error) { byContainer := map[string][]PodExecRestoreHook{} @@ -530,10 +551,11 @@ func GroupRestoreExecHooks( if hookFromAnnotation.Container == "" { hookFromAnnotation.Container = pod.Spec.Containers[0].Name } + hookTrack.Add(metadata.GetNamespace(), metadata.GetName(), hookFromAnnotation.Container, HookSourceAnnotation, "", hookPhase("")) byContainer[hookFromAnnotation.Container] = []PodExecRestoreHook{ { HookName: "", - HookSource: "annotation", + HookSource: HookSourceAnnotation, Hook: *hookFromAnnotation, }, } @@ -554,7 +576,7 @@ func GroupRestoreExecHooks( named := PodExecRestoreHook{ HookName: rrh.Name, Hook: *rh.Exec, - HookSource: "backupSpec", + HookSource: HookSourceSpec, } // default to false if attr WaitForReady not 
set if named.Hook.WaitForReady == nil { @@ -564,6 +586,7 @@ func GroupRestoreExecHooks( if named.Hook.Container == "" { named.Hook.Container = pod.Spec.Containers[0].Name } + hookTrack.Add(metadata.GetNamespace(), metadata.GetName(), named.Hook.Container, HookSourceSpec, rrh.Name, hookPhase("")) byContainer[named.Hook.Container] = append(byContainer[named.Hook.Container], named) } } diff --git a/internal/hook/item_hook_handler_test.go b/internal/hook/item_hook_handler_test.go index 0912b1bdd2..f8efb5f089 100644 --- a/internal/hook/item_hook_handler_test.go +++ b/internal/hook/item_hook_handler_test.go @@ -108,6 +108,7 @@ func TestHandleHooksSkips(t *testing.T) { }, } + hookTracker := NewHookTracker() for _, test := range tests { t.Run(test.name, func(t *testing.T) { podCommandExecutor := &velerotest.MockPodCommandExecutor{} @@ -118,7 +119,7 @@ func TestHandleHooksSkips(t *testing.T) { } groupResource := schema.ParseGroupResource(test.groupResource) - err := h.HandleHooks(velerotest.NewLogger(), groupResource, test.item, test.hooks, PhasePre) + err := h.HandleHooks(velerotest.NewLogger(), groupResource, test.item, test.hooks, PhasePre, hookTracker) assert.NoError(t, err) }) } @@ -485,7 +486,8 @@ func TestHandleHooks(t *testing.T) { } groupResource := schema.ParseGroupResource(test.groupResource) - err := h.HandleHooks(velerotest.NewLogger(), groupResource, test.item, test.hooks, test.phase) + hookTracker := NewHookTracker() + err := h.HandleHooks(velerotest.NewLogger(), groupResource, test.item, test.hooks, test.phase, hookTracker) if test.expectedError != nil { assert.EqualError(t, err, test.expectedError.Error()) @@ -861,7 +863,7 @@ func TestGroupRestoreExecHooks(t *testing.T) { "container1": { { HookName: "", - HookSource: "annotation", + HookSource: HookSourceAnnotation, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/foo"}, @@ -892,7 +894,7 @@ func TestGroupRestoreExecHooks(t *testing.T) { "container1": { { HookName: "", 
- HookSource: "annotation", + HookSource: HookSourceAnnotation, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/foo"}, @@ -933,7 +935,7 @@ func TestGroupRestoreExecHooks(t *testing.T) { "container1": { { HookName: "hook1", - HookSource: "backupSpec", + HookSource: HookSourceSpec, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/foo"}, @@ -973,7 +975,7 @@ func TestGroupRestoreExecHooks(t *testing.T) { "container1": { { HookName: "hook1", - HookSource: "backupSpec", + HookSource: HookSourceSpec, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/foo"}, @@ -1021,7 +1023,7 @@ func TestGroupRestoreExecHooks(t *testing.T) { "container1": { { HookName: "", - HookSource: "annotation", + HookSource: HookSourceAnnotation, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/foo"}, @@ -1140,7 +1142,7 @@ func TestGroupRestoreExecHooks(t *testing.T) { "container1": { { HookName: "hook1", - HookSource: "backupSpec", + HookSource: HookSourceSpec, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/foo"}, @@ -1152,7 +1154,7 @@ func TestGroupRestoreExecHooks(t *testing.T) { }, { HookName: "hook1", - HookSource: "backupSpec", + HookSource: HookSourceSpec, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/bar"}, @@ -1164,7 +1166,7 @@ func TestGroupRestoreExecHooks(t *testing.T) { }, { HookName: "hook2", - HookSource: "backupSpec", + HookSource: HookSourceSpec, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/aaa"}, @@ -1178,7 +1180,7 @@ func TestGroupRestoreExecHooks(t *testing.T) { "container2": { { HookName: "hook1", - HookSource: "backupSpec", + HookSource: HookSourceSpec, Hook: velerov1api.ExecRestoreHook{ Container: "container2", Command: []string{"/usr/bin/baz"}, @@ -1192,9 +1194,11 @@ func TestGroupRestoreExecHooks(t 
*testing.T) { }, }, } + + hookTracker := NewHookTracker() for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - actual, err := GroupRestoreExecHooks(tc.resourceRestoreHooks, tc.pod, velerotest.NewLogger()) + actual, err := GroupRestoreExecHooks(tc.resourceRestoreHooks, tc.pod, velerotest.NewLogger(), hookTracker) assert.Nil(t, err) assert.Equal(t, tc.expected, actual) }) @@ -1983,3 +1987,494 @@ func TestValidateContainer(t *testing.T) { // noCommand string should return expected error as result. assert.Equal(t, expectedError, ValidateContainer([]byte(noCommand))) } + +func TestBackupHookTracker(t *testing.T) { + type podWithHook struct { + item runtime.Unstructured + hooks []ResourceHook + hookErrorsByContainer map[string]error + expectedPodHook *velerov1api.ExecHook + expectedPodHookError error + expectedError error + } + test1 := []struct { + name string + phase hookPhase + groupResource string + pods []podWithHook + hookTracker *HookTracker + expectedHookAttempted int + expectedHookFailed int + }{ + { + name: "a pod with spec hooks, no error", + phase: PhasePre, + groupResource: "pods", + hookTracker: NewHookTracker(), + expectedHookAttempted: 2, + expectedHookFailed: 0, + pods: []podWithHook{ + { + item: velerotest.UnstructuredOrDie(` + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "namespace": "ns", + "name": "name" + } + }`), + hooks: []ResourceHook{ + { + Name: "hook1", + Pre: []velerov1api.BackupResourceHook{ + { + Exec: &velerov1api.ExecHook{ + Container: "1a", + Command: []string{"pre-1a"}, + }, + }, + { + Exec: &velerov1api.ExecHook{ + Container: "1b", + Command: []string{"pre-1b"}, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "a pod with spec hooks and same container under different hook name, no error", + phase: PhasePre, + groupResource: "pods", + hookTracker: NewHookTracker(), + expectedHookAttempted: 4, + expectedHookFailed: 0, + pods: []podWithHook{ + { + item: velerotest.UnstructuredOrDie(` + { + "apiVersion": 
"v1", + "kind": "Pod", + "metadata": { + "namespace": "ns", + "name": "name" + } + }`), + hooks: []ResourceHook{ + { + Name: "hook1", + Pre: []velerov1api.BackupResourceHook{ + { + Exec: &velerov1api.ExecHook{ + Container: "1a", + Command: []string{"pre-1a"}, + }, + }, + { + Exec: &velerov1api.ExecHook{ + Container: "1b", + Command: []string{"pre-1b"}, + }, + }, + }, + }, + { + Name: "hook2", + Pre: []velerov1api.BackupResourceHook{ + { + Exec: &velerov1api.ExecHook{ + Container: "1a", + Command: []string{"2a"}, + }, + }, + { + Exec: &velerov1api.ExecHook{ + Container: "2b", + Command: []string{"2b"}, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "a pod with spec hooks, on error=fail", + phase: PhasePre, + groupResource: "pods", + hookTracker: NewHookTracker(), + expectedHookAttempted: 3, + expectedHookFailed: 2, + pods: []podWithHook{ + { + item: velerotest.UnstructuredOrDie(` + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "namespace": "ns", + "name": "name" + } + }`), + hooks: []ResourceHook{ + { + Name: "hook1", + Pre: []velerov1api.BackupResourceHook{ + { + Exec: &velerov1api.ExecHook{ + Container: "1a", + Command: []string{"1a"}, + OnError: velerov1api.HookErrorModeContinue, + }, + }, + { + Exec: &velerov1api.ExecHook{ + Container: "1b", + Command: []string{"1b"}, + }, + }, + }, + }, + { + Name: "hook2", + Pre: []velerov1api.BackupResourceHook{ + { + Exec: &velerov1api.ExecHook{ + Container: "2", + Command: []string{"2"}, + OnError: velerov1api.HookErrorModeFail, + }, + }, + }, + }, + { + Name: "hook3", + Pre: []velerov1api.BackupResourceHook{ + { + Exec: &velerov1api.ExecHook{ + Container: "3", + Command: []string{"3"}, + }, + }, + }, + }, + }, + hookErrorsByContainer: map[string]error{ + "1a": errors.New("1a error, but continue"), + "2": errors.New("2 error, fail"), + }, + }, + }, + }, + { + name: "a pod with annotation and spec hooks", + phase: PhasePre, + groupResource: "pods", + hookTracker: NewHookTracker(), + expectedHookAttempted: 
1, + expectedHookFailed: 0, + pods: []podWithHook{ + { + item: velerotest.UnstructuredOrDie(` + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "namespace": "ns", + "name": "name", + "annotations": { + "hook.backup.velero.io/container": "c", + "hook.backup.velero.io/command": "/bin/ls" + } + } + }`), + expectedPodHook: &velerov1api.ExecHook{ + Container: "c", + Command: []string{"/bin/ls"}, + }, + hooks: []ResourceHook{ + { + Name: "hook1", + Pre: []velerov1api.BackupResourceHook{ + { + Exec: &velerov1api.ExecHook{ + Container: "1a", + Command: []string{"1a"}, + OnError: velerov1api.HookErrorModeContinue, + }, + }, + { + Exec: &velerov1api.ExecHook{ + Container: "1b", + Command: []string{"1b"}, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "a pod with annotation, on error=fail", + phase: PhasePre, + groupResource: "pods", + hookTracker: NewHookTracker(), + expectedHookAttempted: 1, + expectedHookFailed: 1, + pods: []podWithHook{ + { + item: velerotest.UnstructuredOrDie(` + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "namespace": "ns", + "name": "name", + "annotations": { + "hook.backup.velero.io/container": "c", + "hook.backup.velero.io/command": "/bin/ls", + "hook.backup.velero.io/on-error": "Fail" + } + } + }`), + expectedPodHook: &velerov1api.ExecHook{ + Container: "c", + Command: []string{"/bin/ls"}, + OnError: velerov1api.HookErrorModeFail, + }, + expectedPodHookError: errors.New("pod hook error"), + }, + }, + }, + { + name: "two pods, one with annotation, the other with spec", + phase: PhasePre, + groupResource: "pods", + hookTracker: NewHookTracker(), + expectedHookAttempted: 3, + expectedHookFailed: 1, + pods: []podWithHook{ + { + item: velerotest.UnstructuredOrDie(` + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "namespace": "ns", + "name": "name", + "annotations": { + "hook.backup.velero.io/container": "c", + "hook.backup.velero.io/command": "/bin/ls", + "hook.backup.velero.io/on-error": "Fail" + } + } + }`), 
+ expectedPodHook: &velerov1api.ExecHook{ + Container: "c", + Command: []string{"/bin/ls"}, + OnError: velerov1api.HookErrorModeFail, + }, + expectedPodHookError: errors.New("pod hook error"), + }, + { + item: velerotest.UnstructuredOrDie(` + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "namespace": "ns", + "name": "name" + } + }`), + hooks: []ResourceHook{ + { + Name: "hook1", + Pre: []velerov1api.BackupResourceHook{ + { + Exec: &velerov1api.ExecHook{ + Container: "1a", + Command: []string{"pre-1a"}, + }, + }, + { + Exec: &velerov1api.ExecHook{ + Container: "1b", + Command: []string{"pre-1b"}, + }, + }, + }, + }, + }, + }, + }, + }, + } + + for _, test := range test1 { + t.Run(test.name, func(t *testing.T) { + podCommandExecutor := &velerotest.MockPodCommandExecutor{} + defer podCommandExecutor.AssertExpectations(t) + + h := &DefaultItemHookHandler{ + PodCommandExecutor: podCommandExecutor, + } + + groupResource := schema.ParseGroupResource(test.groupResource) + hookTracker := test.hookTracker + + for _, pod := range test.pods { + if pod.expectedPodHook != nil { + podCommandExecutor.On("ExecutePodCommand", mock.Anything, pod.item.UnstructuredContent(), "ns", "name", "", pod.expectedPodHook).Return(pod.expectedPodHookError) + } else { + hookLoop: + for _, resourceHook := range pod.hooks { + for _, hook := range resourceHook.Pre { + hookError := pod.hookErrorsByContainer[hook.Exec.Container] + podCommandExecutor.On("ExecutePodCommand", mock.Anything, pod.item.UnstructuredContent(), "ns", "name", resourceHook.Name, hook.Exec).Return(hookError) + if hookError != nil && hook.Exec.OnError == velerov1api.HookErrorModeFail { + break hookLoop + } + } + for _, hook := range resourceHook.Post { + hookError := pod.hookErrorsByContainer[hook.Exec.Container] + podCommandExecutor.On("ExecutePodCommand", mock.Anything, pod.item.UnstructuredContent(), "ns", "name", resourceHook.Name, hook.Exec).Return(hookError) + if hookError != nil && hook.Exec.OnError == 
velerov1api.HookErrorModeFail { + break hookLoop + } + } + } + } + h.HandleHooks(velerotest.NewLogger(), groupResource, pod.item, pod.hooks, test.phase, hookTracker) + + } + actualAtemptted, actualFailed := hookTracker.Stat() + assert.Equal(t, test.expectedHookAttempted, actualAtemptted) + assert.Equal(t, test.expectedHookFailed, actualFailed) + }) + } + +} + +func TestRestoreHookTrackerAdd(t *testing.T) { + testCases := []struct { + name string + resourceRestoreHooks []ResourceRestoreHook + pod *corev1api.Pod + hookTracker *HookTracker + expectedCnt int + }{ + { + name: "neither spec hooks nor annotations hooks are set", + resourceRestoreHooks: nil, + pod: builder.ForPod("default", "my-pod").Result(), + hookTracker: NewHookTracker(), + expectedCnt: 0, + }, + { + name: "a hook specified in pod annotation", + resourceRestoreHooks: nil, + pod: builder.ForPod("default", "my-pod"). + ObjectMeta(builder.WithAnnotations( + podRestoreHookCommandAnnotationKey, "/usr/bin/foo", + podRestoreHookContainerAnnotationKey, "container1", + podRestoreHookOnErrorAnnotationKey, string(velerov1api.HookErrorModeContinue), + podRestoreHookTimeoutAnnotationKey, "1s", + podRestoreHookWaitTimeoutAnnotationKey, "1m", + podRestoreHookWaitForReadyAnnotationKey, "true", + )). + Containers(&corev1api.Container{ + Name: "container1", + }). 
+ Result(), + hookTracker: NewHookTracker(), + expectedCnt: 1, + }, + { + name: "two hooks specified in restore spec", + resourceRestoreHooks: []ResourceRestoreHook{ + { + Name: "hook1", + Selector: ResourceHookSelector{}, + RestoreHooks: []velerov1api.RestoreResourceHook{ + { + Exec: &velerov1api.ExecRestoreHook{ + Container: "container1", + Command: []string{"/usr/bin/foo"}, + OnError: velerov1api.HookErrorModeContinue, + ExecTimeout: metav1.Duration{Duration: time.Second}, + WaitTimeout: metav1.Duration{Duration: time.Minute}, + }, + }, + { + Exec: &velerov1api.ExecRestoreHook{ + Container: "container2", + Command: []string{"/usr/bin/foo"}, + OnError: velerov1api.HookErrorModeContinue, + ExecTimeout: metav1.Duration{Duration: time.Second}, + WaitTimeout: metav1.Duration{Duration: time.Minute}, + }, + }, + }, + }, + }, + pod: builder.ForPod("default", "my-pod"). + Containers(&corev1api.Container{ + Name: "container1", + }, &corev1api.Container{ + Name: "container2", + }). + Result(), + hookTracker: NewHookTracker(), + expectedCnt: 2, + }, + { + name: "both spec hooks and annotations hooks are set", + resourceRestoreHooks: []ResourceRestoreHook{ + { + Name: "hook1", + Selector: ResourceHookSelector{}, + RestoreHooks: []velerov1api.RestoreResourceHook{ + { + Exec: &velerov1api.ExecRestoreHook{ + Container: "container1", + Command: []string{"/usr/bin/foo2"}, + OnError: velerov1api.HookErrorModeContinue, + ExecTimeout: metav1.Duration{Duration: time.Second}, + WaitTimeout: metav1.Duration{Duration: time.Minute}, + }, + }, + }, + }, + }, + pod: builder.ForPod("default", "my-pod"). + ObjectMeta(builder.WithAnnotations( + podRestoreHookCommandAnnotationKey, "/usr/bin/foo", + podRestoreHookContainerAnnotationKey, "container1", + podRestoreHookOnErrorAnnotationKey, string(velerov1api.HookErrorModeContinue), + podRestoreHookTimeoutAnnotationKey, "1s", + podRestoreHookWaitTimeoutAnnotationKey, "1m", + podRestoreHookWaitForReadyAnnotationKey, "true", + )). 
+ Containers(&corev1api.Container{ + Name: "container1", + }). + Result(), + hookTracker: NewHookTracker(), + expectedCnt: 1, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + _, _ = GroupRestoreExecHooks(tc.resourceRestoreHooks, tc.pod, velerotest.NewLogger(), tc.hookTracker) + tracker := tc.hookTracker.GetTracker() + assert.Equal(t, tc.expectedCnt, len(tracker)) + }) + } +} diff --git a/internal/hook/wait_exec_hook_handler.go b/internal/hook/wait_exec_hook_handler.go index 04ad967a75..452b8c421c 100644 --- a/internal/hook/wait_exec_hook_handler.go +++ b/internal/hook/wait_exec_hook_handler.go @@ -39,6 +39,7 @@ type WaitExecHookHandler interface { log logrus.FieldLogger, pod *v1.Pod, byContainer map[string][]PodExecRestoreHook, + hookTrack *HookTracker, ) []error } @@ -73,6 +74,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks( log logrus.FieldLogger, pod *v1.Pod, byContainer map[string][]PodExecRestoreHook, + hookTracker *HookTracker, ) []error { if pod == nil { return nil @@ -164,6 +166,8 @@ func (e *DefaultWaitExecHookHandler) HandleHooks( err := fmt.Errorf("hook %s in container %s expired before executing", hook.HookName, hook.Hook.Container) hookLog.Error(err) errors = append(errors, err) + hookTracker.Record(newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, hookPhase(""), true) + if hook.Hook.OnError == velerov1api.HookErrorModeFail { cancel() return @@ -175,10 +179,13 @@ func (e *DefaultWaitExecHookHandler) HandleHooks( OnError: hook.Hook.OnError, Timeout: hook.Hook.ExecTimeout, } + hookTracker.Record(newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, hookPhase(""), false) if err := e.PodCommandExecutor.ExecutePodCommand(hookLog, podMap, pod.Namespace, pod.Name, hook.HookName, eh); err != nil { hookLog.WithError(err).Error("Error executing hook") err = fmt.Errorf("hook %s in container %s failed to execute, err: %v", hook.HookName, hook.Hook.Container, 
err) errors = append(errors, err) + hookTracker.Record(newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, hookPhase(""), true) + if hook.Hook.OnError == velerov1api.HookErrorModeFail { cancel() return @@ -226,6 +233,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks( "hookPhase": "post", }, ) + hookTracker.Record(pod.Namespace, pod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, hookPhase(""), true) hookLog.Error(err) errors = append(errors, err) } diff --git a/internal/hook/wait_exec_hook_handler_test.go b/internal/hook/wait_exec_hook_handler_test.go index 3e809ccfa5..fe632d1138 100644 --- a/internal/hook/wait_exec_hook_handler_test.go +++ b/internal/hook/wait_exec_hook_handler_test.go @@ -98,7 +98,7 @@ func TestWaitExecHandleHooks(t *testing.T) { "container1": { { HookName: "", - HookSource: "annotation", + HookSource: HookSourceAnnotation, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/foo"}, @@ -167,7 +167,7 @@ func TestWaitExecHandleHooks(t *testing.T) { "container1": { { HookName: "", - HookSource: "annotation", + HookSource: HookSourceAnnotation, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/foo"}, @@ -236,7 +236,7 @@ func TestWaitExecHandleHooks(t *testing.T) { "container1": { { HookName: "", - HookSource: "annotation", + HookSource: HookSourceAnnotation, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/foo"}, @@ -305,7 +305,7 @@ func TestWaitExecHandleHooks(t *testing.T) { "container1": { { HookName: "", - HookSource: "annotation", + HookSource: HookSourceAnnotation, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/foo"}, @@ -391,7 +391,7 @@ func TestWaitExecHandleHooks(t *testing.T) { "container1": { { HookName: "my-hook-1", - HookSource: "backupSpec", + HookSource: HookSourceSpec, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: 
[]string{"/usr/bin/foo"}, @@ -440,7 +440,7 @@ func TestWaitExecHandleHooks(t *testing.T) { "container1": { { HookName: "my-hook-1", - HookSource: "backupSpec", + HookSource: HookSourceSpec, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/foo"}, @@ -471,7 +471,7 @@ func TestWaitExecHandleHooks(t *testing.T) { "container1": { { HookName: "my-hook-1", - HookSource: "backupSpec", + HookSource: HookSourceSpec, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/foo"}, @@ -502,7 +502,7 @@ func TestWaitExecHandleHooks(t *testing.T) { "container1": { { HookName: "my-hook-1", - HookSource: "backupSpec", + HookSource: HookSourceSpec, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/foo"}, @@ -533,7 +533,7 @@ func TestWaitExecHandleHooks(t *testing.T) { "container1": { { HookName: "my-hook-1", - HookSource: "backupSpec", + HookSource: HookSourceSpec, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/foo"}, @@ -574,7 +574,7 @@ func TestWaitExecHandleHooks(t *testing.T) { "container1": { { HookName: "my-hook-1", - HookSource: "backupSpec", + HookSource: HookSourceSpec, Hook: velerov1api.ExecRestoreHook{ Container: "container1", Command: []string{"/usr/bin/foo"}, @@ -584,7 +584,7 @@ func TestWaitExecHandleHooks(t *testing.T) { "container2": { { HookName: "my-hook-1", - HookSource: "backupSpec", + HookSource: HookSourceSpec, Hook: velerov1api.ExecRestoreHook{ Container: "container2", Command: []string{"/usr/bin/bar"}, @@ -744,7 +744,8 @@ func TestWaitExecHandleHooks(t *testing.T) { defer ctxCancel() } - errs := h.HandleHooks(ctx, velerotest.NewLogger(), test.initialPod, test.byContainer) + hookTracker := NewHookTracker() + errs := h.HandleHooks(ctx, velerotest.NewLogger(), test.initialPod, test.byContainer, hookTracker) // for i, ee := range test.expectedErrors { require.Len(t, errs, len(test.expectedErrors)) @@ -997,3 +998,253 
@@ func TestMaxHookWait(t *testing.T) { }) } } + +func TestRestoreHookTrackerUpdate(t *testing.T) { + type change struct { + // delta to wait since last change applied or pod added + wait time.Duration + updated *v1.Pod + } + type expectedExecution struct { + hook *velerov1api.ExecHook + name string + error error + pod *v1.Pod + } + + hookTracker1 := NewHookTracker() + hookTracker1.Add("default", "my-pod", "container1", HookSourceAnnotation, "", hookPhase("")) + + hookTracker2 := NewHookTracker() + hookTracker2.Add("default", "my-pod", "container1", HookSourceSpec, "my-hook-1", hookPhase("")) + + hookTracker3 := NewHookTracker() + hookTracker3.Add("default", "my-pod", "container1", HookSourceSpec, "my-hook-1", hookPhase("")) + hookTracker3.Add("default", "my-pod", "container2", HookSourceSpec, "my-hook-2", hookPhase("")) + + tests1 := []struct { + name string + initialPod *v1.Pod + groupResource string + byContainer map[string][]PodExecRestoreHook + expectedExecutions []expectedExecution + hookTracker *HookTracker + expectedFailed int + }{ + { + name: "a hook executes successfully", + initialPod: builder.ForPod("default", "my-pod"). + ObjectMeta(builder.WithAnnotations( + podRestoreHookCommandAnnotationKey, "/usr/bin/foo", + podRestoreHookContainerAnnotationKey, "container1", + podRestoreHookOnErrorAnnotationKey, string(velerov1api.HookErrorModeContinue), + podRestoreHookTimeoutAnnotationKey, "1s", + podRestoreHookWaitTimeoutAnnotationKey, "1m", + )). + Containers(&v1.Container{ + Name: "container1", + }). + ContainerStatuses(&v1.ContainerStatus{ + Name: "container1", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }). 
+ Result(), + groupResource: "pods", + byContainer: map[string][]PodExecRestoreHook{ + "container1": { + { + HookName: "", + HookSource: HookSourceAnnotation, + Hook: velerov1api.ExecRestoreHook{ + Container: "container1", + Command: []string{"/usr/bin/foo"}, + OnError: velerov1api.HookErrorModeContinue, + ExecTimeout: metav1.Duration{Duration: time.Second}, + WaitTimeout: metav1.Duration{Duration: time.Minute}, + }, + }, + }, + }, + expectedExecutions: []expectedExecution{ + { + name: "", + hook: &velerov1api.ExecHook{ + Container: "container1", + Command: []string{"/usr/bin/foo"}, + OnError: velerov1api.HookErrorModeContinue, + Timeout: metav1.Duration{Duration: time.Second}, + }, + error: nil, + pod: builder.ForPod("default", "my-pod"). + ObjectMeta(builder.WithResourceVersion("1")). + ObjectMeta(builder.WithAnnotations( + podRestoreHookCommandAnnotationKey, "/usr/bin/foo", + podRestoreHookContainerAnnotationKey, "container1", + podRestoreHookOnErrorAnnotationKey, string(velerov1api.HookErrorModeContinue), + podRestoreHookTimeoutAnnotationKey, "1s", + podRestoreHookWaitTimeoutAnnotationKey, "1m", + )). + Containers(&v1.Container{ + Name: "container1", + }). + ContainerStatuses(&v1.ContainerStatus{ + Name: "container1", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }). + Result(), + }, + }, + hookTracker: hookTracker1, + expectedFailed: 0, + }, + { + name: "a hook with OnError mode Fail failed to execute", + groupResource: "pods", + initialPod: builder.ForPod("default", "my-pod"). + Containers(&v1.Container{ + Name: "container1", + }). + ContainerStatuses(&v1.ContainerStatus{ + Name: "container1", + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{}, + }, + }). 
+ Result(), + byContainer: map[string][]PodExecRestoreHook{ + "container1": { + { + HookName: "my-hook-1", + HookSource: HookSourceSpec, + Hook: velerov1api.ExecRestoreHook{ + Container: "container1", + Command: []string{"/usr/bin/foo"}, + OnError: velerov1api.HookErrorModeFail, + WaitTimeout: metav1.Duration{Duration: time.Millisecond}, + }, + }, + }, + }, + hookTracker: hookTracker2, + expectedFailed: 1, + }, + { + name: "a hook with OnError mode Continue failed to execute", + groupResource: "pods", + initialPod: builder.ForPod("default", "my-pod"). + Containers(&v1.Container{ + Name: "container1", + }). + ContainerStatuses(&v1.ContainerStatus{ + Name: "container1", + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{}, + }, + }). + Result(), + byContainer: map[string][]PodExecRestoreHook{ + "container1": { + { + HookName: "my-hook-1", + HookSource: HookSourceSpec, + Hook: velerov1api.ExecRestoreHook{ + Container: "container1", + Command: []string{"/usr/bin/foo"}, + OnError: velerov1api.HookErrorModeContinue, + WaitTimeout: metav1.Duration{Duration: time.Millisecond}, + }, + }, + }, + }, + hookTracker: hookTracker2, + expectedFailed: 1, + }, + { + name: "two hooks with OnError mode Continue failed to execute", + groupResource: "pods", + initialPod: builder.ForPod("default", "my-pod"). + Containers(&v1.Container{ + Name: "container1", + }). + Containers(&v1.Container{ + Name: "container2", + }). + // initially both are waiting + ContainerStatuses(&v1.ContainerStatus{ + Name: "container1", + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{}, + }, + }). + ContainerStatuses(&v1.ContainerStatus{ + Name: "container2", + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{}, + }, + }). 
+ Result(), + byContainer: map[string][]PodExecRestoreHook{ + "container1": { + { + HookName: "my-hook-1", + HookSource: HookSourceSpec, + Hook: velerov1api.ExecRestoreHook{ + Container: "container1", + Command: []string{"/usr/bin/foo"}, + OnError: velerov1api.HookErrorModeContinue, + WaitTimeout: metav1.Duration{Duration: time.Millisecond}, + }, + }, + }, + "container2": { + { + HookName: "my-hook-2", + HookSource: HookSourceSpec, + Hook: velerov1api.ExecRestoreHook{ + Container: "container2", + Command: []string{"/usr/bin/bar"}, + OnError: velerov1api.HookErrorModeContinue, + WaitTimeout: metav1.Duration{Duration: time.Millisecond}, + }, + }, + }, + }, + hookTracker: hookTracker3, + expectedFailed: 2, + }, + } + + for _, test := range tests1 { + t.Run(test.name, func(t *testing.T) { + + source := fcache.NewFakeControllerSource() + go func() { + // This is the state of the pod that will be seen by the AddFunc handler. + source.Add(test.initialPod) + }() + + podCommandExecutor := &velerotest.MockPodCommandExecutor{} + defer podCommandExecutor.AssertExpectations(t) + + h := &DefaultWaitExecHookHandler{ + PodCommandExecutor: podCommandExecutor, + ListWatchFactory: &fakeListWatchFactory{source}, + } + + for _, e := range test.expectedExecutions { + obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(e.pod) + assert.Nil(t, err) + podCommandExecutor.On("ExecutePodCommand", mock.Anything, obj, e.pod.Namespace, e.pod.Name, e.name, e.hook).Return(e.error) + } + + ctx := context.Background() + _ = h.HandleHooks(ctx, velerotest.NewLogger(), test.initialPod, test.byContainer, test.hookTracker) + _, actualFailed := test.hookTracker.Stat() + assert.Equal(t, test.expectedFailed, actualFailed) + }) + } +} diff --git a/pkg/apis/velero/v1/backup_types.go b/pkg/apis/velero/v1/backup_types.go index 1f03b89d87..41ca6c4374 100644 --- a/pkg/apis/velero/v1/backup_types.go +++ b/pkg/apis/velero/v1/backup_types.go @@ -176,13 +176,13 @@ type BackupSpec struct { // +optional 
DataMover string `json:"datamover,omitempty"` - // BackupConfig defines the configuration for the backup. + // UploaderConfig specifies the configuration for the uploader. // +optional - BackupConfig *BackupConfig `json:"backupConfig,omitempty"` + UploaderConfigForBackup *UploaderConfigForBackup `json:"uploaderConfig,omitempty"` } -// BackupConfig defines the configuration for the backup. -type BackupConfig struct { +// UploaderConfigForBackup defines the configuration for the backup. +type UploaderConfigForBackup struct { // ParallelFilesUpload is the number of files parallel uploads to perform when using the uploader. // +optional ParallelFilesUpload int `json:"parallelFilesUpload,omitempty"` @@ -445,6 +445,11 @@ type BackupStatus struct { // BackupItemAction operations for this backup which ended with an error. // +optional BackupItemOperationsFailed int `json:"backupItemOperationsFailed,omitempty"` + + // HookStatus contains information about the status of the hooks. + // +optional + // +nullable + HookStatus *HookStatus `json:"hookStatus,omitempty"` } // BackupProgress stores information about the progress of a Backup's execution. @@ -462,6 +467,19 @@ type BackupProgress struct { ItemsBackedUp int `json:"itemsBackedUp,omitempty"` } +// HookStatus stores information about the status of the hooks. +type HookStatus struct { + // HooksAttempted is the total number of attempted hooks + // Specifically, HooksAttempted represents the number of hooks that failed to execute + // and the number of hooks that executed successfully. 
+ // +optional + HooksAttempted int `json:"hooksAttempted,omitempty"` + + // HooksFailed is the total number of hooks which ended with an error + // +optional + HooksFailed int `json:"hooksFailed,omitempty"` +} + // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true diff --git a/pkg/apis/velero/v1/pod_volume_backup_types.go b/pkg/apis/velero/v1/pod_volume_backup_types.go index affcfaa3bc..cb43c55aa3 100644 --- a/pkg/apis/velero/v1/pod_volume_backup_types.go +++ b/pkg/apis/velero/v1/pod_volume_backup_types.go @@ -55,7 +55,7 @@ type PodVolumeBackupSpec struct { // UploaderSettings are a map of key-value pairs that should be applied to the // uploader configuration. // +optional - UploaderSettings map[string]string `json:"uploaderSettings,omitempty"` + UploaderSettings *map[string]string `json:"uploaderSettings,omitempty"` } // PodVolumeBackupPhase represents the lifecycle phase of a PodVolumeBackup. diff --git a/pkg/apis/velero/v1/pod_volume_restore_type.go b/pkg/apis/velero/v1/pod_volume_restore_type.go index 08d518ddc3..b864d627c7 100644 --- a/pkg/apis/velero/v1/pod_volume_restore_type.go +++ b/pkg/apis/velero/v1/pod_volume_restore_type.go @@ -52,7 +52,7 @@ type PodVolumeRestoreSpec struct { // UploaderSettings are a map of key-value pairs that should be applied to the // uploader configuration. // +optional - UploaderSettings map[string]string `json:"uploaderSettings,omitempty"` + UploaderSettings *map[string]string `json:"uploaderSettings,omitempty"` } // PodVolumeRestorePhase represents the lifecycle phase of a PodVolumeRestore. 
diff --git a/pkg/apis/velero/v1/restore_types.go b/pkg/apis/velero/v1/restore_types.go index a811fe26e2..06684238ac 100644 --- a/pkg/apis/velero/v1/restore_types.go +++ b/pkg/apis/velero/v1/restore_types.go @@ -124,13 +124,13 @@ type RestoreSpec struct { // +nullable ResourceModifier *v1.TypedLocalObjectReference `json:"resourceModifier,omitempty"` - // RestoreConfig specifies the configuration for the restore. + // UploaderConfig specifies the configuration for the restore. // +optional - RestoreConfig *RestoreConfig `json:"restoreConfig,omitempty"` + UploaderConfigForRestore *UploaderConfigForRestore `json:"uploaderConfig,omitempty"` } -// RestoreConfig defines the configuration for the restore. -type RestoreConfig struct { +// UploaderConfigForRestore defines the configuration for the restore. +type UploaderConfigForRestore struct { // WriteSparseFiles is a flag to indicate whether write files sparsely or not. // +optional WriteSparseFiles bool `json:"writeSparseFiles,omitempty"` @@ -356,6 +356,11 @@ type RestoreStatus struct { // RestoreItemAction operations for this restore which ended with an error. // +optional RestoreItemOperationsFailed int `json:"restoreItemOperationsFailed,omitempty"` + + // HookStatus contains information about the status of the hooks. + // +optional + // +nullable + HookStatus *HookStatus `json:"hookStatus,omitempty"` } // RestoreProgress stores information about the restore's execution progress diff --git a/pkg/apis/velero/v1/zz_generated.deepcopy.go b/pkg/apis/velero/v1/zz_generated.deepcopy.go index ca86bb05c4..42c2e9a0a8 100644 --- a/pkg/apis/velero/v1/zz_generated.deepcopy.go +++ b/pkg/apis/velero/v1/zz_generated.deepcopy.go @@ -38,21 +38,6 @@ func (in *Backup) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *BackupConfig) DeepCopyInto(out *BackupConfig) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupConfig. -func (in *BackupConfig) DeepCopy() *BackupConfig { - if in == nil { - return nil - } - out := new(BackupConfig) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BackupHooks) DeepCopyInto(out *BackupHooks) { *out = *in @@ -396,9 +381,9 @@ func (in *BackupSpec) DeepCopyInto(out *BackupSpec) { *out = new(bool) **out = **in } - if in.BackupConfig != nil { - in, out := &in.BackupConfig, &out.BackupConfig - *out = new(BackupConfig) + if in.UploaderConfigForBackup != nil { + in, out := &in.UploaderConfigForBackup, &out.UploaderConfigForBackup + *out = new(UploaderConfigForBackup) **out = **in } } @@ -438,6 +423,11 @@ func (in *BackupStatus) DeepCopyInto(out *BackupStatus) { *out = new(BackupProgress) **out = **in } + if in.HookStatus != nil { + in, out := &in.HookStatus, &out.HookStatus + *out = new(HookStatus) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStatus. @@ -821,6 +811,21 @@ func (in *ExecRestoreHook) DeepCopy() *ExecRestoreHook { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HookStatus) DeepCopyInto(out *HookStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HookStatus. +func (in *HookStatus) DeepCopy() *HookStatus { + if in == nil { + return nil + } + out := new(HookStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *InitRestoreHook) DeepCopyInto(out *InitRestoreHook) { *out = *in @@ -973,9 +978,13 @@ func (in *PodVolumeBackupSpec) DeepCopyInto(out *PodVolumeBackupSpec) { } if in.UploaderSettings != nil { in, out := &in.UploaderSettings, &out.UploaderSettings - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val + *out = new(map[string]string) + if **in != nil { + in, out := *in, *out + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } } } } @@ -1079,9 +1088,13 @@ func (in *PodVolumeRestoreSpec) DeepCopyInto(out *PodVolumeRestoreSpec) { out.Pod = in.Pod if in.UploaderSettings != nil { in, out := &in.UploaderSettings, &out.UploaderSettings - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val + *out = new(map[string]string) + if **in != nil { + in, out := *in, *out + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } } } } @@ -1147,21 +1160,6 @@ func (in *Restore) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RestoreConfig) DeepCopyInto(out *RestoreConfig) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreConfig. -func (in *RestoreConfig) DeepCopy() *RestoreConfig { - if in == nil { - return nil - } - out := new(RestoreConfig) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *RestoreHooks) DeepCopyInto(out *RestoreHooks) { *out = *in @@ -1376,9 +1374,9 @@ func (in *RestoreSpec) DeepCopyInto(out *RestoreSpec) { *out = new(corev1.TypedLocalObjectReference) (*in).DeepCopyInto(*out) } - if in.RestoreConfig != nil { - in, out := &in.RestoreConfig, &out.RestoreConfig - *out = new(RestoreConfig) + if in.UploaderConfigForRestore != nil { + in, out := &in.UploaderConfigForRestore, &out.UploaderConfigForRestore + *out = new(UploaderConfigForRestore) **out = **in } } @@ -1414,6 +1412,11 @@ func (in *RestoreStatus) DeepCopyInto(out *RestoreStatus) { *out = new(RestoreProgress) **out = **in } + if in.HookStatus != nil { + in, out := &in.HookStatus, &out.HookStatus + *out = new(HookStatus) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreStatus. @@ -1673,6 +1676,36 @@ func (in *StorageType) DeepCopy() *StorageType { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UploaderConfigForBackup) DeepCopyInto(out *UploaderConfigForBackup) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UploaderConfigForBackup. +func (in *UploaderConfigForBackup) DeepCopy() *UploaderConfigForBackup { + if in == nil { + return nil + } + out := new(UploaderConfigForBackup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UploaderConfigForRestore) DeepCopyInto(out *UploaderConfigForRestore) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UploaderConfigForRestore. 
+func (in *UploaderConfigForRestore) DeepCopy() *UploaderConfigForRestore { + if in == nil { + return nil + } + out := new(UploaderConfigForRestore) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeSnapshotLocation) DeepCopyInto(out *VolumeSnapshotLocation) { *out = *in diff --git a/pkg/backup/backup.go b/pkg/backup/backup.go index ed68862365..70fbf01379 100644 --- a/pkg/backup/backup.go +++ b/pkg/backup/backup.go @@ -302,6 +302,7 @@ func (kb *kubernetesBackupper) BackupWithResolvers(log logrus.FieldLogger, itemHookHandler: &hook.DefaultItemHookHandler{ PodCommandExecutor: kb.podCommandExecutor, }, + hookTracker: hook.NewHookTracker(), } // helper struct to send current progress between the main @@ -427,8 +428,15 @@ func (kb *kubernetesBackupper) BackupWithResolvers(log logrus.FieldLogger, updated.Status.Progress.TotalItems = len(backupRequest.BackedUpItems) updated.Status.Progress.ItemsBackedUp = len(backupRequest.BackedUpItems) + // update the hooks execution status + if updated.Status.HookStatus == nil { + updated.Status.HookStatus = &velerov1api.HookStatus{} + } + updated.Status.HookStatus.HooksAttempted, updated.Status.HookStatus.HooksFailed = itemBackupper.hookTracker.Stat() + log.Infof("hookTracker: %+v, hookAttempted: %d, hookFailed: %d", itemBackupper.hookTracker.GetTracker(), updated.Status.HookStatus.HooksAttempted, updated.Status.HookStatus.HooksFailed) + if err := kube.PatchResource(backupRequest.Backup, updated, kb.kbClient); err != nil { - log.WithError(errors.WithStack((err))).Warn("Got error trying to update backup's status.progress") + log.WithError(errors.WithStack((err))).Warn("Got error trying to update backup's status.progress and hook status") } skippedPVSummary, _ := json.Marshal(backupRequest.SkippedPVTracker.Summary()) log.Infof("Summary for skipped PVs: %s", skippedPVSummary) @@ -598,6 +606,7 @@ func (kb 
*kubernetesBackupper) FinalizeBackup(log logrus.FieldLogger, discoveryHelper: kb.discoveryHelper, itemHookHandler: &hook.NoOpItemHookHandler{}, podVolumeSnapshotTracker: newPVCSnapshotTracker(), + hookTracker: hook.NewHookTracker(), } updateFiles := make(map[string]FileForArchive) backedUpGroupResources := map[schema.GroupResource]bool{} diff --git a/pkg/backup/item_backupper.go b/pkg/backup/item_backupper.go index ae8074521b..01258a4aa3 100644 --- a/pkg/backup/item_backupper.go +++ b/pkg/backup/item_backupper.go @@ -78,6 +78,7 @@ type itemBackupper struct { itemHookHandler hook.ItemHookHandler snapshotLocationVolumeSnapshotters map[string]vsv1.VolumeSnapshotter + hookTracker *hook.HookTracker } type FileForArchive struct { @@ -184,7 +185,7 @@ func (ib *itemBackupper) backupItemInternal(logger logrus.FieldLogger, obj runti ) log.Debug("Executing pre hooks") - if err := ib.itemHookHandler.HandleHooks(log, groupResource, obj, ib.backupRequest.ResourceHooks, hook.PhasePre); err != nil { + if err := ib.itemHookHandler.HandleHooks(log, groupResource, obj, ib.backupRequest.ResourceHooks, hook.PhasePre, ib.hookTracker); err != nil { return false, itemFiles, err } if optedOut, podName := ib.podVolumeSnapshotTracker.OptedoutByPod(namespace, name); optedOut { @@ -234,7 +235,7 @@ func (ib *itemBackupper) backupItemInternal(logger logrus.FieldLogger, obj runti // if there was an error running actions, execute post hooks and return log.Debug("Executing post hooks") - if err := ib.itemHookHandler.HandleHooks(log, groupResource, obj, ib.backupRequest.ResourceHooks, hook.PhasePost); err != nil { + if err := ib.itemHookHandler.HandleHooks(log, groupResource, obj, ib.backupRequest.ResourceHooks, hook.PhasePost, ib.hookTracker); err != nil { backupErrs = append(backupErrs, err) } return false, itemFiles, kubeerrs.NewAggregate(backupErrs) @@ -293,7 +294,7 @@ func (ib *itemBackupper) backupItemInternal(logger logrus.FieldLogger, obj runti } log.Debug("Executing post hooks") - if err := 
ib.itemHookHandler.HandleHooks(log, groupResource, obj, ib.backupRequest.ResourceHooks, hook.PhasePost); err != nil { + if err := ib.itemHookHandler.HandleHooks(log, groupResource, obj, ib.backupRequest.ResourceHooks, hook.PhasePost, ib.hookTracker); err != nil { backupErrs = append(backupErrs, err) } diff --git a/pkg/builder/backup_builder.go b/pkg/builder/backup_builder.go index 4c4a1128dc..daa356222b 100644 --- a/pkg/builder/backup_builder.go +++ b/pkg/builder/backup_builder.go @@ -302,10 +302,10 @@ func (b *BackupBuilder) DataMover(name string) *BackupBuilder { // ParallelFilesUpload sets the Backup's uploader parallel uploads func (b *BackupBuilder) ParallelFilesUpload(parallel int) *BackupBuilder { - if b.object.Spec.BackupConfig == nil { - b.object.Spec.BackupConfig = &velerov1api.BackupConfig{} + if b.object.Spec.UploaderConfigForBackup == nil { + b.object.Spec.UploaderConfigForBackup = &velerov1api.UploaderConfigForBackup{} } - b.object.Spec.BackupConfig.ParallelFilesUpload = parallel + b.object.Spec.UploaderConfigForBackup.ParallelFilesUpload = parallel return b } diff --git a/pkg/builder/restore_builder.go b/pkg/builder/restore_builder.go index b286af4b4d..405df89da5 100644 --- a/pkg/builder/restore_builder.go +++ b/pkg/builder/restore_builder.go @@ -174,6 +174,6 @@ func (b *RestoreBuilder) ItemOperationTimeout(timeout time.Duration) *RestoreBui // WriteSparseFiles sets the Restore's uploader write sparse files func (b *RestoreBuilder) WriteSparseFiles(val bool) *RestoreBuilder { - b.object.Spec.RestoreConfig.WriteSparseFiles = val + b.object.Spec.UploaderConfigForRestore.WriteSparseFiles = val return b } diff --git a/pkg/cmd/cli/install/install.go b/pkg/cmd/cli/install/install.go index 9b8d835f24..fc5784082d 100644 --- a/pkg/cmd/cli/install/install.go +++ b/pkg/cmd/cli/install/install.go @@ -73,6 +73,7 @@ type Options struct { UseVolumeSnapshots bool DefaultRepoMaintenanceFrequency time.Duration GarbageCollectionFrequency time.Duration + 
PodVolumeOperationTimeout time.Duration Plugins flag.StringArray NoDefaultBackupLocation bool CRDsOnly bool @@ -116,6 +117,7 @@ func (o *Options) BindFlags(flags *pflag.FlagSet) { flags.BoolVar(&o.Wait, "wait", o.Wait, "Wait for Velero deployment to be ready. Optional.") flags.DurationVar(&o.DefaultRepoMaintenanceFrequency, "default-repo-maintain-frequency", o.DefaultRepoMaintenanceFrequency, "How often 'maintain' is run for backup repositories by default. Optional.") flags.DurationVar(&o.GarbageCollectionFrequency, "garbage-collection-frequency", o.GarbageCollectionFrequency, "How often the garbage collection runs for expired backups.(default 1h)") + flags.DurationVar(&o.PodVolumeOperationTimeout, "pod-volume-operation-timeout", o.PodVolumeOperationTimeout, "How long to wait for pod volume operations to complete before timing out(default 4h). Optional.") flags.Var(&o.Plugins, "plugins", "Plugin container images to install into the Velero Deployment") flags.BoolVar(&o.CRDsOnly, "crds-only", o.CRDsOnly, "Only generate CustomResourceDefinition resources. Useful for updating CRDs for an existing Velero install.") flags.StringVar(&o.CACertFile, "cacert", o.CACertFile, "File containing a certificate bundle to use when verifying TLS connections to the object store. 
Optional.") @@ -209,6 +211,7 @@ func (o *Options) AsVeleroOptions() (*install.VeleroOptions, error) { VSLConfig: o.VolumeSnapshotConfig.Data(), DefaultRepoMaintenanceFrequency: o.DefaultRepoMaintenanceFrequency, GarbageCollectionFrequency: o.GarbageCollectionFrequency, + PodVolumeOperationTimeout: o.PodVolumeOperationTimeout, Plugins: o.Plugins, NoDefaultBackupLocation: o.NoDefaultBackupLocation, CACertData: caCertData, @@ -426,5 +429,9 @@ func (o *Options) Validate(c *cobra.Command, args []string, f client.Factory) er return errors.New("--garbage-collection-frequency must be non-negative") } + if o.PodVolumeOperationTimeout < 0 { + return errors.New("--pod-volume-operation-timeout must be non-negative") + } + return nil } diff --git a/pkg/cmd/cli/nodeagent/server.go b/pkg/cmd/cli/nodeagent/server.go index 0881d194ff..84e5612703 100644 --- a/pkg/cmd/cli/nodeagent/server.go +++ b/pkg/cmd/cli/nodeagent/server.go @@ -194,7 +194,6 @@ func newNodeAgentServer(logger logrus.FieldLogger, factory client.Factory, confi Scheme: scheme, NewCache: cache.BuilderWithOptions(cacheOption), }) - if err != nil { cancelFunc() return nil, err @@ -286,13 +285,13 @@ func (s *nodeAgentServer) run() { } dataUploadReconciler := controller.NewDataUploadReconciler(s.mgr.GetClient(), s.kubeClient, s.csiSnapshotClient.SnapshotV1(), s.dataPathMgr, repoEnsurer, clock.RealClock{}, credentialGetter, s.nodeName, s.fileSystem, s.config.dataMoverPrepareTimeout, s.logger, s.metrics) - s.markDataUploadsCancel(dataUploadReconciler) + s.attemptDataUploadResume(dataUploadReconciler) if err = dataUploadReconciler.SetupWithManager(s.mgr); err != nil { s.logger.WithError(err).Fatal("Unable to create the data upload controller") } dataDownloadReconciler := controller.NewDataDownloadReconciler(s.mgr.GetClient(), s.kubeClient, s.dataPathMgr, repoEnsurer, credentialGetter, s.nodeName, s.config.dataMoverPrepareTimeout, s.logger, s.metrics) - s.markDataDownloadsCancel(dataDownloadReconciler) + 
s.attemptDataDownloadResume(dataDownloadReconciler) if err = dataDownloadReconciler.SetupWithManager(s.mgr); err != nil { s.logger.WithError(err).Fatal("Unable to create the data download controller") } @@ -366,71 +365,34 @@ func (s *nodeAgentServer) markInProgressCRsFailed() { s.markInProgressPVRsFailed(client) } -func (s *nodeAgentServer) markDataUploadsCancel(r *controller.DataUploadReconciler) { +func (s *nodeAgentServer) attemptDataUploadResume(r *controller.DataUploadReconciler) { // the function is called before starting the controller manager, the embedded client isn't ready to use, so create a new one here client, err := ctrlclient.New(s.mgr.GetConfig(), ctrlclient.Options{Scheme: s.mgr.GetScheme()}) if err != nil { s.logger.WithError(errors.WithStack(err)).Error("failed to create client") return } - if dataUploads, err := r.FindDataUploads(s.ctx, client, s.namespace); err != nil { - s.logger.WithError(errors.WithStack(err)).Error("failed to find data uploads") - } else { - for i := range dataUploads { - du := dataUploads[i] - if du.Status.Phase == velerov2alpha1api.DataUploadPhaseAccepted || - du.Status.Phase == velerov2alpha1api.DataUploadPhasePrepared || - du.Status.Phase == velerov2alpha1api.DataUploadPhaseInProgress { - err = controller.UpdateDataUploadWithRetry(s.ctx, client, types.NamespacedName{Namespace: du.Namespace, Name: du.Name}, s.logger.WithField("dataupload", du.Name), - func(dataUpload *velerov2alpha1api.DataUpload) { - dataUpload.Spec.Cancel = true - dataUpload.Status.Message = fmt.Sprintf("found a dataupload with status %q during the node-agent starting, mark it as cancel", du.Status.Phase) - }) - - if err != nil { - s.logger.WithError(errors.WithStack(err)).Errorf("failed to mark dataupload %q cancel", du.GetName()) - continue - } - s.logger.WithField("dataupload", du.GetName()).Warn(du.Status.Message) - } - } + if err := r.AttemptDataUploadResume(s.ctx, client, s.logger.WithField("node", s.nodeName), s.namespace); err != nil { + 
s.logger.WithError(errors.WithStack(err)).Error("failed to attempt data upload resume") } } -func (s *nodeAgentServer) markDataDownloadsCancel(r *controller.DataDownloadReconciler) { +func (s *nodeAgentServer) attemptDataDownloadResume(r *controller.DataDownloadReconciler) { // the function is called before starting the controller manager, the embedded client isn't ready to use, so create a new one here client, err := ctrlclient.New(s.mgr.GetConfig(), ctrlclient.Options{Scheme: s.mgr.GetScheme()}) if err != nil { s.logger.WithError(errors.WithStack(err)).Error("failed to create client") return } - if dataDownloads, err := r.FindDataDownloads(s.ctx, client, s.namespace); err != nil { - s.logger.WithError(errors.WithStack(err)).Error("failed to find data downloads") - } else { - for i := range dataDownloads { - dd := dataDownloads[i] - if dd.Status.Phase == velerov2alpha1api.DataDownloadPhaseAccepted || - dd.Status.Phase == velerov2alpha1api.DataDownloadPhasePrepared || - dd.Status.Phase == velerov2alpha1api.DataDownloadPhaseInProgress { - err = controller.UpdateDataDownloadWithRetry(s.ctx, client, types.NamespacedName{Namespace: dd.Namespace, Name: dd.Name}, s.logger.WithField("datadownload", dd.Name), - func(dataDownload *velerov2alpha1api.DataDownload) { - dataDownload.Spec.Cancel = true - dataDownload.Status.Message = fmt.Sprintf("found a datadownload with status %q during the node-agent starting, mark it as cancel", dd.Status.Phase) - }) - - if err != nil { - s.logger.WithError(errors.WithStack(err)).Errorf("failed to mark datadownload %q cancel", dd.GetName()) - continue - } - s.logger.WithField("datadownload", dd.GetName()).Warn(dd.Status.Message) - } - } + + if err := r.AttemptDataDownloadResume(s.ctx, client, s.logger.WithField("node", s.nodeName), s.namespace); err != nil { + s.logger.WithError(errors.WithStack(err)).Error("failed to attempt data download resume") } } func (s *nodeAgentServer) markInProgressPVBsFailed(client ctrlclient.Client) { pvbs := 
&velerov1api.PodVolumeBackupList{} - if err := client.List(s.ctx, pvbs, &ctrlclient.MatchingFields{"metadata.namespace": s.namespace}); err != nil { + if err := client.List(s.ctx, pvbs, &ctrlclient.ListOptions{Namespace: s.namespace}); err != nil { s.logger.WithError(errors.WithStack(err)).Error("failed to list podvolumebackups") return } @@ -456,7 +418,7 @@ func (s *nodeAgentServer) markInProgressPVBsFailed(client ctrlclient.Client) { func (s *nodeAgentServer) markInProgressPVRsFailed(client ctrlclient.Client) { pvrs := &velerov1api.PodVolumeRestoreList{} - if err := client.List(s.ctx, pvrs, &ctrlclient.MatchingFields{"metadata.namespace": s.namespace}); err != nil { + if err := client.List(s.ctx, pvrs, &ctrlclient.ListOptions{Namespace: s.namespace}); err != nil { s.logger.WithError(errors.WithStack(err)).Error("failed to list podvolumerestores") return } diff --git a/pkg/cmd/cli/restore/create.go b/pkg/cmd/cli/restore/create.go index 6b12525393..3a8bb2442a 100644 --- a/pkg/cmd/cli/restore/create.go +++ b/pkg/cmd/cli/restore/create.go @@ -149,7 +149,7 @@ func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) { flags.StringVar(&o.ResourceModifierConfigMap, "resource-modifier-configmap", "", "Reference to the resource modifier configmap that restore will use") - f = flags.VarPF(&o.WriteSparseFiles, "write-sparse-files", "", "Whether to write sparse files when restoring volumes using restic or kopia") + f = flags.VarPF(&o.WriteSparseFiles, "write-sparse-files", "", "Whether to write sparse files when restoring volumes") f.NoOptDefVal = cmd.FALSE } @@ -323,7 +323,7 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error { ItemOperationTimeout: metav1.Duration{ Duration: o.ItemOperationTimeout, }, - RestoreConfig: &api.RestoreConfig{ + UploaderConfigForRestore: &api.UploaderConfigForRestore{ WriteSparseFiles: boolptr.IsSetToTrue(o.WriteSparseFiles.Value), }, }, diff --git a/pkg/cmd/server/server.go b/pkg/cmd/server/server.go index 
8842467623..ca8c49c3ce 100644 --- a/pkg/cmd/server/server.go +++ b/pkg/cmd/server/server.go @@ -34,6 +34,7 @@ import ( "github.com/spf13/cobra" corev1api "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" kubeerrs "k8s.io/apimachinery/pkg/util/errors" @@ -990,7 +991,7 @@ func markInProgressCRsFailed(ctx context.Context, cfg *rest.Config, scheme *runt func markInProgressBackupsFailed(ctx context.Context, client ctrlclient.Client, namespace string, log logrus.FieldLogger) { backups := &velerov1api.BackupList{} - if err := client.List(ctx, backups, &ctrlclient.MatchingFields{"metadata.namespace": namespace}); err != nil { + if err := client.List(ctx, backups, &ctrlclient.ListOptions{Namespace: namespace}); err != nil { log.WithError(errors.WithStack(err)).Error("failed to list backups") return } @@ -1015,7 +1016,7 @@ func markInProgressBackupsFailed(ctx context.Context, client ctrlclient.Client, func markInProgressRestoresFailed(ctx context.Context, client ctrlclient.Client, namespace string, log logrus.FieldLogger) { restores := &velerov1api.RestoreList{} - if err := client.List(ctx, restores, &ctrlclient.MatchingFields{"metadata.namespace": namespace}); err != nil { + if err := client.List(ctx, restores, &ctrlclient.ListOptions{Namespace: namespace}); err != nil { log.WithError(errors.WithStack(err)).Error("failed to list restores") return } @@ -1040,7 +1041,12 @@ func markInProgressRestoresFailed(ctx context.Context, client ctrlclient.Client, func markDataUploadsCancel(ctx context.Context, client ctrlclient.Client, backup velerov1api.Backup, log logrus.FieldLogger) { dataUploads := &velerov2alpha1api.DataUploadList{} - if err := client.List(ctx, dataUploads, &ctrlclient.MatchingFields{"metadata.namespace": backup.GetNamespace()}, &ctrlclient.MatchingLabels{velerov1api.BackupUIDLabel: string(backup.GetUID())}); err != nil { + if err := 
client.List(ctx, dataUploads, &ctrlclient.ListOptions{ + Namespace: backup.GetNamespace(), + LabelSelector: labels.Set(map[string]string{ + velerov1api.BackupUIDLabel: string(backup.GetUID()), + }).AsSelector(), + }); err != nil { log.WithError(errors.WithStack(err)).Error("failed to list dataUploads") return } @@ -1070,7 +1076,12 @@ func markDataUploadsCancel(ctx context.Context, client ctrlclient.Client, backup func markDataDownloadsCancel(ctx context.Context, client ctrlclient.Client, restore velerov1api.Restore, log logrus.FieldLogger) { dataDownloads := &velerov2alpha1api.DataDownloadList{} - if err := client.List(ctx, dataDownloads, &ctrlclient.MatchingFields{"metadata.namespace": restore.GetNamespace()}, &ctrlclient.MatchingLabels{velerov1api.RestoreUIDLabel: string(restore.GetUID())}); err != nil { + if err := client.List(ctx, dataDownloads, &ctrlclient.ListOptions{ + Namespace: restore.GetNamespace(), + LabelSelector: labels.Set(map[string]string{ + velerov1api.RestoreUIDLabel: string(restore.GetUID()), + }).AsSelector(), + }); err != nil { log.WithError(errors.WithStack(err)).Error("failed to list dataDownloads") return } diff --git a/pkg/cmd/util/output/backup_describer.go b/pkg/cmd/util/output/backup_describer.go index 8de3da75da..f9bca5dfcf 100644 --- a/pkg/cmd/util/output/backup_describer.go +++ b/pkg/cmd/util/output/backup_describer.go @@ -88,7 +88,7 @@ func DescribeBackup( DescribeResourcePolicies(d, backup.Spec.ResourcePolicy) } - if backup.Spec.BackupConfig != nil && backup.Spec.BackupConfig.ParallelFilesUpload > 0 { + if backup.Spec.UploaderConfigForBackup != nil && backup.Spec.UploaderConfigForBackup.ParallelFilesUpload > 0 { d.Println() DescribeUploaderConfig(d, backup.Spec) } @@ -138,7 +138,7 @@ func DescribeResourcePolicies(d *Describer, resPolicies *v1.TypedLocalObjectRefe // DescribeUploaderConfig describes uploader config in human-readable format func DescribeUploaderConfig(d *Describer, spec velerov1api.BackupSpec) { d.Printf("Uploader 
config:\n") - d.Printf("\tParallel files upload:\t%d\n", spec.BackupConfig.ParallelFilesUpload) + d.Printf("\tParallel files upload:\t%d\n", spec.UploaderConfigForBackup.ParallelFilesUpload) } // DescribeBackupSpec describes a backup spec in human-readable format. @@ -392,6 +392,12 @@ func DescribeBackupStatus(ctx context.Context, kbClient kbclient.Client, d *Desc } d.Printf("Velero-Native Snapshots: \n") + + if status.HookStatus != nil { + d.Println() + d.Printf("HooksAttempted:\t%d\n", status.HookStatus.HooksAttempted) + d.Printf("HooksFailed:\t%d\n", status.HookStatus.HooksFailed) + } } func describeBackupItemOperations(ctx context.Context, kbClient kbclient.Client, d *Describer, backup *velerov1api.Backup, details bool, insecureSkipTLSVerify bool, caCertPath string) { diff --git a/pkg/cmd/util/output/backup_structured_describer.go b/pkg/cmd/util/output/backup_structured_describer.go index 4a69fc057c..e7de9f776a 100644 --- a/pkg/cmd/util/output/backup_structured_describer.go +++ b/pkg/cmd/util/output/backup_structured_describer.go @@ -303,6 +303,11 @@ func DescribeBackupStatusInSF(ctx context.Context, kbClient kbclient.Client, d * backupStatusInfo["veleroNativeSnapshotsDetail"] = snapshotDetails return } + + if status.HookStatus != nil { + backupStatusInfo["hooksAttempted"] = status.HookStatus.HooksAttempted + backupStatusInfo["hooksFailed"] = status.HookStatus.HooksFailed + } } func describeBackupResourceListInSF(ctx context.Context, kbClient kbclient.Client, backupStatusInfo map[string]interface{}, backup *velerov1api.Backup, insecureSkipTLSVerify bool, caCertPath string) { diff --git a/pkg/cmd/util/output/restore_describer.go b/pkg/cmd/util/output/restore_describer.go index 00812cda5b..5d9273a705 100644 --- a/pkg/cmd/util/output/restore_describer.go +++ b/pkg/cmd/util/output/restore_describer.go @@ -177,14 +177,20 @@ func DescribeRestore(ctx context.Context, kbClient kbclient.Client, restore *vel d.Println() d.Printf("Preserve Service NodePorts:\t%s\n", 
BoolPointerString(restore.Spec.PreserveNodePorts, "false", "true", "auto")) - if restore.Spec.RestoreConfig != nil && restore.Spec.RestoreConfig.WriteSparseFiles { + if restore.Spec.UploaderConfigForRestore != nil && restore.Spec.UploaderConfigForRestore.WriteSparseFiles { d.Println() - d.Printf("Write Sparse Files:\t%T\n", restore.Spec.RestoreConfig.WriteSparseFiles) + d.Printf("Write Sparse Files:\t%T\n", restore.Spec.UploaderConfigForRestore.WriteSparseFiles) } d.Println() describeRestoreItemOperations(ctx, kbClient, d, restore, details, insecureSkipTLSVerify, caCertFile) + if restore.Status.HookStatus != nil { + d.Println() + d.Printf("HooksAttempted: \t%d\n", restore.Status.HookStatus.HooksAttempted) + d.Printf("HooksFailed: \t%d\n", restore.Status.HookStatus.HooksFailed) + } + if details { describeRestoreResourceList(ctx, kbClient, d, restore, insecureSkipTLSVerify, caCertFile) d.Println() diff --git a/pkg/cmd/util/output/schedule_describer.go b/pkg/cmd/util/output/schedule_describer.go index 70c397cc07..4c3ff70041 100644 --- a/pkg/cmd/util/output/schedule_describer.go +++ b/pkg/cmd/util/output/schedule_describer.go @@ -48,7 +48,7 @@ func DescribeSchedule(schedule *v1.Schedule) string { DescribeResourcePolicies(d, schedule.Spec.Template.ResourcePolicy) } - if schedule.Spec.Template.BackupConfig != nil && schedule.Spec.Template.BackupConfig.ParallelFilesUpload > 0 { + if schedule.Spec.Template.UploaderConfigForBackup != nil && schedule.Spec.Template.UploaderConfigForBackup.ParallelFilesUpload > 0 { d.Println() DescribeUploaderConfig(d, schedule.Spec.Template) } diff --git a/pkg/controller/data_download_controller.go b/pkg/controller/data_download_controller.go index 2358464230..a57e42fc84 100644 --- a/pkg/controller/data_download_controller.go +++ b/pkg/controller/data_download_controller.go @@ -140,7 +140,7 @@ func (r *DataDownloadReconciler) Reconcile(ctx context.Context, req ctrl.Request // to help clear up resources instead of clear them directly in case 
of some conflict with Expose action if err := UpdateDataDownloadWithRetry(ctx, r.client, req.NamespacedName, log, func(dataDownload *velerov2alpha1api.DataDownload) { dataDownload.Spec.Cancel = true - dataDownload.Status.Message = fmt.Sprintf("found a dataupload %s/%s is being deleted, mark it as cancel", dd.Namespace, dd.Name) + dataDownload.Status.Message = fmt.Sprintf("found a datadownload %s/%s is being deleted, mark it as cancel", dd.Namespace, dd.Name) }); err != nil { log.Errorf("failed to set cancel flag with error %s for %s/%s", err.Error(), dd.Namespace, dd.Name) return ctrl.Result{}, err @@ -192,7 +192,6 @@ func (r *DataDownloadReconciler) Reconcile(ctx context.Context, req ctrl.Request return r.errorOut(ctx, dd, err, "error to expose snapshot", log) } } - log.Info("Restore is exposed") // we need to get CR again for it may canceled by datadownload controller on other @@ -205,7 +204,6 @@ func (r *DataDownloadReconciler) Reconcile(ctx context.Context, req ctrl.Request } return ctrl.Result{}, errors.Wrap(err, "getting datadownload") } - // we need to clean up resources as resources created in Expose it may later than cancel action or prepare time // and need to clean up resources again if isDataDownloadInFinalState(dd) { @@ -267,7 +265,6 @@ func (r *DataDownloadReconciler) Reconcile(ctx context.Context, req ctrl.Request return r.errorOut(ctx, dd, err, "error to create data path", log) } } - // Update status to InProgress original := dd.DeepCopy() dd.Status.Phase = velerov2alpha1api.DataDownloadPhaseInProgress @@ -335,7 +332,7 @@ func (r *DataDownloadReconciler) runCancelableDataPath(ctx context.Context, fsRe } log.WithField("path", path.ByPath).Info("fs init") - if err := fsRestore.StartRestore(dd.Spec.SnapshotID, path, dd.Spec.DataMoverConfig); err != nil { + if err := fsRestore.StartRestore(dd.Spec.SnapshotID, path, &dd.Spec.DataMoverConfig); err != nil { return r.errorOut(ctx, dd, err, fmt.Sprintf("error starting data path %s restore", path.ByPath), 
log) } @@ -576,6 +573,51 @@ func (r *DataDownloadReconciler) FindDataDownloads(ctx context.Context, cli clie return dataDownloads, nil } +func (r *DataDownloadReconciler) findAcceptDataDownloadsByNodeLabel(ctx context.Context, cli client.Client, ns string) ([]velerov2alpha1api.DataDownload, error) { + dataDownloads := &velerov2alpha1api.DataDownloadList{} + if err := cli.List(ctx, dataDownloads, &client.ListOptions{Namespace: ns}); err != nil { + r.logger.WithError(errors.WithStack(err)).Error("failed to list datadownloads") + return nil, errors.Wrapf(err, "failed to list datadownloads") + } + + var result []velerov2alpha1api.DataDownload + for _, dd := range dataDownloads.Items { + if dd.Status.Phase != velerov2alpha1api.DataDownloadPhaseAccepted { + continue + } + if dd.Labels[acceptNodeLabelKey] == r.nodeName { + result = append(result, dd) + } + } + return result, nil +} + +// CancelAcceptedDataDownload will cancel the accepted data download +func (r *DataDownloadReconciler) CancelAcceptedDataDownload(ctx context.Context, cli client.Client, ns string) { + r.logger.Infof("Canceling accepted data for node %s", r.nodeName) + dataDownloads, err := r.findAcceptDataDownloadsByNodeLabel(ctx, cli, ns) + if err != nil { + r.logger.WithError(err).Error("failed to find data downloads") + return + } + + for _, dd := range dataDownloads { + if dd.Spec.Cancel { + continue + } + err = UpdateDataDownloadWithRetry(ctx, cli, types.NamespacedName{Namespace: dd.Namespace, Name: dd.Name}, + r.logger.WithField("datadownload", dd.Name), func(dataDownload *velerov2alpha1api.DataDownload) { + dataDownload.Spec.Cancel = true + dataDownload.Status.Message = fmt.Sprintf("found a datadownload with status %q during the node-agent starting, mark it as cancel", dd.Status.Phase) + }) + + r.logger.Warn(dd.Status.Message) + if err != nil { + r.logger.WithError(err).Errorf("failed to set cancel flag with error %s", err.Error()) + } + } +} + func (r *DataDownloadReconciler) prepareDataDownload(ssb 
*velerov2alpha1api.DataDownload) { ssb.Status.Phase = velerov2alpha1api.DataDownloadPhasePrepared ssb.Status.Node = r.nodeName @@ -749,3 +791,35 @@ func UpdateDataDownloadWithRetry(ctx context.Context, client client.Client, name return true, nil }) } + +func (r *DataDownloadReconciler) AttemptDataDownloadResume(ctx context.Context, cli client.Client, logger *logrus.Entry, ns string) error { + if dataDownloads, err := r.FindDataDownloads(ctx, cli, ns); err != nil { + return errors.Wrapf(err, "failed to find data downloads") + } else { + for i := range dataDownloads { + dd := dataDownloads[i] + if dd.Status.Phase == velerov2alpha1api.DataDownloadPhasePrepared { + // keep doing nothing let controller re-download the data + // the Prepared CR could be still handled by datadownload controller after node-agent restart + logger.WithField("datadownload", dd.GetName()).Debug("find a datadownload with status prepared") + } else if dd.Status.Phase == velerov2alpha1api.DataDownloadPhaseInProgress { + err = UpdateDataDownloadWithRetry(ctx, cli, types.NamespacedName{Namespace: dd.Namespace, Name: dd.Name}, logger.WithField("datadownload", dd.Name), + func(dataDownload *velerov2alpha1api.DataDownload) { + dataDownload.Spec.Cancel = true + dataDownload.Status.Message = fmt.Sprintf("found a datadownload with status %q during the node-agent starting, mark it as cancel", dd.Status.Phase) + }) + + if err != nil { + logger.WithError(errors.WithStack(err)).Errorf("failed to mark datadownload %q into canceled", dd.GetName()) + continue + } + logger.WithField("datadownload", dd.GetName()).Debug("mark datadownload into canceled") + } + } + } + + // If the data download is in Accepted status, the exposed PVC may not be created + // so we need to mark the data download as canceled for it may not be recoverable + r.CancelAcceptedDataDownload(ctx, cli, ns) + return nil +} diff --git a/pkg/controller/data_download_controller_test.go b/pkg/controller/data_download_controller_test.go index 
de9fa7516a..afdadf61d2 100644 --- a/pkg/controller/data_download_controller_test.go +++ b/pkg/controller/data_download_controller_test.go @@ -69,7 +69,7 @@ func dataDownloadBuilder() *builder.DataDownloadBuilder { } func initDataDownloadReconciler(objects []runtime.Object, needError ...bool) (*DataDownloadReconciler, error) { - var errs []error = make([]error, 5) + var errs []error = make([]error, 6) for k, isError := range needError { if k == 0 && isError { errs[0] = fmt.Errorf("Get error") @@ -81,6 +81,8 @@ func initDataDownloadReconciler(objects []runtime.Object, needError ...bool) (*D errs[3] = fmt.Errorf("Patch error") } else if k == 4 && isError { errs[4] = apierrors.NewConflict(velerov2alpha1api.Resource("datadownload"), dataDownloadName, errors.New("conflict")) + } else if k == 5 && isError { + errs[5] = fmt.Errorf("List error") } } return initDataDownloadReconcilerWithError(objects, errs...) @@ -116,6 +118,8 @@ func initDataDownloadReconcilerWithError(objects []runtime.Object, needError ... 
fakeClient.patchError = needError[3] } else if k == 4 { fakeClient.updateConflict = needError[4] + } else if k == 5 { + fakeClient.listError = needError[5] } } @@ -939,3 +943,111 @@ func TestFindDataDownloads(t *testing.T) { }) } } + +func TestAttemptDataDownloadResume(t *testing.T) { + tests := []struct { + name string + dataUploads []velerov2alpha1api.DataDownload + du *velerov2alpha1api.DataDownload + pod *corev1.Pod + needErrs []bool + acceptedDataDownloads []string + prepareddDataDownloads []string + cancelledDataDownloads []string + expectedError bool + }{ + // Test case 1: Process Accepted DataDownload + { + name: "AcceptedDataDownload", + pod: builder.ForPod(velerov1api.DefaultNamespace, dataDownloadName).Volumes(&corev1.Volume{Name: dataDownloadName}).NodeName("node-1").Labels(map[string]string{ + velerov1api.DataDownloadLabel: dataDownloadName, + }).Result(), + du: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseAccepted).Result(), + acceptedDataDownloads: []string{dataDownloadName}, + expectedError: false, + }, + // Test case 2: Cancel an Accepted DataDownload + { + name: "CancelAcceptedDataDownload", + du: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseAccepted).Result(), + }, + // Test case 3: Process Accepted Prepared DataDownload + { + name: "PreparedDataDownload", + pod: builder.ForPod(velerov1api.DefaultNamespace, dataDownloadName).Volumes(&corev1.Volume{Name: dataDownloadName}).NodeName("node-1").Labels(map[string]string{ + velerov1api.DataDownloadLabel: dataDownloadName, + }).Result(), + du: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhasePrepared).Result(), + prepareddDataDownloads: []string{dataDownloadName}, + }, + // Test case 4: Process Accepted InProgress DataDownload + { + name: "InProgressDataDownload", + pod: builder.ForPod(velerov1api.DefaultNamespace, dataDownloadName).Volumes(&corev1.Volume{Name: dataDownloadName}).NodeName("node-1").Labels(map[string]string{ + 
velerov1api.DataDownloadLabel: dataDownloadName, + }).Result(), + du: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhasePrepared).Result(), + prepareddDataDownloads: []string{dataDownloadName}, + }, + // Test case 5: get resume error + { + name: "ResumeError", + pod: builder.ForPod(velerov1api.DefaultNamespace, dataDownloadName).Volumes(&corev1.Volume{Name: dataDownloadName}).NodeName("node-1").Labels(map[string]string{ + velerov1api.DataDownloadLabel: dataDownloadName, + }).Result(), + needErrs: []bool{false, false, false, false, false, true}, + du: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhasePrepared).Result(), + expectedError: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.TODO() + r, err := initDataDownloadReconciler(nil, test.needErrs...) + r.nodeName = "node-1" + require.NoError(t, err) + defer func() { + r.client.Delete(ctx, test.du, &kbclient.DeleteOptions{}) + if test.pod != nil { + r.client.Delete(ctx, test.pod, &kbclient.DeleteOptions{}) + } + }() + + assert.NoError(t, r.client.Create(ctx, test.du)) + if test.pod != nil { + assert.NoError(t, r.client.Create(ctx, test.pod)) + } + // Run the test + err = r.AttemptDataDownloadResume(ctx, r.client, r.logger.WithField("name", test.name), test.du.Namespace) + + if test.expectedError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + + // Verify DataDownload marked as Cancelled + for _, duName := range test.cancelledDataDownloads { + dataUpload := &velerov2alpha1api.DataDownload{} + err := r.client.Get(context.Background(), types.NamespacedName{Namespace: "velero", Name: duName}, dataUpload) + require.NoError(t, err) + assert.Equal(t, velerov2alpha1api.DataDownloadPhaseCanceled, dataUpload.Status.Phase) + } + // Verify DataDownload marked as Accepted + for _, duName := range test.acceptedDataDownloads { + dataUpload := &velerov2alpha1api.DataDownload{} + err := r.client.Get(context.Background(), 
types.NamespacedName{Namespace: "velero", Name: duName}, dataUpload) + require.NoError(t, err) + assert.Equal(t, velerov2alpha1api.DataDownloadPhaseAccepted, dataUpload.Status.Phase) + } + // Verify DataDownload marked as Prepared + for _, duName := range test.prepareddDataDownloads { + dataUpload := &velerov2alpha1api.DataDownload{} + err := r.client.Get(context.Background(), types.NamespacedName{Namespace: "velero", Name: duName}, dataUpload) + require.NoError(t, err) + assert.Equal(t, velerov2alpha1api.DataDownloadPhasePrepared, dataUpload.Status.Phase) + } + } + }) + } +} diff --git a/pkg/controller/data_upload_controller.go b/pkg/controller/data_upload_controller.go index 46cfdd4935..8d7f9238ac 100644 --- a/pkg/controller/data_upload_controller.go +++ b/pkg/controller/data_upload_controller.go @@ -274,7 +274,6 @@ func (r *DataUploadReconciler) Reconcile(ctx context.Context, req ctrl.Request) return r.errorOut(ctx, du, err, "error to create data path", log) } } - // Update status to InProgress original := du.DeepCopy() du.Status.Phase = velerov2alpha1api.DataUploadPhaseInProgress @@ -344,7 +343,8 @@ func (r *DataUploadReconciler) runCancelableDataUpload(ctx context.Context, fsBa tags := map[string]string{ velerov1api.AsyncOperationIDLabel: du.Labels[velerov1api.AsyncOperationIDLabel], } - if err := fsBackup.StartBackup(path, fmt.Sprintf("%s/%s", du.Spec.SourceNamespace, du.Spec.SourcePVC), "", false, tags, *du.Spec.DataMoverConfig); err != nil { + + if err := fsBackup.StartBackup(path, fmt.Sprintf("%s/%s", du.Spec.SourceNamespace, du.Spec.SourcePVC), "", false, tags, du.Spec.DataMoverConfig); err != nil { return r.errorOut(ctx, du, err, "error starting data path backup", log) } @@ -581,7 +581,7 @@ func (r *DataUploadReconciler) findDataUploadForPod(podObj client.Object) []reco return []reconcile.Request{requests} } -func (r *DataUploadReconciler) FindDataUploads(ctx context.Context, cli client.Client, ns string) ([]velerov2alpha1api.DataUpload, error) { +func 
(r *DataUploadReconciler) FindDataUploadsByPod(ctx context.Context, cli client.Client, ns string) ([]velerov2alpha1api.DataUpload, error) { pods := &corev1.PodList{} var dataUploads []velerov2alpha1api.DataUpload if err := cli.List(ctx, pods, &client.ListOptions{Namespace: ns}); err != nil { @@ -605,6 +605,51 @@ func (r *DataUploadReconciler) FindDataUploads(ctx context.Context, cli client.C return dataUploads, nil } +func (r *DataUploadReconciler) findAcceptDataUploadsByNodeLabel(ctx context.Context, cli client.Client, ns string) ([]velerov2alpha1api.DataUpload, error) { + dataUploads := &velerov2alpha1api.DataUploadList{} + if err := cli.List(ctx, dataUploads, &client.ListOptions{Namespace: ns}); err != nil { + r.logger.WithError(errors.WithStack(err)).Error("failed to list datauploads") + return nil, errors.Wrapf(err, "failed to list datauploads") + } + + var result []velerov2alpha1api.DataUpload + for _, du := range dataUploads.Items { + if du.Status.Phase != velerov2alpha1api.DataUploadPhaseAccepted { + continue + } + if du.Labels[acceptNodeLabelKey] == r.nodeName { + result = append(result, du) + } + } + return result, nil +} + +func (r *DataUploadReconciler) CancelAcceptedDataupload(ctx context.Context, cli client.Client, ns string) { + r.logger.Infof("Reset accepted dataupload for node %s", r.nodeName) + dataUploads, err := r.findAcceptDataUploadsByNodeLabel(ctx, cli, ns) + if err != nil { + r.logger.WithError(err).Error("failed to find dataupload") + return + } + + for _, du := range dataUploads { + if du.Spec.Cancel { + continue + } + err = UpdateDataUploadWithRetry(ctx, cli, types.NamespacedName{Namespace: du.Namespace, Name: du.Name}, r.logger.WithField("dataupload", du.Name), + func(dataUpload *velerov2alpha1api.DataUpload) { + dataUpload.Spec.Cancel = true + dataUpload.Status.Message = fmt.Sprintf("found a dataupload with status %q during the node-agent starting, mark it as cancel", du.Status.Phase) + }) + + r.logger.WithField("dataupload", 
du.GetName()).Warn(du.Status.Message) + if err != nil { + r.logger.WithError(errors.WithStack(err)).Errorf("failed to mark dataupload %q cancel", du.GetName()) + continue + } + } +} + func (r *DataUploadReconciler) prepareDataUpload(du *velerov2alpha1api.DataUpload) { du.Status.Phase = velerov2alpha1api.DataUploadPhasePrepared du.Status.Node = r.nodeName @@ -833,3 +878,34 @@ func UpdateDataUploadWithRetry(ctx context.Context, client client.Client, namesp return true, nil }) } + +func (r *DataUploadReconciler) AttemptDataUploadResume(ctx context.Context, cli client.Client, logger *logrus.Entry, ns string) error { + if dataUploads, err := r.FindDataUploadsByPod(ctx, cli, ns); err != nil { + return errors.Wrap(err, "failed to find data uploads") + } else { + for _, du := range dataUploads { + if du.Status.Phase == velerov2alpha1api.DataUploadPhasePrepared { + // keep doing nothing let controller re-download the data + // the Prepared CR could be still handled by dataupload controller after node-agent restart + logger.WithField("dataupload", du.GetName()).Debug("find a dataupload with status prepared") + } else if du.Status.Phase == velerov2alpha1api.DataUploadPhaseInProgress { + err = UpdateDataUploadWithRetry(ctx, cli, types.NamespacedName{Namespace: du.Namespace, Name: du.Name}, logger.WithField("dataupload", du.Name), + func(dataUpload *velerov2alpha1api.DataUpload) { + dataUpload.Spec.Cancel = true + dataUpload.Status.Message = fmt.Sprintf("found a dataupload with status %q during the node-agent starting, mark it as cancel", du.Status.Phase) + }) + + if err != nil { + logger.WithError(errors.WithStack(err)).Errorf("failed to mark dataupload %q into canceled", du.GetName()) + continue + } + logger.WithField("dataupload", du.GetName()).Debug("mark dataupload into canceled") + } + } + } + + //If the data upload is in Accepted status, the volume snapshot may be deleted and the exposed pod may not be created + // so we need to mark the data upload as canceled for it 
may not be recoverable + r.CancelAcceptedDataupload(ctx, cli, ns) + return nil +} diff --git a/pkg/controller/data_upload_controller_test.go b/pkg/controller/data_upload_controller_test.go index 1ef10bf7a2..046dc84c15 100644 --- a/pkg/controller/data_upload_controller_test.go +++ b/pkg/controller/data_upload_controller_test.go @@ -68,6 +68,7 @@ type FakeClient struct { updateError error patchError error updateConflict error + listError error } func (c *FakeClient) Get(ctx context.Context, key kbclient.ObjectKey, obj kbclient.Object) error { @@ -106,8 +107,16 @@ func (c *FakeClient) Patch(ctx context.Context, obj kbclient.Object, patch kbcli return c.Client.Patch(ctx, obj, patch, opts...) } +func (c *FakeClient) List(ctx context.Context, list kbclient.ObjectList, opts ...kbclient.ListOption) error { + if c.listError != nil { + return c.listError + } + + return c.Client.List(ctx, list, opts...) +} + func initDataUploaderReconciler(needError ...bool) (*DataUploadReconciler, error) { - var errs []error = make([]error, 5) + var errs []error = make([]error, 6) for k, isError := range needError { if k == 0 && isError { errs[0] = fmt.Errorf("Get error") @@ -118,7 +127,9 @@ func initDataUploaderReconciler(needError ...bool) (*DataUploadReconciler, error } else if k == 3 && isError { errs[3] = fmt.Errorf("Patch error") } else if k == 4 && isError { - errs[4] = apierrors.NewConflict(velerov2alpha1api.Resource("datadownload"), dataDownloadName, errors.New("conflict")) + errs[4] = apierrors.NewConflict(velerov2alpha1api.Resource("dataupload"), dataUploadName, errors.New("conflict")) + } else if k == 5 && isError { + errs[5] = fmt.Errorf("List error") } } @@ -198,6 +209,8 @@ func initDataUploaderReconcilerWithError(needError ...error) (*DataUploadReconci fakeClient.patchError = needError[3] } else if k == 4 { fakeClient.updateConflict = needError[4] + } else if k == 5 { + fakeClient.listError = needError[5] } } @@ -283,7 +296,7 @@ func (f *fakeDataUploadFSBR) Init(ctx 
context.Context, bslName string, sourceNam return nil } -func (f *fakeDataUploadFSBR) StartBackup(source datapath.AccessPoint, realSource string, parentSnapshot string, forceFull bool, tags map[string]string, uploaderConfigs map[string]string) error { +func (f *fakeDataUploadFSBR) StartBackup(source datapath.AccessPoint, realSource string, parentSnapshot string, forceFull bool, tags map[string]string, uploaderConfigs *map[string]string) error { du := f.du original := f.du.DeepCopy() du.Status.Phase = velerov2alpha1api.DataUploadPhaseCompleted @@ -293,7 +306,7 @@ func (f *fakeDataUploadFSBR) StartBackup(source datapath.AccessPoint, realSource return nil } -func (f *fakeDataUploadFSBR) StartRestore(snapshotID string, target datapath.AccessPoint, uploaderConfigs map[string]string) error { +func (f *fakeDataUploadFSBR) StartRestore(snapshotID string, target datapath.AccessPoint, uploaderConfigs *map[string]string) error { return nil } @@ -983,7 +996,7 @@ func TestFindDataUploads(t *testing.T) { require.NoError(t, err) err = r.client.Create(ctx, &test.pod) require.NoError(t, err) - uploads, err := r.FindDataUploads(context.Background(), r.client, "velero") + uploads, err := r.FindDataUploadsByPod(context.Background(), r.client, "velero") if test.expectedError { assert.Error(t, err) @@ -994,3 +1007,110 @@ func TestFindDataUploads(t *testing.T) { }) } } +func TestAttemptDataUploadResume(t *testing.T) { + tests := []struct { + name string + dataUploads []velerov2alpha1api.DataUpload + du *velerov2alpha1api.DataUpload + pod *corev1.Pod + needErrs []bool + acceptedDataUploads []string + prepareddDataUploads []string + cancelledDataUploads []string + expectedError bool + }{ + // Test case 1: Process Accepted DataUpload + { + name: "AcceptedDataUpload", + pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1.Volume{Name: "dataupload-1"}).NodeName("node-1").Labels(map[string]string{ + velerov1api.DataUploadLabel: dataUploadName, + }).Result(), + du: 
dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).Result(), + acceptedDataUploads: []string{dataUploadName}, + expectedError: false, + }, + // Test case 2: Cancel an Accepted DataUpload + { + name: "CancelAcceptedDataUpload", + du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).Result(), + }, + // Test case 3: Process Accepted Prepared DataUpload + { + name: "PreparedDataUpload", + pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1.Volume{Name: "dataupload-1"}).NodeName("node-1").Labels(map[string]string{ + velerov1api.DataUploadLabel: dataUploadName, + }).Result(), + du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhasePrepared).Result(), + prepareddDataUploads: []string{dataUploadName}, + }, + // Test case 4: Process Accepted InProgress DataUpload + { + name: "InProgressDataUpload", + pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1.Volume{Name: "dataupload-1"}).NodeName("node-1").Labels(map[string]string{ + velerov1api.DataUploadLabel: dataUploadName, + }).Result(), + du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhasePrepared).Result(), + prepareddDataUploads: []string{dataUploadName}, + }, + // Test case 5: get resume error + { + name: "ResumeError", + pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1.Volume{Name: "dataupload-1"}).NodeName("node-1").Labels(map[string]string{ + velerov1api.DataUploadLabel: dataUploadName, + }).Result(), + needErrs: []bool{false, false, false, false, false, true}, + du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhasePrepared).Result(), + expectedError: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.TODO() + r, err := initDataUploaderReconciler(test.needErrs...) 
+ r.nodeName = "node-1" + require.NoError(t, err) + defer func() { + r.client.Delete(ctx, test.du, &kbclient.DeleteOptions{}) + if test.pod != nil { + r.client.Delete(ctx, test.pod, &kbclient.DeleteOptions{}) + } + }() + + assert.NoError(t, r.client.Create(ctx, test.du)) + if test.pod != nil { + assert.NoError(t, r.client.Create(ctx, test.pod)) + } + // Run the test + err = r.AttemptDataUploadResume(ctx, r.client, r.logger.WithField("name", test.name), test.du.Namespace) + + if test.expectedError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + + // Verify DataUploads marked as Cancelled + for _, duName := range test.cancelledDataUploads { + dataUpload := &velerov2alpha1api.DataUpload{} + err := r.client.Get(context.Background(), types.NamespacedName{Namespace: "velero", Name: duName}, dataUpload) + require.NoError(t, err) + assert.Equal(t, velerov2alpha1api.DataUploadPhaseCanceled, dataUpload.Status.Phase) + } + // Verify DataUploads marked as Accepted + for _, duName := range test.acceptedDataUploads { + dataUpload := &velerov2alpha1api.DataUpload{} + err := r.client.Get(context.Background(), types.NamespacedName{Namespace: "velero", Name: duName}, dataUpload) + require.NoError(t, err) + assert.Equal(t, velerov2alpha1api.DataUploadPhaseAccepted, dataUpload.Status.Phase) + } + // Verify DataUploads marked as Prepared + for _, duName := range test.prepareddDataUploads { + dataUpload := &velerov2alpha1api.DataUpload{} + err := r.client.Get(context.Background(), types.NamespacedName{Namespace: "velero", Name: duName}, dataUpload) + require.NoError(t, err) + assert.Equal(t, velerov2alpha1api.DataUploadPhasePrepared, dataUpload.Status.Phase) + } + } + }) + } +} diff --git a/pkg/controller/pod_volume_backup_controller_test.go b/pkg/controller/pod_volume_backup_controller_test.go index 852e8d3889..14de4bb093 100644 --- a/pkg/controller/pod_volume_backup_controller_test.go +++ b/pkg/controller/pod_volume_backup_controller_test.go @@ -103,7 +103,7 @@ func (b 
*fakeFSBR) Init(ctx context.Context, bslName string, sourceNamespace str return nil } -func (b *fakeFSBR) StartBackup(source datapath.AccessPoint, realSource string, parentSnapshot string, forceFull bool, tags map[string]string, uploaderConfigs map[string]string) error { +func (b *fakeFSBR) StartBackup(source datapath.AccessPoint, realSource string, parentSnapshot string, forceFull bool, tags map[string]string, uploaderConfigs *map[string]string) error { pvb := b.pvb original := b.pvb.DeepCopy() @@ -115,7 +115,7 @@ func (b *fakeFSBR) StartBackup(source datapath.AccessPoint, realSource string, p return nil } -func (b *fakeFSBR) StartRestore(snapshotID string, target datapath.AccessPoint, uploaderConfigs map[string]string) error { +func (b *fakeFSBR) StartRestore(snapshotID string, target datapath.AccessPoint, uploaderConfigs *map[string]string) error { return nil } diff --git a/pkg/datapath/file_system.go b/pkg/datapath/file_system.go index 60a0c8347b..2dfdb9583a 100644 --- a/pkg/datapath/file_system.go +++ b/pkg/datapath/file_system.go @@ -129,7 +129,7 @@ func (fs *fileSystemBR) Close(ctx context.Context) { fs.log.WithField("user", fs.jobName).Info("FileSystemBR is closed") } -func (fs *fileSystemBR) StartBackup(source AccessPoint, realSource string, parentSnapshot string, forceFull bool, tags map[string]string, uploaderConfig map[string]string) error { +func (fs *fileSystemBR) StartBackup(source AccessPoint, realSource string, parentSnapshot string, forceFull bool, tags map[string]string, uploaderConfig *map[string]string) error { if !fs.initialized { return errors.New("file system data path is not initialized") } @@ -150,7 +150,7 @@ func (fs *fileSystemBR) StartBackup(source AccessPoint, realSource string, paren return nil } -func (fs *fileSystemBR) StartRestore(snapshotID string, target AccessPoint, uploaderConfigs map[string]string) error { +func (fs *fileSystemBR) StartRestore(snapshotID string, target AccessPoint, uploaderConfigs *map[string]string) error { 
if !fs.initialized { return errors.New("file system data path is not initialized") } diff --git a/pkg/datapath/file_system_test.go b/pkg/datapath/file_system_test.go index 6efb58458e..43866f27f3 100644 --- a/pkg/datapath/file_system_test.go +++ b/pkg/datapath/file_system_test.go @@ -100,7 +100,7 @@ func TestAsyncBackup(t *testing.T) { fs.initialized = true fs.callbacks = test.callbacks - err := fs.StartBackup(AccessPoint{ByPath: test.path}, "", "", false, nil, make(map[string]string)) + err := fs.StartBackup(AccessPoint{ByPath: test.path}, "", "", false, nil, &map[string]string{}) require.Equal(t, nil, err) <-finish @@ -183,7 +183,7 @@ func TestAsyncRestore(t *testing.T) { fs.initialized = true fs.callbacks = test.callbacks - err := fs.StartRestore(test.snapshot, AccessPoint{ByPath: test.path}, make(map[string]string)) + err := fs.StartRestore(test.snapshot, AccessPoint{ByPath: test.path}, &map[string]string{}) require.Equal(t, nil, err) <-finish diff --git a/pkg/datapath/mocks/types.go b/pkg/datapath/mocks/types.go index b3bf4b4229..c765defead 100644 --- a/pkg/datapath/mocks/types.go +++ b/pkg/datapath/mocks/types.go @@ -43,11 +43,11 @@ func (_m *AsyncBR) Init(ctx context.Context, bslName string, sourceNamespace str } // StartBackup provides a mock function with given fields: source, realSource, parentSnapshot, forceFull, tags, dataMoverConfig -func (_m *AsyncBR) StartBackup(source datapath.AccessPoint, realSource string, parentSnapshot string, forceFull bool, tags map[string]string, dataMoverConfig map[string]string) error { +func (_m *AsyncBR) StartBackup(source datapath.AccessPoint, realSource string, parentSnapshot string, forceFull bool, tags map[string]string, dataMoverConfig *map[string]string) error { ret := _m.Called(source, realSource, parentSnapshot, forceFull, tags, dataMoverConfig) var r0 error - if rf, ok := ret.Get(0).(func(datapath.AccessPoint, string, string, bool, map[string]string, map[string]string) error); ok { + if rf, ok := 
ret.Get(0).(func(datapath.AccessPoint, string, string, bool, map[string]string, *map[string]string) error); ok { r0 = rf(source, realSource, parentSnapshot, forceFull, tags, dataMoverConfig) } else { r0 = ret.Error(0) @@ -56,13 +56,13 @@ func (_m *AsyncBR) StartBackup(source datapath.AccessPoint, realSource string, p return r0 } -// StartRestore provides a mock function with given fields: snapshotID, target, uploaderConfigs -func (_m *AsyncBR) StartRestore(snapshotID string, target datapath.AccessPoint, uploaderConfigs map[string]string) error { - ret := _m.Called(snapshotID, target, uploaderConfigs) +// StartRestore provides a mock function with given fields: snapshotID, target, dataMoverConfig +func (_m *AsyncBR) StartRestore(snapshotID string, target datapath.AccessPoint, dataMoverConfig *map[string]string) error { + ret := _m.Called(snapshotID, target, dataMoverConfig) var r0 error - if rf, ok := ret.Get(0).(func(string, datapath.AccessPoint, map[string]string) error); ok { - r0 = rf(snapshotID, target, uploaderConfigs) + if rf, ok := ret.Get(0).(func(string, datapath.AccessPoint, *map[string]string) error); ok { + r0 = rf(snapshotID, target, dataMoverConfig) } else { r0 = ret.Error(0) } diff --git a/pkg/datapath/types.go b/pkg/datapath/types.go index 9964e9d8f5..15688a0957 100644 --- a/pkg/datapath/types.go +++ b/pkg/datapath/types.go @@ -62,10 +62,10 @@ type AsyncBR interface { Init(ctx context.Context, bslName string, sourceNamespace string, uploaderType string, repositoryType string, repoIdentifier string, repositoryEnsurer *repository.Ensurer, credentialGetter *credentials.CredentialGetter) error // StartBackup starts an asynchronous data path instance for backup - StartBackup(source AccessPoint, realSource string, parentSnapshot string, forceFull bool, tags map[string]string, dataMoverConfig map[string]string) error + StartBackup(source AccessPoint, realSource string, parentSnapshot string, forceFull bool, tags map[string]string, dataMoverConfig 
*map[string]string) error // StartRestore starts an asynchronous data path instance for restore - StartRestore(snapshotID string, target AccessPoint, uploaderConfigs map[string]string) error + StartRestore(snapshotID string, target AccessPoint, dataMoverConfig *map[string]string) error // Cancel cancels an asynchronous data path instance Cancel() diff --git a/pkg/install/deployment.go b/pkg/install/deployment.go index 5ea680dc16..7c1bd7a81f 100644 --- a/pkg/install/deployment.go +++ b/pkg/install/deployment.go @@ -41,6 +41,7 @@ type podTemplateConfig struct { withSecret bool defaultRepoMaintenanceFrequency time.Duration garbageCollectionFrequency time.Duration + podVolumeOperationTimeout time.Duration plugins []string features []string defaultVolumesToFsBackup bool @@ -115,6 +116,12 @@ func WithGarbageCollectionFrequency(val time.Duration) podTemplateOption { } } +func WithPodVolumeOperationTimeout(val time.Duration) podTemplateOption { + return func(c *podTemplateConfig) { + c.podVolumeOperationTimeout = val + } +} + func WithPlugins(plugins []string) podTemplateOption { return func(c *podTemplateConfig) { c.plugins = plugins @@ -212,6 +219,10 @@ func Deployment(namespace string, opts ...podTemplateOption) *appsv1.Deployment args = append(args, fmt.Sprintf("--garbage-collection-frequency=%v", c.garbageCollectionFrequency)) } + if c.podVolumeOperationTimeout > 0 { + args = append(args, fmt.Sprintf("--fs-backup-timeout=%v", c.podVolumeOperationTimeout)) + } + deployment := &appsv1.Deployment{ ObjectMeta: objectMeta(namespace, "velero"), TypeMeta: metav1.TypeMeta{ diff --git a/pkg/install/resources.go b/pkg/install/resources.go index 21aa83ff65..2e9e1bc3e2 100644 --- a/pkg/install/resources.go +++ b/pkg/install/resources.go @@ -246,6 +246,7 @@ type VeleroOptions struct { VSLConfig map[string]string DefaultRepoMaintenanceFrequency time.Duration GarbageCollectionFrequency time.Duration + PodVolumeOperationTimeout time.Duration Plugins []string NoDefaultBackupLocation 
bool CACertData []byte @@ -335,6 +336,7 @@ func AllResources(o *VeleroOptions) *unstructured.UnstructuredList { WithDefaultRepoMaintenanceFrequency(o.DefaultRepoMaintenanceFrequency), WithServiceAccountName(serviceAccountName), WithGarbageCollectionFrequency(o.GarbageCollectionFrequency), + WithPodVolumeOperationTimeout(o.PodVolumeOperationTimeout), WithUploaderType(o.UploaderType), } diff --git a/pkg/podvolume/backupper.go b/pkg/podvolume/backupper.go index 202a210052..d4f2f4d8e1 100644 --- a/pkg/podvolume/backupper.go +++ b/pkg/podvolume/backupper.go @@ -301,11 +301,7 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api. } } - volumeBackup, err := newPodVolumeBackup(backup, pod, volume, repoIdentifier, b.uploaderType, pvc) - if err != nil { - errs = append(errs, errors.Wrapf(err, "error creating PodVolumeBackup for volume %s", volumeName)) - continue - } + volumeBackup := newPodVolumeBackup(backup, pod, volume, repoIdentifier, b.uploaderType, pvc) if err := veleroclient.CreateRetryGenerateName(b.crClient, b.ctx, volumeBackup); err != nil { errs = append(errs, err) continue @@ -358,7 +354,7 @@ func isHostPathVolume(volume *corev1api.Volume, pvc *corev1api.PersistentVolumeC return pv.Spec.HostPath != nil, nil } -func newPodVolumeBackup(backup *velerov1api.Backup, pod *corev1api.Pod, volume corev1api.Volume, repoIdentifier, uploaderType string, pvc *corev1api.PersistentVolumeClaim) (*velerov1api.PodVolumeBackup, error) { +func newPodVolumeBackup(backup *velerov1api.Backup, pod *corev1api.Pod, volume corev1api.Volume, repoIdentifier, uploaderType string, pvc *corev1api.PersistentVolumeClaim) *velerov1api.PodVolumeBackup { pvb := &velerov1api.PodVolumeBackup{ ObjectMeta: metav1.ObjectMeta{ Namespace: backup.Namespace, @@ -415,15 +411,9 @@ func newPodVolumeBackup(backup *velerov1api.Backup, pod *corev1api.Pod, volume c pvb.Spec.Tags["pvc-uid"] = string(pvc.UID) } - if backup.Spec.BackupConfig != nil { - configJSON, err := 
uploaderconfig.MarshalToPVBConfig(backup.Spec.BackupConfig) - if err != nil { - return nil, errors.Wrap(err, "failed to marshal backup config") - } - pvb.Spec.UploaderSettings = map[string]string{ - uploaderconfig.PodVolumeBackups: configJSON, - } + if backup.Spec.UploaderConfigForBackup != nil { + pvb.Spec.UploaderSettings = uploaderconfig.StoreBackupConfig(backup.Spec.UploaderConfigForBackup) } - return pvb, nil + return pvb } diff --git a/pkg/podvolume/restorer.go b/pkg/podvolume/restorer.go index 0d3c935fc1..93157ccc36 100644 --- a/pkg/podvolume/restorer.go +++ b/pkg/podvolume/restorer.go @@ -177,12 +177,7 @@ func (r *restorer) RestorePodVolumes(data RestoreData) []error { } } - volumeRestore, err := newPodVolumeRestore(data.Restore, data.Pod, data.BackupLocation, volume, backupInfo.snapshotID, repoIdentifier, backupInfo.uploaderType, data.SourceNamespace, pvc) - if err != nil { - errs = append(errs, errors.Wrapf(err, "error creating PodVolumeRestore for volume %s", volume)) - continue - } - + volumeRestore := newPodVolumeRestore(data.Restore, data.Pod, data.BackupLocation, volume, backupInfo.snapshotID, repoIdentifier, backupInfo.uploaderType, data.SourceNamespace, pvc) if err := veleroclient.CreateRetryGenerateName(r.crClient, r.ctx, volumeRestore); err != nil { errs = append(errs, errors.WithStack(err)) continue @@ -251,7 +246,7 @@ ForEachVolume: return errs } -func newPodVolumeRestore(restore *velerov1api.Restore, pod *corev1api.Pod, backupLocation, volume, snapshot, repoIdentifier, uploaderType, sourceNamespace string, pvc *corev1api.PersistentVolumeClaim) (*velerov1api.PodVolumeRestore, error) { +func newPodVolumeRestore(restore *velerov1api.Restore, pod *corev1api.Pod, backupLocation, volume, snapshot, repoIdentifier, uploaderType, sourceNamespace string, pvc *corev1api.PersistentVolumeClaim) *velerov1api.PodVolumeRestore { pvr := &velerov1api.PodVolumeRestore{ ObjectMeta: metav1.ObjectMeta{ Namespace: restore.Namespace, @@ -291,18 +286,11 @@ func 
newPodVolumeRestore(restore *velerov1api.Restore, pod *corev1api.Pod, backu pvr.Labels[velerov1api.PVCUIDLabel] = string(pvc.UID) } - if restore.Spec.RestoreConfig != nil { - configJSON, err := uploaderconfig.MarshalToPVRConfig(restore.Spec.RestoreConfig) - if err != nil { - return nil, errors.Wrap(err, "failed to marshal restore config") - } - - pvr.Spec.UploaderSettings = map[string]string{ - uploaderconfig.PodVolumeRestores: configJSON, - } + if restore.Spec.UploaderConfigForRestore != nil { + pvr.Spec.UploaderSettings = uploaderconfig.StoreRestoreConfig(restore.Spec.UploaderConfigForRestore) } - return pvr, nil + return pvr } func getVolumesRepositoryType(volumes map[string]volumeBackupInfo) (string, error) { diff --git a/pkg/restore/restore.go b/pkg/restore/restore.go index ea0af47c97..4336b853d1 100644 --- a/pkg/restore/restore.go +++ b/pkg/restore/restore.go @@ -325,6 +325,7 @@ func (kr *kubernetesRestorer) RestoreWithResolvers( resourceModifiers: req.ResourceModifiers, disableInformerCache: req.DisableInformerCache, featureVerifier: kr.featureVerifier, + hookTracker: hook.NewHookTracker(), } return restoreCtx.execute() @@ -377,6 +378,7 @@ type restoreContext struct { resourceModifiers *resourcemodifiers.ResourceModifiers disableInformerCache bool featureVerifier features.Verifier + hookTracker *hook.HookTracker } type resourceClientKey struct { @@ -544,6 +546,23 @@ func (ctx *restoreContext) execute() (results.Result, results.Result) { errs.Merge(&e) } + var createdOrUpdatedCRDs bool + for _, restoredItem := range ctx.restoredItems { + if restoredItem.action == itemRestoreResultCreated || restoredItem.action == itemRestoreResultUpdated { + createdOrUpdatedCRDs = true + break + } + } + // If we just restored custom resource definitions (CRDs), refresh + // discovery because the restored CRDs may have created or updated new APIs that + // didn't previously exist in the cluster, and we want to be able to + // resolve & restore instances of them in subsequent 
loop iterations. + if createdOrUpdatedCRDs { + if err := ctx.discoveryHelper.Refresh(); err != nil { + warnings.Add("", errors.Wrap(err, "refresh discovery after restoring CRDs")) + } + } + // Restore everything else selectedResourceCollection, _, w, e := ctx.getOrderedResourceCollection( backupResources, @@ -629,11 +648,6 @@ func (ctx *restoreContext) execute() (results.Result, results.Result) { updated.Status.Progress.TotalItems = len(ctx.restoredItems) updated.Status.Progress.ItemsRestored = len(ctx.restoredItems) - err = kube.PatchResource(ctx.restore, updated, ctx.kbClient) - if err != nil { - ctx.log.WithError(errors.WithStack((err))).Warn("Updating restore status.progress") - } - // Wait for all of the pod volume restore goroutines to be done, which is // only possible once all of their errors have been received by the loop // below, then close the podVolumeErrs channel so the loop terminates. @@ -668,6 +682,19 @@ func (ctx *restoreContext) execute() (results.Result, results.Result) { } ctx.log.Info("Done waiting for all post-restore exec hooks to complete") + // update hooks execution status + if updated.Status.HookStatus == nil { + updated.Status.HookStatus = &velerov1api.HookStatus{} + } + updated.Status.HookStatus.HooksAttempted, updated.Status.HookStatus.HooksFailed = ctx.hookTracker.Stat() + ctx.log.Infof("hookTracker: %+v, hookAttempted: %d, hookFailed: %d", ctx.hookTracker.GetTracker(), updated.Status.HookStatus.HooksAttempted, updated.Status.HookStatus.HooksFailed) + + // patch the restore status + err = kube.PatchResource(ctx.restore, updated, ctx.kbClient) + if err != nil { + ctx.log.WithError(errors.WithStack((err))).Warn("Updating restore status") + } + return warnings, errs } @@ -762,15 +789,6 @@ func (ctx *restoreContext) processSelectedResource( } } - // If we just restored custom resource definitions (CRDs), refresh - // discovery because the restored CRDs may have created new APIs that - // didn't previously exist in the cluster, and we 
want to be able to - // resolve & restore instances of them in subsequent loop iterations. - if groupResource == kuberesource.CustomResourceDefinitions { - if err := ctx.discoveryHelper.Refresh(); err != nil { - warnings.Add("", errors.Wrap(err, "refresh discovery after restoring CRDs")) - } - } return processedItems, warnings, errs } @@ -1963,6 +1981,7 @@ func (ctx *restoreContext) waitExec(createdObj *unstructured.Unstructured) { ctx.resourceRestoreHooks, pod, ctx.log, + ctx.hookTracker, ) if err != nil { ctx.log.WithError(err).Errorf("error getting exec hooks for pod %s/%s", pod.Namespace, pod.Name) @@ -1970,7 +1989,7 @@ func (ctx *restoreContext) waitExec(createdObj *unstructured.Unstructured) { return } - if errs := ctx.waitExecHookHandler.HandleHooks(ctx.hooksContext, ctx.log, pod, execHooksByContainer); len(errs) > 0 { + if errs := ctx.waitExecHookHandler.HandleHooks(ctx.hooksContext, ctx.log, pod, execHooksByContainer, ctx.hookTracker); len(errs) > 0 { ctx.log.WithError(kubeerrs.NewAggregate(errs)).Error("unable to successfully execute post-restore hooks") ctx.hooksCancelFunc() diff --git a/pkg/uploader/kopia/snapshot.go b/pkg/uploader/kopia/snapshot.go index b89e59febd..9ce0f1fa58 100644 --- a/pkg/uploader/kopia/snapshot.go +++ b/pkg/uploader/kopia/snapshot.go @@ -18,7 +18,6 @@ package kopia import ( "context" - "encoding/json" "fmt" "math" "os" @@ -39,7 +38,7 @@ import ( "github.com/kopia/kopia/snapshot/snapshotfs" "github.com/pkg/errors" - velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/util/uploaderconfig" "github.com/vmware-tanzu/velero/pkg/kopia" "github.com/vmware-tanzu/velero/pkg/repository/udmrepo" @@ -107,21 +106,18 @@ func getDefaultPolicy() *policy.Policy { } } -func setupPolicy(ctx context.Context, rep repo.RepositoryWriter, sourceInfo snapshot.SourceInfo, uploaderCfg map[string]string) (*policy.Tree, error) { +func setupPolicy(ctx context.Context, rep repo.RepositoryWriter, sourceInfo 
snapshot.SourceInfo, uploaderCfg *map[string]string) (*policy.Tree, error) { // some internal operations from Kopia code retrieves policies from repo directly, so we need to persist the policy to repo - backupCfg := velerov1api.BackupConfig{} - // currently, we only have one uploader config in one uploader config so we can just loop through it - for configItem, jsonConfig := range uploaderCfg { - err := json.Unmarshal([]byte(jsonConfig), &backupCfg) + curPolicy := getDefaultPolicy() + + if uploaderCfg != nil { + uploaderConfig, err := uploaderconfig.GetBackupConfig(uploaderCfg) if err != nil { - return nil, errors.Wrapf(err, "failed to parse %s uploader config", configItem) + return nil, errors.Wrap(err, "failed to get uploader config") + } + if uploaderConfig.ParallelFilesUpload > 0 { + curPolicy.UploadPolicy.MaxParallelFileReads = newOptionalInt(uploaderConfig.ParallelFilesUpload) } - break - } - - curPolicy := getDefaultPolicy() - if backupCfg.ParallelFilesUpload > 0 { - curPolicy.UploadPolicy.MaxParallelFileReads = newOptionalInt(backupCfg.ParallelFilesUpload) } err := setPolicyFunc(ctx, rep, sourceInfo, curPolicy) @@ -145,7 +141,7 @@ func setupPolicy(ctx context.Context, rep repo.RepositoryWriter, sourceInfo snap // Backup backup specific sourcePath and update progress func Backup(ctx context.Context, fsUploader SnapshotUploader, repoWriter repo.RepositoryWriter, sourcePath string, realSource string, - forceFull bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, tags map[string]string, log logrus.FieldLogger) (*uploader.SnapshotInfo, bool, error) { + forceFull bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, uploaderCfg *map[string]string, tags map[string]string, log logrus.FieldLogger) (*uploader.SnapshotInfo, bool, error) { if fsUploader == nil { return nil, false, errors.New("get empty kopia uploader") } @@ -241,7 +237,7 @@ func SnapshotSource( forceFull bool, parentSnapshot string, 
snapshotTags map[string]string, - uploaderCfg map[string]string, + uploaderCfg *map[string]string, log logrus.FieldLogger, description string, ) (string, int64, error) { @@ -373,7 +369,7 @@ func findPreviousSnapshotManifest(ctx context.Context, rep repo.Repository, sour } // Restore restore specific sourcePath with given snapshotID and update progress -func Restore(ctx context.Context, rep repo.RepositoryWriter, progress *Progress, snapshotID, dest string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, +func Restore(ctx context.Context, rep repo.RepositoryWriter, progress *Progress, snapshotID, dest string, volMode uploader.PersistentVolumeMode, uploaderCfg *map[string]string, log logrus.FieldLogger, cancleCh chan struct{}) (int64, int32, error) { log.Info("Start to restore...") @@ -396,23 +392,20 @@ func Restore(ctx context.Context, rep repo.RepositoryWriter, progress *Progress, return 0, 0, errors.Wrapf(err, "Unable to resolve path %v", dest) } - restoreCfg := velerov1api.RestoreConfig{} - // currently, we only have one uploader config in map so we can just loop through it - for configItem, jsonConfig := range uploaderCfg { - err := json.Unmarshal([]byte(jsonConfig), &restoreCfg) - if err != nil { - return 0, 0, errors.Wrapf(err, "failed to parse %s uploader config", configItem) - } - break - } - fsOutput := &restore.FilesystemOutput{ TargetPath: path, OverwriteDirectories: true, OverwriteFiles: true, OverwriteSymlinks: true, IgnorePermissionErrors: true, - WriteSparseFiles: restoreCfg.WriteSparseFiles, + } + + if uploaderCfg != nil { + restoreCfg, err := uploaderconfig.GetRestoreConfig(uploaderCfg) + if err != nil { + return 0, 0, errors.Wrap(err, "failed to get uploader config") + } + fsOutput.WriteSparseFiles = restoreCfg.WriteSparseFiles } log.Debugf("Restore filesystem output %v", fsOutput) diff --git a/pkg/uploader/kopia/snapshot_test.go b/pkg/uploader/kopia/snapshot_test.go index f4a751d32b..be73146a7d 100644 --- 
a/pkg/uploader/kopia/snapshot_test.go +++ b/pkg/uploader/kopia/snapshot_test.go @@ -96,7 +96,7 @@ func TestSnapshotSource(t *testing.T) { testCases := []struct { name string args []mockArgs - uploaderCfg map[string]string + uploaderCfg *map[string]string notError bool }{ { @@ -152,7 +152,7 @@ func TestSnapshotSource(t *testing.T) { notError: false, }, { - name: "set policy with ParallelFilesUpload", + name: "set policy with parallel files upload", args: []mockArgs{ {methodName: "LoadSnapshot", returns: []interface{}{manifest, nil}}, {methodName: "SaveSnapshot", returns: []interface{}{manifest.ID, nil}}, @@ -162,8 +162,10 @@ func TestSnapshotSource(t *testing.T) { {methodName: "Upload", returns: []interface{}{manifest, nil}}, {methodName: "Flush", returns: []interface{}{nil}}, }, - uploaderCfg: map[string]string{"ParallelFilesUpload": "10"}, - notError: true, + uploaderCfg: &map[string]string{ + "ParallelFilesUpload": "10", + }, + notError: true, }, { name: "failed to upload snapshot", @@ -645,9 +647,9 @@ func TestBackup(t *testing.T) { var snapshotInfo *uploader.SnapshotInfo var err error if tc.isEmptyUploader { - snapshotInfo, isSnapshotEmpty, err = Backup(context.Background(), nil, s.repoWriterMock, tc.sourcePath, "", tc.forceFull, tc.parentSnapshot, tc.volMode, make(map[string]string), tc.tags, &logrus.Logger{}) + snapshotInfo, isSnapshotEmpty, err = Backup(context.Background(), nil, s.repoWriterMock, tc.sourcePath, "", tc.forceFull, tc.parentSnapshot, tc.volMode, &map[string]string{}, tc.tags, &logrus.Logger{}) } else { - snapshotInfo, isSnapshotEmpty, err = Backup(context.Background(), s.uploderMock, s.repoWriterMock, tc.sourcePath, "", tc.forceFull, tc.parentSnapshot, tc.volMode, make(map[string]string), tc.tags, &logrus.Logger{}) + snapshotInfo, isSnapshotEmpty, err = Backup(context.Background(), s.uploderMock, s.repoWriterMock, tc.sourcePath, "", tc.forceFull, tc.parentSnapshot, tc.volMode, &map[string]string{}, tc.tags, &logrus.Logger{}) } // Check if the 
returned error matches the expected error if tc.expectedError != nil { @@ -786,7 +788,7 @@ func TestRestore(t *testing.T) { repoWriterMock.On("OpenObject", mock.Anything, mock.Anything).Return(em, nil) progress := new(Progress) - bytesRestored, fileCount, err := Restore(context.Background(), repoWriterMock, progress, tc.snapshotID, tc.dest, tc.volMode, make(map[string]string), logrus.New(), nil) + bytesRestored, fileCount, err := Restore(context.Background(), repoWriterMock, progress, tc.snapshotID, tc.dest, tc.volMode, &map[string]string{}, logrus.New(), nil) // Check if the returned error matches the expected error if tc.expectedError != nil { diff --git a/pkg/uploader/provider/kopia.go b/pkg/uploader/provider/kopia.go index 6d1dbbf722..1c2f955975 100644 --- a/pkg/uploader/provider/kopia.go +++ b/pkg/uploader/provider/kopia.go @@ -119,7 +119,7 @@ func (kp *kopiaProvider) RunBackup( forceFull bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, - uploaderCfg map[string]string, + uploaderCfg *map[string]string, updater uploader.ProgressUpdater) (string, bool, error) { if updater == nil { return "", false, errors.New("Need to initial backup progress updater first") @@ -204,7 +204,7 @@ func (kp *kopiaProvider) RunRestore( snapshotID string, volumePath string, volMode uploader.PersistentVolumeMode, - uploaderCfg map[string]string, + uploaderCfg *map[string]string, updater uploader.ProgressUpdater) error { log := kp.log.WithFields(logrus.Fields{ "snapshotID": snapshotID, diff --git a/pkg/uploader/provider/kopia_test.go b/pkg/uploader/provider/kopia_test.go index 507be4ae54..3baa724f6b 100644 --- a/pkg/uploader/provider/kopia_test.go +++ b/pkg/uploader/provider/kopia_test.go @@ -68,34 +68,34 @@ func TestRunBackup(t *testing.T) { testCases := []struct { name string - hookBackupFunc func(ctx context.Context, fsUploader kopia.SnapshotUploader, repoWriter repo.RepositoryWriter, sourcePath string, realSource string, forceFull bool, parentSnapshot string, 
volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, tags map[string]string, log logrus.FieldLogger) (*uploader.SnapshotInfo, bool, error) + hookBackupFunc func(ctx context.Context, fsUploader kopia.SnapshotUploader, repoWriter repo.RepositoryWriter, sourcePath string, realSource string, forceFull bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, uploaderCfg *map[string]string, tags map[string]string, log logrus.FieldLogger) (*uploader.SnapshotInfo, bool, error) volMode uploader.PersistentVolumeMode notError bool }{ { name: "success to backup", - hookBackupFunc: func(ctx context.Context, fsUploader kopia.SnapshotUploader, repoWriter repo.RepositoryWriter, sourcePath string, realSource string, forceFull bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, tags map[string]string, log logrus.FieldLogger) (*uploader.SnapshotInfo, bool, error) { + hookBackupFunc: func(ctx context.Context, fsUploader kopia.SnapshotUploader, repoWriter repo.RepositoryWriter, sourcePath string, realSource string, forceFull bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, uploaderCfg *map[string]string, tags map[string]string, log logrus.FieldLogger) (*uploader.SnapshotInfo, bool, error) { return &uploader.SnapshotInfo{}, false, nil }, notError: true, }, { name: "get error to backup", - hookBackupFunc: func(ctx context.Context, fsUploader kopia.SnapshotUploader, repoWriter repo.RepositoryWriter, sourcePath string, realSource string, forceFull bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, tags map[string]string, log logrus.FieldLogger) (*uploader.SnapshotInfo, bool, error) { + hookBackupFunc: func(ctx context.Context, fsUploader kopia.SnapshotUploader, repoWriter repo.RepositoryWriter, sourcePath string, realSource string, forceFull bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, uploaderCfg *map[string]string, tags 
map[string]string, log logrus.FieldLogger) (*uploader.SnapshotInfo, bool, error) { return &uploader.SnapshotInfo{}, false, errors.New("failed to backup") }, notError: false, }, { name: "got empty snapshot", - hookBackupFunc: func(ctx context.Context, fsUploader kopia.SnapshotUploader, repoWriter repo.RepositoryWriter, sourcePath string, realSource string, forceFull bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, tags map[string]string, log logrus.FieldLogger) (*uploader.SnapshotInfo, bool, error) { + hookBackupFunc: func(ctx context.Context, fsUploader kopia.SnapshotUploader, repoWriter repo.RepositoryWriter, sourcePath string, realSource string, forceFull bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, uploaderCfg *map[string]string, tags map[string]string, log logrus.FieldLogger) (*uploader.SnapshotInfo, bool, error) { return nil, true, errors.New("snapshot is empty") }, notError: false, }, { name: "success to backup block mode volume", - hookBackupFunc: func(ctx context.Context, fsUploader kopia.SnapshotUploader, repoWriter repo.RepositoryWriter, sourcePath string, realSource string, forceFull bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, tags map[string]string, log logrus.FieldLogger) (*uploader.SnapshotInfo, bool, error) { + hookBackupFunc: func(ctx context.Context, fsUploader kopia.SnapshotUploader, repoWriter repo.RepositoryWriter, sourcePath string, realSource string, forceFull bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, uploaderCfg *map[string]string, tags map[string]string, log logrus.FieldLogger) (*uploader.SnapshotInfo, bool, error) { return &uploader.SnapshotInfo{}, false, nil }, volMode: uploader.PersistentVolumeBlock, @@ -108,7 +108,7 @@ func TestRunBackup(t *testing.T) { tc.volMode = uploader.PersistentVolumeFilesystem } BackupFunc = tc.hookBackupFunc - _, _, err := kp.RunBackup(context.Background(), 
"var", "", nil, false, "", tc.volMode, map[string]string{}, &updater) + _, _, err := kp.RunBackup(context.Background(), "var", "", nil, false, "", tc.volMode, &map[string]string{}, &updater) if tc.notError { assert.NoError(t, err) } else { @@ -125,27 +125,27 @@ func TestRunRestore(t *testing.T) { testCases := []struct { name string - hookRestoreFunc func(ctx context.Context, rep repo.RepositoryWriter, progress *kopia.Progress, snapshotID, dest string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, log logrus.FieldLogger, cancleCh chan struct{}) (int64, int32, error) + hookRestoreFunc func(ctx context.Context, rep repo.RepositoryWriter, progress *kopia.Progress, snapshotID, dest string, volMode uploader.PersistentVolumeMode, uploaderCfg *map[string]string, log logrus.FieldLogger, cancleCh chan struct{}) (int64, int32, error) notError bool volMode uploader.PersistentVolumeMode }{ { name: "normal restore", - hookRestoreFunc: func(ctx context.Context, rep repo.RepositoryWriter, progress *kopia.Progress, snapshotID, dest string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, log logrus.FieldLogger, cancleCh chan struct{}) (int64, int32, error) { + hookRestoreFunc: func(ctx context.Context, rep repo.RepositoryWriter, progress *kopia.Progress, snapshotID, dest string, volMode uploader.PersistentVolumeMode, uploaderCfg *map[string]string, log logrus.FieldLogger, cancleCh chan struct{}) (int64, int32, error) { return 0, 0, nil }, notError: true, }, { name: "failed to restore", - hookRestoreFunc: func(ctx context.Context, rep repo.RepositoryWriter, progress *kopia.Progress, snapshotID, dest string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, log logrus.FieldLogger, cancleCh chan struct{}) (int64, int32, error) { + hookRestoreFunc: func(ctx context.Context, rep repo.RepositoryWriter, progress *kopia.Progress, snapshotID, dest string, volMode uploader.PersistentVolumeMode, uploaderCfg *map[string]string, 
log logrus.FieldLogger, cancleCh chan struct{}) (int64, int32, error) { return 0, 0, errors.New("failed to restore") }, notError: false, }, { name: "normal block mode restore", - hookRestoreFunc: func(ctx context.Context, rep repo.RepositoryWriter, progress *kopia.Progress, snapshotID, dest string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, log logrus.FieldLogger, cancleCh chan struct{}) (int64, int32, error) { + hookRestoreFunc: func(ctx context.Context, rep repo.RepositoryWriter, progress *kopia.Progress, snapshotID, dest string, volMode uploader.PersistentVolumeMode, uploaderCfg *map[string]string, log logrus.FieldLogger, cancleCh chan struct{}) (int64, int32, error) { return 0, 0, nil }, volMode: uploader.PersistentVolumeBlock, @@ -159,7 +159,7 @@ func TestRunRestore(t *testing.T) { tc.volMode = uploader.PersistentVolumeFilesystem } RestoreFunc = tc.hookRestoreFunc - err := kp.RunRestore(context.Background(), "", "/var", tc.volMode, map[string]string{}, &updater) + err := kp.RunRestore(context.Background(), "", "/var", tc.volMode, &map[string]string{}, &updater) if tc.notError { assert.NoError(t, err) } else { diff --git a/pkg/uploader/provider/mocks/Provider.go b/pkg/uploader/provider/mocks/Provider.go index 7651431b2c..16efccf112 100644 --- a/pkg/uploader/provider/mocks/Provider.go +++ b/pkg/uploader/provider/mocks/Provider.go @@ -30,28 +30,28 @@ func (_m *Provider) Close(ctx context.Context) error { } // RunBackup provides a mock function with given fields: ctx, path, realSource, tags, forceFull, parentSnapshot, volMode, uploaderCfg, updater -func (_m *Provider) RunBackup(ctx context.Context, path string, realSource string, tags map[string]string, forceFull bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, updater uploader.ProgressUpdater) (string, bool, error) { +func (_m *Provider) RunBackup(ctx context.Context, path string, realSource string, tags map[string]string, forceFull 
bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, uploaderCfg *map[string]string, updater uploader.ProgressUpdater) (string, bool, error) { ret := _m.Called(ctx, path, realSource, tags, forceFull, parentSnapshot, volMode, uploaderCfg, updater) var r0 string var r1 bool var r2 error - if rf, ok := ret.Get(0).(func(context.Context, string, string, map[string]string, bool, string, uploader.PersistentVolumeMode, map[string]string, uploader.ProgressUpdater) (string, bool, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, string, string, map[string]string, bool, string, uploader.PersistentVolumeMode, *map[string]string, uploader.ProgressUpdater) (string, bool, error)); ok { return rf(ctx, path, realSource, tags, forceFull, parentSnapshot, volMode, uploaderCfg, updater) } - if rf, ok := ret.Get(0).(func(context.Context, string, string, map[string]string, bool, string, uploader.PersistentVolumeMode, map[string]string, uploader.ProgressUpdater) string); ok { + if rf, ok := ret.Get(0).(func(context.Context, string, string, map[string]string, bool, string, uploader.PersistentVolumeMode, *map[string]string, uploader.ProgressUpdater) string); ok { r0 = rf(ctx, path, realSource, tags, forceFull, parentSnapshot, volMode, uploaderCfg, updater) } else { r0 = ret.Get(0).(string) } - if rf, ok := ret.Get(1).(func(context.Context, string, string, map[string]string, bool, string, uploader.PersistentVolumeMode, map[string]string, uploader.ProgressUpdater) bool); ok { + if rf, ok := ret.Get(1).(func(context.Context, string, string, map[string]string, bool, string, uploader.PersistentVolumeMode, *map[string]string, uploader.ProgressUpdater) bool); ok { r1 = rf(ctx, path, realSource, tags, forceFull, parentSnapshot, volMode, uploaderCfg, updater) } else { r1 = ret.Get(1).(bool) } - if rf, ok := ret.Get(2).(func(context.Context, string, string, map[string]string, bool, string, uploader.PersistentVolumeMode, map[string]string, uploader.ProgressUpdater) error); 
ok { + if rf, ok := ret.Get(2).(func(context.Context, string, string, map[string]string, bool, string, uploader.PersistentVolumeMode, *map[string]string, uploader.ProgressUpdater) error); ok { r2 = rf(ctx, path, realSource, tags, forceFull, parentSnapshot, volMode, uploaderCfg, updater) } else { r2 = ret.Error(2) @@ -61,11 +61,11 @@ func (_m *Provider) RunBackup(ctx context.Context, path string, realSource strin } // RunRestore provides a mock function with given fields: ctx, snapshotID, volumePath, volMode, uploaderConfig, updater -func (_m *Provider) RunRestore(ctx context.Context, snapshotID string, volumePath string, volMode uploader.PersistentVolumeMode, uploaderConfig map[string]string, updater uploader.ProgressUpdater) error { +func (_m *Provider) RunRestore(ctx context.Context, snapshotID string, volumePath string, volMode uploader.PersistentVolumeMode, uploaderConfig *map[string]string, updater uploader.ProgressUpdater) error { ret := _m.Called(ctx, snapshotID, volumePath, volMode, uploaderConfig, updater) var r0 error - if rf, ok := ret.Get(0).(func(context.Context, string, string, uploader.PersistentVolumeMode, map[string]string, uploader.ProgressUpdater) error); ok { + if rf, ok := ret.Get(0).(func(context.Context, string, string, uploader.PersistentVolumeMode, *map[string]string, uploader.ProgressUpdater) error); ok { r0 = rf(ctx, snapshotID, volumePath, volMode, uploaderConfig, updater) } else { r0 = ret.Error(0) diff --git a/pkg/uploader/provider/provider.go b/pkg/uploader/provider/provider.go index 20a3dc4368..4e01a2e187 100644 --- a/pkg/uploader/provider/provider.go +++ b/pkg/uploader/provider/provider.go @@ -49,7 +49,7 @@ type Provider interface { forceFull bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, - uploaderCfg map[string]string, + uploaderCfg *map[string]string, updater uploader.ProgressUpdater) (string, bool, error) // RunRestore which will do restore for one specific volume with given snapshot id and return error // 
updater is used for updating backup progress which implement by third-party @@ -58,7 +58,7 @@ type Provider interface { snapshotID string, volumePath string, volMode uploader.PersistentVolumeMode, - uploaderConfig map[string]string, + uploaderConfig *map[string]string, updater uploader.ProgressUpdater) error // Close which will close related repository Close(ctx context.Context) error diff --git a/pkg/uploader/provider/restic.go b/pkg/uploader/provider/restic.go index 2f16970a97..a1a30b092a 100644 --- a/pkg/uploader/provider/restic.go +++ b/pkg/uploader/provider/restic.go @@ -18,7 +18,6 @@ package provider import ( "context" - "encoding/json" "fmt" "os" "strings" @@ -32,6 +31,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/restic" "github.com/vmware-tanzu/velero/pkg/uploader" "github.com/vmware-tanzu/velero/pkg/util/filesystem" + "github.com/vmware-tanzu/velero/pkg/util/uploaderconfig" ) // resticBackupCMDFunc and resticRestoreCMDFunc are mainly used to make testing more convenient @@ -123,7 +123,7 @@ func (rp *resticProvider) RunBackup( forceFull bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, - uploaderCfg map[string]string, + uploaderCfg *map[string]string, updater uploader.ProgressUpdater) (string, bool, error) { if updater == nil { return "", false, errors.New("Need to initial backup progress updater first") @@ -146,18 +146,15 @@ func (rp *resticProvider) RunBackup( "parentSnapshot": parentSnapshot, }) - backupCfg := velerov1api.BackupConfig{} - // currently, we only have one uploader config in one uploader config so we can just loop through it - for configItem, jsonConfig := range uploaderCfg { - err := json.Unmarshal([]byte(jsonConfig), &backupCfg) + if uploaderCfg != nil { + uploaderConfig, err := uploaderconfig.GetBackupConfig(uploaderCfg) if err != nil { - return "", false, errors.Wrapf(err, "failed to parse %s config", configItem) + return "", false, errors.Wrap(err, "failed to get uploader config") } - break - } - if 
backupCfg.ParallelFilesUpload > 0 { - log.Warnf("ParallelFilesUpload is set to %d, but restic does not support parallel file uploads. Ignoring.", backupCfg.ParallelFilesUpload) + if uploaderConfig.ParallelFilesUpload > 0 { + log.Warnf("ParallelFilesUpload is set to %d, but restic does not support parallel file uploads. Ignoring.", uploaderConfig.ParallelFilesUpload) + } } backupCmd := resticBackupCMDFunc(rp.repoIdentifier, rp.credentialsFile, path, tags) @@ -201,7 +198,7 @@ func (rp *resticProvider) RunRestore( snapshotID string, volumePath string, volMode uploader.PersistentVolumeMode, - uploaderCfg map[string]string, + uploaderCfg *map[string]string, updater uploader.ProgressUpdater) error { if updater == nil { return errors.New("Need to initial backup progress updater first") @@ -222,11 +219,13 @@ func (rp *resticProvider) RunRestore( restoreCmd.ExtraFlags = append(restoreCmd.ExtraFlags, rp.extraFlags...) } - extraFlags, err := rp.parseRestoreExtraFlags(uploaderCfg) - if err != nil { - return errors.Wrap(err, "failed to parse uploader config") - } else if len(extraFlags) != 0 { - restoreCmd.ExtraFlags = append(restoreCmd.ExtraFlags, extraFlags...) + if uploaderCfg != nil { + extraFlags, err := rp.parseRestoreExtraFlags(uploaderCfg) + if err != nil { + return errors.Wrap(err, "failed to parse uploader config") + } else if len(extraFlags) != 0 { + restoreCmd.ExtraFlags = append(restoreCmd.ExtraFlags, extraFlags...) 
+ } } stdout, stderr, err := restic.RunRestore(restoreCmd, log, updater) @@ -235,19 +234,14 @@ func (rp *resticProvider) RunRestore( return err } -func (rp *resticProvider) parseRestoreExtraFlags(uploaderCfg map[string]string) ([]string, error) { +func (rp *resticProvider) parseRestoreExtraFlags(uploaderCfg *map[string]string) ([]string, error) { extraFlags := []string{} - restoreCfg := velerov1api.RestoreConfig{} - // currently, we only have one uploader config in map so we can just loop through it - for configItem, jsonConfig := range uploaderCfg { - err := json.Unmarshal([]byte(jsonConfig), &restoreCfg) - if err != nil { - return extraFlags, errors.Wrapf(err, "failed to parse %s uploader config", configItem) - } - break + uploaderConfig, err := uploaderconfig.GetRestoreConfig(uploaderCfg) + if err != nil { + return extraFlags, errors.Wrap(err, "failed to get uploader config") } - if restoreCfg.WriteSparseFiles { + if uploaderConfig.WriteSparseFiles { extraFlags = append(extraFlags, "--sparse") } return extraFlags, nil diff --git a/pkg/uploader/provider/restic_test.go b/pkg/uploader/provider/restic_test.go index 389f1ee632..77e576c988 100644 --- a/pkg/uploader/provider/restic_test.go +++ b/pkg/uploader/provider/restic_test.go @@ -38,7 +38,6 @@ import ( "github.com/vmware-tanzu/velero/pkg/uploader" "github.com/vmware-tanzu/velero/pkg/util" "github.com/vmware-tanzu/velero/pkg/util/filesystem" - "github.com/vmware-tanzu/velero/pkg/util/uploaderconfig" ) func TestResticRunBackup(t *testing.T) { @@ -151,9 +150,9 @@ func TestResticRunBackup(t *testing.T) { } if !tc.nilUpdater { updater := FakeBackupProgressUpdater{PodVolumeBackup: &velerov1api.PodVolumeBackup{}, Log: tc.rp.log, Ctx: context.Background(), Cli: fake.NewClientBuilder().WithScheme(util.VeleroScheme).Build()} - _, _, err = tc.rp.RunBackup(context.Background(), "var", "", map[string]string{}, false, parentSnapshot, tc.volMode, map[string]string{}, &updater) + _, _, err = tc.rp.RunBackup(context.Background(), 
"var", "", map[string]string{}, false, parentSnapshot, tc.volMode, &map[string]string{}, &updater) } else { - _, _, err = tc.rp.RunBackup(context.Background(), "var", "", map[string]string{}, false, parentSnapshot, tc.volMode, map[string]string{}, nil) + _, _, err = tc.rp.RunBackup(context.Background(), "var", "", map[string]string{}, false, parentSnapshot, tc.volMode, &map[string]string{}, nil) } tc.rp.log.Infof("test name %v error %v", tc.name, err) @@ -224,9 +223,9 @@ func TestResticRunRestore(t *testing.T) { var err error if !tc.nilUpdater { updater := FakeBackupProgressUpdater{PodVolumeBackup: &velerov1api.PodVolumeBackup{}, Log: tc.rp.log, Ctx: context.Background(), Cli: fake.NewClientBuilder().WithScheme(util.VeleroScheme).Build()} - err = tc.rp.RunRestore(context.Background(), "", "var", tc.volMode, map[string]string{}, &updater) + err = tc.rp.RunRestore(context.Background(), "", "var", tc.volMode, &map[string]string{}, &updater) } else { - err = tc.rp.RunRestore(context.Background(), "", "var", tc.volMode, map[string]string{}, nil) + err = tc.rp.RunRestore(context.Background(), "", "var", tc.volMode, &map[string]string{}, nil) } tc.rp.log.Infof("test name %v error %v", tc.name, err) @@ -418,20 +417,20 @@ func TestParseUploaderConfig(t *testing.T) { testCases := []struct { name string - uploaderConfig map[string]string + uploaderConfig *map[string]string expectedFlags []string }{ { name: "SparseFilesEnabled", - uploaderConfig: map[string]string{ - uploaderconfig.PodVolumeRestores: `{"WriteSparseFiles": true}`, + uploaderConfig: &map[string]string{ + "WriteSparseFiles": "true", }, expectedFlags: []string{"--sparse"}, }, { name: "SparseFilesDisabled", - uploaderConfig: map[string]string{ - uploaderconfig.PodVolumeRestores: `{"WriteSparseFiles": false}`, + uploaderConfig: &map[string]string{ + "writeSparseFiles": "false", }, expectedFlags: []string{}, }, diff --git a/pkg/util/uploaderconfig/uploaderconfig.go b/pkg/util/uploaderconfig/uploaderconfig.go index 
6fba47930d..fdd6d67ef9 100644 --- a/pkg/util/uploaderconfig/uploaderconfig.go +++ b/pkg/util/uploaderconfig/uploaderconfig.go @@ -1,7 +1,7 @@ package uploaderconfig import ( - "encoding/json" + "strconv" "github.com/pkg/errors" @@ -9,74 +9,42 @@ import ( ) const ( - PodVolumeBackups = "PodVolumeBackups" - PodVolumeRestores = "PodVolumeRestores" - DataUploads = "DataUploads" - DataDownloads = "DataDownloads" + parallelFilesUpload = "ParallelFilesUpload" + writeSparseFiles = "WriteSparseFiles" ) -type PVBConfig struct { - ParallelFilesUpload int `json:"parallelFilesUpload,omitempty"` +func StoreBackupConfig(config *velerov1api.UploaderConfigForBackup) *map[string]string { + data := make(map[string]string) + data[parallelFilesUpload] = strconv.Itoa(config.ParallelFilesUpload) + return &data } -type PVRConfig struct { - WriteSparseFiles bool `json:"writeSparseFiles,omitempty"` +func StoreRestoreConfig(config *velerov1api.UploaderConfigForRestore) *map[string]string { + data := make(map[string]string) + data[writeSparseFiles] = strconv.FormatBool(config.WriteSparseFiles) + return &data } -func MarshalToPVBConfig(backupConfig *velerov1api.BackupConfig) (string, error) { - jsonData, err := json.Marshal(backupConfig) - if err != nil { - return "", errors.Wrap(err, "failed to marshal backup config") - } - - var pvb PVBConfig - err = json.Unmarshal(jsonData, &pvb) - if err != nil { - return "", errors.Wrap(err, "failed to unmarshal backup config") - } - - finalJSONData, err := json.Marshal(pvb) - if err != nil { - return "", errors.Wrap(err, "failed to marshal backup config") - } - - return string(finalJSONData), nil -} - -func MarshalToPVRConfig(restoreConfig *velerov1api.RestoreConfig) (string, error) { - jsonData, err := json.Marshal(restoreConfig) - if err != nil { - return "", errors.Wrap(err, "failed to marshal restore config") - } - - var pvr PVRConfig - err = json.Unmarshal(jsonData, &pvr) - if err != nil { - return "", errors.Wrap(err, "failed to unmarshal restore 
config") - } - - finalJSONData, err := json.Marshal(pvr) - if err != nil { - return "", errors.Wrap(err, "failed to marshal restore config") - } - - return string(finalJSONData), nil -} - -func ParseBackupConfig(str string) (velerov1api.BackupConfig, error) { - var config velerov1api.BackupConfig - err := json.Unmarshal([]byte(str), &config) - if err != nil { - return velerov1api.BackupConfig{}, err +func GetBackupConfig(data *map[string]string) (velerov1api.UploaderConfigForBackup, error) { + config := velerov1api.UploaderConfigForBackup{} + var err error + if item, ok := (*data)[parallelFilesUpload]; ok { + config.ParallelFilesUpload, err = strconv.Atoi(item) + if err != nil { + return velerov1api.UploaderConfigForBackup{}, errors.Wrap(err, "failed to parse ParallelFilesUpload") + } } return config, nil } -func ParseRestoreConfig(str string) (velerov1api.RestoreConfig, error) { - var config velerov1api.RestoreConfig - err := json.Unmarshal([]byte(str), &config) - if err != nil { - return velerov1api.RestoreConfig{}, err +func GetRestoreConfig(data *map[string]string) (velerov1api.UploaderConfigForRestore, error) { + config := velerov1api.UploaderConfigForRestore{} + var err error + if item, ok := (*data)[writeSparseFiles]; ok { + config.WriteSparseFiles, err = strconv.ParseBool(item) + if err != nil { + return velerov1api.UploaderConfigForRestore{}, errors.Wrap(err, "failed to parse WriteSparseFiles") + } } return config, nil } diff --git a/pkg/util/uploaderconfig/uploaderconfig_test.go b/pkg/util/uploaderconfig/uploaderconfig_test.go new file mode 100644 index 0000000000..85ab2ec320 --- /dev/null +++ b/pkg/util/uploaderconfig/uploaderconfig_test.go @@ -0,0 +1,95 @@ +package uploaderconfig + +import ( + "reflect" + "strings" + "testing" + + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" +) + +func TestStoreBackupConfig(t *testing.T) { + config := &velerov1api.UploaderConfigForBackup{ + ParallelFilesUpload: 3, + } + + expectedData := 
map[string]string{ + parallelFilesUpload: "3", + } + + result := StoreBackupConfig(config) + + if !reflect.DeepEqual(*result, expectedData) { + t.Errorf("Expected: %v, but got: %v", expectedData, *result) + } +} + +func TestStoreRestoreConfig(t *testing.T) { + config := &velerov1api.UploaderConfigForRestore{ + WriteSparseFiles: true, + } + + expectedData := map[string]string{ + writeSparseFiles: "true", + } + + result := StoreRestoreConfig(config) + + if !reflect.DeepEqual(*result, expectedData) { + t.Errorf("Expected: %v, but got: %v", expectedData, *result) + } +} + +func TestGetBackupConfig(t *testing.T) { + data := &map[string]string{ + parallelFilesUpload: "3", + } + + expectedConfig := velerov1api.UploaderConfigForBackup{ + ParallelFilesUpload: 3, + } + + result, err := GetBackupConfig(data) + + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if !reflect.DeepEqual(result, expectedConfig) { + t.Errorf("Expected: %v, but got: %v", expectedConfig, result) + } + + // Test error case + (*data)[parallelFilesUpload] = "invalid" + _, err = GetBackupConfig(data) + if !strings.Contains(err.Error(), "failed to parse ParallelFilesUpload") { + t.Errorf("Expected error message containing 'failed to parse ParallelFilesUpload', but got: %v", err) + } +} + +func TestGetRestoreConfig(t *testing.T) { + data := &map[string]string{ + writeSparseFiles: "true", + } + + expectedConfig := velerov1api.UploaderConfigForRestore{ + WriteSparseFiles: true, + } + + result, err := GetRestoreConfig(data) + + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if !reflect.DeepEqual(result, expectedConfig) { + t.Errorf("Expected: %v, but got: %v", expectedConfig, result) + } + + // Test error case + (*data)[writeSparseFiles] = "invalid" + _, err = GetRestoreConfig(data) + if !strings.Contains(err.Error(), "failed to parse WriteSparseFiles") { + t.Errorf("Expected error message containing 'failed to parse WriteSparseFiles', but got: %v", err) + } +} diff --git 
a/site/content/docs/main/custom-plugins.md b/site/content/docs/main/custom-plugins.md index a4c13d0f48..703de3c494 100644 --- a/site/content/docs/main/custom-plugins.md +++ b/site/content/docs/main/custom-plugins.md @@ -37,7 +37,7 @@ When naming your plugin, keep in mind that the full name needs to conform to the - have two parts, prefix + name, separated by '/' - none of the above parts can be empty - the prefix is a valid DNS subdomain name -- a plugin with the same prefix + name cannot not already exist +- a plugin with the same prefix + name cannot already exist ### Some examples: diff --git a/site/content/docs/v1.0.0/plugins.md b/site/content/docs/v1.0.0/plugins.md index e9024fcce9..0005bff8e7 100644 --- a/site/content/docs/v1.0.0/plugins.md +++ b/site/content/docs/v1.0.0/plugins.md @@ -15,7 +15,7 @@ When naming your plugin, keep in mind that the name needs to conform to these ru - have two parts separated by '/' - none of the above parts can be empty - the prefix is a valid DNS subdomain name -- a plugin with the same name cannot not already exist +- a plugin with the same name cannot already exist ### Some examples: diff --git a/site/content/docs/v1.1.0/plugins.md b/site/content/docs/v1.1.0/plugins.md index 617b711e5a..0b9d409972 100644 --- a/site/content/docs/v1.1.0/plugins.md +++ b/site/content/docs/v1.1.0/plugins.md @@ -15,7 +15,7 @@ When naming your plugin, keep in mind that the name needs to conform to these ru - have two parts separated by '/' - none of the above parts can be empty - the prefix is a valid DNS subdomain name -- a plugin with the same name cannot not already exist +- a plugin with the same name cannot already exist ### Some examples: diff --git a/site/content/docs/v1.10/custom-plugins.md b/site/content/docs/v1.10/custom-plugins.md index 5fe168d75f..c26698dc01 100644 --- a/site/content/docs/v1.10/custom-plugins.md +++ b/site/content/docs/v1.10/custom-plugins.md @@ -37,7 +37,7 @@ When naming your plugin, keep in mind that the full name 
needs to conform to the - have two parts, prefix + name, separated by '/' - none of the above parts can be empty - the prefix is a valid DNS subdomain name -- a plugin with the same prefix + name cannot not already exist +- a plugin with the same prefix + name cannot already exist ### Some examples: diff --git a/site/content/docs/v1.11/custom-plugins.md b/site/content/docs/v1.11/custom-plugins.md index 6024fecc15..fce6282e80 100644 --- a/site/content/docs/v1.11/custom-plugins.md +++ b/site/content/docs/v1.11/custom-plugins.md @@ -37,7 +37,7 @@ When naming your plugin, keep in mind that the full name needs to conform to the - have two parts, prefix + name, separated by '/' - none of the above parts can be empty - the prefix is a valid DNS subdomain name -- a plugin with the same prefix + name cannot not already exist +- a plugin with the same prefix + name cannot already exist ### Some examples: diff --git a/site/content/docs/v1.2.0/custom-plugins.md b/site/content/docs/v1.2.0/custom-plugins.md index b7d42019ff..96c1bb04f7 100644 --- a/site/content/docs/v1.2.0/custom-plugins.md +++ b/site/content/docs/v1.2.0/custom-plugins.md @@ -15,7 +15,7 @@ When naming your plugin, keep in mind that the name needs to conform to these ru - have two parts separated by '/' - none of the above parts can be empty - the prefix is a valid DNS subdomain name -- a plugin with the same name cannot not already exist +- a plugin with the same name cannot already exist ### Some examples: diff --git a/site/content/docs/v1.3.0/custom-plugins.md b/site/content/docs/v1.3.0/custom-plugins.md index 0451bbd633..10752a8b60 100644 --- a/site/content/docs/v1.3.0/custom-plugins.md +++ b/site/content/docs/v1.3.0/custom-plugins.md @@ -15,7 +15,7 @@ When naming your plugin, keep in mind that the name needs to conform to these ru - have two parts separated by '/' - none of the above parts can be empty - the prefix is a valid DNS subdomain name -- a plugin with the same name cannot not already exist +- a 
plugin with the same name cannot already exist ### Some examples: diff --git a/site/content/docs/v1.3.1/custom-plugins.md b/site/content/docs/v1.3.1/custom-plugins.md index b9c21f31f5..894a187fab 100644 --- a/site/content/docs/v1.3.1/custom-plugins.md +++ b/site/content/docs/v1.3.1/custom-plugins.md @@ -15,7 +15,7 @@ When naming your plugin, keep in mind that the name needs to conform to these ru - have two parts separated by '/' - none of the above parts can be empty - the prefix is a valid DNS subdomain name -- a plugin with the same name cannot not already exist +- a plugin with the same name cannot already exist ### Some examples: diff --git a/site/content/docs/v1.3.2/custom-plugins.md b/site/content/docs/v1.3.2/custom-plugins.md index baa5b26d0b..9a5b9fe618 100644 --- a/site/content/docs/v1.3.2/custom-plugins.md +++ b/site/content/docs/v1.3.2/custom-plugins.md @@ -15,7 +15,7 @@ When naming your plugin, keep in mind that the name needs to conform to these ru - have two parts separated by '/' - none of the above parts can be empty - the prefix is a valid DNS subdomain name -- a plugin with the same name cannot not already exist +- a plugin with the same name cannot already exist ### Some examples: diff --git a/site/content/docs/v1.4/custom-plugins.md b/site/content/docs/v1.4/custom-plugins.md index 075d9a77af..6bbf5863d7 100644 --- a/site/content/docs/v1.4/custom-plugins.md +++ b/site/content/docs/v1.4/custom-plugins.md @@ -15,7 +15,7 @@ When naming your plugin, keep in mind that the name needs to conform to these ru - have two parts separated by '/' - none of the above parts can be empty - the prefix is a valid DNS subdomain name -- a plugin with the same name cannot not already exist +- a plugin with the same name cannot already exist ### Some examples: diff --git a/site/content/docs/v1.5/custom-plugins.md b/site/content/docs/v1.5/custom-plugins.md index 36aa6eb07d..989aeec418 100644 --- a/site/content/docs/v1.5/custom-plugins.md +++ 
b/site/content/docs/v1.5/custom-plugins.md @@ -37,7 +37,7 @@ When naming your plugin, keep in mind that the full name needs to conform to the - have two parts, prefix + name, separated by '/' - none of the above parts can be empty - the prefix is a valid DNS subdomain name -- a plugin with the same prefix + name cannot not already exist +- a plugin with the same prefix + name cannot already exist ### Some examples: diff --git a/site/content/docs/v1.6/custom-plugins.md b/site/content/docs/v1.6/custom-plugins.md index 7968ff4ad1..167584eb0c 100644 --- a/site/content/docs/v1.6/custom-plugins.md +++ b/site/content/docs/v1.6/custom-plugins.md @@ -37,7 +37,7 @@ When naming your plugin, keep in mind that the full name needs to conform to the - have two parts, prefix + name, separated by '/' - none of the above parts can be empty - the prefix is a valid DNS subdomain name -- a plugin with the same prefix + name cannot not already exist +- a plugin with the same prefix + name cannot already exist ### Some examples: diff --git a/site/content/docs/v1.7/custom-plugins.md b/site/content/docs/v1.7/custom-plugins.md index dbb82d5a30..38bbb246e7 100644 --- a/site/content/docs/v1.7/custom-plugins.md +++ b/site/content/docs/v1.7/custom-plugins.md @@ -37,7 +37,7 @@ When naming your plugin, keep in mind that the full name needs to conform to the - have two parts, prefix + name, separated by '/' - none of the above parts can be empty - the prefix is a valid DNS subdomain name -- a plugin with the same prefix + name cannot not already exist +- a plugin with the same prefix + name cannot already exist ### Some examples: diff --git a/site/content/docs/v1.8/custom-plugins.md b/site/content/docs/v1.8/custom-plugins.md index c4dbc03e14..e84ee777f8 100644 --- a/site/content/docs/v1.8/custom-plugins.md +++ b/site/content/docs/v1.8/custom-plugins.md @@ -37,7 +37,7 @@ When naming your plugin, keep in mind that the full name needs to conform to the - have two parts, prefix + name, separated by 
'/' - none of the above parts can be empty - the prefix is a valid DNS subdomain name -- a plugin with the same prefix + name cannot not already exist +- a plugin with the same prefix + name cannot already exist ### Some examples: diff --git a/site/content/docs/v1.9/custom-plugins.md b/site/content/docs/v1.9/custom-plugins.md index 8f92ec17ba..403f60d4ad 100644 --- a/site/content/docs/v1.9/custom-plugins.md +++ b/site/content/docs/v1.9/custom-plugins.md @@ -37,7 +37,7 @@ When naming your plugin, keep in mind that the full name needs to conform to the - have two parts, prefix + name, separated by '/' - none of the above parts can be empty - the prefix is a valid DNS subdomain name -- a plugin with the same prefix + name cannot not already exist +- a plugin with the same prefix + name cannot already exist ### Some examples: diff --git a/test/e2e/backup/backup.go b/test/e2e/backup/backup.go index 52dc7ad8fb..923781e258 100644 --- a/test/e2e/backup/backup.go +++ b/test/e2e/backup/backup.go @@ -31,15 +31,33 @@ import ( . 
"github.com/vmware-tanzu/velero/test/util/velero" ) +type BackupRestoreTestConfig struct { + useVolumeSnapshots bool + kibishiiPatchSubDir string + isRetainPVTest bool +} + func BackupRestoreWithSnapshots() { - BackupRestoreTest(true) + config := BackupRestoreTestConfig{true, "", false} + BackupRestoreTest(config) } func BackupRestoreWithRestic() { - BackupRestoreTest(false) + config := BackupRestoreTestConfig{false, "", false} + BackupRestoreTest(config) +} + +func BackupRestoreRetainedPVWithSnapshots() { + config := BackupRestoreTestConfig{true, "overlays/sc-reclaim-policy/", true} + BackupRestoreTest(config) +} + +func BackupRestoreRetainedPVWithRestic() { + config := BackupRestoreTestConfig{false, "overlays/sc-reclaim-policy/", true} + BackupRestoreTest(config) } -func BackupRestoreTest(useVolumeSnapshots bool) { +func BackupRestoreTest(backupRestoreTestConfig BackupRestoreTestConfig) { var ( backupName, restoreName, kibishiiNamespace string @@ -48,25 +66,34 @@ func BackupRestoreTest(useVolumeSnapshots bool) { veleroCfg VeleroConfig ) provideSnapshotVolumesParmInBackup = false + useVolumeSnapshots := backupRestoreTestConfig.useVolumeSnapshots BeforeEach(func() { veleroCfg = VeleroCfg + + veleroCfg.KibishiiDirectory = veleroCfg.KibishiiDirectory + backupRestoreTestConfig.kibishiiPatchSubDir veleroCfg.UseVolumeSnapshots = useVolumeSnapshots veleroCfg.UseNodeAgent = !useVolumeSnapshots if useVolumeSnapshots && veleroCfg.CloudProvider == "kind" { Skip("Volume snapshots not supported on kind") } + var err error flag.Parse() UUIDgen, err = uuid.NewRandom() kibishiiNamespace = "k-" + UUIDgen.String() Expect(err).To(Succeed()) + DeleteStorageClass(context.Background(), *veleroCfg.ClientToInstallVelero, KibishiiStorageClassName) }) AfterEach(func() { if !veleroCfg.Debug { By("Clean backups after test", func() { DeleteAllBackups(context.Background(), *veleroCfg.ClientToInstallVelero) + if backupRestoreTestConfig.isRetainPVTest { + CleanAllRetainedPV(context.Background(), 
*veleroCfg.ClientToInstallVelero) + } + DeleteStorageClass(context.Background(), *veleroCfg.ClientToInstallVelero, KibishiiStorageClassName) }) if veleroCfg.InstallVelero { ctx, ctxCancel := context.WithTimeout(context.Background(), time.Minute*5) @@ -106,6 +133,9 @@ func BackupRestoreTest(useVolumeSnapshots bool) { }) It("should successfully back up and restore to an additional BackupStorageLocation with unique credentials", func() { + if backupRestoreTestConfig.isRetainPVTest { + Skip("It's tested by 1st test case") + } if veleroCfg.AdditionalBSLProvider == "" { Skip("no additional BSL provider given, not running multiple BackupStorageLocation with unique credentials tests") } diff --git a/test/e2e/basic/namespace-mapping.go b/test/e2e/basic/namespace-mapping.go index ea2a8f53a7..dbf98c1f92 100644 --- a/test/e2e/basic/namespace-mapping.go +++ b/test/e2e/basic/namespace-mapping.go @@ -102,7 +102,7 @@ func (n *NamespaceMapping) Verify() error { n.kibishiiData.Levels = len(*n.NSIncluded) + index By(fmt.Sprintf("Verify workload %s after restore ", ns), func() { Expect(KibishiiVerifyAfterRestore(n.Client, ns, - n.Ctx, n.kibishiiData)).To(Succeed(), "Fail to verify workload after restore") + n.Ctx, n.kibishiiData, "")).To(Succeed(), "Fail to verify workload after restore") }) } for _, ns := range *n.NSIncluded { diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index b4cb6b22a3..f14601924f 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -28,6 +28,7 @@ import ( "github.com/onsi/ginkgo/reporters" . "github.com/onsi/gomega" + "github.com/vmware-tanzu/velero/pkg/cmd/cli/install" . "github.com/vmware-tanzu/velero/test" . "github.com/vmware-tanzu/velero/test/e2e/backup" . "github.com/vmware-tanzu/velero/test/e2e/backups" @@ -49,6 +50,7 @@ import ( ) func init() { + VeleroCfg.Options = &install.Options{} flag.StringVar(&VeleroCfg.CloudProvider, "cloud-provider", "", "cloud that Velero will be installed into. 
Required.") flag.StringVar(&VeleroCfg.ObjectStoreProvider, "object-store-provider", "", "provider of object store plugin. Required if cloud-provider is kind, otherwise ignored.") flag.StringVar(&VeleroCfg.BSLBucket, "bucket", "", "name of the object storage bucket where backups from e2e tests should be stored. Required.") @@ -102,6 +104,10 @@ var _ = Describe("[Basic][Restic] Velero tests on cluster using the plugin provi var _ = Describe("[Basic][Snapshot] Velero tests on cluster using the plugin provider for object storage and snapshots for volume backups", BackupRestoreWithSnapshots) +var _ = Describe("[Basic][Snapshot][RetainPV] Velero tests on cluster using the plugin provider for object storage and snapshots for volume backups", BackupRestoreRetainedPVWithSnapshots) + +var _ = Describe("[Basic][Restic][RetainPV] Velero tests on cluster using the plugin provider for object storage and snapshots for volume backups", BackupRestoreRetainedPVWithRestic) + var _ = Describe("[Basic][ClusterResource] Backup/restore of cluster resources", ResourcesCheckTest) var _ = Describe("[Scale][LongTime] Backup/restore of 2500 namespaces", MultiNSBackupRestore) diff --git a/test/e2e/migration/migration.go b/test/e2e/migration/migration.go index a1a5e895c9..da808ba92c 100644 --- a/test/e2e/migration/migration.go +++ b/test/e2e/migration/migration.go @@ -273,15 +273,16 @@ func MigrationTest(useVolumeSnapshots bool, veleroCLI2Version VeleroCLI2Version) } By(fmt.Sprintf("Install Velero in cluster-B (%s) to restore workload", veleroCfg.StandbyCluster), func() { + //Ensure workload of "migrationNamespace" existed in cluster-A ns, err := GetNamespace(context.Background(), *veleroCfg.DefaultClient, migrationNamespace) Expect(ns.Name).To(Equal(migrationNamespace)) - Expect(err).NotTo(HaveOccurred()) + Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("get namespace in cluster-B err: %v", err)) + //Ensure cluster-B is the target cluster Expect(KubectlConfigUseContext(context.Background(), 
veleroCfg.StandbyCluster)).To(Succeed()) _, err = GetNamespace(context.Background(), *veleroCfg.StandbyClient, migrationNamespace) Expect(err).To(HaveOccurred()) strings.Contains(fmt.Sprint(err), "namespaces \""+migrationNamespace+"\" not found") - fmt.Println(err) veleroCfg.ClientToInstallVelero = veleroCfg.StandbyClient @@ -335,7 +336,7 @@ func MigrationTest(useVolumeSnapshots bool, veleroCLI2Version VeleroCLI2Version) By(fmt.Sprintf("Verify workload %s after restore ", migrationNamespace), func() { Expect(KibishiiVerifyAfterRestore(*veleroCfg.StandbyClient, migrationNamespace, - oneHourTimeout, &KibishiiData)).To(Succeed(), "Fail to verify workload after restore") + oneHourTimeout, &KibishiiData, "")).To(Succeed(), "Fail to verify workload after restore") }) // TODO: delete backup created by case self, not all diff --git a/test/e2e/pv-backup/pv-backup-filter.go b/test/e2e/pv-backup/pv-backup-filter.go index d8de42dd2e..556dfeb702 100644 --- a/test/e2e/pv-backup/pv-backup-filter.go +++ b/test/e2e/pv-backup/pv-backup-filter.go @@ -180,7 +180,7 @@ func fileContent(namespace, podName, volume string) string { } func fileExist(ctx context.Context, namespace, podName, volume string) error { - c, err := ReadFileFromPodVolume(ctx, namespace, podName, podName, volume, FILE_NAME) + c, _, err := ReadFileFromPodVolume(ctx, namespace, podName, podName, volume, FILE_NAME) if err != nil { return errors.Wrap(err, fmt.Sprintf("Fail to read file %s from volume %s of pod %s in %s ", FILE_NAME, volume, podName, namespace)) @@ -195,7 +195,7 @@ func fileExist(ctx context.Context, namespace, podName, volume string) error { } } func fileNotExist(ctx context.Context, namespace, podName, volume string) error { - _, err := ReadFileFromPodVolume(ctx, namespace, podName, podName, volume, FILE_NAME) + _, _, err := ReadFileFromPodVolume(ctx, namespace, podName, podName, volume, FILE_NAME) if err != nil { return nil } else { diff --git a/test/e2e/resourcepolicies/resource_policies.go 
b/test/e2e/resourcepolicies/resource_policies.go index 6f98c5ebda..df96bc3d91 100644 --- a/test/e2e/resourcepolicies/resource_policies.go +++ b/test/e2e/resourcepolicies/resource_policies.go @@ -24,7 +24,6 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" - "github.com/pkg/errors" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -164,7 +163,7 @@ func (r *ResourcePoliciesCase) Verify() error { if vol.Name != volName { continue } - content, err := ReadFileFromPodVolume(r.Ctx, ns, pod.Name, "container-busybox", vol.Name, FileName) + content, _, err := ReadFileFromPodVolume(r.Ctx, ns, pod.Name, "container-busybox", vol.Name, FileName) if i%2 == 0 { Expect(err).To(HaveOccurred(), "Expected file not found") // File should not exist } else { diff --git a/test/e2e/upgrade/upgrade.go b/test/e2e/upgrade/upgrade.go index 6fd4c40ed3..c9e9af90bb 100644 --- a/test/e2e/upgrade/upgrade.go +++ b/test/e2e/upgrade/upgrade.go @@ -29,7 +29,6 @@ import ( . "github.com/vmware-tanzu/velero/test" . "github.com/vmware-tanzu/velero/test/util/k8s" . "github.com/vmware-tanzu/velero/test/util/kibishii" - . "github.com/vmware-tanzu/velero/test/util/providers" . 
"github.com/vmware-tanzu/velero/test/util/velero" ) @@ -256,7 +255,7 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC By(fmt.Sprintf("Verify workload %s after restore ", upgradeNamespace), func() { Expect(KibishiiVerifyAfterRestore(*veleroCfg.ClientToInstallVelero, upgradeNamespace, - oneHourTimeout, DefaultKibishiiData)).To(Succeed(), "Fail to verify workload after restore") + oneHourTimeout, DefaultKibishiiData, "")).To(Succeed(), "Fail to verify workload after restore") }) }) }) diff --git a/test/perf/Makefile b/test/perf/Makefile index f30ee5b995..843ccab87e 100644 --- a/test/perf/Makefile +++ b/test/perf/Makefile @@ -76,6 +76,17 @@ NFS_SERVER_PATH ?= UPLOADER_TYPE ?= TEST_CASE_DESCRIBE ?= 'velero performance test' BACKUP_FOR_RESTORE ?= +Delete_Cluster_Resource ?= false +Debug_Velero_Pod_Restart ?= false +NODE_AGENT_POD_CPU_LIMIT ?= 4 +NODE_AGENT_POD_MEM_LIMIT ?= 4Gi +NODE_AGENT_POD_CPU_REQUEST ?= 2 +NODE_AGENT_POD_MEM_REQUEST ?= 2Gi +VELERO_POD_CPU_LIMIT ?= 4 +VELERO_POD_MEM_LIMIT ?= 4Gi +VELERO_POD_CPU_REQUEST ?= 2 +VELERO_POD_MEM_REQUEST ?= 2Gi +POD_VOLUME_OPERATION_TIMEOUT ?= 6h .PHONY:ginkgo ginkgo: # Make sure ginkgo is in $GOPATH/bin @@ -110,7 +121,18 @@ run: ginkgo -uploader-type=$(UPLOADER_TYPE) \ -nfs-server-path=$(NFS_SERVER_PATH) \ -test-case-describe=$(TEST_CASE_DESCRIBE) \ - -backup-for-restore=$(BACKUP_FOR_RESTORE) + -backup-for-restore=$(BACKUP_FOR_RESTORE) \ + -delete-cluster-resource=$(Delete_Cluster_Resource) \ + -debug-velero-pod-restart=$(Debug_Velero_Pod_Restart) \ + -node-agent-pod-cpu-limit=$(NODE_AGENT_POD_CPU_LIMIT) \ + -node-agent-pod-mem-limit=$(NODE_AGENT_POD_MEM_LIMIT) \ + -node-agent-pod-cpu-request=$(NODE_AGENT_POD_CPU_REQUEST) \ + -node-agent-pod-mem-request=$(NODE_AGENT_POD_MEM_REQUEST) \ + -velero-pod-cpu-limit=$(VELERO_POD_CPU_LIMIT) \ + -velero-pod-mem-limit=$(VELERO_POD_MEM_LIMIT) \ + -velero-pod-cpu-request=$(VELERO_POD_CPU_REQUEST) \ + -velero-pod-mem-request=$(VELERO_POD_MEM_REQUEST) \ 
+ -pod-volume-operation-timeout=$(POD_VOLUME_OPERATION_TIMEOUT) build: ginkgo mkdir -p $(OUTPUT_DIR) diff --git a/test/perf/backup/backup.go b/test/perf/backup/backup.go index 7f9f35de08..3a3c059a56 100644 --- a/test/perf/backup/backup.go +++ b/test/perf/backup/backup.go @@ -32,7 +32,7 @@ type BackupTest struct { func (b *BackupTest) Init() error { b.TestCase.Init() - b.Ctx, b.CtxCancel = context.WithTimeout(context.Background(), 1*time.Hour) + b.Ctx, b.CtxCancel = context.WithTimeout(context.Background(), 6*time.Hour) b.CaseBaseName = "backup" b.BackupName = "backup-" + b.CaseBaseName + "-" + b.UUIDgen diff --git a/test/perf/basic/basic.go b/test/perf/basic/basic.go index 80c6b02185..76bf605a68 100644 --- a/test/perf/basic/basic.go +++ b/test/perf/basic/basic.go @@ -18,12 +18,14 @@ package basic import ( "context" - "fmt" "strings" "time" + "github.com/pkg/errors" + . "github.com/vmware-tanzu/velero/test" . "github.com/vmware-tanzu/velero/test/perf/test" + "github.com/vmware-tanzu/velero/test/util/k8s" ) type BasicTest struct { @@ -32,7 +34,7 @@ type BasicTest struct { func (b *BasicTest) Init() error { b.TestCase.Init() - b.Ctx, b.CtxCancel = context.WithTimeout(context.Background(), 1*time.Hour) + b.Ctx, b.CtxCancel = context.WithTimeout(context.Background(), 6*time.Hour) b.CaseBaseName = "backuprestore" b.BackupName = "backup-" + b.CaseBaseName + "-" + b.UUIDgen b.RestoreName = "restore-" + b.CaseBaseName + "-" + b.UUIDgen @@ -49,10 +51,20 @@ func (b *BasicTest) Init() error { "--from-backup", b.BackupName, "--wait", } + if !VeleroCfg.DeleteClusterResource { + joinedNsMapping, err := k8s.GetMappingNamespaces(b.Ctx, b.Client, *b.NSExcluded) + if err != nil { + return errors.Wrapf(err, "failed to get mapping namespaces in init") + } + + b.RestoreArgs = append(b.RestoreArgs, "--namespace-mappings") + b.RestoreArgs = append(b.RestoreArgs, joinedNsMapping) + } + b.TestMsg = &TestMSG{ Desc: "Do backup and restore resources for performance test", FailedMSG: "Failed to 
backup and restore resources", - Text: fmt.Sprintf("Should backup and restore resources success"), + Text: "Should backup and restore resources success", } return nil } diff --git a/test/perf/e2e_suite_test.go b/test/perf/e2e_suite_test.go index 4d3275dec1..57599ec364 100644 --- a/test/perf/e2e_suite_test.go +++ b/test/perf/e2e_suite_test.go @@ -21,12 +21,14 @@ import ( "flag" "fmt" "testing" + "time" . "github.com/onsi/ginkgo" "github.com/onsi/ginkgo/reporters" . "github.com/onsi/gomega" "github.com/pkg/errors" + "github.com/vmware-tanzu/velero/pkg/cmd/cli/install" . "github.com/vmware-tanzu/velero/test" "github.com/vmware-tanzu/velero/test/perf/backup" @@ -39,6 +41,7 @@ import ( ) func init() { + VeleroCfg.Options = &install.Options{} flag.StringVar(&VeleroCfg.CloudProvider, "cloud-provider", "", "cloud that Velero will be installed into. Required.") flag.StringVar(&VeleroCfg.ObjectStoreProvider, "object-store-provider", "", "provider of object store plugin. Required if cloud-provider is kind, otherwise ignored.") flag.StringVar(&VeleroCfg.BSLBucket, "bucket", "", "name of the object storage bucket where backups from e2e tests should be stored. Required.") @@ -56,6 +59,15 @@ func init() { flag.BoolVar(&VeleroCfg.InstallVelero, "install-velero", true, "install/uninstall velero during the test. Optional.") flag.BoolVar(&VeleroCfg.UseNodeAgent, "use-node-agent", true, "whether deploy node agent daemonset velero during the test. Optional.") flag.StringVar(&VeleroCfg.RegistryCredentialFile, "registry-credential-file", "", "file containing credential for the image registry, follows the same format rules as the ~/.docker/config.json file. Optional.") + flag.StringVar(&VeleroCfg.NodeAgentPodCPULimit, "node-agent-pod-cpu-limit", "4", "CPU limit for node agent pod. Optional.") + flag.StringVar(&VeleroCfg.NodeAgentPodMemLimit, "node-agent-pod-mem-limit", "4Gi", "Memory limit for node agent pod. 
Optional.") + flag.StringVar(&VeleroCfg.NodeAgentPodCPURequest, "node-agent-pod-cpu-request", "2", "CPU request for node agent pod. Optional.") + flag.StringVar(&VeleroCfg.NodeAgentPodMemRequest, "node-agent-pod-mem-request", "2Gi", "Memory request for node agent pod. Optional.") + flag.StringVar(&VeleroCfg.VeleroPodCPULimit, "velero-pod-cpu-limit", "4", "CPU limit for velero pod. Optional.") + flag.StringVar(&VeleroCfg.VeleroPodMemLimit, "velero-pod-mem-limit", "4Gi", "Memory limit for velero pod. Optional.") + flag.StringVar(&VeleroCfg.VeleroPodCPURequest, "velero-pod-cpu-request", "2", "CPU request for velero pod. Optional.") + flag.StringVar(&VeleroCfg.VeleroPodMemRequest, "velero-pod-mem-request", "2Gi", "Memory request for velero pod. Optional.") + flag.DurationVar(&VeleroCfg.PodVolumeOperationTimeout, "pod-volume-operation-timeout", 360*time.Minute, "Timeout for pod volume operations. Optional.") //vmware-tanzu-experiments flag.StringVar(&VeleroCfg.Features, "features", "", "Comma-separated list of features to enable for this Velero process.") flag.StringVar(&VeleroCfg.DefaultCluster, "default-cluster-context", "", "Default cluster context for migration test.") @@ -65,6 +77,8 @@ func init() { flag.StringVar(&VeleroCfg.NFSServerPath, "nfs-server-path", "", "the path of nfs server") flag.StringVar(&VeleroCfg.TestCaseDescribe, "test-case-describe", "velero performance test", "the description for the current test") flag.StringVar(&VeleroCfg.BackupForRestore, "backup-for-restore", "", "the name of backup for restore") + flag.BoolVar(&VeleroCfg.DeleteClusterResource, "delete-cluster-resource", false, "delete cluster resource after test") + flag.BoolVar(&VeleroCfg.DebugVeleroPodRestart, "debug-velero-pod-restart", false, "Switch for debugging velero pod restart.") } func initConfig() error { diff --git a/test/perf/metrics/pod.go b/test/perf/metrics/pod.go index f341fe918a..56572f6728 100644 --- a/test/perf/metrics/pod.go +++ b/test/perf/metrics/pod.go @@ -20,6 
+20,7 @@ import ( "context" "fmt" "strings" + "time" "github.com/pkg/errors" @@ -29,6 +30,7 @@ import ( ) const PodResourceDesc = "Resource consumption" +const PodMetricsTimeout = 5 * time.Minute type PodMetrics struct { Client *metricsclientset.Clientset @@ -39,31 +41,31 @@ type PodMetrics struct { } func (p *PodMetrics) Update() error { - cpu, mem, err := metrics.GetPodUsageMetrics(p.Ctx, p.Client, p.PodName, p.Namespace) + cpu, mem, err := metrics.GetPodUsageMetrics(p.Ctx, p.Client, p.PodName, p.Namespace, PodMetricsTimeout) if err != nil { return errors.WithStack(err) - } else { - keyMaxCPU := p.PodName + ":MaxCPU" - curCPU := cpu.MilliValue() - if curCPU > p.Metrics[keyMaxCPU] { - p.Metrics[keyMaxCPU] = curCPU - } + } + keyMaxCPU := p.PodName + ":MaxCPU" + curCPU := cpu.MilliValue() + if curCPU > p.Metrics[keyMaxCPU] { + p.Metrics[keyMaxCPU] = curCPU + } - keyMaxMem := p.PodName + ":MaxMemory" - curMem := mem.MilliValue() - if curMem > p.Metrics[keyMaxMem] { - p.Metrics[keyMaxMem] = curMem - } + keyMaxMem := p.PodName + ":MaxMemory" + curMem := mem.MilliValue() + if curMem > p.Metrics[keyMaxMem] { + p.Metrics[keyMaxMem] = curMem + } - keyAvgCPU := p.PodName + ":AverageCPU" - preAvgCPU := p.Metrics[keyAvgCPU] - p.Metrics[keyAvgCPU] = (preAvgCPU*p.count + curCPU) / (p.count + 1) + keyAvgCPU := p.PodName + ":AverageCPU" + preAvgCPU := p.Metrics[keyAvgCPU] + p.Metrics[keyAvgCPU] = (preAvgCPU*p.count + curCPU) / (p.count + 1) + + keyAvgMem := p.PodName + ":AverageMemory" + preAvgMem := p.Metrics[keyAvgMem] + p.Metrics[keyAvgMem] = (preAvgMem*p.count + curMem) / (p.count + 1) + p.count++ - keyAvgMem := p.PodName + ":AverageMemory" - preAvgMem := p.Metrics[keyAvgMem] - p.Metrics[keyAvgMem] = (preAvgMem*p.count + curMem) / (p.count + 1) - p.count++ - } return nil } diff --git a/test/perf/metrics/time.go b/test/perf/metrics/time.go index 3334cbb297..aa760389d2 100644 --- a/test/perf/metrics/time.go +++ b/test/perf/metrics/time.go @@ -16,40 +16,53 @@ limitations under 
the License. package metrics -import "time" +import ( + "fmt" + "time" +) const TimeCaseDesc = "Time cost" +type TimeSpan struct { + Start time.Time + End time.Time +} + type TimeMetrics struct { Name string - TimeInfo map[string]time.Time // metric name : start timestamp - Metrics map[string]float64 // metric name : time duration + TimeInfo map[string]TimeSpan // metric name : start timestamp } func (t *TimeMetrics) GetMetrics() map[string]string { tmpMetrics := make(map[string]string) - for k, v := range t.Metrics { - duration := time.Duration(v) * time.Second - tmpMetrics[k] = duration.String() + for k, v := range t.TimeInfo { + duration := v.End.Sub(v.Start) + if duration < time.Second { + // For those too shoter time difference we should ignored + // as it may not really execute the logic + continue + } + tmpMetrics[k] = duration.String() + fmt.Sprintf(" (%s - %s)", v.Start.Format(time.RFC3339), v.End.Format(time.RFC3339)) } return tmpMetrics } func (t *TimeMetrics) Start(name string) { - t.TimeInfo[name] = time.Now() + t.TimeInfo[name] = TimeSpan{ + Start: time.Now(), + } } func (t *TimeMetrics) End(name string) { - t.Metrics[name] = time.Now().Sub(t.TimeInfo[name]).Seconds() - if t.Metrics[name] < 1 { - // For those too shoter time difference we should ignored - // as it may not really execute the logic - delete(t.Metrics, name) + if _, ok := t.TimeInfo[name]; !ok { + return } + timeSpan := t.TimeInfo[name] + timeSpan.End = time.Now() + t.TimeInfo[name] = timeSpan } func (t *TimeMetrics) Update() error { - t.Metrics[t.Name] = time.Now().Sub(t.TimeInfo[t.Name]).Seconds() return nil } diff --git a/test/perf/restore/restore.go b/test/perf/restore/restore.go index 025ef49865..f07d5df4d6 100644 --- a/test/perf/restore/restore.go +++ b/test/perf/restore/restore.go @@ -25,6 +25,7 @@ import ( . "github.com/vmware-tanzu/velero/test" . "github.com/vmware-tanzu/velero/test/perf/test" + "github.com/vmware-tanzu/velero/test/util/k8s" . 
"github.com/vmware-tanzu/velero/test/util/velero" ) @@ -34,7 +35,7 @@ type RestoreTest struct { func (r *RestoreTest) Init() error { r.TestCase.Init() - r.Ctx, r.CtxCancel = context.WithTimeout(context.Background(), 1*time.Hour) + r.Ctx, r.CtxCancel = context.WithTimeout(context.Background(), 6*time.Hour) r.CaseBaseName = "restore" r.RestoreName = "restore-" + r.CaseBaseName + "-" + r.UUIDgen @@ -43,7 +44,7 @@ func (r *RestoreTest) Init() error { FailedMSG: "Failed to restore resources", Text: fmt.Sprintf("Should restore resources success"), } - return r.clearUpResourcesBeforRestore() + return nil } func (r *RestoreTest) clearUpResourcesBeforRestore() error { @@ -52,6 +53,11 @@ func (r *RestoreTest) clearUpResourcesBeforRestore() error { } func (r *RestoreTest) Restore() error { + // we need to clear up all resources before do the restore test + err := r.clearUpResourcesBeforRestore() + if err != nil { + return errors.Wrapf(err, "failed to clear up resources before do the restore test") + } var backupName string if VeleroCfg.BackupForRestore != "" { backupName = VeleroCfg.BackupForRestore @@ -71,6 +77,16 @@ func (r *RestoreTest) Restore() error { "--from-backup", r.BackupName, "--wait", } + if !VeleroCfg.DeleteClusterResource { + joinedNsMapping, err := k8s.GetMappingNamespaces(r.Ctx, r.Client, *r.NSExcluded) + if err != nil { + return errors.Wrapf(err, "failed to get mapping namespaces in init") + } + + r.RestoreArgs = append(r.RestoreArgs, "--namespace-mappings") + r.RestoreArgs = append(r.RestoreArgs, joinedNsMapping) + } + return r.TestCase.Restore() } func (r *RestoreTest) Destroy() error { diff --git a/test/perf/test/test.go b/test/perf/test/test.go index 9aed01bb27..c7f80e3fea 100644 --- a/test/perf/test/test.go +++ b/test/perf/test/test.go @@ -97,14 +97,15 @@ func TestFunc(test VeleroBackupRestoreTest) func() { } func (t *TestCase) Init() error { - t.Ctx, t.CtxCancel = context.WithTimeout(context.Background(), 1*time.Hour) + t.Ctx, t.CtxCancel = 
context.WithTimeout(context.Background(), 6*time.Hour) t.NSExcluded = &[]string{"kube-system", "velero", "default", "kube-public", "kube-node-lease"} t.UUIDgen = t.GenerateUUID() t.Client = *VeleroCfg.DefaultClient t.timer = &metrics.TimeMetrics{ - Name: "Total time cost", - TimeInfo: map[string]time.Time{"Total time cost": time.Now()}, - Metrics: make(map[string]float64), + Name: "Total time cost", + TimeInfo: map[string]metrics.TimeSpan{"Total time cost": { + Start: time.Now(), + }}, } return nil } @@ -131,10 +132,12 @@ func (t *TestCase) Backup() error { } func (t *TestCase) Destroy() error { - By(fmt.Sprintf("Start to destroy namespace %s......", t.CaseBaseName), func() { - Expect(CleanupNamespacesFiterdByExcludes(t.GetTestCase().Ctx, t.Client, *t.NSExcluded)).To(Succeed(), "Could cleanup retrieve namespaces") - Expect(ClearClaimRefForFailedPVs(t.Ctx, t.Client)).To(Succeed(), "Failed to make PV status become to available") - }) + if VeleroCfg.DeleteClusterResource { + By(fmt.Sprintf("Start to destroy namespace %s......", t.CaseBaseName), func() { + Expect(CleanupNamespacesFiterdByExcludes(t.GetTestCase().Ctx, t.Client, *t.NSExcluded)).To(Succeed(), "Could cleanup retrieve namespaces") + Expect(ClearClaimRefForFailedPVs(t.Ctx, t.Client)).To(Succeed(), "Failed to make PV status become to available") + }) + } return nil } @@ -160,7 +163,7 @@ func (t *TestCase) Verify() error { } func (t *TestCase) Clean() error { - if !VeleroCfg.Debug { + if !VeleroCfg.Debug || VeleroCfg.DeleteClusterResource { By("Clean backups and restore after test", func() { if len(t.BackupArgs) != 0 { if err := VeleroBackupDelete(t.Ctx, VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, t.BackupName); err != nil { @@ -269,8 +272,7 @@ func (t *TestCase) MonitorMetircs(ctx context.Context, collectors *metrics.Metri timeMetrics := &metrics.TimeMetrics{ Name: t.CaseBaseName, - TimeInfo: make(map[string]time.Time), - Metrics: make(map[string]float64), + TimeInfo: make(map[string]metrics.TimeSpan), } 
collectors.RegisterOneTimeMetric(timeMetrics) diff --git a/test/types.go b/test/types.go index 327139f35a..360c904735 100644 --- a/test/types.go +++ b/test/types.go @@ -21,6 +21,7 @@ import ( "github.com/google/uuid" + "github.com/vmware-tanzu/velero/pkg/cmd/cli/install" . "github.com/vmware-tanzu/velero/test/util/k8s" ) @@ -40,6 +41,7 @@ var ReportData *Report type VeleroConfig struct { VeleroCfgInPerf + *install.Options VeleroCLI string VeleroImage string VeleroVersion string @@ -66,7 +68,6 @@ type VeleroConfig struct { AddBSLPlugins string InstallVelero bool KibishiiDirectory string - Features string Debug bool GCFrequency string DefaultCluster string @@ -74,12 +75,7 @@ type VeleroConfig struct { ClientToInstallVelero *TestClient DefaultClient *TestClient StandbyClient *TestClient - UploaderType string - UseNodeAgent bool - UseRestic bool ProvideSnapshotsVolumeParam bool - DefaultVolumesToFsBackup bool - UseVolumeSnapshots bool VeleroServerDebugMode bool SnapshotMoveData bool DataMoverPlugin string @@ -90,9 +86,10 @@ type VeleroConfig struct { } type VeleroCfgInPerf struct { - NFSServerPath string - TestCaseDescribe string - BackupForRestore string + NFSServerPath string + TestCaseDescribe string + BackupForRestore string + DeleteClusterResource bool } type SnapshotCheckPoint struct { diff --git a/test/util/csi/common.go b/test/util/csi/common.go index e96e865b00..932646f0cf 100644 --- a/test/util/csi/common.go +++ b/test/util/csi/common.go @@ -21,14 +21,12 @@ import ( "fmt" "strings" - "github.com/pkg/errors" - snapshotterClientSet "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned" + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - . 
"github.com/vmware-tanzu/velero/test/util/k8s" ) @@ -128,6 +126,7 @@ func GetCsiSnapshotHandleV1(client TestClient, backupName string) ([]string, err } return snapshotHandleList, nil } + func GetVolumeSnapshotContentNameByPod(client TestClient, podName, namespace, backupName string) (string, error) { pvcList, err := GetPvcByPVCName(context.Background(), namespace, podName) if err != nil { diff --git a/test/util/k8s/common.go b/test/util/k8s/common.go index ed579cb77d..da439f24c7 100644 --- a/test/util/k8s/common.go +++ b/test/util/k8s/common.go @@ -104,7 +104,6 @@ func GetPvcByPVCName(ctx context.Context, namespace, pvcName string) ([]string, Args: []string{"{print $1}"}, } cmds = append(cmds, cmd) - return common.GetListByCmdPipes(ctx, cmds) } @@ -279,15 +278,30 @@ func CreateFileToPod(ctx context.Context, namespace, podName, containerName, vol fmt.Printf("Kubectl exec cmd =%v\n", cmd) return cmd.Run() } -func ReadFileFromPodVolume(ctx context.Context, namespace, podName, containerName, volume, filename string) (string, error) { +func FileExistInPV(ctx context.Context, namespace, podName, containerName, volume, filename string) (bool, error) { + stdout, stderr, err := ReadFileFromPodVolume(ctx, namespace, podName, containerName, volume, filename) + + output := fmt.Sprintf("%s:%s", stdout, stderr) + if strings.Contains(output, fmt.Sprintf("/%s/%s: No such file or directory", volume, filename)) { + return false, nil + } else { + if err == nil { + return true, nil + } else { + return false, errors.Wrap(err, fmt.Sprintf("Fail to read file %s from volume %s of pod %s in %s", + filename, volume, podName, namespace)) + } + } +} +func ReadFileFromPodVolume(ctx context.Context, namespace, podName, containerName, volume, filename string) (string, string, error) { arg := []string{"exec", "-n", namespace, "-c", containerName, podName, "--", "cat", fmt.Sprintf("/%s/%s", volume, filename)} cmd := exec.CommandContext(ctx, "kubectl", arg...) 
fmt.Printf("Kubectl exec cmd =%v\n", cmd) stdout, stderr, err := veleroexec.RunCommand(cmd) - fmt.Print(stdout) - fmt.Print(stderr) - return stdout, err + fmt.Printf("stdout: %s\n", stdout) + fmt.Printf("stderr: %s\n", stderr) + return stdout, stderr, err } func RunCommand(cmdName string, arg []string) string { diff --git a/test/util/k8s/namespace.go b/test/util/k8s/namespace.go index e056dc9905..3c76867560 100644 --- a/test/util/k8s/namespace.go +++ b/test/util/k8s/namespace.go @@ -194,3 +194,42 @@ func NamespaceShouldNotExist(ctx context.Context, client TestClient, namespace s } return nil } + +func GetBackupNamespaces(ctx context.Context, client TestClient, excludeNS []string) ([]string, error) { + namespaces, err := client.ClientGo.CoreV1().Namespaces().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, errors.Wrap(err, "Could not retrieve namespaces") + } + var backupNamespaces []string + for _, checkNamespace := range namespaces.Items { + isExclude := false + for k := range excludeNS { + if checkNamespace.Name == excludeNS[k] { + isExclude = true + } + } + if !isExclude { + backupNamespaces = append(backupNamespaces, checkNamespace.Name) + } + } + return backupNamespaces, nil +} + +func GetMappingNamespaces(ctx context.Context, client TestClient, excludeNS []string) (string, error) { + ns, err := GetBackupNamespaces(ctx, client, excludeNS) + if err != nil { + return "", errors.Wrap(err, "Could not retrieve namespaces") + } else if len(ns) == 0 { + return "", errors.Wrap(err, "Get empty namespaces in backup") + } + + nsMapping := []string{} + for _, n := range ns { + nsMapping = append(nsMapping, n+":mapping-"+n) + } + joinedNsMapping := strings.Join(nsMapping, ",") + if len(joinedNsMapping) > 0 { + joinedNsMapping = joinedNsMapping[:len(joinedNsMapping)-1] + } + return joinedNsMapping, nil +} diff --git a/test/util/k8s/persistentvolumes.go b/test/util/k8s/persistentvolumes.go index f4c8005945..441c1bd108 100644 --- 
a/test/util/k8s/persistentvolumes.go +++ b/test/util/k8s/persistentvolumes.go @@ -22,10 +22,9 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" - "k8s.io/client-go/util/retry" - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/retry" ) func CreatePersistentVolume(client TestClient, name string) (*corev1.PersistentVolume, error) { @@ -93,3 +92,16 @@ func ClearClaimRefForFailedPVs(ctx context.Context, client TestClient) error { return nil } + +func GetAllPVNames(ctx context.Context, client TestClient) ([]string, error) { + var pvNameList []string + pvList, err := client.ClientGo.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to List PV") + } + + for _, pvName := range pvList.Items { + pvNameList = append(pvNameList, pvName.Name) + } + return pvNameList, nil +} diff --git a/test/util/kibishii/kibishii_utils.go b/test/util/kibishii/kibishii_utils.go index f9d2b00d86..de30dccf44 100644 --- a/test/util/kibishii/kibishii_utils.go +++ b/test/util/kibishii/kibishii_utils.go @@ -51,6 +51,7 @@ type KibishiiData struct { var DefaultKibishiiWorkerCounts = 2 var DefaultKibishiiData = &KibishiiData{2, 10, 10, 1024, 1024, 0, DefaultKibishiiWorkerCounts} +var KibishiiPodNameList = []string{"kibishii-deployment-0", "kibishii-deployment-1"} var KibishiiPVCNameList = []string{"kibishii-data-kibishii-deployment-0", "kibishii-data-kibishii-deployment-1"} var KibishiiStorageClassName = "kibishii-storage-class" @@ -107,6 +108,8 @@ func RunKibishiiTests(veleroCfg VeleroConfig, backupName, restoreName, backupLoc } fmt.Printf("VeleroBackupNamespace done %s\n", time.Now().Format("2006-01-02 15:04:05")) + + // Checkpoint for a successful backup if useVolumeSnapshots { if providerName == "vsphere" { // Wait for uploads started by the Velero Plugin for vSphere to complete @@ -165,11 +168,49 @@ func RunKibishiiTests(veleroCfg VeleroConfig, backupName, 
restoreName, backupLoc } } + // Modify PV data right after backup. If PV's reclaim policy is retain, PV will be restored with the origin resource config + fileName := "file-" + kibishiiNamespace + fileBaseContent := fileName + fmt.Printf("Re-poulate volume %s\n", time.Now().Format("2006-01-02 15:04:05")) + for _, pod := range KibishiiPodNameList { + // To ensure Kibishii verification result is accurate + ClearKibishiiData(oneHourTimeout, kibishiiNamespace, pod, "kibishii", "data") + + fileContent := fileBaseContent + pod + err := CreateFileToPod(oneHourTimeout, kibishiiNamespace, pod, "kibishii", "data", + fileName, fileContent) + if err != nil { + return errors.Wrapf(err, "failed to create file %s", fileName) + } + } + fmt.Printf("Re-poulate volume done %s\n", time.Now().Format("2006-01-02 15:04:05")) + + pvList := []string{} + if strings.Contains(veleroCfg.KibishiiDirectory, "sc-reclaim-policy") { + // Get leftover PV list for PV cleanup + for _, pvc := range KibishiiPVCNameList { + pv, err := GetPvName(oneHourTimeout, client, pvc, kibishiiNamespace) + if err != nil { + errors.Wrapf(err, "failed to delete namespace %s", kibishiiNamespace) + } + pvList = append(pvList, pv) + } + } + fmt.Printf("Simulating a disaster by removing namespace %s %s\n", kibishiiNamespace, time.Now().Format("2006-01-02 15:04:05")) if err := DeleteNamespace(oneHourTimeout, client, kibishiiNamespace, true); err != nil { return errors.Wrapf(err, "failed to delete namespace %s", kibishiiNamespace) } + if strings.Contains(veleroCfg.KibishiiDirectory, "sc-reclaim-policy") { + // In scenario of CSI PV-retain-policy test, to restore PV of the backed up resource, we should make sure + // there are no PVs of the same name left, because in previous test step, PV's reclaim policy is retain, + // so PVs are not deleted although workload namespace is destroyed. 
+ if err := DeletePVs(oneHourTimeout, *veleroCfg.ClientToInstallVelero, pvList); err != nil { + return errors.Wrapf(err, "failed to delete PVs %v", pvList) + } + } + // the snapshots of AWS may be still in pending status when do the restore, wait for a while // to avoid this https://github.com/vmware-tanzu/velero/issues/1799 // TODO remove this after https://github.com/vmware-tanzu/velero/issues/3533 is fixed @@ -191,10 +232,12 @@ func RunKibishiiTests(veleroCfg VeleroConfig, backupName, restoreName, backupLoc return errors.New(fmt.Sprintf("PVR count %d is not as expected %d", len(pvrs), pvCount)) } } + fmt.Printf("KibishiiVerifyAfterRestore %s\n", time.Now().Format("2006-01-02 15:04:05")) - if err := KibishiiVerifyAfterRestore(client, kibishiiNamespace, oneHourTimeout, DefaultKibishiiData); err != nil { + if err := KibishiiVerifyAfterRestore(client, kibishiiNamespace, oneHourTimeout, DefaultKibishiiData, fileName); err != nil { return errors.Wrapf(err, "Error verifying kibishii after restore") } + fmt.Printf("kibishii test completed successfully %s\n", time.Now().Format("2006-01-02 15:04:05")) return nil } @@ -309,6 +352,15 @@ func waitForKibishiiPods(ctx context.Context, client TestClient, kibishiiNamespa return WaitForPods(ctx, client, kibishiiNamespace, []string{"jump-pad", "etcd0", "etcd1", "etcd2", "kibishii-deployment-0", "kibishii-deployment-1"}) } +func KibishiiGenerateData(oneHourTimeout context.Context, kibishiiNamespace string, kibishiiData *KibishiiData) error { + fmt.Printf("generateData %s\n", time.Now().Format("2006-01-02 15:04:05")) + if err := generateData(oneHourTimeout, kibishiiNamespace, kibishiiData); err != nil { + return errors.Wrap(err, "Failed to generate data") + } + fmt.Printf("generateData done %s\n", time.Now().Format("2006-01-02 15:04:05")) + return nil +} + func KibishiiPrepareBeforeBackup(oneHourTimeout context.Context, client TestClient, providerName, kibishiiNamespace, registryCredentialFile, veleroFeatures, kibishiiDirectory 
// ClearKibishiiData removes every file under /<dir> inside the given
// container of a kibishii pod by running `rm -rf` through `kubectl exec`.
//
// It refuses an empty or root dir: "rm -rf /" + "" + "/*" would expand to
// `rm -rf //*` and wipe the container's root filesystem. On failure the
// combined kubectl output is included in the returned error for diagnosis.
func ClearKibishiiData(ctx context.Context, namespace, podName, containerName, dir string) error {
	if dir == "" || dir == "/" {
		return fmt.Errorf("refusing to clear kibishii data: target directory %q is empty or root", dir)
	}
	arg := []string{"exec", "-n", namespace, "-c", containerName, podName,
		"--", "/bin/sh", "-c", "rm -rf /" + dir + "/*"}
	cmd := exec.CommandContext(ctx, "kubectl", arg...)
	fmt.Printf("Kubectl exec cmd =%v\n", cmd)
	// CombinedOutput instead of Run so a failure surfaces kubectl's stderr.
	if output, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("failed to clear kibishii data in pod %s: %w, output: %s", podName, err, string(output))
	}
	return nil
}
veleroCfg *VeleroConfig, isStandbyCluste veleroInstallOptions.UploaderType = veleroCfg.UploaderType GCFrequency, _ := time.ParseDuration(veleroCfg.GCFrequency) veleroInstallOptions.GarbageCollectionFrequency = GCFrequency + veleroInstallOptions.PodVolumeOperationTimeout = veleroCfg.PodVolumeOperationTimeout + veleroInstallOptions.NodeAgentPodCPULimit = veleroCfg.NodeAgentPodCPULimit + veleroInstallOptions.NodeAgentPodCPURequest = veleroCfg.NodeAgentPodCPURequest + veleroInstallOptions.NodeAgentPodMemLimit = veleroCfg.NodeAgentPodMemLimit + veleroInstallOptions.NodeAgentPodMemRequest = veleroCfg.NodeAgentPodMemRequest + veleroInstallOptions.VeleroPodCPULimit = veleroCfg.VeleroPodCPULimit + veleroInstallOptions.VeleroPodCPURequest = veleroCfg.VeleroPodCPURequest + veleroInstallOptions.VeleroPodMemLimit = veleroCfg.VeleroPodMemLimit + veleroInstallOptions.VeleroPodMemRequest = veleroCfg.VeleroPodMemRequest err = installVeleroServer(ctx, veleroCfg.VeleroCLI, veleroCfg.CloudProvider, &installOptions{ Options: veleroInstallOptions, @@ -251,6 +260,42 @@ func installVeleroServer(ctx context.Context, cli, cloudProvider string, options args = append(args, fmt.Sprintf("--garbage-collection-frequency=%v", options.GarbageCollectionFrequency)) } + if options.PodVolumeOperationTimeout > 0 { + args = append(args, fmt.Sprintf("--pod-volume-operation-timeout=%v", options.PodVolumeOperationTimeout)) + } + + if options.NodeAgentPodCPULimit != "" { + args = append(args, fmt.Sprintf("--node-agent-pod-cpu-limit=%v", options.NodeAgentPodCPULimit)) + } + + if options.NodeAgentPodCPURequest != "" { + args = append(args, fmt.Sprintf("--node-agent-pod-cpu-request=%v", options.NodeAgentPodCPURequest)) + } + + if options.NodeAgentPodMemLimit != "" { + args = append(args, fmt.Sprintf("--node-agent-pod-mem-limit=%v", options.NodeAgentPodMemLimit)) + } + + if options.NodeAgentPodMemRequest != "" { + args = append(args, fmt.Sprintf("--node-agent-pod-mem-request=%v", options.NodeAgentPodMemRequest)) 
+ } + + if options.VeleroPodCPULimit != "" { + args = append(args, fmt.Sprintf("--velero-pod-cpu-limit=%v", options.VeleroPodCPULimit)) + } + + if options.VeleroPodCPURequest != "" { + args = append(args, fmt.Sprintf("--velero-pod-cpu-request=%v", options.VeleroPodCPURequest)) + } + + if options.VeleroPodMemLimit != "" { + args = append(args, fmt.Sprintf("--velero-pod-mem-limit=%v", options.VeleroPodMemLimit)) + } + + if options.VeleroPodMemRequest != "" { + args = append(args, fmt.Sprintf("--velero-pod-mem-request=%v", options.VeleroPodMemRequest)) + } + if len(options.UploaderType) > 0 { args = append(args, fmt.Sprintf("--uploader-type=%v", options.UploaderType)) } diff --git a/test/util/velero/velero_utils.go b/test/util/velero/velero_utils.go index a106cf5b50..fd0d919e4f 100644 --- a/test/util/velero/velero_utils.go +++ b/test/util/velero/velero_utils.go @@ -1561,3 +1561,62 @@ func InstallTestStorageClasses(path string) error { } return InstallStorageClass(ctx, tmpFile.Name()) } + +func GetPvName(ctx context.Context, client TestClient, pvcName, namespace string) (string, error) { + + pvcList, err := GetPvcByPVCName(context.Background(), namespace, pvcName) + if err != nil { + return "", err + } + + if len(pvcList) != 1 { + return "", errors.New(fmt.Sprintf("Only 1 PV of PVC %s pod %s should be found under namespace %s", pvcList[0], pvcName, namespace)) + } + + pvList, err := GetPvByPvc(context.Background(), namespace, pvcList[0]) + if err != nil { + return "", err + } + if len(pvList) != 1 { + return "", errors.New(fmt.Sprintf("Only 1 PV of PVC %s pod %s should be found under namespace %s", pvcList[0], pvcName, namespace)) + } + + return pvList[0], nil + +} +func DeletePVs(ctx context.Context, client TestClient, pvList []string) error { + for _, pv := range pvList { + args := []string{"delete", "pv", pv, "--timeout=0s"} + fmt.Println(args) + err := exec.CommandContext(ctx, "kubectl", args...).Run() + if err != nil { + return errors.New(fmt.Sprintf("Deleted PV 
%s ", pv)) + } + } + return nil +} + +func CleanAllRetainedPV(ctx context.Context, client TestClient) { + + pvNameList, err := GetAllPVNames(ctx, client) + if err != nil { + fmt.Println("fail to list PV") + } + for _, pv := range pvNameList { + args := []string{"patch", "pv", pv, "-p", "{\"spec\":{\"persistentVolumeReclaimPolicy\":\"Delete\"}}"} + fmt.Println(args) + cmd := exec.CommandContext(ctx, "kubectl", args...) + stdout, errMsg, err := veleroexec.RunCommand(cmd) + if err != nil { + fmt.Printf("fail to patch PV %s reclaim policy to delete: stdout: %s, stderr: %s", pv, stdout, errMsg) + } + + args = []string{"delete", "pv", pv, "--timeout=60s"} + fmt.Println(args) + cmd = exec.CommandContext(ctx, "kubectl", args...) + stdout, errMsg, err = veleroexec.RunCommand(cmd) + if err != nil { + fmt.Printf("fail to delete PV %s reclaim policy to delete: stdout: %s, stderr: %s", pv, stdout, errMsg) + } + } +}