From bacb270f26f0fddebafa6a08a9292ca32c3eebee Mon Sep 17 00:00:00 2001 From: Slach Date: Mon, 12 Feb 2024 20:36:16 +0400 Subject: [PATCH] fix `upload` command for S3 when object lock policy is turned on, fix https://github.com/Altinity/clickhouse-backup/issues/829 --- ChangeLog.md | 1 + pkg/storage/s3.go | 25 ++++++++++++++----------- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/ChangeLog.md b/ChangeLog.md index 17e260c2..3c637991 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -2,6 +2,7 @@ BUG FIXES - fix `download` command corner cases for increment backup for tables with projections, fix [830](https://github.com/Altinity/clickhouse-backup/issues/830) - more informative error during try to `restore` not exists local backup +- fix `upload` command for S3 when object lock policy is turned on, fix [829](https://github.com/Altinity/clickhouse-backup/issues/829) # v2.4.29 IMPROVEMENTS diff --git a/pkg/storage/s3.go b/pkg/storage/s3.go index 0b22ac47..eacf1863 100644 --- a/pkg/storage/s3.go +++ b/pkg/storage/s3.go @@ -267,10 +267,11 @@ func (s *S3) GetFileReaderWithLocalPath(ctx context.Context, key, localPath stri func (s *S3) PutFile(ctx context.Context, key string, r io.ReadCloser) error { params := s3.PutObjectInput{ - Bucket: aws.String(s.Config.Bucket), - Key: aws.String(path.Join(s.Config.Path, key)), - Body: r, - StorageClass: s3types.StorageClass(strings.ToUpper(s.Config.StorageClass)), + Bucket: aws.String(s.Config.Bucket), + Key: aws.String(path.Join(s.Config.Path, key)), + Body: r, + StorageClass: s3types.StorageClass(strings.ToUpper(s.Config.StorageClass)), + ChecksumAlgorithm: s3types.ChecksumAlgorithmCrc32, } // ACL shall be optional, fix https://github.com/Altinity/clickhouse-backup/issues/785 if s.Config.ACL != "" { @@ -456,10 +457,11 @@ func (s *S3) CopyObject(ctx context.Context, srcSize int64, srcBucket, srcKey, d // just copy object without multipart if srcSize < 5*1024*1024*1024 || strings.Contains(s.Config.Endpoint, 
"storage.googleapis.com") { params := &s3.CopyObjectInput{ - Bucket: aws.String(s.Config.Bucket), - Key: aws.String(dstKey), - CopySource: aws.String(path.Join(srcBucket, srcKey)), - StorageClass: s3types.StorageClass(strings.ToUpper(s.Config.StorageClass)), + Bucket: aws.String(s.Config.Bucket), + Key: aws.String(dstKey), + CopySource: aws.String(path.Join(srcBucket, srcKey)), + StorageClass: s3types.StorageClass(strings.ToUpper(s.Config.StorageClass)), + ChecksumAlgorithm: s3types.ChecksumAlgorithmCrc32, } s.enrichCopyObjectParams(params) _, err := s.client.CopyObject(ctx, params) @@ -470,9 +472,10 @@ func (s *S3) CopyObject(ctx context.Context, srcSize int64, srcBucket, srcKey, d } // Initiate a multipart upload createMultipartUploadParams := &s3.CreateMultipartUploadInput{ - Bucket: aws.String(s.Config.Bucket), - Key: aws.String(dstKey), - StorageClass: s3types.StorageClass(strings.ToUpper(s.Config.StorageClass)), + Bucket: aws.String(s.Config.Bucket), + Key: aws.String(dstKey), + StorageClass: s3types.StorageClass(strings.ToUpper(s.Config.StorageClass)), + ChecksumAlgorithm: s3types.ChecksumAlgorithmCrc32, } s.enrichCreateMultipartUploadParams(createMultipartUploadParams) initResp, err := s.client.CreateMultipartUpload(ctx, createMultipartUploadParams)