diff --git a/br/pkg/storage/compress_test.go b/br/pkg/storage/compress_test.go
index b4222e04af153..4f7d315ebad4a 100644
--- a/br/pkg/storage/compress_test.go
+++ b/br/pkg/storage/compress_test.go
@@ -8,35 +8,36 @@ import (
 	"os"
 	"path/filepath"
 	"strings"
+	"testing"

-	. "github.com/pingcap/check"
+	"github.com/stretchr/testify/require"
 )

-func (r *testStorageSuite) TestWithCompressReadWriteFile(c *C) {
-	dir := c.MkDir()
+func TestWithCompressReadWriteFile(t *testing.T) {
+	dir := t.TempDir()
 	backend, err := ParseBackend("local://"+filepath.ToSlash(dir), nil)
-	c.Assert(err, IsNil)
+	require.NoError(t, err)
 	ctx := context.Background()
 	storage, err := Create(ctx, backend, true)
-	c.Assert(err, IsNil)
+	require.NoError(t, err)
 	storage = WithCompression(storage, Gzip)

 	name := "with compress test"
 	content := "hello,world!"
 	fileName := strings.ReplaceAll(name, " ", "-") + ".txt.gz"
 	err = storage.WriteFile(ctx, fileName, []byte(content))
-	c.Assert(err, IsNil)
+	require.NoError(t, err)

 	// make sure compressed file is written correctly
 	file, err := os.Open(filepath.Join(dir, fileName))
-	c.Assert(err, IsNil)
+	require.NoError(t, err)
 	uncompressedFile, err := newCompressReader(Gzip, file)
-	c.Assert(err, IsNil)
+	require.NoError(t, err)
 	newContent, err := io.ReadAll(uncompressedFile)
-	c.Assert(err, IsNil)
-	c.Assert(string(newContent), Equals, content)
+	require.NoError(t, err)
+	require.Equal(t, content, string(newContent))

 	// test withCompression ReadFile
 	newContent, err = storage.ReadFile(ctx, fileName)
-	c.Assert(err, IsNil)
-	c.Assert(string(newContent), Equals, content)
+	require.NoError(t, err)
+	require.Equal(t, content, string(newContent))
 }
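The hunks above are representative of the whole migration: each gocheck suite method `func (r *testStorageSuite) TestXxx(c *C)` becomes a plain `func TestXxx(t *testing.T)`, `c.MkDir()` becomes `t.TempDir()`, and every `c.Assert` collapses into a focused `require` call. A minimal sketch of the mapping, with a hypothetical `doSomething` helper standing in for any call under test; note that gocheck's `Equals` took the actual value first, while `require.Equal` takes the expected value first:

```go
package storage

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// doSomething is a hypothetical stand-in for any call returning (value, error).
func doSomething() (string, error) { return "hello,world!", nil }

func TestMigrationSketch(t *testing.T) {
	got, err := doSomething()
	require.NoError(t, err)               // was: c.Assert(err, IsNil)
	require.Equal(t, "hello,world!", got) // was: c.Assert(got, Equals, "hello,world!")
}
```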
"github.com/pingcap/check" backuppb "github.com/pingcap/kvproto/pkg/brpb" + "github.com/stretchr/testify/require" ) -func (r *testStorageSuite) TestGCS(c *C) { +func TestGCS(t *testing.T) { ctx := context.Background() opts := fakestorage.Options{ NoListener: true, } server, err := fakestorage.NewServerWithOptions(opts) - c.Assert(err, IsNil) + require.NoError(t, err) bucketName := "testbucket" server.CreateBucketWithOpts(fakestorage.CreateBucketOpts{Name: bucketName}) @@ -36,54 +37,54 @@ func (r *testStorageSuite) TestGCS(c *C) { CheckPermissions: []Permission{AccessBuckets}, HTTPClient: server.HTTPClient(), }) - c.Assert(err, IsNil) + require.NoError(t, err) err = stg.WriteFile(ctx, "key", []byte("data")) - c.Assert(err, IsNil) + require.NoError(t, err) err = stg.WriteFile(ctx, "key1", []byte("data1")) - c.Assert(err, IsNil) + require.NoError(t, err) err = stg.WriteFile(ctx, "key2", []byte("data22223346757222222222289722222")) - c.Assert(err, IsNil) + require.NoError(t, err) rc, err := server.Client().Bucket(bucketName).Object("a/b/key").NewReader(ctx) - c.Assert(err, IsNil) + require.NoError(t, err) d, err := io.ReadAll(rc) - rc.Close() - c.Assert(err, IsNil) - c.Assert(d, DeepEquals, []byte("data")) + require.NoError(t, err) + require.Equal(t, []byte("data"), d) + require.NoError(t, rc.Close()) d, err = stg.ReadFile(ctx, "key") - c.Assert(err, IsNil) - c.Assert(d, DeepEquals, []byte("data")) + require.NoError(t, err) + require.Equal(t, []byte("data"), d) exist, err := stg.FileExists(ctx, "key") - c.Assert(err, IsNil) - c.Assert(exist, IsTrue) + require.NoError(t, err) + require.True(t, exist) exist, err = stg.FileExists(ctx, "key_not_exist") - c.Assert(err, IsNil) - c.Assert(exist, IsFalse) + require.NoError(t, err) + require.False(t, exist) keyDelete := "key_delete" exist, err = stg.FileExists(ctx, keyDelete) - c.Assert(err, IsNil) - c.Assert(exist, IsFalse) + require.NoError(t, err) + require.False(t, exist) err = stg.WriteFile(ctx, keyDelete, []byte("data")) - c.Assert(err, IsNil) + require.NoError(t, err) exist, err = stg.FileExists(ctx, keyDelete) - c.Assert(err, IsNil) - c.Assert(exist, IsTrue) + require.NoError(t, err) + require.True(t, exist) err = stg.DeleteFile(ctx, keyDelete) - c.Assert(err, IsNil) + require.NoError(t, err) exist, err = stg.FileExists(ctx, keyDelete) - c.Assert(err, IsNil) - c.Assert(exist, IsFalse) + require.NoError(t, err) + require.False(t, exist) list := "" var totalSize int64 = 0 @@ -92,15 +93,15 @@ func (r *testStorageSuite) TestGCS(c *C) { totalSize += size return nil }) - c.Assert(err, IsNil) - c.Assert(list, Equals, "keykey1key2") - c.Assert(totalSize, Equals, int64(42)) + require.NoError(t, err) + require.Equal(t, "keykey1key2", list) + require.Equal(t, int64(42), totalSize) // test 1003 files totalSize = 0 for i := 0; i < 1000; i += 1 { err = stg.WriteFile(ctx, fmt.Sprintf("f%d", i), []byte("data")) - c.Assert(err, IsNil) + require.NoError(t, err) } filesSet := make(map[string]struct{}, 1003) err = stg.WalkDir(ctx, nil, func(name string, size int64) error { @@ -108,83 +109,83 @@ func (r *testStorageSuite) TestGCS(c *C) { totalSize += size return nil }) - c.Assert(err, IsNil) - c.Assert(totalSize, Equals, int64(42+4000)) + require.NoError(t, err) + require.Equal(t, int64(42+4000), totalSize) _, ok := filesSet["key"] - c.Assert(ok, IsTrue) + require.True(t, ok) _, ok = filesSet["key1"] - c.Assert(ok, IsTrue) + require.True(t, ok) _, ok = filesSet["key2"] - c.Assert(ok, IsTrue) + require.True(t, ok) for i := 0; i < 1000; i += 1 { _, ok = 
filesSet[fmt.Sprintf("f%d", i)] - c.Assert(ok, IsTrue) + require.True(t, ok) } efr, err := stg.Open(ctx, "key2") - c.Assert(err, IsNil) + require.NoError(t, err) p := make([]byte, 10) n, err := efr.Read(p) - c.Assert(err, IsNil) - c.Assert(n, Equals, 10) - c.Assert(string(p), Equals, "data222233") + require.NoError(t, err) + require.Equal(t, 10, n) + require.Equal(t, "data222233", string(p)) p = make([]byte, 40) n, err = efr.Read(p) - c.Assert(err, IsNil) - c.Assert(n, Equals, 23) - c.Assert(string(p[:23]), Equals, "46757222222222289722222") + require.NoError(t, err) + require.Equal(t, 23, n) + require.Equal(t, "46757222222222289722222", string(p[:23])) p = make([]byte, 5) offs, err := efr.Seek(3, io.SeekStart) - c.Assert(err, IsNil) - c.Assert(offs, Equals, int64(3)) + require.NoError(t, err) + require.Equal(t, int64(3), offs) n, err = efr.Read(p) - c.Assert(err, IsNil) - c.Assert(n, Equals, 5) - c.Assert(string(p), Equals, "a2222") + require.NoError(t, err) + require.Equal(t, 5, n) + require.Equal(t, "a2222", string(p)) p = make([]byte, 5) offs, err = efr.Seek(3, io.SeekCurrent) - c.Assert(err, IsNil) - c.Assert(offs, Equals, int64(11)) + require.NoError(t, err) + require.Equal(t, int64(11), offs) n, err = efr.Read(p) - c.Assert(err, IsNil) - c.Assert(n, Equals, 5) - c.Assert(string(p), Equals, "67572") + require.NoError(t, err) + require.Equal(t, 5, n) + require.Equal(t, "67572", string(p)) /* Since fake_gcs_server hasn't support for negative offset yet. p = make([]byte, 5) offs, err = efr.Seek(int64(-7), io.SeekEnd) - c.Assert(err, IsNil) - c.Assert(offs, Equals, int64(-7)) + require.NoError(t, err) + require.Equal(t, int64(-7), offs) n, err = efr.Read(p) - c.Assert(err, IsNil) - c.Assert(n, Equals, 5) - c.Assert(string(p), Equals, "97222") + require.NoError(t, err) + require.Equal(t, 5, n) + require.Equal(t, "97222", string(p)) */ err = efr.Close() - c.Assert(err, IsNil) + require.NoError(t, err) - c.Assert(stg.URI(), Equals, "gcs://testbucket/a/b/") + require.Equal(t, "gcs://testbucket/a/b/", stg.URI()) } -func (r *testStorageSuite) TestNewGCSStorage(c *C) { +func TestNewGCSStorage(t *testing.T) { ctx := context.Background() opts := fakestorage.Options{ NoListener: true, } - server, err1 := fakestorage.NewServerWithOptions(opts) - c.Assert(err1, IsNil) + server, err := fakestorage.NewServerWithOptions(opts) + require.NoError(t, err) bucketName := "testbucket" server.CreateBucketWithOpts(fakestorage.CreateBucketOpts{Name: bucketName}) - testDir := c.MkDir() + testDir := t.TempDir() { gcs := &backuppb.GCS{ @@ -199,8 +200,8 @@ func (r *testStorageSuite) TestNewGCSStorage(c *C) { CheckPermissions: []Permission{AccessBuckets}, HTTPClient: server.HTTPClient(), }) - c.Assert(err, IsNil) - c.Assert(gcs.CredentialsBlob, Equals, "FakeCredentials") + require.NoError(t, err) + require.Equal(t, "FakeCredentials", gcs.CredentialsBlob) } { @@ -216,22 +217,24 @@ func (r *testStorageSuite) TestNewGCSStorage(c *C) { CheckPermissions: []Permission{AccessBuckets}, HTTPClient: server.HTTPClient(), }) - c.Assert(err, IsNil) - c.Assert(gcs.CredentialsBlob, Equals, "") + require.NoError(t, err) + require.Equal(t, "", gcs.CredentialsBlob) } { fakeCredentialsFile, err := os.CreateTemp(testDir, "fakeCredentialsFile") - c.Assert(err, IsNil) + require.NoError(t, err) defer func() { - fakeCredentialsFile.Close() - os.Remove(fakeCredentialsFile.Name()) + require.NoError(t, fakeCredentialsFile.Close()) + require.NoError(t, os.Remove(fakeCredentialsFile.Name())) }() _, err = 
fakeCredentialsFile.Write([]byte(`{"type": "service_account"}`)) - c.Assert(err, IsNil) + require.NoError(t, err) err = os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", fakeCredentialsFile.Name()) - defer os.Unsetenv("GOOGLE_APPLICATION_CREDENTIALS") - c.Assert(err, IsNil) + defer func() { + require.NoError(t, os.Unsetenv("GOOGLE_APPLICATION_CREDENTIALS")) + }() + require.NoError(t, err) gcs := &backuppb.GCS{ Bucket: bucketName, @@ -245,22 +248,24 @@ func (r *testStorageSuite) TestNewGCSStorage(c *C) { CheckPermissions: []Permission{AccessBuckets}, HTTPClient: server.HTTPClient(), }) - c.Assert(err, IsNil) - c.Assert(gcs.CredentialsBlob, Equals, `{"type": "service_account"}`) + require.NoError(t, err) + require.Equal(t, `{"type": "service_account"}`, gcs.CredentialsBlob) } { fakeCredentialsFile, err := os.CreateTemp(testDir, "fakeCredentialsFile") - c.Assert(err, IsNil) + require.NoError(t, err) defer func() { - fakeCredentialsFile.Close() - os.Remove(fakeCredentialsFile.Name()) + require.NoError(t, fakeCredentialsFile.Close()) + require.NoError(t, os.Remove(fakeCredentialsFile.Name())) }() _, err = fakeCredentialsFile.Write([]byte(`{"type": "service_account"}`)) - c.Assert(err, IsNil) + require.NoError(t, err) err = os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", fakeCredentialsFile.Name()) - defer os.Unsetenv("GOOGLE_APPLICATION_CREDENTIALS") - c.Assert(err, IsNil) + defer func() { + require.NoError(t, os.Unsetenv("GOOGLE_APPLICATION_CREDENTIALS")) + }() + require.NoError(t, err) gcs := &backuppb.GCS{ Bucket: bucketName, @@ -274,13 +279,13 @@ func (r *testStorageSuite) TestNewGCSStorage(c *C) { CheckPermissions: []Permission{AccessBuckets}, HTTPClient: server.HTTPClient(), }) - c.Assert(err, IsNil) - c.Assert(gcs.CredentialsBlob, Equals, "") - c.Assert(s.objectName("x"), Equals, "a/b/x") + require.NoError(t, err) + require.Equal(t, "", gcs.CredentialsBlob) + require.Equal(t, "a/b/x", s.objectName("x")) } { - os.Unsetenv("GOOGLE_APPLICATION_CREDENTIALS") + require.NoError(t, os.Unsetenv("GOOGLE_APPLICATION_CREDENTIALS")) gcs := &backuppb.GCS{ Bucket: bucketName, Prefix: "a/b/", @@ -293,7 +298,7 @@ func (r *testStorageSuite) TestNewGCSStorage(c *C) { CheckPermissions: []Permission{AccessBuckets}, HTTPClient: server.HTTPClient(), }) - c.Assert(err, NotNil) + require.Error(t, err) } { @@ -309,8 +314,8 @@ func (r *testStorageSuite) TestNewGCSStorage(c *C) { CheckPermissions: []Permission{AccessBuckets}, HTTPClient: server.HTTPClient(), }) - c.Assert(err, IsNil) - c.Assert(gcs.CredentialsBlob, Equals, "") - c.Assert(s.objectName("x"), Equals, "a/b/x") + require.NoError(t, err) + require.Equal(t, "", gcs.CredentialsBlob) + require.Equal(t, "a/b/x", s.objectName("x")) } } diff --git a/br/pkg/storage/local_test.go b/br/pkg/storage/local_test.go index 54eed83cd47bd..ac94476d190b9 100644 --- a/br/pkg/storage/local_test.go +++ b/br/pkg/storage/local_test.go @@ -7,88 +7,85 @@ import ( "os" "path/filepath" "runtime" + "testing" - . 
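Besides the assertion rewrite, `TestGCS` and `TestNewGCSStorage` keep their in-process fake GCS fixture. That setup can be read in isolation as the sketch below, which only reuses the `fakestorage` calls already present in the diff; `newFakeGCS` is a hypothetical helper name:

```go
package storage

import (
	"testing"

	"github.com/fsouza/fake-gcs-server/fakestorage"
	"github.com/stretchr/testify/require"
)

// newFakeGCS starts an in-process fake GCS server (no network listener)
// and pre-creates one bucket, mirroring the fixture in gcs_test.go.
func newFakeGCS(t *testing.T, bucket string) *fakestorage.Server {
	server, err := fakestorage.NewServerWithOptions(fakestorage.Options{NoListener: true})
	require.NoError(t, err)
	server.CreateBucketWithOpts(fakestorage.CreateBucketOpts{Name: bucket})
	return server
}
```

`t.TempDir()` also removes the manual temp-dir bookkeeping: the directory is created per test and deleted automatically when the test finishes, which is why the credentials-file blocks above only have to clean up the file itself.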
"github.com/pingcap/check" + "github.com/stretchr/testify/require" ) -type testLocalSuite struct{} - -var _ = Suite(&testLocalSuite{}) - -func (r *testStorageSuite) TestDeleteFile(c *C) { - dir := c.MkDir() +func TestDeleteFile(t *testing.T) { + dir := t.TempDir() sb, err := ParseBackend("file://"+filepath.ToSlash(dir), &BackendOptions{}) - c.Assert(err, IsNil) + require.NoError(t, err) store, err := Create(context.TODO(), sb, true) - c.Assert(err, IsNil) + require.NoError(t, err) name := "test_delete" ret, err := store.FileExists(context.Background(), name) - c.Assert(err, IsNil) - c.Assert(ret, Equals, false) + require.NoError(t, err) + require.Equal(t, false, ret) _, err = store.Create(context.Background(), name) - c.Assert(err, IsNil) + require.NoError(t, err) ret, err = store.FileExists(context.Background(), name) - c.Assert(err, IsNil) - c.Assert(ret, Equals, true) + require.NoError(t, err) + require.Equal(t, true, ret) err = store.DeleteFile(context.Background(), name) - c.Assert(err, IsNil) + require.NoError(t, err) ret, err = store.FileExists(context.Background(), name) - c.Assert(err, IsNil) - c.Assert(ret, Equals, false) + require.NoError(t, err) + require.Equal(t, false, ret) } -func (r *testStorageSuite) TestWalkDirWithSoftLinkFile(c *C) { +func TestWalkDirWithSoftLinkFile(t *testing.T) { if runtime.GOOS == "windows" { // skip the test on windows. typically windows users don't have symlink permission. return } - dir1 := c.MkDir() + dir1 := t.TempDir() name1 := "test.warehouse.0.sql" path1 := filepath.Join(dir1, name1) f1, err := os.Create(path1) - c.Assert(err, IsNil) + require.NoError(t, err) data := "/* whatever pragmas */;" + "INSERT INTO `namespaced`.`table` (columns, more, columns) VALUES (1,-2, 3),\n(4,5., 6);" + "INSERT `namespaced`.`table` (x,y,z) VALUES (7,8,9);" + "insert another_table values (10,11e1,12, '(13)', '(', 14, ')');" _, err = f1.Write([]byte(data)) - c.Assert(err, IsNil) + require.NoError(t, err) err = f1.Close() - c.Assert(err, IsNil) + require.NoError(t, err) - dir2 := c.MkDir() + dir2 := t.TempDir() name2 := "test.warehouse.1.sql" f2, err := os.Create(filepath.Join(dir2, name2)) - c.Assert(err, IsNil) + require.NoError(t, err) _, err = f2.Write([]byte(data)) - c.Assert(err, IsNil) + require.NoError(t, err) err = f2.Close() - c.Assert(err, IsNil) + require.NoError(t, err) err = os.Symlink(path1, filepath.Join(dir2, name1)) - c.Assert(err, IsNil) + require.NoError(t, err) sb, err := ParseBackend("file://"+filepath.ToSlash(dir2), &BackendOptions{}) - c.Assert(err, IsNil) + require.NoError(t, err) store, err := Create(context.TODO(), sb, true) - c.Assert(err, IsNil) + require.NoError(t, err) i := 0 names := []string{name1, name2} err = store.WalkDir(context.TODO(), &WalkOption{}, func(path string, size int64) error { - c.Assert(path, Equals, names[i]) - c.Assert(size, Equals, int64(len(data))) + require.Equal(t, names[i], path) + require.Equal(t, int64(len(data)), size) i++ return nil }) - c.Assert(err, IsNil) - c.Assert(i, Equals, 2) + require.NoError(t, err) + require.Equal(t, 2, i) } diff --git a/br/pkg/storage/parse_test.go b/br/pkg/storage/parse_test.go index 542b5dde3de07..099477b84cdd6 100644 --- a/br/pkg/storage/parse_test.go +++ b/br/pkg/storage/parse_test.go @@ -8,43 +8,38 @@ import ( "path/filepath" "testing" - . 
"github.com/pingcap/check" backuppb "github.com/pingcap/kvproto/pkg/brpb" + "github.com/stretchr/testify/require" ) -func Test(t *testing.T) { - TestingT(t) -} - -type testStorageSuite struct{} - -var _ = Suite(&testStorageSuite{}) - -func (r *testStorageSuite) TestCreateStorage(c *C) { +func TestCreateStorage(t *testing.T) { _, err := ParseBackend("1invalid:", nil) - c.Assert(err, ErrorMatches, "parse (.*)1invalid:(.*): first path segment in URL cannot contain colon") + require.Error(t, err) + require.Regexp(t, "parse (.*)1invalid:(.*): first path segment in URL cannot contain colon", err.Error()) _, err = ParseBackend("net:storage", nil) - c.Assert(err, ErrorMatches, "storage net not support yet.*") + require.Error(t, err) + require.Regexp(t, "storage net not support yet.*", err.Error()) s, err := ParseBackend("local:///tmp/storage", nil) - c.Assert(err, IsNil) - c.Assert(s.GetLocal().GetPath(), Equals, "/tmp/storage") + require.NoError(t, err) + require.Equal(t, "/tmp/storage", s.GetLocal().GetPath()) s, err = ParseBackend("file:///tmp/storage", nil) - c.Assert(err, IsNil) - c.Assert(s.GetLocal().GetPath(), Equals, "/tmp/storage") + require.NoError(t, err) + require.Equal(t, "/tmp/storage", s.GetLocal().GetPath()) s, err = ParseBackend("noop://", nil) - c.Assert(err, IsNil) - c.Assert(s.GetNoop(), NotNil) + require.NoError(t, err) + require.NotNil(t, s.GetNoop()) s, err = ParseBackend("hdfs://127.0.0.1:1231/backup", nil) - c.Assert(err, IsNil) - c.Assert(s.GetHdfs().GetRemote(), Equals, "hdfs://127.0.0.1:1231/backup") + require.NoError(t, err) + require.Equal(t, "hdfs://127.0.0.1:1231/backup", s.GetHdfs().GetRemote()) _, err = ParseBackend("s3:///bucket/more/prefix/", &BackendOptions{}) - c.Assert(err, ErrorMatches, `please specify the bucket for s3 in s3:///bucket/more/prefix/.*`) + require.Error(t, err) + require.Regexp(t, `please specify the bucket for s3 in s3:///bucket/more/prefix/.*`, err.Error()) s3opt := &BackendOptions{ S3: S3BackendOptions{ @@ -52,36 +47,36 @@ func (r *testStorageSuite) TestCreateStorage(c *C) { }, } s, err = ParseBackend("s3://bucket2/prefix/", s3opt) - c.Assert(err, IsNil) + require.NoError(t, err) s3 := s.GetS3() - c.Assert(s3, NotNil) - c.Assert(s3.Bucket, Equals, "bucket2") - c.Assert(s3.Prefix, Equals, "prefix") - c.Assert(s3.Endpoint, Equals, "https://s3.example.com") - c.Assert(s3.ForcePathStyle, IsFalse) + require.NotNil(t, s3) + require.Equal(t, "bucket2", s3.Bucket) + require.Equal(t, "prefix", s3.Prefix) + require.Equal(t, "https://s3.example.com", s3.Endpoint) + require.False(t, s3.ForcePathStyle) // nolint:lll s, err = ParseBackend(`s3://bucket3/prefix/path?endpoint=https://127.0.0.1:9000&force_path_style=0&SSE=aws:kms&sse-kms-key-id=TestKey&xyz=abc`, nil) - c.Assert(err, IsNil) + require.NoError(t, err) s3 = s.GetS3() - c.Assert(s3, NotNil) - c.Assert(s3.Bucket, Equals, "bucket3") - c.Assert(s3.Prefix, Equals, "prefix/path") - c.Assert(s3.Endpoint, Equals, "https://127.0.0.1:9000") - c.Assert(s3.ForcePathStyle, IsFalse) - c.Assert(s3.Sse, Equals, "aws:kms") - c.Assert(s3.SseKmsKeyId, Equals, "TestKey") + require.NotNil(t, s3) + require.Equal(t, "bucket3", s3.Bucket) + require.Equal(t, "prefix/path", s3.Prefix) + require.Equal(t, "https://127.0.0.1:9000", s3.Endpoint) + require.False(t, s3.ForcePathStyle) + require.Equal(t, "aws:kms", s3.Sse) + require.Equal(t, "TestKey", s3.SseKmsKeyId) // special character in access keys s, err = 
ParseBackend(`s3://bucket4/prefix/path?access-key=NXN7IPIOSAAKDEEOLMAF&secret-access-key=nREY/7Dt+PaIbYKrKlEEMMF/ExCiJEX=XMLPUANw`, nil) - c.Assert(err, IsNil) + require.NoError(t, err) s3 = s.GetS3() - c.Assert(s3, NotNil) - c.Assert(s3.Bucket, Equals, "bucket4") - c.Assert(s3.Prefix, Equals, "prefix/path") - c.Assert(s3.AccessKey, Equals, "NXN7IPIOSAAKDEEOLMAF") - c.Assert(s3.SecretAccessKey, Equals, "nREY/7Dt+PaIbYKrKlEEMMF/ExCiJEX=XMLPUANw") - c.Assert(s3.ForcePathStyle, IsTrue) + require.NotNil(t, s3) + require.Equal(t, "bucket4", s3.Bucket) + require.Equal(t, "prefix/path", s3.Prefix) + require.Equal(t, "NXN7IPIOSAAKDEEOLMAF", s3.AccessKey) + require.Equal(t, "nREY/7Dt+PaIbYKrKlEEMMF/ExCiJEX=XMLPUANw", s3.SecretAccessKey) + require.True(t, s3.ForcePathStyle) gcsOpt := &BackendOptions{ GCS: GCSBackendOptions{ @@ -89,74 +84,74 @@ func (r *testStorageSuite) TestCreateStorage(c *C) { }, } s, err = ParseBackend("gcs://bucket2/prefix/", gcsOpt) - c.Assert(err, IsNil) + require.NoError(t, err) gcs := s.GetGcs() - c.Assert(gcs, NotNil) - c.Assert(gcs.Bucket, Equals, "bucket2") - c.Assert(gcs.Prefix, Equals, "prefix") - c.Assert(gcs.Endpoint, Equals, "https://gcs.example.com/") - c.Assert(gcs.CredentialsBlob, Equals, "") + require.NotNil(t, gcs) + require.Equal(t, "bucket2", gcs.Bucket) + require.Equal(t, "prefix", gcs.Prefix) + require.Equal(t, "https://gcs.example.com/", gcs.Endpoint) + require.Equal(t, "", gcs.CredentialsBlob) s, err = ParseBackend("gcs://bucket2", gcsOpt) - c.Assert(err, IsNil) + require.NoError(t, err) gcs = s.GetGcs() - c.Assert(gcs, NotNil) - c.Assert(gcs.Bucket, Equals, "bucket2") - c.Assert(gcs.Prefix, Equals, "") - c.Assert(gcs.Endpoint, Equals, "https://gcs.example.com/") - c.Assert(gcs.CredentialsBlob, Equals, "") + require.NotNil(t, gcs) + require.Equal(t, "bucket2", gcs.Bucket) + require.Equal(t, "", gcs.Prefix) + require.Equal(t, "https://gcs.example.com/", gcs.Endpoint) + require.Equal(t, "", gcs.CredentialsBlob) - var credFeilPerm os.FileMode = 0o600 - fakeCredentialsFile := filepath.Join(c.MkDir(), "fakeCredentialsFile") - err = os.WriteFile(fakeCredentialsFile, []byte("fakeCredentials"), credFeilPerm) - c.Assert(err, IsNil) + var credFilePerm os.FileMode = 0o600 + fakeCredentialsFile := filepath.Join(t.TempDir(), "fakeCredentialsFile") + err = os.WriteFile(fakeCredentialsFile, []byte("fakeCredentials"), credFilePerm) + require.NoError(t, err) gcsOpt.GCS.CredentialsFile = fakeCredentialsFile s, err = ParseBackend("gcs://bucket/more/prefix/", gcsOpt) - c.Assert(err, IsNil) + require.NoError(t, err) gcs = s.GetGcs() - c.Assert(gcs, NotNil) - c.Assert(gcs.Bucket, Equals, "bucket") - c.Assert(gcs.Prefix, Equals, "more/prefix") - c.Assert(gcs.Endpoint, Equals, "https://gcs.example.com/") - c.Assert(gcs.CredentialsBlob, Equals, "fakeCredentials") - - err = os.WriteFile(fakeCredentialsFile, []byte("fakeCreds2"), credFeilPerm) - c.Assert(err, IsNil) + require.NotNil(t, gcs) + require.Equal(t, "bucket", gcs.Bucket) + require.Equal(t, "more/prefix", gcs.Prefix) + require.Equal(t, "https://gcs.example.com/", gcs.Endpoint) + require.Equal(t, "fakeCredentials", gcs.CredentialsBlob) + + err = os.WriteFile(fakeCredentialsFile, []byte("fakeCreds2"), credFilePerm) + require.NoError(t, err) s, err = ParseBackend("gs://bucket4/backup/?credentials-file="+url.QueryEscape(fakeCredentialsFile), nil) - c.Assert(err, IsNil) + require.NoError(t, err) gcs = s.GetGcs() - c.Assert(gcs, NotNil) - c.Assert(gcs.Bucket, Equals, "bucket4") - c.Assert(gcs.Prefix, Equals, "backup") - 
c.Assert(gcs.CredentialsBlob, Equals, "fakeCreds2") + require.NotNil(t, gcs) + require.Equal(t, "bucket4", gcs.Bucket) + require.Equal(t, "backup", gcs.Prefix) + require.Equal(t, "fakeCreds2", gcs.CredentialsBlob) s, err = ParseBackend("/test", nil) - c.Assert(err, IsNil) + require.NoError(t, err) local := s.GetLocal() - c.Assert(local, NotNil) + require.NotNil(t, local) expectedLocalPath, err := filepath.Abs("/test") - c.Assert(err, IsNil) - c.Assert(local.GetPath(), Equals, expectedLocalPath) + require.NoError(t, err) + require.Equal(t, expectedLocalPath, local.GetPath()) } -func (r *testStorageSuite) TestFormatBackendURL(c *C) { - url := FormatBackendURL(&backuppb.StorageBackend{ +func TestFormatBackendURL(t *testing.T) { + backendURL := FormatBackendURL(&backuppb.StorageBackend{ Backend: &backuppb.StorageBackend_Local{ Local: &backuppb.Local{Path: "/tmp/file"}, }, }) - c.Assert(url.String(), Equals, "local:///tmp/file") + require.Equal(t, "local:///tmp/file", backendURL.String()) - url = FormatBackendURL(&backuppb.StorageBackend{ + backendURL = FormatBackendURL(&backuppb.StorageBackend{ Backend: &backuppb.StorageBackend_Noop{ Noop: &backuppb.Noop{}, }, }) - c.Assert(url.String(), Equals, "noop:///") + require.Equal(t, "noop:///", backendURL.String()) - url = FormatBackendURL(&backuppb.StorageBackend{ + backendURL = FormatBackendURL(&backuppb.StorageBackend{ Backend: &backuppb.StorageBackend_S3{ S3: &backuppb.S3{ Bucket: "bucket", @@ -165,9 +160,9 @@ func (r *testStorageSuite) TestFormatBackendURL(c *C) { }, }, }) - c.Assert(url.String(), Equals, "s3://bucket/some%20prefix/") + require.Equal(t, "s3://bucket/some%20prefix/", backendURL.String()) - url = FormatBackendURL(&backuppb.StorageBackend{ + backendURL = FormatBackendURL(&backuppb.StorageBackend{ Backend: &backuppb.StorageBackend_Gcs{ Gcs: &backuppb.GCS{ Bucket: "bucket", @@ -176,5 +171,5 @@ func (r *testStorageSuite) TestFormatBackendURL(c *C) { }, }, }) - c.Assert(url.String(), Equals, "gcs://bucket/some%20prefix/") + require.Equal(t, "gcs://bucket/some%20prefix/", backendURL.String()) } diff --git a/br/pkg/storage/s3_test.go b/br/pkg/storage/s3_test.go index cf30828b07c65..9ea80fdebf830 100644 --- a/br/pkg/storage/s3_test.go +++ b/br/pkg/storage/s3_test.go @@ -10,17 +10,18 @@ import ( "io" "math/rand" "os" + "testing" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/service/s3" "github.com/golang/mock/gomock" - . "github.com/pingcap/check" "github.com/pingcap/errors" backuppb "github.com/pingcap/kvproto/pkg/brpb" "github.com/pingcap/tidb/br/pkg/mock" . "github.com/pingcap/tidb/br/pkg/storage" + "github.com/stretchr/testify/require" ) type s3Suite struct { @@ -29,17 +30,8 @@ type s3Suite struct { storage *S3Storage } -type s3SuiteCustom struct{} - -var ( - _ = Suite(&s3Suite{}) - _ = Suite(&s3SuiteCustom{}) -) - -// FIXME: Cannot use the real SetUpTest/TearDownTest to set up the mock -// otherwise the mock error will be ignored. 
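gocheck's `ErrorMatches` asserted non-nilness and matched the message in one step; the translation used throughout this diff splits it into `require.Error` plus `require.Regexp`. One behavioral nuance worth knowing: `ErrorMatches` anchors the pattern against the entire message, while `require.Regexp` matches anywhere in the string. A minimal sketch of the pattern with an illustrative error value:

```go
package storage

import (
	"testing"

	"github.com/pingcap/errors"
	"github.com/stretchr/testify/require"
)

func TestErrorMatchesTranslationSketch(t *testing.T) {
	err := errors.New("storage net not support yet")

	// was: c.Assert(err, ErrorMatches, "storage net not support yet.*")
	require.Error(t, err) // guard against a nil error before matching
	require.Regexp(t, "storage net not support yet.*", err.Error())
}
```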
diff --git a/br/pkg/storage/s3_test.go b/br/pkg/storage/s3_test.go
index cf30828b07c65..9ea80fdebf830 100644
--- a/br/pkg/storage/s3_test.go
+++ b/br/pkg/storage/s3_test.go
@@ -10,17 +10,18 @@ import (
 	"io"
 	"math/rand"
 	"os"
+	"testing"

 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/aws/request"
 	"github.com/aws/aws-sdk-go/service/s3"
 	"github.com/golang/mock/gomock"
-	. "github.com/pingcap/check"
 	"github.com/pingcap/errors"
 	backuppb "github.com/pingcap/kvproto/pkg/brpb"
 	"github.com/pingcap/tidb/br/pkg/mock"
 	. "github.com/pingcap/tidb/br/pkg/storage"
+	"github.com/stretchr/testify/require"
 )

 type s3Suite struct {
@@ -29,17 +30,8 @@ type s3Suite struct {
 	storage    *S3Storage
 }

-type s3SuiteCustom struct{}
-
-var (
-	_ = Suite(&s3Suite{})
-	_ = Suite(&s3SuiteCustom{})
-)
-
-// FIXME: Cannot use the real SetUpTest/TearDownTest to set up the mock
-// otherwise the mock error will be ignored.
-
-func (s *s3Suite) setUpTest(c gomock.TestReporter) {
+func createS3Suite(c gomock.TestReporter) (s *s3Suite, clean func()) {
+	s = new(s3Suite)
 	s.controller = gomock.NewController(c)
 	s.s3 = mock.NewMockS3API(s.controller)
 	s.storage = NewS3StorageForTest(
@@ -53,26 +45,29 @@ func (s *s3Suite) setUpTest(c gomock.TestReporter) {
 			StorageClass: "sc",
 		},
 	)
-}

-func (s *s3Suite) tearDownTest() {
-	s.controller.Finish()
+	clean = func() {
+		s.controller.Finish()
+	}
+
+	return
 }

-func (s *s3Suite) TestApply(c *C) {
+func TestApply(t *testing.T) {
 	type testcase struct {
 		name      string
 		options   S3BackendOptions
 		errMsg    string
 		errReturn bool
 	}
-	testFn := func(test *testcase, c *C) {
-		c.Log(test.name)
+	testFn := func(test *testcase, t *testing.T) {
+		t.Log(test.name)
 		_, err := ParseBackend("s3://bucket2/prefix/", &BackendOptions{S3: test.options})
 		if test.errReturn {
-			c.Assert(err, ErrorMatches, test.errMsg)
+			require.Error(t, err)
+			require.Regexp(t, test.errMsg, err.Error())
 		} else {
-			c.Assert(err, IsNil)
+			require.NoError(t, err)
 		}
 	}
 	tests := []testcase{
@@ -120,27 +115,26 @@ func (s *s3Suite) TestApply(c *C) {
 		},
 	}
 	for i := range tests {
-		testFn(&tests[i], c)
+		testFn(&tests[i], t)
 	}
 }

-func (s *s3Suite) TestApplyUpdate(c *C) {
+func TestApplyUpdate(t *testing.T) {
 	type testcase struct {
 		name    string
 		options S3BackendOptions
 		setEnv  bool
 		s3      *backuppb.S3
 	}
-	testFn := func(test *testcase, c *C) {
-		c.Log(test.name)
+	testFn := func(test *testcase, t *testing.T) {
+		t.Log(test.name)
 		if test.setEnv {
-			os.Setenv("AWS_ACCESS_KEY_ID", "ab")
-			os.Setenv("AWS_SECRET_ACCESS_KEY", "cd")
+			require.NoError(t, os.Setenv("AWS_ACCESS_KEY_ID", "ab"))
+			require.NoError(t, os.Setenv("AWS_SECRET_ACCESS_KEY", "cd"))
 		}
 		u, err := ParseBackend("s3://bucket/prefix/", &BackendOptions{S3: test.options})
-		s3 := u.GetS3()
-		c.Assert(err, IsNil)
-		c.Assert(s3, DeepEquals, test.s3)
+		require.NoError(t, err)
+		require.Equal(t, test.s3, u.GetS3())
 	}

 	tests := []testcase{
@@ -265,11 +259,11 @@ func (s *s3Suite) TestApplyUpdate(c *C) {
 		},
 	}
 	for i := range tests {
-		testFn(&tests[i], c)
+		testFn(&tests[i], t)
 	}
 }

-func (s *s3Suite) TestS3Storage(c *C) {
+func TestS3Storage(t *testing.T) {
 	type testcase struct {
 		name           string
 		s3             *backuppb.S3
@@ -277,27 +271,26 @@ func (s *s3Suite) TestS3Storage(c *C) {
 		hackPermission []Permission
 		sendCredential bool
 	}
-	testFn := func(test *testcase, c *C) {
-		c.Log(test.name)
+	testFn := func(test *testcase, t *testing.T) {
+		t.Log(test.name)
 		ctx := aws.BackgroundContext()
-		s3 := &backuppb.StorageBackend{
+		_, err := New(ctx, &backuppb.StorageBackend{
 			Backend: &backuppb.StorageBackend_S3{
 				S3: test.s3,
 			},
-		}
-		_, err := New(ctx, s3, &ExternalStorageOptions{
+		}, &ExternalStorageOptions{
 			SendCredentials:  test.sendCredential,
 			CheckPermissions: test.hackPermission,
 		})
 		if test.errReturn {
-			c.Assert(err, NotNil)
+			require.Error(t, err)
 			return
 		}
-		c.Assert(err, IsNil)
+		require.NoError(t, err)
 		if test.sendCredential {
-			c.Assert(len(test.s3.AccessKey), Greater, 0)
+			require.Greater(t, len(test.s3.AccessKey), 0)
 		} else {
-			c.Assert(len(test.s3.AccessKey), Equals, 0)
+			require.Equal(t, 0, len(test.s3.AccessKey))
 		}
 	}
 	tests := []testcase{
@@ -406,141 +399,139 @@ func (s *s3Suite) TestS3Storage(c *C) {
 		},
 	}
 	for i := range tests {
-		testFn(&tests[i], c)
+		testFn(&tests[i], t)
 	}
 }

-func (s *s3Suite) TestS3URI(c *C) {
+func TestS3URI(t *testing.T) {
 	backend, err := ParseBackend("s3://bucket/prefix/", nil)
-	c.Assert(err, IsNil)
+	require.NoError(t, err)
 	storage, err := New(context.Background(), backend, &ExternalStorageOptions{})
-	c.Assert(err, IsNil)
-	c.Assert(storage.URI(), Equals, "s3://bucket/prefix/")
+	require.NoError(t, err)
+	require.Equal(t, "s3://bucket/prefix/", storage.URI())
 }

-func (s *s3Suite) TestS3Range(c *C) {
+func TestS3Range(t *testing.T) {
 	contentRange := "bytes 0-9/443"
 	ri, err := ParseRangeInfo(&contentRange)
-	c.Assert(err, IsNil)
-	c.Assert(ri, Equals, RangeInfo{Start: 0, End: 9, Size: 443})
+	require.NoError(t, err)
+	require.Equal(t, RangeInfo{Start: 0, End: 9, Size: 443}, ri)

 	_, err = ParseRangeInfo(nil)
-	c.Assert(err, ErrorMatches, "ContentRange is empty.*")
+	require.Error(t, err)
+	require.Regexp(t, "ContentRange is empty.*", err.Error())

 	badRange := "bytes "
 	_, err = ParseRangeInfo(&badRange)
-	c.Assert(err, ErrorMatches, "invalid content range: 'bytes '.*")
+	require.Error(t, err)
+	require.Regexp(t, "invalid content range: 'bytes '.*", err.Error())
 }

 // TestWriteNoError ensures the WriteFile API issues a PutObject request and wait
 // until the object is available in the S3 bucket.
-func (s *s3Suite) TestWriteNoError(c *C) {
-	s.setUpTest(c)
-	defer s.tearDownTest()
+func TestWriteNoError(t *testing.T) {
+	s, clean := createS3Suite(t)
+	defer clean()

 	ctx := aws.BackgroundContext()

 	putCall := s.s3.EXPECT().
 		PutObjectWithContext(ctx, gomock.Any()).
 		DoAndReturn(func(_ context.Context, input *s3.PutObjectInput, opt ...request.Option) (*s3.PutObjectOutput, error) {
-			c.Assert(aws.StringValue(input.Bucket), Equals, "bucket")
-			c.Assert(aws.StringValue(input.Key), Equals, "prefix/file")
-			c.Assert(aws.StringValue(input.ACL), Equals, "acl")
-			c.Assert(aws.StringValue(input.ServerSideEncryption), Equals, "sse")
-			c.Assert(aws.StringValue(input.StorageClass), Equals, "sc")
+			require.Equal(t, "bucket", aws.StringValue(input.Bucket))
+			require.Equal(t, "prefix/file", aws.StringValue(input.Key))
+			require.Equal(t, "acl", aws.StringValue(input.ACL))
+			require.Equal(t, "sse", aws.StringValue(input.ServerSideEncryption))
+			require.Equal(t, "sc", aws.StringValue(input.StorageClass))
 			body, err := io.ReadAll(input.Body)
-			c.Assert(err, IsNil)
-			c.Assert(body, DeepEquals, []byte("test"))
+			require.NoError(t, err)
+			require.Equal(t, []byte("test"), body)
 			return &s3.PutObjectOutput{}, nil
 		})
 	s.s3.EXPECT().
 		WaitUntilObjectExistsWithContext(ctx, gomock.Any()).
 		DoAndReturn(func(_ context.Context, input *s3.HeadObjectInput, opt ...request.Option) error {
-			c.Assert(aws.StringValue(input.Bucket), Equals, "bucket")
-			c.Assert(aws.StringValue(input.Key), Equals, "prefix/file")
+			require.Equal(t, "bucket", aws.StringValue(input.Bucket))
+			require.Equal(t, "prefix/file", aws.StringValue(input.Key))
 			return nil
 		}).
 		After(putCall)

 	err := s.storage.WriteFile(ctx, "file", []byte("test"))
-	c.Assert(err, IsNil)
+	require.NoError(t, err)
 }

 // TestReadNoError ensures the ReadFile API issues a GetObject request and correctly
 // read the entire body.
-func (s *s3Suite) TestReadNoError(c *C) {
-	s.setUpTest(c)
-	defer s.tearDownTest()
+func TestReadNoError(t *testing.T) {
+	s, clean := createS3Suite(t)
+	defer clean()

 	ctx := aws.BackgroundContext()
 	s.s3.EXPECT().
 		GetObjectWithContext(ctx, gomock.Any()).
 		DoAndReturn(func(_ context.Context, input *s3.GetObjectInput, opt ...request.Option) (*s3.GetObjectOutput, error) {
-			c.Assert(aws.StringValue(input.Bucket), Equals, "bucket")
-			c.Assert(aws.StringValue(input.Key), Equals, "prefix/file")
+			require.Equal(t, "bucket", aws.StringValue(input.Bucket))
+			require.Equal(t, "prefix/file", aws.StringValue(input.Key))
 			return &s3.GetObjectOutput{
 				Body: io.NopCloser(bytes.NewReader([]byte("test"))),
 			}, nil
 		})

 	content, err := s.storage.ReadFile(ctx, "file")
-	c.Assert(err, IsNil)
-	c.Assert(content, DeepEquals, []byte("test"))
+	require.NoError(t, err)
+	require.Equal(t, []byte("test"), content)
 }

 // TestFileExistsNoError ensures the FileExists API issues a HeadObject request
 // and reports a file exists.
-func (s *s3Suite) TestFileExistsNoError(c *C) {
-	s.setUpTest(c)
-	defer s.tearDownTest()
+func TestFileExistsNoError(t *testing.T) {
+	s, clean := createS3Suite(t)
+	defer clean()

 	ctx := aws.BackgroundContext()
 	s.s3.EXPECT().
 		HeadObjectWithContext(ctx, gomock.Any()).
 		DoAndReturn(func(_ context.Context, input *s3.HeadObjectInput, opt ...request.Option) (*s3.HeadObjectOutput, error) {
-			c.Assert(aws.StringValue(input.Bucket), Equals, "bucket")
-			c.Assert(aws.StringValue(input.Key), Equals, "prefix/file")
+			require.Equal(t, "bucket", aws.StringValue(input.Bucket))
+			require.Equal(t, "prefix/file", aws.StringValue(input.Key))
 			return &s3.HeadObjectOutput{}, nil
 		})

 	exists, err := s.storage.FileExists(ctx, "file")
-	c.Assert(err, IsNil)
-	c.Assert(exists, IsTrue)
+	require.NoError(t, err)
+	require.True(t, exists)
 }

-func (s *s3Suite) TestDeleteFileNoError(c *C) {
-	s.setUpTest(c)
-	defer s.tearDownTest()
+func TestDeleteFileNoError(t *testing.T) {
+	s, clean := createS3Suite(t)
+	defer clean()

 	ctx := aws.BackgroundContext()
 	s.s3.EXPECT().
 		DeleteObjectWithContext(ctx, gomock.Any()).
 		DoAndReturn(func(_ context.Context, input *s3.DeleteObjectInput, opt ...request.Option) (*s3.DeleteObjectInput, error) {
-			c.Assert(aws.StringValue(input.Bucket), Equals, "bucket")
-			c.Assert(aws.StringValue(input.Key), Equals, "prefix/file")
+			require.Equal(t, "bucket", aws.StringValue(input.Bucket))
+			require.Equal(t, "prefix/file", aws.StringValue(input.Key))
 			return &s3.DeleteObjectInput{}, nil
 		})

 	err := s.storage.DeleteFile(ctx, "file")
-	c.Assert(err, IsNil)
+	require.NoError(t, err)
 }

-func (s *s3Suite) TestDeleteFileMissing(c *C) {
-	s.setUpTest(c)
-	defer s.tearDownTest()
+func TestDeleteFileMissing(t *testing.T) {
+	s, clean := createS3Suite(t)
+	defer clean()

 	ctx := aws.BackgroundContext()
-	awserr := awserr.New(s3.ErrCodeNoSuchKey, "no such key", nil)
-	s.s3.EXPECT().
-		DeleteObjectWithContext(ctx, gomock.Any()).
-		Return(nil, awserr)
-
-	err := s.storage.DeleteFile(ctx, "file-missing")
-	c.Assert(err, ErrorMatches, awserr.Error())
+	err := awserr.New(s3.ErrCodeNoSuchKey, "no such key", nil)
+	s.s3.EXPECT().DeleteObjectWithContext(ctx, gomock.Any()).Return(nil, err)
+	require.EqualError(t, s.storage.DeleteFile(ctx, "file-missing"), err.Error())
 }

-func (s *s3Suite) TestDeleteFileError(c *C) {
-	s.setUpTest(c)
-	defer s.tearDownTest()
+func TestDeleteFileError(t *testing.T) {
+	s, clean := createS3Suite(t)
+	defer clean()

 	ctx := aws.BackgroundContext()
 	expectedErr := errors.New("just some unrelated error")
@@ -550,14 +541,15 @@ func (s *s3Suite) TestDeleteFileError(c *C) {
 		Return(nil, expectedErr)

 	err := s.storage.DeleteFile(ctx, "file3")
-	c.Assert(err, ErrorMatches, `\Q`+expectedErr.Error()+`\E`)
+	require.Error(t, err)
+	require.Regexp(t, `\Q`+expectedErr.Error()+`\E`, err.Error())
 }

 // TestFileExistsNoSuckKey ensures FileExists API reports file missing if S3's
 // HeadObject request replied NoSuchKey.
-func (s *s3Suite) TestFileExistsMissing(c *C) {
-	s.setUpTest(c)
-	defer s.tearDownTest()
+func TestFileExistsMissing(t *testing.T) {
+	s, clean := createS3Suite(t)
+	defer clean()

 	ctx := aws.BackgroundContext()
 	s.s3.EXPECT().
@@ -565,14 +557,14 @@ func (s *s3Suite) TestFileExistsMissing(c *C) {
 		Return(nil, awserr.New(s3.ErrCodeNoSuchKey, "no such key", nil))

 	exists, err := s.storage.FileExists(ctx, "file-missing")
-	c.Assert(err, IsNil)
-	c.Assert(exists, IsFalse)
+	require.NoError(t, err)
+	require.False(t, exists)
 }

 // TestWriteError checks that a PutObject error is propagated.
-func (s *s3Suite) TestWriteError(c *C) {
-	s.setUpTest(c)
-	defer s.tearDownTest()
+func TestWriteError(t *testing.T) {
+	s, clean := createS3Suite(t)
+	defer clean()

 	ctx := aws.BackgroundContext()
 	expectedErr := awserr.New(s3.ErrCodeNoSuchBucket, "no such bucket", nil)
@@ -582,13 +574,13 @@ func (s *s3Suite) TestWriteError(c *C) {
 		Return(nil, expectedErr)

 	err := s.storage.WriteFile(ctx, "file2", []byte("test"))
-	c.Assert(err, ErrorMatches, `\Q`+expectedErr.Error()+`\E`)
+	require.Regexp(t, `\Q`+expectedErr.Error()+`\E`, err.Error())
 }

 // TestWriteError checks that a GetObject error is propagated.
-func (s *s3Suite) TestReadError(c *C) {
-	s.setUpTest(c)
-	defer s.tearDownTest()
+func TestReadError(t *testing.T) {
+	s, clean := createS3Suite(t)
+	defer clean()

 	ctx := aws.BackgroundContext()
 	expectedErr := awserr.New(s3.ErrCodeNoSuchKey, "no such key", nil)
@@ -598,15 +590,14 @@ func (s *s3Suite) TestReadError(c *C) {
 		Return(nil, expectedErr)

 	_, err := s.storage.ReadFile(ctx, "file-missing")
-
-	c.Assert(err, ErrorMatches, "failed to read s3 file, file info: "+
-		"input.bucket='bucket', input.key='prefix/file-missing': "+expectedErr.Error())
+	require.Error(t, err)
+	require.Regexp(t, "failed to read s3 file, file info: input.bucket='bucket', input.key='prefix/file-missing': ", err.Error())
 }

 // TestFileExistsError checks that a HeadObject error is propagated.
-func (s *s3Suite) TestFileExistsError(c *C) {
-	s.setUpTest(c)
-	defer s.tearDownTest()
+func TestFileExistsError(t *testing.T) {
+	s, clean := createS3Suite(t)
+	defer clean()

 	ctx := aws.BackgroundContext()
 	expectedErr := errors.New("just some unrelated error")
@@ -616,19 +607,20 @@ func (s *s3Suite) TestFileExistsError(c *C) {
 		Return(nil, expectedErr)

 	_, err := s.storage.FileExists(ctx, "file3")
-	c.Assert(err, ErrorMatches, `\Q`+expectedErr.Error()+`\E`)
+	require.Error(t, err)
+	require.Regexp(t, `\Q`+expectedErr.Error()+`\E`, err.Error())
 }

 // TestOpenAsBufio checks that we can open a file for reading via bufio.
-func (s *s3Suite) TestOpenAsBufio(c *C) {
-	s.setUpTest(c)
-	defer s.tearDownTest()
+func TestOpenAsBufio(t *testing.T) {
+	s, clean := createS3Suite(t)
+	defer clean()

 	ctx := aws.BackgroundContext()
 	s.s3.EXPECT().
 		GetObjectWithContext(ctx, gomock.Any()).
 		DoAndReturn(func(_ context.Context, input *s3.GetObjectInput, opt ...request.Option) (*s3.GetObjectOutput, error) {
-			c.Assert(aws.StringValue(input.Range), Equals, "bytes=0-")
+			require.Equal(t, "bytes=0-", aws.StringValue(input.Range))
 			return &s3.GetObjectOutput{
 				Body:         io.NopCloser(bytes.NewReader([]byte("plain text\ncontent"))),
 				ContentRange: aws.String("bytes 0-17/18"),
 			}, nil
 		})

@@ -636,15 +628,15 @@ func (s *s3Suite) TestOpenAsBufio(c *C) {
 	reader, err := s.storage.Open(ctx, "plain-text-file")
-	c.Assert(err, IsNil)
-	defer c.Assert(reader.Close(), IsNil)
+	require.NoError(t, err)
+	require.Nil(t, reader.Close())
 	bufReader := bufio.NewReaderSize(reader, 5)
 	content, err := bufReader.ReadString('\n')
-	c.Assert(err, IsNil)
-	c.Assert(content, Equals, "plain text\n")
+	require.NoError(t, err)
+	require.Equal(t, "plain text\n", content)
 	content, err = bufReader.ReadString('\n')
-	c.Assert(err, ErrorMatches, "EOF")
-	c.Assert(content, Equals, "content")
+	require.EqualError(t, err, "EOF")
+	require.Equal(t, "content", content)
 }

 // alphabetReader is used in TestOpenReadSlowly. This Reader produces a single
@@ -669,9 +661,9 @@ func (r *alphabetReader) Close() error {

 // TestOpenReadSlowly checks that we can open a file for reading, even if the
 // reader emits content one byte at a time.
-func (s *s3Suite) TestOpenReadSlowly(c *C) {
-	s.setUpTest(c)
-	defer s.tearDownTest()
+func TestOpenReadSlowly(t *testing.T) {
+	s, clean := createS3Suite(t)
+	defer clean()

 	ctx := aws.BackgroundContext()
 	s.s3.EXPECT().
@@ -682,71 +674,73 @@ func (s *s3Suite) TestOpenReadSlowly(c *C) {
 		}, nil)

 	reader, err := s.storage.Open(ctx, "alphabets")
-	c.Assert(err, IsNil)
+	require.NoError(t, err)
 	res, err := io.ReadAll(reader)
-	c.Assert(err, IsNil)
-	c.Assert(res, DeepEquals, []byte("ABCDEFGHIJKLMNOPQRSTUVWXYZ"))
+	require.NoError(t, err)
+	require.Equal(t, []byte("ABCDEFGHIJKLMNOPQRSTUVWXYZ"), res)
 }

 // TestOpenSeek checks that Seek is implemented correctly.
-func (s *s3Suite) TestOpenSeek(c *C) {
-	s.setUpTest(c)
-	defer s.tearDownTest()
+func TestOpenSeek(t *testing.T) {
+	s, clean := createS3Suite(t)
+	defer clean()

 	ctx := aws.BackgroundContext()
 	someRandomBytes := make([]byte, 1000000)
 	rand.Read(someRandomBytes)
 	// ^ we just want some random bytes for testing, we don't care about its security.

-	s.expectedCalls(ctx, c, someRandomBytes, []int{0, 998000, 990100}, func(data []byte, offset int) io.ReadCloser {
+	s.expectedCalls(ctx, t, someRandomBytes, []int{0, 998000, 990100}, func(data []byte, offset int) io.ReadCloser {
 		return io.NopCloser(bytes.NewReader(data[offset:]))
 	})

 	reader, err := s.storage.Open(ctx, "random")
-	c.Assert(err, IsNil)
-	defer reader.Close()
+	require.NoError(t, err)
+	defer func() {
+		require.NoError(t, reader.Close())
+	}()

 	// first do some simple read...
 	slice := make([]byte, 100)
 	n, err := io.ReadFull(reader, slice)
-	c.Assert(err, IsNil)
-	c.Assert(n, Equals, 100)
-	c.Assert(slice, DeepEquals, someRandomBytes[:100])
+	require.NoError(t, err)
+	require.Equal(t, 100, n)
+	require.Equal(t, someRandomBytes[:100], slice)

 	// a short seek will not result in a different GetObject request.
 	offset, err := reader.Seek(2000, io.SeekStart)
-	c.Assert(err, IsNil)
-	c.Assert(offset, Equals, int64(2000))
+	require.NoError(t, err)
+	require.Equal(t, int64(2000), offset)
 	n, err = io.ReadFull(reader, slice)
-	c.Assert(err, IsNil)
-	c.Assert(n, Equals, 100)
-	c.Assert(slice, DeepEquals, someRandomBytes[2000:2100])
+	require.NoError(t, err)
+	require.Equal(t, 100, n)
+	require.Equal(t, someRandomBytes[2000:2100], slice)

 	// a long seek will perform a new GetObject request
 	offset, err = reader.Seek(-2000, io.SeekEnd)
-	c.Assert(err, IsNil)
-	c.Assert(offset, Equals, int64(998000))
+	require.NoError(t, err)
+	require.Equal(t, int64(998000), offset)
 	n, err = io.ReadFull(reader, slice)
-	c.Assert(err, IsNil)
-	c.Assert(n, Equals, 100)
-	c.Assert(slice, DeepEquals, someRandomBytes[998000:998100])
+	require.NoError(t, err)
+	require.Equal(t, 100, n)
+	require.Equal(t, someRandomBytes[998000:998100], slice)

 	// jumping backward should be fine, but would perform a new GetObject request.
 	offset, err = reader.Seek(-8000, io.SeekCurrent)
-	c.Assert(err, IsNil)
-	c.Assert(offset, Equals, int64(990100))
+	require.NoError(t, err)
+	require.Equal(t, int64(990100), offset)
 	n, err = io.ReadFull(reader, slice)
-	c.Assert(err, IsNil)
-	c.Assert(n, Equals, 100)
-	c.Assert(slice, DeepEquals, someRandomBytes[990100:990200])
+	require.NoError(t, err)
+	require.Equal(t, 100, n)
+	require.Equal(t, someRandomBytes[990100:990200], slice)

 	// test seek to the file end or bigger positions
 	for _, p := range []int64{1000000, 1000001, 2000000} {
 		offset, err = reader.Seek(p, io.SeekStart)
-		c.Assert(offset, Equals, int64(1000000))
-		c.Assert(err, IsNil)
+		require.Equal(t, int64(1000000), offset)
+		require.NoError(t, err)
 		_, err := reader.Read(slice)
-		c.Assert(err, Equals, io.EOF)
+		require.Equal(t, io.EOF, err)
 	}
 }

@@ -768,14 +762,14 @@ func (r *limitedBytesReader) Read(p []byte) (n int, err error) {
 	return
 }

-func (s *s3Suite) expectedCalls(ctx context.Context, c *C, data []byte, startOffsets []int, newReader func(data []byte, offset int) io.ReadCloser) {
+func (s *s3Suite) expectedCalls(ctx context.Context, t *testing.T, data []byte, startOffsets []int, newReader func(data []byte, offset int) io.ReadCloser) {
 	var lastCall *gomock.Call
 	for _, offset := range startOffsets {
 		thisOffset := offset
 		thisCall := s.s3.EXPECT().
 			GetObjectWithContext(ctx, gomock.Any()).
 			DoAndReturn(func(_ context.Context, input *s3.GetObjectInput, opt ...request.Option) (*s3.GetObjectOutput, error) {
-				c.Assert(aws.StringValue(input.Range), Equals, fmt.Sprintf("bytes=%d-", thisOffset))
+				require.Equal(t, fmt.Sprintf("bytes=%d-", thisOffset), aws.StringValue(input.Range))
 				return &s3.GetObjectOutput{
 					Body:         newReader(data, thisOffset),
 					ContentRange: aws.String(fmt.Sprintf("bytes %d-%d/%d", thisOffset, len(data)-1, len(data))),
@@ -789,30 +783,32 @@ func (s *s3Suite) expectedCalls(ctx context.Context, c *C, data []byte, startOff
 	}
 }

 // TestS3ReaderWithRetryEOF check the Read with retry and end with io.EOF.
-func (s *s3Suite) TestS3ReaderWithRetryEOF(c *C) {
-	s.setUpTest(c)
-	defer s.tearDownTest()
+func TestS3ReaderWithRetryEOF(t *testing.T) {
+	s, clean := createS3Suite(t)
+	defer clean()

 	ctx := aws.BackgroundContext()
 	someRandomBytes := make([]byte, 100)
 	rand.Read(someRandomBytes) //nolint:gosec
 	// ^ we just want some random bytes for testing, we don't care about its security.

-	s.expectedCalls(ctx, c, someRandomBytes, []int{0, 20, 50, 75}, func(data []byte, offset int) io.ReadCloser {
+	s.expectedCalls(ctx, t, someRandomBytes, []int{0, 20, 50, 75}, func(data []byte, offset int) io.ReadCloser {
 		return io.NopCloser(&limitedBytesReader{Reader: bytes.NewReader(data[offset:]), limit: 30})
 	})

 	reader, err := s.storage.Open(ctx, "random")
-	c.Assert(err, IsNil)
-	defer reader.Close()
+	require.NoError(t, err)
+	defer func() {
+		require.NoError(t, reader.Close())
+	}()

 	var n int
 	slice := make([]byte, 30)
 	readAndCheck := func(cnt, offset int) {
 		n, err = io.ReadFull(reader, slice[:cnt])
-		c.Assert(err, IsNil)
-		c.Assert(n, Equals, cnt)
-		c.Assert(slice[:cnt], DeepEquals, someRandomBytes[offset:offset+cnt])
+		require.NoError(t, err)
+		require.Equal(t, cnt, n)
+		require.Equal(t, someRandomBytes[offset:offset+cnt], slice[:cnt])
 	}

 	// first do some simple read...
@@ -826,38 +822,40 @@ func (s *s3Suite) TestS3ReaderWithRetryEOF(c *C) {

 	// there only remains 10 bytes
 	n, err = reader.Read(slice)
-	c.Assert(err, IsNil)
-	c.Assert(n, Equals, 5)
+	require.NoError(t, err)
+	require.Equal(t, 5, n)

 	_, err = reader.Read(slice)
-	c.Assert(err, Equals, io.EOF)
+	require.Equal(t, io.EOF, err)
 }

 // TestS3ReaderWithRetryFailed check the Read with retry failed after maxRetryTimes.
-func (s *s3Suite) TestS3ReaderWithRetryFailed(c *C) {
-	s.setUpTest(c)
-	defer s.tearDownTest()
+func TestS3ReaderWithRetryFailed(t *testing.T) {
+	s, clean := createS3Suite(t)
+	defer clean()

 	ctx := aws.BackgroundContext()
 	someRandomBytes := make([]byte, 100)
 	rand.Read(someRandomBytes) //nolint:gosec
 	// ^ we just want some random bytes for testing, we don't care about its security.

-	s.expectedCalls(ctx, c, someRandomBytes, []int{0, 20, 40, 60}, func(data []byte, offset int) io.ReadCloser {
+	s.expectedCalls(ctx, t, someRandomBytes, []int{0, 20, 40, 60}, func(data []byte, offset int) io.ReadCloser {
 		return io.NopCloser(&limitedBytesReader{Reader: bytes.NewReader(data[offset:]), limit: 30})
 	})

 	reader, err := s.storage.Open(ctx, "random")
-	c.Assert(err, IsNil)
-	defer reader.Close()
+	require.NoError(t, err)
+	defer func() {
+		require.NoError(t, reader.Close())
+	}()

 	var n int
 	slice := make([]byte, 20)
 	readAndCheck := func(cnt, offset int) {
 		n, err = io.ReadFull(reader, slice[:cnt])
-		c.Assert(err, IsNil)
-		c.Assert(n, Equals, cnt)
-		c.Assert(slice[:cnt], DeepEquals, someRandomBytes[offset:offset+cnt])
+		require.NoError(t, err)
+		require.Equal(t, cnt, n)
+		require.Equal(t, someRandomBytes[offset:offset+cnt], slice[:cnt])
 	}

 	// we can retry 3 times, so read will succeed for 4 times
@@ -866,13 +864,13 @@ func (s *s3Suite) TestS3ReaderWithRetryFailed(c *C) {
 	}

 	_, err = reader.Read(slice)
-	c.Assert(err, ErrorMatches, "read exceeded limit")
+	require.EqualError(t, err, "read exceeded limit")
 }

 // TestWalkDir checks WalkDir retrieves all directory content under a prefix.
-func (s *s3Suite) TestWalkDir(c *C) {
-	s.setUpTest(c)
-	defer s.tearDownTest()
+func TestWalkDir(t *testing.T) {
+	s, clean := createS3Suite(t)
+	defer clean()

 	ctx := aws.BackgroundContext()
 	contents := []*s3.Object{
@@ -902,11 +900,11 @@ func (s *s3Suite) TestWalkDir(c *C) {
 	firstCall := s.s3.EXPECT().
 		ListObjectsWithContext(ctx, gomock.Any()).
 		DoAndReturn(func(_ context.Context, input *s3.ListObjectsInput, opt ...request.Option) (*s3.ListObjectsOutput, error) {
-			c.Assert(aws.StringValue(input.Bucket), Equals, "bucket")
-			c.Assert(aws.StringValue(input.Prefix), Equals, "prefix/sp/")
-			c.Assert(aws.StringValue(input.Marker), Equals, "")
-			c.Assert(aws.Int64Value(input.MaxKeys), Equals, int64(2))
-			c.Assert(aws.StringValue(input.Delimiter), Equals, "")
+			require.Equal(t, "bucket", aws.StringValue(input.Bucket))
+			require.Equal(t, "prefix/sp/", aws.StringValue(input.Prefix))
+			require.Equal(t, "", aws.StringValue(input.Marker))
+			require.Equal(t, int64(2), aws.Int64Value(input.MaxKeys))
+			require.Equal(t, "", aws.StringValue(input.Delimiter))
 			return &s3.ListObjectsOutput{
 				IsTruncated: aws.Bool(true),
 				Contents:    contents[:2],
@@ -915,8 +913,8 @@ func (s *s3Suite) TestWalkDir(c *C) {
 	secondCall := s.s3.EXPECT().
 		ListObjectsWithContext(ctx, gomock.Any()).
 		DoAndReturn(func(_ context.Context, input *s3.ListObjectsInput, opt ...request.Option) (*s3.ListObjectsOutput, error) {
-			c.Assert(aws.StringValue(input.Marker), Equals, aws.StringValue(contents[1].Key))
-			c.Assert(aws.Int64Value(input.MaxKeys), Equals, int64(2))
+			require.Equal(t, aws.StringValue(contents[1].Key), aws.StringValue(input.Marker))
+			require.Equal(t, int64(2), aws.Int64Value(input.MaxKeys))
 			return &s3.ListObjectsOutput{
 				IsTruncated: aws.Bool(true),
 				Contents:    contents[2:4],
@@ -926,8 +924,8 @@ func (s *s3Suite) TestWalkDir(c *C) {
 	thirdCall := s.s3.EXPECT().
 		ListObjectsWithContext(ctx, gomock.Any()).
 		DoAndReturn(func(_ context.Context, input *s3.ListObjectsInput, opt ...request.Option) (*s3.ListObjectsOutput, error) {
-			c.Assert(aws.StringValue(input.Marker), Equals, aws.StringValue(contents[3].Key))
-			c.Assert(aws.Int64Value(input.MaxKeys), Equals, int64(2))
+			require.Equal(t, aws.StringValue(contents[3].Key), aws.StringValue(input.Marker))
+			require.Equal(t, int64(2), aws.Int64Value(input.MaxKeys))
 			return &s3.ListObjectsOutput{
 				IsTruncated: aws.Bool(false),
 				Contents:    contents[4:],
@@ -937,11 +935,11 @@ func (s *s3Suite) TestWalkDir(c *C) {
 	fourthCall := s.s3.EXPECT().
 		ListObjectsWithContext(ctx, gomock.Any()).
 		DoAndReturn(func(_ context.Context, input *s3.ListObjectsInput, opt ...request.Option) (*s3.ListObjectsOutput, error) {
-			c.Assert(aws.StringValue(input.Bucket), Equals, "bucket")
-			c.Assert(aws.StringValue(input.Prefix), Equals, "prefix/")
-			c.Assert(aws.StringValue(input.Marker), Equals, "")
-			c.Assert(aws.Int64Value(input.MaxKeys), Equals, int64(4))
-			c.Assert(aws.StringValue(input.Delimiter), Equals, "")
+			require.Equal(t, "bucket", aws.StringValue(input.Bucket))
+			require.Equal(t, "prefix/", aws.StringValue(input.Prefix))
+			require.Equal(t, "", aws.StringValue(input.Marker))
+			require.Equal(t, int64(4), aws.Int64Value(input.MaxKeys))
+			require.Equal(t, "", aws.StringValue(input.Delimiter))
 			return &s3.ListObjectsOutput{
 				IsTruncated: aws.Bool(true),
 				Contents:    contents[:4],
@@ -951,8 +949,8 @@ func (s *s3Suite) TestWalkDir(c *C) {
 	s.s3.EXPECT().
 		ListObjectsWithContext(ctx, gomock.Any()).
 		DoAndReturn(func(_ context.Context, input *s3.ListObjectsInput, opt ...request.Option) (*s3.ListObjectsOutput, error) {
-			c.Assert(aws.StringValue(input.Marker), Equals, aws.StringValue(contents[3].Key))
-			c.Assert(aws.Int64Value(input.MaxKeys), Equals, int64(4))
+			require.Equal(t, aws.StringValue(contents[3].Key), aws.StringValue(input.Marker))
+			require.Equal(t, int64(4), aws.Int64Value(input.MaxKeys))
 			return &s3.ListObjectsOutput{
 				IsTruncated: aws.Bool(false),
 				Contents:    contents[4:],
@@ -966,15 +964,14 @@ func (s *s3Suite) TestWalkDir(c *C) {
 		ctx,
 		&WalkOption{SubDir: "sp", ListCount: 2},
 		func(path string, size int64) error {
-			comment := Commentf("index = %d", i)
-			c.Assert("prefix/"+path, Equals, *contents[i].Key, comment)
-			c.Assert(size, Equals, *contents[i].Size, comment)
+			require.Equal(t, *contents[i].Key, "prefix/"+path, "index = %d", i)
+			require.Equal(t, *contents[i].Size, size, "index = %d", i)
 			i++
 			return nil
 		},
 	)
-	c.Assert(err, IsNil)
-	c.Assert(i, Equals, len(contents))
+	require.NoError(t, err)
+	require.Equal(t, len(contents), i)

 	// test with empty subDir
 	i = 0
@@ -982,20 +979,19 @@ func (s *s3Suite) TestWalkDir(c *C) {
 		ctx,
 		&WalkOption{ListCount: 4},
 		func(path string, size int64) error {
-			comment := Commentf("index = %d", i)
-			c.Assert("prefix/"+path, Equals, *contents[i].Key, comment)
-			c.Assert(size, Equals, *contents[i].Size, comment)
+			require.Equal(t, *contents[i].Key, "prefix/"+path, "index = %d", i)
+			require.Equal(t, *contents[i].Size, size, "index = %d", i)
 			i++
 			return nil
 		},
 	)
-	c.Assert(err, IsNil)
-	c.Assert(i, Equals, len(contents))
+	require.NoError(t, err)
+	require.Equal(t, len(contents), i)
 }

 // TestWalkDirBucket checks WalkDir retrieves all directory content under a bucket.
-func (s *s3SuiteCustom) TestWalkDirWithEmptyPrefix(c *C) {
-	controller := gomock.NewController(c)
+func TestWalkDirWithEmptyPrefix(t *testing.T) {
+	controller := gomock.NewController(t)
 	s3API := mock.NewMockS3API(controller)
 	storage := NewS3StorageForTest(
 		s3API,
@@ -1024,11 +1020,11 @@ func (s *s3SuiteCustom) TestWalkDirWithEmptyPrefix(c *C) {
 	firstCall := s3API.EXPECT().
 		ListObjectsWithContext(ctx, gomock.Any()).
 		DoAndReturn(func(_ context.Context, input *s3.ListObjectsInput, opt ...request.Option) (*s3.ListObjectsOutput, error) {
-			c.Assert(aws.StringValue(input.Bucket), Equals, "bucket")
-			c.Assert(aws.StringValue(input.Prefix), Equals, "")
-			c.Assert(aws.StringValue(input.Marker), Equals, "")
-			c.Assert(aws.Int64Value(input.MaxKeys), Equals, int64(2))
-			c.Assert(aws.StringValue(input.Delimiter), Equals, "")
+			require.Equal(t, "bucket", aws.StringValue(input.Bucket))
+			require.Equal(t, "", aws.StringValue(input.Prefix))
+			require.Equal(t, "", aws.StringValue(input.Marker))
+			require.Equal(t, int64(2), aws.Int64Value(input.MaxKeys))
+			require.Equal(t, "", aws.StringValue(input.Delimiter))
 			return &s3.ListObjectsOutput{
 				IsTruncated: aws.Bool(false),
 				Contents:    contents,
@@ -1037,11 +1033,11 @@ func (s *s3SuiteCustom) TestWalkDirWithEmptyPrefix(c *C) {
 	s3API.EXPECT().
 		ListObjectsWithContext(ctx, gomock.Any()).
 		DoAndReturn(func(_ context.Context, input *s3.ListObjectsInput, opt ...request.Option) (*s3.ListObjectsOutput, error) {
-			c.Assert(aws.StringValue(input.Bucket), Equals, "bucket")
-			c.Assert(aws.StringValue(input.Prefix), Equals, "sp/")
-			c.Assert(aws.StringValue(input.Marker), Equals, "")
-			c.Assert(aws.Int64Value(input.MaxKeys), Equals, int64(2))
-			c.Assert(aws.StringValue(input.Delimiter), Equals, "")
+			require.Equal(t, "bucket", aws.StringValue(input.Bucket))
+			require.Equal(t, "sp/", aws.StringValue(input.Prefix))
+			require.Equal(t, "", aws.StringValue(input.Marker))
+			require.Equal(t, int64(2), aws.Int64Value(input.MaxKeys))
+			require.Equal(t, "", aws.StringValue(input.Delimiter))
 			return &s3.ListObjectsOutput{
 				IsTruncated: aws.Bool(false),
 				Contents:    contents[:1],
@@ -1055,15 +1051,14 @@ func (s *s3SuiteCustom) TestWalkDirWithEmptyPrefix(c *C) {
 		ctx,
 		&WalkOption{SubDir: "", ListCount: 2},
 		func(path string, size int64) error {
-			comment := Commentf("index = %d", i)
-			c.Assert(path, Equals, *contents[i].Key, comment)
-			c.Assert(size, Equals, *contents[i].Size, comment)
+			require.Equal(t, *contents[i].Key, path, "index = %d", i)
+			require.Equal(t, *contents[i].Size, size, "index = %d", i)
 			i++
 			return nil
 		},
 	)
-	c.Assert(err, IsNil)
-	c.Assert(i, Equals, len(contents))
+	require.NoError(t, err)
+	require.Equal(t, len(contents), i)

 	// test with non-empty sub-dir
 	i = 0
@@ -1071,13 +1066,12 @@ func (s *s3SuiteCustom) TestWalkDirWithEmptyPrefix(c *C) {
 		ctx,
 		&WalkOption{SubDir: "sp", ListCount: 2},
 		func(path string, size int64) error {
-			comment := Commentf("index = %d", i)
-			c.Assert(path, Equals, *contents[i].Key, comment)
-			c.Assert(size, Equals, *contents[i].Size, comment)
+			require.Equal(t, *contents[i].Key, path, "index = %d", i)
+			require.Equal(t, *contents[i].Size, size, "index = %d", i)
 			i++
 			return nil
 		},
 	)
-	c.Assert(err, IsNil)
-	c.Assert(i, Equals, 1)
+	require.NoError(t, err)
+	require.Equal(t, 1, i)
 }
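With the suites gone, every S3 test builds its mock through `createS3Suite` and must remember `defer clean()`, where `clean` runs `controller.Finish()` to verify the recorded expectations. On Go 1.14+ the same contract can be attached to the test itself via `t.Cleanup`; a sketch under the assumption that `mock.NewMockS3API` keeps the signature used above (the package name and `s3Fixture` type are hypothetical):

```go
package storage_test

import (
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/pingcap/tidb/br/pkg/mock"
)

// s3Fixture mirrors the parts of s3Suite this sketch needs.
type s3Fixture struct {
	controller *gomock.Controller
	s3         *mock.MockS3API
}

// newS3Fixture registers controller.Finish with t.Cleanup, so callers
// cannot forget the `defer clean()` step; Finish still verifies that
// every EXPECT()ed call happened once the test ends.
func newS3Fixture(t *testing.T) *s3Fixture {
	f := &s3Fixture{controller: gomock.NewController(t)}
	f.s3 = mock.NewMockS3API(f.controller)
	t.Cleanup(f.controller.Finish)
	return f
}
```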
"github.com/pingcap/check" + "github.com/stretchr/testify/require" ) -func (r *testStorageSuite) TestExternalFileWriter(c *C) { - dir := c.MkDir() +func TestExternalFileWriter(t *testing.T) { + dir := t.TempDir() type testcase struct { name string content []string } - testFn := func(test *testcase, c *C) { - c.Log(test.name) + testFn := func(test *testcase, t *testing.T) { + t.Log(test.name) backend, err := ParseBackend("local://"+filepath.ToSlash(dir), nil) - c.Assert(err, IsNil) + require.NoError(t, err) ctx := context.Background() storage, err := Create(ctx, backend, true) - c.Assert(err, IsNil) + require.NoError(t, err) fileName := strings.ReplaceAll(test.name, " ", "-") + ".txt" writer, err := storage.Create(ctx, fileName) - c.Assert(err, IsNil) + require.NoError(t, err) for _, str := range test.content { p := []byte(str) written, err2 := writer.Write(ctx, p) - c.Assert(err2, IsNil) - c.Assert(written, Equals, len(p)) + require.Nil(t, err2) + require.Equal(t, len(p), written) } err = writer.Close(ctx) - c.Assert(err, IsNil) + require.NoError(t, err) content, err := os.ReadFile(filepath.Join(dir, fileName)) - c.Assert(err, IsNil) - c.Assert(string(content), Equals, strings.Join(test.content, "")) + require.NoError(t, err) + require.Equal(t, strings.Join(test.content, ""), string(content)) } tests := []testcase{ { @@ -82,57 +83,57 @@ func (r *testStorageSuite) TestExternalFileWriter(c *C) { }, } for i := range tests { - testFn(&tests[i], c) + testFn(&tests[i], t) } } -func (r *testStorageSuite) TestCompressReaderWriter(c *C) { - dir := c.MkDir() +func TestCompressReaderWriter(t *testing.T) { + dir := t.TempDir() type testcase struct { name string content []string compressType CompressType } - testFn := func(test *testcase, c *C) { - c.Log(test.name) + testFn := func(test *testcase, t *testing.T) { + t.Log(test.name) backend, err := ParseBackend("local://"+filepath.ToSlash(dir), nil) - c.Assert(err, IsNil) + require.NoError(t, err) ctx := context.Background() storage, err := Create(ctx, backend, true) - c.Assert(err, IsNil) + require.NoError(t, err) storage = WithCompression(storage, Gzip) fileName := strings.ReplaceAll(test.name, " ", "-") + ".txt.gz" writer, err := storage.Create(ctx, fileName) - c.Assert(err, IsNil) + require.NoError(t, err) for _, str := range test.content { p := []byte(str) written, err2 := writer.Write(ctx, p) - c.Assert(err2, IsNil) - c.Assert(written, Equals, len(p)) + require.Nil(t, err2) + require.Equal(t, len(p), written) } err = writer.Close(ctx) - c.Assert(err, IsNil) + require.NoError(t, err) // make sure compressed file is written correctly file, err := os.Open(filepath.Join(dir, fileName)) - c.Assert(err, IsNil) + require.NoError(t, err) r, err := newCompressReader(test.compressType, file) - c.Assert(err, IsNil) + require.NoError(t, err) var bf bytes.Buffer _, err = bf.ReadFrom(r) - c.Assert(err, IsNil) - c.Assert(bf.String(), Equals, strings.Join(test.content, "")) - c.Assert(r.Close(), IsNil) + require.NoError(t, err) + require.Equal(t, strings.Join(test.content, ""), bf.String()) + require.Nil(t, r.Close()) // test withCompression Open r, err = storage.Open(ctx, fileName) - c.Assert(err, IsNil) + require.NoError(t, err) content, err := io.ReadAll(r) - c.Assert(err, IsNil) - c.Assert(string(content), Equals, strings.Join(test.content, "")) + require.NoError(t, err) + require.Equal(t, strings.Join(test.content, ""), string(content)) - c.Assert(file.Close(), IsNil) + require.Nil(t, file.Close()) } compressTypeArr := []CompressType{Gzip} tests := 
[]testcase{ @@ -162,7 +163,7 @@ func (r *testStorageSuite) TestCompressReaderWriter(c *C) { for i := range tests { for _, compressType := range compressTypeArr { tests[i].compressType = compressType - testFn(&tests[i], c) + testFn(&tests[i], t) } } }
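Both writer tests (like the S3 table tests above) drive their cases through `testFn(&tests[i], t)` and rely on `t.Log(test.name)` to identify a failing case. Wrapping each case in `t.Run` would report every case under its own name and keep one failure from hiding the rest; a self-contained sketch of that shape, with illustrative `testcase` contents:

```go
package storage

import (
	"strings"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestTableDrivenSubtestsSketch(t *testing.T) {
	type testcase struct {
		name    string
		content []string
	}
	tests := []testcase{
		{name: "short and sweet", content: []string{"hi"}},
		{name: "multiple writes", content: []string{"a", "bb", "ccc"}},
	}
	for i := range tests {
		test := &tests[i]
		// Each case runs and reports as its own named subtest,
		// making the t.Log(test.name) breadcrumb unnecessary.
		t.Run(test.name, func(t *testing.T) {
			joined := strings.Join(test.content, "")
			total := 0
			for _, s := range test.content {
				total += len(s)
			}
			require.Equal(t, total, len(joined))
		})
	}
}
```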