diff --git a/.changelog/27811.txt b/.changelog/27811.txt new file mode 100644 index 000000000000..efd85ca5d5c3 --- /dev/null +++ b/.changelog/27811.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_datasync_task: Add `object_tags` attribute to `options` configuration block +``` + +```release-note:bug +resource/aws_datasync_location_object_storage: Don't ignore `server_certificate` argument +``` \ No newline at end of file diff --git a/internal/service/datasync/location_efs.go b/internal/service/datasync/location_efs.go index a35de54d0c26..61decc9a9a26 100644 --- a/internal/service/datasync/location_efs.go +++ b/internal/service/datasync/location_efs.go @@ -168,7 +168,7 @@ func resourceLocationEFSRead(ctx context.Context, d *schema.ResourceData, meta i return sdkdiag.AppendErrorf(diags, "reading DataSync Location EFS (%s): %s", d.Id(), err) } - subdirectory, err := SubdirectoryFromLocationURI(aws.StringValue(output.LocationUri)) + subdirectory, err := subdirectoryFromLocationURI(aws.StringValue(output.LocationUri)) if err != nil { return sdkdiag.AppendErrorf(diags, "reading DataSync Location EFS (%s): %s", d.Id(), err) diff --git a/internal/service/datasync/location_fsx_lustre_file_system.go b/internal/service/datasync/location_fsx_lustre_file_system.go index 5c3f6b65a39f..4ce0ec3f66bf 100644 --- a/internal/service/datasync/location_fsx_lustre_file_system.go +++ b/internal/service/datasync/location_fsx_lustre_file_system.go @@ -134,7 +134,7 @@ func resourceLocationFSxLustreFileSystemRead(ctx context.Context, d *schema.Reso return sdkdiag.AppendErrorf(diags, "reading DataSync Location Fsx Lustre (%s): %s", d.Id(), err) } - subdirectory, err := SubdirectoryFromLocationURI(aws.StringValue(output.LocationUri)) + subdirectory, err := subdirectoryFromLocationURI(aws.StringValue(output.LocationUri)) if err != nil { return sdkdiag.AppendErrorf(diags, "reading DataSync Location Fsx Lustre (%s): %s", d.Id(), err) diff --git 
a/internal/service/datasync/location_fsx_openzfs_file_system.go b/internal/service/datasync/location_fsx_openzfs_file_system.go index c66c44aa1a08..f3f0017ec05d 100644 --- a/internal/service/datasync/location_fsx_openzfs_file_system.go +++ b/internal/service/datasync/location_fsx_openzfs_file_system.go @@ -172,7 +172,7 @@ func resourceLocationFSxOpenZFSFileSystemRead(ctx context.Context, d *schema.Res return sdkdiag.AppendErrorf(diags, "reading DataSync Location Fsx OpenZfs (%s): %s", d.Id(), err) } - subdirectory, err := SubdirectoryFromLocationURI(aws.StringValue(output.LocationUri)) + subdirectory, err := subdirectoryFromLocationURI(aws.StringValue(output.LocationUri)) if err != nil { return sdkdiag.AppendErrorf(diags, "reading DataSync Location Fsx OpenZfs (%s): %s", d.Id(), err) diff --git a/internal/service/datasync/location_fsx_windows_file_system.go b/internal/service/datasync/location_fsx_windows_file_system.go index c7075dec0e62..87e0cc2ced10 100644 --- a/internal/service/datasync/location_fsx_windows_file_system.go +++ b/internal/service/datasync/location_fsx_windows_file_system.go @@ -162,7 +162,7 @@ func resourceLocationFSxWindowsFileSystemRead(ctx context.Context, d *schema.Res return sdkdiag.AppendErrorf(diags, "reading DataSync Location Fsx Windows (%s): %s", d.Id(), err) } - subdirectory, err := SubdirectoryFromLocationURI(aws.StringValue(output.LocationUri)) + subdirectory, err := subdirectoryFromLocationURI(aws.StringValue(output.LocationUri)) if err != nil { return sdkdiag.AppendErrorf(diags, "reading DataSync Location Fsx Windows (%s): %s", d.Id(), err) diff --git a/internal/service/datasync/location_hdfs.go b/internal/service/datasync/location_hdfs.go index 5ea6805a5498..55b1415325e9 100644 --- a/internal/service/datasync/location_hdfs.go +++ b/internal/service/datasync/location_hdfs.go @@ -224,7 +224,7 @@ func resourceLocationHDFSRead(ctx context.Context, d *schema.ResourceData, meta return sdkdiag.AppendErrorf(diags, "reading DataSync 
Location HDFS (%s): %s", d.Id(), err) } - subdirectory, err := SubdirectoryFromLocationURI(aws.StringValue(output.LocationUri)) + subdirectory, err := subdirectoryFromLocationURI(aws.StringValue(output.LocationUri)) if err != nil { return sdkdiag.AppendErrorf(diags, "reading DataSync Location HDFS (%s): %s", d.Id(), err) diff --git a/internal/service/datasync/location_nfs.go b/internal/service/datasync/location_nfs.go index eb54aaeaf301..eb2aee2893f6 100644 --- a/internal/service/datasync/location_nfs.go +++ b/internal/service/datasync/location_nfs.go @@ -151,7 +151,7 @@ func resourceLocationNFSRead(ctx context.Context, d *schema.ResourceData, meta i return sdkdiag.AppendErrorf(diags, "reading DataSync Location NFS (%s): %s", d.Id(), err) } - subdirectory, err := SubdirectoryFromLocationURI(aws.StringValue(output.LocationUri)) + subdirectory, err := subdirectoryFromLocationURI(aws.StringValue(output.LocationUri)) if err != nil { return sdkdiag.AppendErrorf(diags, "reading DataSync Location NFS (%s): %s", d.Id(), err) diff --git a/internal/service/datasync/location_object_storage.go b/internal/service/datasync/location_object_storage.go index f47e6103cc38..9d88e386b2da 100644 --- a/internal/service/datasync/location_object_storage.go +++ b/internal/service/datasync/location_object_storage.go @@ -132,7 +132,7 @@ func resourceLocationObjectStorageCreate(ctx context.Context, d *schema.Resource input.SecretKey = aws.String(v.(string)) } - if v, ok := d.GetOk("server_certficate"); ok { + if v, ok := d.GetOk("server_certificate"); ok { input.ServerCertificate = []byte(v.(string)) } @@ -163,31 +163,22 @@ func resourceLocationObjectStorageRead(ctx context.Context, d *schema.ResourceDa return sdkdiag.AppendErrorf(diags, "reading DataSync Location Object Storage (%s): %s", d.Id(), err) } - subdirectory, err := SubdirectoryFromLocationURI(aws.StringValue(output.LocationUri)) + uri := aws.StringValue(output.LocationUri) + hostname, bucketName, subdirectory, err := 
decodeObjectStorageURI(uri) if err != nil { - return sdkdiag.AppendErrorf(diags, "parsing DataSync Location Object Storage (%s) location URI: %s", d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } - d.Set("agent_arns", flex.FlattenStringSet(output.AgentArns)) - d.Set("arn", output.LocationArn) - d.Set("server_protocol", output.ServerProtocol) - d.Set("subdirectory", subdirectory) d.Set("access_key", output.AccessKey) - d.Set("server_port", output.ServerPort) + d.Set("agent_arns", aws.StringValueSlice(output.AgentArns)) + d.Set("arn", output.LocationArn) + d.Set("bucket_name", bucketName) d.Set("server_certificate", string(output.ServerCertificate)) - - uri := aws.StringValue(output.LocationUri) - - hostname, bucketName, err := decodeObjectStorageURI(uri) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "parsing DataSync Location Object Storage (%s) object-storage URI: %s", d.Id(), err) - } - d.Set("server_hostname", hostname) - d.Set("bucket_name", bucketName) - + d.Set("server_port", output.ServerPort) + d.Set("server_protocol", output.ServerProtocol) + d.Set("subdirectory", subdirectory) d.Set("uri", uri) return diags @@ -197,19 +188,11 @@ func resourceLocationObjectStorageUpdate(ctx context.Context, d *schema.Resource var diags diag.Diagnostics conn := meta.(*conns.AWSClient).DataSyncConn(ctx) - if d.HasChangesExcept("tags_all", "tags") { + if d.HasChangesExcept("tags", "tags_all") { input := &datasync.UpdateLocationObjectStorageInput{ LocationArn: aws.String(d.Id()), } - if d.HasChange("server_protocol") { - input.ServerProtocol = aws.String(d.Get("server_protocol").(string)) - } - - if d.HasChange("server_port") { - input.ServerPort = aws.Int64(int64(d.Get("server_port").(int))) - } - if d.HasChange("access_key") { input.AccessKey = aws.String(d.Get("access_key").(string)) } @@ -218,12 +201,20 @@ func resourceLocationObjectStorageUpdate(ctx context.Context, d *schema.Resource input.SecretKey = aws.String(d.Get("secret_key").(string)) } - if 
d.HasChange("subdirectory") { - input.Subdirectory = aws.String(d.Get("subdirectory").(string)) + if d.HasChange("server_certificate") { + input.ServerCertificate = []byte(d.Get("server_certificate").(string)) } - if d.HasChange("server_certficate") { - input.ServerCertificate = []byte(d.Get("server_certficate").(string)) + if d.HasChange("server_port") { + input.ServerPort = aws.Int64(int64(d.Get("server_port").(int))) + } + + if d.HasChange("server_protocol") { + input.ServerProtocol = aws.String(d.Get("server_protocol").(string)) + } + + if d.HasChange("subdirectory") { + input.Subdirectory = aws.String(d.Get("subdirectory").(string)) } _, err := conn.UpdateLocationObjectStorageWithContext(ctx, input) @@ -258,17 +249,17 @@ func resourceLocationObjectStorageDelete(ctx context.Context, d *schema.Resource return diags } -func decodeObjectStorageURI(uri string) (string, string, error) { +func decodeObjectStorageURI(uri string) (string, string, string, error) { prefix := "object-storage://" if !strings.HasPrefix(uri, prefix) { - return "", "", fmt.Errorf("incorrect uri format needs to start with %s", prefix) + return "", "", "", fmt.Errorf("incorrect uri format needs to start with %s", prefix) } trimmedUri := strings.TrimPrefix(uri, prefix) uriParts := strings.Split(trimmedUri, "/") if len(uri) < 2 { - return "", "", fmt.Errorf("incorrect uri format needs to start with %sSERVER-NAME/BUCKET-NAME/SUBDIRECTORY", prefix) + return "", "", "", fmt.Errorf("incorrect uri format needs to start with %sSERVER-NAME/BUCKET-NAME/SUBDIRECTORY", prefix) } - return uriParts[0], uriParts[1], nil + return uriParts[0], uriParts[1], "/" + strings.Join(uriParts[2:], "/"), nil } diff --git a/internal/service/datasync/location_object_storage_test.go b/internal/service/datasync/location_object_storage_test.go index 5c22284efed8..dc3ea896edce 100644 --- a/internal/service/datasync/location_object_storage_test.go +++ b/internal/service/datasync/location_object_storage_test.go @@ -2,12 +2,10 @@ 
package datasync_test import ( "context" - "errors" "fmt" "regexp" "testing" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/datasync" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -20,10 +18,10 @@ import ( func TestAccDataSyncLocationObjectStorage_basic(t *testing.T) { ctx := acctest.Context(t) - var locationObjectStorage1 datasync.DescribeLocationObjectStorageOutput - + var locationObjectStorage datasync.DescribeLocationObjectStorageOutput resourceName := "aws_datasync_location_object_storage.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + domain := acctest.RandomDomainName() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, @@ -32,15 +30,21 @@ func TestAccDataSyncLocationObjectStorage_basic(t *testing.T) { CheckDestroy: testAccCheckLocationObjectStorageDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLocationObjectStorageConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckLocationObjectStorageExists(ctx, resourceName, &locationObjectStorage1), - acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "datasync", regexp.MustCompile(`location/loc-.+`)), + Config: testAccLocationObjectStorageConfig_basic(rName, domain), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationObjectStorageExists(ctx, resourceName, &locationObjectStorage), + resource.TestCheckResourceAttr(resourceName, "access_key", ""), resource.TestCheckResourceAttr(resourceName, "agent_arns.#", "1"), - resource.TestCheckResourceAttr(resourceName, "server_hostname", rName), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "datasync", regexp.MustCompile(`location/loc-.+`)), resource.TestCheckResourceAttr(resourceName, "bucket_name", rName), + resource.TestCheckNoResourceAttr(resourceName, "secret_key"), + 
resource.TestCheckResourceAttr(resourceName, "server_certificate", ""), + resource.TestCheckResourceAttr(resourceName, "server_hostname", domain), + resource.TestCheckResourceAttr(resourceName, "server_port", "8080"), + resource.TestCheckResourceAttr(resourceName, "server_protocol", "HTTP"), + resource.TestCheckResourceAttr(resourceName, "subdirectory", "/"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), - resource.TestMatchResourceAttr(resourceName, "uri", regexp.MustCompile(`^object-storage://.+/`)), + resource.TestCheckResourceAttr(resourceName, "uri", fmt.Sprintf("object-storage://%s/%s/", domain, rName)), ), }, { @@ -54,9 +58,10 @@ func TestAccDataSyncLocationObjectStorage_basic(t *testing.T) { func TestAccDataSyncLocationObjectStorage_disappears(t *testing.T) { ctx := acctest.Context(t) - var locationObjectStorage1 datasync.DescribeLocationObjectStorageOutput + var locationObjectStorage datasync.DescribeLocationObjectStorageOutput resourceName := "aws_datasync_location_object_storage.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + domain := acctest.RandomDomainName() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, @@ -65,10 +70,9 @@ func TestAccDataSyncLocationObjectStorage_disappears(t *testing.T) { CheckDestroy: testAccCheckLocationObjectStorageDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLocationObjectStorageConfig_basic(rName), + Config: testAccLocationObjectStorageConfig_basic(rName, domain), Check: resource.ComposeTestCheckFunc( - testAccCheckLocationObjectStorageExists(ctx, resourceName, &locationObjectStorage1), - acctest.CheckResourceDisappears(ctx, acctest.Provider, tfdatasync.ResourceLocationObjectStorage(), resourceName), + testAccCheckLocationObjectStorageExists(ctx, resourceName, &locationObjectStorage), acctest.CheckResourceDisappears(ctx, acctest.Provider, tfdatasync.ResourceLocationObjectStorage(), resourceName), ), 
ExpectNonEmptyPlan: true, @@ -79,9 +83,10 @@ func TestAccDataSyncLocationObjectStorage_disappears(t *testing.T) { func TestAccDataSyncLocationObjectStorage_tags(t *testing.T) { ctx := acctest.Context(t) - var locationObjectStorage1, locationObjectStorage2, locationObjectStorage3 datasync.DescribeLocationObjectStorageOutput + var locationObjectStorage datasync.DescribeLocationObjectStorageOutput resourceName := "aws_datasync_location_object_storage.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + domain := acctest.RandomDomainName() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, @@ -90,9 +95,9 @@ func TestAccDataSyncLocationObjectStorage_tags(t *testing.T) { CheckDestroy: testAccCheckLocationObjectStorageDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLocationObjectStorageConfig_tags1(rName, "key1", "value1"), + Config: testAccLocationObjectStorageConfig_tags1(rName, domain, "key1", "value1"), Check: resource.ComposeTestCheckFunc( - testAccCheckLocationObjectStorageExists(ctx, resourceName, &locationObjectStorage1), + testAccCheckLocationObjectStorageExists(ctx, resourceName, &locationObjectStorage), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), ), @@ -103,20 +108,18 @@ func TestAccDataSyncLocationObjectStorage_tags(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccLocationObjectStorageConfig_tags2(rName, "key1", "value1updated", "key2", "value2"), + Config: testAccLocationObjectStorageConfig_tags2(rName, domain, "key1", "value1updated", "key2", "value2"), Check: resource.ComposeTestCheckFunc( - testAccCheckLocationObjectStorageExists(ctx, resourceName, &locationObjectStorage2), - testAccCheckLocationObjectStorageNotRecreated(&locationObjectStorage1, &locationObjectStorage2), + testAccCheckLocationObjectStorageExists(ctx, resourceName, &locationObjectStorage), 
resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), ), }, { - Config: testAccLocationObjectStorageConfig_tags1(rName, "key1", "value1"), + Config: testAccLocationObjectStorageConfig_tags1(rName, domain, "key1", "value1"), Check: resource.ComposeTestCheckFunc( - testAccCheckLocationObjectStorageExists(ctx, resourceName, &locationObjectStorage3), - testAccCheckLocationObjectStorageNotRecreated(&locationObjectStorage2, &locationObjectStorage3), + testAccCheckLocationObjectStorageExists(ctx, resourceName, &locationObjectStorage), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), ), @@ -125,6 +128,43 @@ func TestAccDataSyncLocationObjectStorage_tags(t *testing.T) { }) } +func TestAccDataSyncLocationObjectStorage_serverCertificate(t *testing.T) { + ctx := acctest.Context(t) + var locationObjectStorage datasync.DescribeLocationObjectStorageOutput + resourceName := "aws_datasync_location_object_storage.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + domain := acctest.RandomDomainName() + caKey := acctest.TLSRSAPrivateKeyPEM(t, 2048) + caCertificate := acctest.TLSRSAX509SelfSignedCACertificatePEM(t, caKey) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, datasync.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckLocationObjectStorageDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccLocationObjectStorageConfig_serverCertificate(rName, domain, caCertificate), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckLocationObjectStorageExists(ctx, resourceName, &locationObjectStorage), + resource.TestCheckResourceAttr(resourceName, 
"bucket_name", rName), + resource.TestCheckResourceAttr(resourceName, "server_certificate", caCertificate), + resource.TestCheckResourceAttr(resourceName, "server_hostname", domain), + resource.TestCheckResourceAttr(resourceName, "server_port", "443"), + resource.TestCheckResourceAttr(resourceName, "server_protocol", "HTTPS"), + resource.TestCheckResourceAttr(resourceName, "subdirectory", "/test/"), + resource.TestCheckResourceAttr(resourceName, "uri", fmt.Sprintf("object-storage://%s/%s/test/", domain, rName)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccCheckLocationObjectStorageDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncConn(ctx) @@ -144,48 +184,35 @@ func testAccCheckLocationObjectStorageDestroy(ctx context.Context) resource.Test return err } - return fmt.Errorf("DataSync Task %s still exists", rs.Primary.ID) + return fmt.Errorf("DataSync Location Object Storage %s still exists", rs.Primary.ID) } return nil } } -func testAccCheckLocationObjectStorageExists(ctx context.Context, resourceName string, locationObjectStorage *datasync.DescribeLocationObjectStorageOutput) resource.TestCheckFunc { +func testAccCheckLocationObjectStorageExists(ctx context.Context, n string, v *datasync.DescribeLocationObjectStorageOutput) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceName] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Not found: %s", resourceName) + return fmt.Errorf("Not found: %s", n) } conn := acctest.Provider.Meta().(*conns.AWSClient).DataSyncConn(ctx) + output, err := tfdatasync.FindLocationObjectStorageByARN(ctx, conn, rs.Primary.ID) if err != nil { return err } - if output == nil { - return fmt.Errorf("Location %q does not exist", rs.Primary.ID) - } - - *locationObjectStorage = *output + *v 
= *output return nil } } -func testAccCheckLocationObjectStorageNotRecreated(i, j *datasync.DescribeLocationObjectStorageOutput) resource.TestCheckFunc { - return func(s *terraform.State) error { - if !aws.TimeValue(i.CreationTime).Equal(aws.TimeValue(j.CreationTime)) { - return errors.New("DataSync Location Object Storage was recreated") - } - - return nil - } -} - -func testAccLocationObjectStorageBaseConfig(rName string) string { +func testAccLocationObjectStorageConfig_base(rName string) string { return acctest.ConfigCompose( acctest.ConfigVPCWithSubnets(rName, 1), // Reference: https://docs.aws.amazon.com/datasync/latest/userguide/agent-requirements.html @@ -218,6 +245,7 @@ resource "aws_default_route_table" "test" { } resource "aws_security_group" "test" { + name = %[1]q vpc_id = aws_vpc.test.id egress { @@ -260,41 +288,56 @@ resource "aws_datasync_agent" "test" { `, rName)) } -func testAccLocationObjectStorageConfig_basic(rName string) string { - return acctest.ConfigCompose(testAccLocationObjectStorageBaseConfig(rName), fmt.Sprintf(` +func testAccLocationObjectStorageConfig_basic(rName, domain string) string { + return acctest.ConfigCompose(testAccLocationObjectStorageConfig_base(rName), fmt.Sprintf(` resource "aws_datasync_location_object_storage" "test" { agent_arns = [aws_datasync_agent.test.arn] - server_hostname = %[1]q + server_hostname = %[2]q bucket_name = %[1]q + server_protocol = "HTTP" + server_port = 8080 } -`, rName)) +`, rName, domain)) } -func testAccLocationObjectStorageConfig_tags1(rName, key1, value1 string) string { - return acctest.ConfigCompose(testAccLocationObjectStorageBaseConfig(rName), fmt.Sprintf(` +func testAccLocationObjectStorageConfig_tags1(rName, domain, key1, value1 string) string { + return acctest.ConfigCompose(testAccLocationObjectStorageConfig_base(rName), fmt.Sprintf(` resource "aws_datasync_location_object_storage" "test" { agent_arns = [aws_datasync_agent.test.arn] - server_hostname = %[1]q + server_hostname = %[2]q 
bucket_name = %[1]q tags = { - %[2]q = %[3]q + %[3]q = %[4]q } } -`, rName, key1, value1)) +`, rName, domain, key1, value1)) } -func testAccLocationObjectStorageConfig_tags2(rName, key1, value1, key2, value2 string) string { - return acctest.ConfigCompose(testAccLocationObjectStorageBaseConfig(rName), fmt.Sprintf(` +func testAccLocationObjectStorageConfig_tags2(rName, domain, key1, value1, key2, value2 string) string { + return acctest.ConfigCompose(testAccLocationObjectStorageConfig_base(rName), fmt.Sprintf(` resource "aws_datasync_location_object_storage" "test" { agent_arns = [aws_datasync_agent.test.arn] - server_hostname = %[1]q + server_hostname = %[2]q bucket_name = %[1]q tags = { - %[2]q = %[3]q - %[4]q = %[5]q + %[3]q = %[4]q + %[5]q = %[6]q } } -`, rName, key1, value1, key2, value2)) +`, rName, domain, key1, value1, key2, value2)) +} + +func testAccLocationObjectStorageConfig_serverCertificate(rName, domain, certificate string) string { + return acctest.ConfigCompose(testAccLocationObjectStorageConfig_base(rName), fmt.Sprintf(` +resource "aws_datasync_location_object_storage" "test" { + agent_arns = [aws_datasync_agent.test.arn] + server_hostname = %[2]q + bucket_name = %[1]q + subdirectory = "/test/" + + server_certificate = "%[3]s" +} +`, rName, domain, acctest.TLSPEMEscapeNewlines(certificate))) } diff --git a/internal/service/datasync/location_s3.go b/internal/service/datasync/location_s3.go index 388c29ad680b..911fa6f044cc 100644 --- a/internal/service/datasync/location_s3.go +++ b/internal/service/datasync/location_s3.go @@ -182,7 +182,7 @@ func resourceLocationS3Read(ctx context.Context, d *schema.ResourceData, meta in return sdkdiag.AppendErrorf(diags, "reading DataSync Location S3 (%s): %s", d.Id(), err) } - subdirectory, err := SubdirectoryFromLocationURI(aws.StringValue(output.LocationUri)) + subdirectory, err := subdirectoryFromLocationURI(aws.StringValue(output.LocationUri)) if err != nil { return sdkdiag.AppendErrorf(diags, "reading DataSync 
Location S3 (%s): %s", d.Id(), err) diff --git a/internal/service/datasync/location_smb.go b/internal/service/datasync/location_smb.go index b1accca48d13..3def2d573c60 100644 --- a/internal/service/datasync/location_smb.go +++ b/internal/service/datasync/location_smb.go @@ -159,7 +159,7 @@ func resourceLocationSMBRead(ctx context.Context, d *schema.ResourceData, meta i return sdkdiag.AppendErrorf(diags, "reading DataSync Location SMB (%s): %s", d.Id(), err) } - subdirectory, err := SubdirectoryFromLocationURI(aws.StringValue(output.LocationUri)) + subdirectory, err := subdirectoryFromLocationURI(aws.StringValue(output.LocationUri)) if err != nil { return sdkdiag.AppendErrorf(diags, "reading DataSync Location SMB (%s) tags: %s", d.Id(), err) diff --git a/internal/service/datasync/task.go b/internal/service/datasync/task.go index e241dbefa4cd..16be50c01018 100644 --- a/internal/service/datasync/task.go +++ b/internal/service/datasync/task.go @@ -130,6 +130,12 @@ func ResourceTask() *schema.Resource { Default: datasync.MtimePreserve, ValidateFunc: validation.StringInSlice(datasync.Mtime_Values(), false), }, + "object_tags": { + Type: schema.TypeString, + Optional: true, + Default: datasync.ObjectTagsPreserve, + ValidateFunc: validation.StringInSlice(datasync.ObjectTags_Values(), false), + }, "overwrite_mode": { Type: schema.TypeString, Optional: true, @@ -413,6 +419,7 @@ func flattenOptions(options *datasync.Options) []interface{} { "gid": aws.StringValue(options.Gid), "log_level": aws.StringValue(options.LogLevel), "mtime": aws.StringValue(options.Mtime), + "object_tags": aws.StringValue(options.ObjectTags), "overwrite_mode": aws.StringValue(options.OverwriteMode), "posix_permissions": aws.StringValue(options.PosixPermissions), "preserve_deleted_files": aws.StringValue(options.PreserveDeletedFiles), @@ -439,6 +446,7 @@ func expandOptions(l []interface{}) *datasync.Options { Gid: aws.String(m["gid"].(string)), LogLevel: aws.String(m["log_level"].(string)), Mtime: 
aws.String(m["mtime"].(string)), + ObjectTags: aws.String(m["object_tags"].(string)), OverwriteMode: aws.String(m["overwrite_mode"].(string)), PreserveDeletedFiles: aws.String(m["preserve_deleted_files"].(string)), PreserveDevices: aws.String(m["preserve_devices"].(string)), diff --git a/internal/service/datasync/task_test.go b/internal/service/datasync/task_test.go index 5ee888bfd13f..2f1ba1fd1f92 100644 --- a/internal/service/datasync/task_test.go +++ b/internal/service/datasync/task_test.go @@ -48,6 +48,7 @@ func TestAccDataSyncTask_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "options.0.gid", "INT_VALUE"), resource.TestCheckResourceAttr(resourceName, "options.0.log_level", "OFF"), resource.TestCheckResourceAttr(resourceName, "options.0.mtime", "PRESERVE"), + resource.TestCheckResourceAttr(resourceName, "options.0.object_tags", "PRESERVE"), resource.TestCheckResourceAttr(resourceName, "options.0.overwrite_mode", "ALWAYS"), resource.TestCheckResourceAttr(resourceName, "options.0.posix_permissions", "PRESERVE"), resource.TestCheckResourceAttr(resourceName, "options.0.preserve_deleted_files", "PRESERVE"), @@ -398,6 +399,44 @@ func TestAccDataSyncTask_DefaultSyncOptions_logLevel(t *testing.T) { }) } +func TestAccDataSyncTask_DefaultSyncOptions_objectTags(t *testing.T) { + ctx := acctest.Context(t) + var task1, task2 datasync.DescribeTaskOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_datasync_task.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, datasync.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTaskDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTaskConfig_defaultSyncOptionsObjectTags(rName, "NONE"), + Check: resource.ComposeTestCheckFunc( + testAccCheckTaskExists(ctx, resourceName, &task1), + 
resource.TestCheckResourceAttr(resourceName, "options.#", "1"), + resource.TestCheckResourceAttr(resourceName, "options.0.object_tags", "NONE"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccTaskConfig_defaultSyncOptionsObjectTags(rName, "PRESERVE"), + Check: resource.ComposeTestCheckFunc( + testAccCheckTaskExists(ctx, resourceName, &task2), + testAccCheckTaskNotRecreated(&task1, &task2), + resource.TestCheckResourceAttr(resourceName, "options.#", "1"), + resource.TestCheckResourceAttr(resourceName, "options.0.object_tags", "PRESERVE"), + ), + }, + }, + }) +} + func TestAccDataSyncTask_DefaultSyncOptions_overwriteMode(t *testing.T) { ctx := acctest.Context(t) var task1, task2 datasync.DescribeTaskOutput @@ -1234,6 +1273,23 @@ resource "aws_datasync_task" "test" { `, rName, logLevel)) } +func testAccTaskConfig_defaultSyncOptionsObjectTags(rName, objectTags string) string { + return acctest.ConfigCompose( + testAccTaskConfig_baseLocationS3(rName), + testAccTaskConfig_baseLocationNFS(rName), + fmt.Sprintf(` +resource "aws_datasync_task" "test" { + destination_location_arn = aws_datasync_location_s3.test.arn + name = %[1]q + source_location_arn = aws_datasync_location_nfs.test.arn + + options { + object_tags = %[2]q + } +} +`, rName, objectTags)) +} + func testAccTaskConfig_defaultSyncOptionsOverwriteMode(rName, overwriteMode string) string { return acctest.ConfigCompose( testAccTaskConfig_baseLocationS3(rName), diff --git a/internal/service/datasync/uri.go b/internal/service/datasync/uri.go index 182a02f19758..fd8597e38d52 100644 --- a/internal/service/datasync/uri.go +++ b/internal/service/datasync/uri.go @@ -8,14 +8,14 @@ import ( ) var ( - locationURIPattern = regexp.MustCompile(`^(efs|hdfs|nfs|s3|smb|fsx[a-z0-9]+|object-storage)://(.+)$`) + locationURIPattern = regexp.MustCompile(`^(efs|hdfs|nfs|s3|smb|fsx[a-z0-9]+)://(.+)$`) locationURIGlobalIDAndSubdirPattern = 
regexp.MustCompile(`^([a-zA-Z0-9.\-]+)(?::\d{0,5})?(/.*)$`) s3OutpostsAccessPointARNResourcePattern = regexp.MustCompile(`^outpost/.*/accesspoint/.*?(/.*)$`) ) -// SubdirectoryFromLocationURI extracts the subdirectory from a location URI. +// subdirectoryFromLocationURI extracts the subdirectory from a location URI. // https://docs.aws.amazon.com/datasync/latest/userguide/API_LocationListEntry.html#DataSync-Type-LocationListEntry-LocationUri -func SubdirectoryFromLocationURI(uri string) (string, error) { +func subdirectoryFromLocationURI(uri string) (string, error) { submatches := locationURIPattern.FindStringSubmatch(uri) if len(submatches) != 3 { diff --git a/internal/service/datasync/uri_test.go b/internal/service/datasync/uri_test.go index 3ec60b20e80b..a755f538d89c 100644 --- a/internal/service/datasync/uri_test.go +++ b/internal/service/datasync/uri_test.go @@ -1,10 +1,6 @@ -package datasync_test +package datasync -import ( - "testing" - - tfdatasync "github.com/hashicorp/terraform-provider-aws/internal/service/datasync" -) +import "testing" func TestSubdirectoryFromLocationURI(t *testing.T) { t.Parallel() @@ -156,9 +152,9 @@ func TestSubdirectoryFromLocationURI(t *testing.T) { ExpectedSubdirectory: "/my-folder-1/my-folder-2", }, { - TestName: "Object storage one level", - InputURI: "object-storage://tf-acc-test-5815577519131245007/tf-acc-test-5815577519131245008/", - ExpectedSubdirectory: "/tf-acc-test-5815577519131245008/", + TestName: "Object storage two levels", + InputURI: "object-storage://192.168.1.1/tf-acc-test-5815577519131245007/tf-acc-test-5815577519131245008/", + ExpectedError: true, }, } @@ -167,7 +163,7 @@ func TestSubdirectoryFromLocationURI(t *testing.T) { t.Run(testCase.TestName, func(t *testing.T) { t.Parallel() - got, err := tfdatasync.SubdirectoryFromLocationURI(testCase.InputURI) + got, err := subdirectoryFromLocationURI(testCase.InputURI) if err == nil && testCase.ExpectedError { t.Fatalf("expected error") @@ -183,3 +179,77 @@ func 
TestSubdirectoryFromLocationURI(t *testing.T) { }) } } + +func TestDecodeObjectStorageURI(t *testing.T) { + t.Parallel() + + testCases := []struct { + TestName string + InputURI string + ExpectedError bool + ExpectedHostname string + ExpectedBucketName string + ExpectedSubdirectory string + }{ + { + TestName: "empty URI", + InputURI: "", + ExpectedError: true, + }, + { + TestName: "S3 bucket URI top level", + InputURI: "s3://bucket/", + ExpectedError: true, + }, + { + TestName: "Object storage top level", + InputURI: "object-storage://tawn19fp.test/tf-acc-test-6405856757419817388/", + ExpectedHostname: "tawn19fp.test", + ExpectedBucketName: "tf-acc-test-6405856757419817388", + ExpectedSubdirectory: "/", + }, + { + TestName: "Object storage one level", + InputURI: "object-storage://tawn19fp.test/tf-acc-test-6405856757419817388/test", + ExpectedHostname: "tawn19fp.test", + ExpectedBucketName: "tf-acc-test-6405856757419817388", + ExpectedSubdirectory: "/test", + }, + { + TestName: "Object storage two levels", + InputURI: "object-storage://192.168.1.1/tf-acc-test-5815577519131245007/tf-acc-test-5815577519131245008/tf-acc-test-5815577519131245009/", + ExpectedHostname: "192.168.1.1", + ExpectedBucketName: "tf-acc-test-5815577519131245007", + ExpectedSubdirectory: "/tf-acc-test-5815577519131245008/tf-acc-test-5815577519131245009/", + }, + } + + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.TestName, func(t *testing.T) { + t.Parallel() + + gotHostname, gotBucketName, gotSubdirectory, err := decodeObjectStorageURI(testCase.InputURI) + + if err == nil && testCase.ExpectedError { + t.Fatalf("expected error") + } + + if err != nil && !testCase.ExpectedError { + t.Fatalf("unexpected error: %s", err) + } + + if gotHostname != testCase.ExpectedHostname { + t.Errorf("hostname %s, expected %s", gotHostname, testCase.ExpectedHostname) + } + + if gotBucketName != testCase.ExpectedBucketName { + t.Errorf("bucketName %s, expected %s", gotBucketName, 
testCase.ExpectedBucketName) + } + + if gotSubdirectory != testCase.ExpectedSubdirectory { + t.Errorf("subdirectory %s, expected %s", gotSubdirectory, testCase.ExpectedSubdirectory) + } + }) + } +} diff --git a/website/docs/r/datasync_location_object_storage.html.markdown b/website/docs/r/datasync_location_object_storage.html.markdown index a3feed1c265f..87a32961fea3 100644 --- a/website/docs/r/datasync_location_object_storage.html.markdown +++ b/website/docs/r/datasync_location_object_storage.html.markdown @@ -30,7 +30,7 @@ The following arguments are supported: * `access_key` - (Optional) The access key is used if credentials are required to access the self-managed object storage server. If your object storage requires a user name and password to authenticate, use `access_key` and `secret_key` to provide the user name and password, respectively. * `bucket_name` - (Required) The bucket on the self-managed object storage server that is used to read data from. * `secret_key` - (Optional) The secret key is used if credentials are required to access the self-managed object storage server. If your object storage requires a user name and password to authenticate, use `access_key` and `secret_key` to provide the user name and password, respectively. -* `server_certificate` - (Optional) Specifies a certificate to authenticate with an object storage system that uses a private or self-signed certificate authority (CA). You must specify a Base64-encoded .pem file (for example, file:///home/user/.ssh/storage_sys_certificate.pem). The certificate can be up to 32768 bytes (before Base64 encoding). +* `server_certificate` - (Optional) Specifies a certificate to authenticate with an object storage system that uses a private or self-signed certificate authority (CA). You must specify a Base64-encoded .pem string. The certificate can be up to 32768 bytes (before Base64 encoding). * `server_hostname` - (Required) The name of the self-managed object storage server. 
This value is the IP address or Domain Name Service (DNS) name of the object storage server. An agent uses this host name to mount the object storage server in a network. * `server_protocol` - (Optional) The protocol that the object storage server uses to communicate. Valid values are `HTTP` or `HTTPS`. * `server_port` - (Optional) The port that your self-managed object storage server accepts inbound network traffic on. The server port is set by default to TCP 80 (`HTTP`) or TCP 443 (`HTTPS`). You can specify a custom port if your self-managed object storage server requires one. diff --git a/website/docs/r/datasync_task.html.markdown b/website/docs/r/datasync_task.html.markdown index fe4684244dd5..126b8c52691e 100644 --- a/website/docs/r/datasync_task.html.markdown +++ b/website/docs/r/datasync_task.html.markdown @@ -83,11 +83,12 @@ The following arguments are supported inside the `options` configuration block: * `gid` - (Optional) Group identifier of the file's owners. Valid values: `BOTH`, `INT_VALUE`, `NAME`, `NONE`. Default: `INT_VALUE` (preserve integer value of the ID). * `log_level` - (Optional) Determines the type of logs that DataSync publishes to a log stream in the Amazon CloudWatch log group that you provide. Valid values: `OFF`, `BASIC`, `TRANSFER`. Default: `OFF`. * `mtime` - (Optional) A file metadata that indicates the last time a file was modified (written to) before the sync `PREPARING` phase. Value values: `NONE`, `PRESERVE`. Default: `PRESERVE`. +* `object_tags` - (Optional) Specifies whether object tags are maintained when transferring between object storage systems. If you want your DataSync task to ignore object tags, specify the `NONE` value. Valid values: `PRESERVE`, `NONE`. Default value: `PRESERVE`. * `overwrite_mode` - (Optional) Determines whether files at the destination should be overwritten or preserved when copying files. Valid values: `ALWAYS`, `NEVER`. Default: `ALWAYS`.
* `posix_permissions` - (Optional) Determines which users or groups can access a file for a specific purpose such as reading, writing, or execution of the file. Valid values: `NONE`, `PRESERVE`. Default: `PRESERVE`. * `preserve_deleted_files` - (Optional) Whether files deleted in the source should be removed or preserved in the destination file system. Valid values: `PRESERVE`, `REMOVE`. Default: `PRESERVE`. * `preserve_devices` - (Optional) Whether the DataSync Task should preserve the metadata of block and character devices in the source files system, and recreate the files with that device name and metadata on the destination. The DataSync Task can’t sync the actual contents of such devices, because many of the devices are non-terminal and don’t return an end of file (EOF) marker. Valid values: `NONE`, `PRESERVE`. Default: `NONE` (ignore special devices). -* `security_descriptor_copy_flags` - (Optional) Determines which components of the SMB security descriptor are copied from source to destination objects. This value is only used for transfers between SMB and Amazon FSx for Windows File Server locations, or between two Amazon FSx for Windows File Server locations. Valid values: `NONE`, `OWNER_DACL`, `OWNER_DACL_SACL`. +* `security_descriptor_copy_flags` - (Optional) Determines which components of the SMB security descriptor are copied from source to destination objects. This value is only used for transfers between SMB and Amazon FSx for Windows File Server locations, or between two Amazon FSx for Windows File Server locations. Valid values: `NONE`, `OWNER_DACL`, `OWNER_DACL_SACL`. Default: `OWNER_DACL`. * `task_queueing` - (Optional) Determines whether tasks should be queued before executing the tasks. Valid values: `ENABLED`, `DISABLED`. Default `ENABLED`. 
* `transfer_mode` - (Optional) Determines whether DataSync transfers only the data and metadata that differ between the source and the destination location, or whether DataSync transfers all the content from the source, without comparing to the destination location. Valid values: `CHANGED`, `ALL`. Default: `CHANGED` * `uid` - (Optional) User identifier of the file's owners. Valid values: `BOTH`, `INT_VALUE`, `NAME`, `NONE`. Default: `INT_VALUE` (preserve integer value of the ID).