> readToFileWithResponse(String filePath, Fi
.map(response -> new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response)));
}
+ /**
+ * Reads the entire file into a file specified by the path.
+ *
+ * By default the file will be created and must not exist, if the file already exists a
+ * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
+ * {@link OpenOption OpenOptions}
+ *
+ * Code Samples
+ *
+ *
+ *
+ * ReadToFileOptions options = new ReadToFileOptions();
+ * options.setFilePath(file);
+ * options.setRange(new FileRange(1024, 2048L));
+ * options.setDownloadRetryOptions(new DownloadRetryOptions().setMaxRetryRequests(5));
+ * options.setOpenOptions(new HashSet<>(Arrays.asList(StandardOpenOption.CREATE_NEW,
+ * StandardOpenOption.WRITE, StandardOpenOption.READ))); //Default options
+ * options.setParallelTransferOptions(new ParallelTransferOptions().setBlockSizeLong(4L * Constants.MB));
+ * options.setDataLakeRequestConditions(null);
+ * options.setRangeGetContentMd5(false);
+ *
+ * client.readToFileWithResponse(options)
+ * .subscribe(response -> System.out.println("Completed download to file"));
+ *
+ *
+ *
+ * For more information, see the
+ * Azure Docs
+ *
+ * @param options {@link ReadToFileOptions}
+ * @return A reactive response containing the file properties and metadata.
+ * @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 100MB.
+ * @throws UncheckedIOException If an I/O error occurs.
+ */
+ @ServiceMethod(returns = ReturnType.SINGLE)
+ public Mono> readToFileWithResponse(ReadToFileOptions options) {
+ Context context = BuilderHelper.addUpnHeader(() -> (options == null) ? null : options.isUserPrincipalName(), null);
+
+ return blockBlobAsyncClient.downloadToFileWithResponse(new BlobDownloadToFileOptions(options.getFilePath())
+ .setRange(Transforms.toBlobRange(options.getRange())).setParallelTransferOptions(options.getParallelTransferOptions())
+ .setDownloadRetryOptions(Transforms.toBlobDownloadRetryOptions(options.getDownloadRetryOptions()))
+ .setRequestConditions(Transforms.toBlobRequestConditions(options.getDataLakeRequestConditions()))
+ .setRetrieveContentRangeMd5(options.isRangeGetContentMd5()).setOpenOptions(options.getOpenOptions()))
+ .contextWrite(FluxUtil.toReactorContext(context))
+ .onErrorMap(DataLakeImplUtils::transformBlobStorageException)
+ .map(response -> new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response)));
+ }
+
/**
* Moves the file to another location within the file system.
* For more information see the
diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeFileClient.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeFileClient.java
index 604a54c7a374e..13db4f0742af3 100644
--- a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeFileClient.java
+++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeFileClient.java
@@ -30,6 +30,7 @@
import com.azure.storage.common.implementation.StorageImplUtils;
import com.azure.storage.common.implementation.UploadUtils;
import com.azure.storage.file.datalake.implementation.models.InternalDataLakeFileOpenInputStreamResult;
+import com.azure.storage.file.datalake.implementation.util.BuilderHelper;
import com.azure.storage.file.datalake.implementation.util.DataLakeImplUtils;
import com.azure.storage.file.datalake.implementation.util.ModelHelper;
import com.azure.storage.file.datalake.models.CustomerProvidedKey;
@@ -52,6 +53,7 @@
import com.azure.storage.file.datalake.options.FileParallelUploadOptions;
import com.azure.storage.file.datalake.options.FileQueryOptions;
import com.azure.storage.file.datalake.options.FileScheduleDeletionOptions;
+import com.azure.storage.file.datalake.options.ReadToFileOptions;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
@@ -1135,6 +1137,8 @@ public DataLakeFileOpenInputStreamResult openInputStream(DataLakeFileInputStream
* @throws DataLakeStorageException If a storage service error occurred.
*/
public DataLakeFileOpenInputStreamResult openInputStream(DataLakeFileInputStreamOptions options, Context context) {
+ context = BuilderHelper.addUpnHeader(() -> (options == null) ? null : options.isUserPrincipalName(), context);
+
BlobInputStreamOptions convertedOptions = Transforms.toBlobInputStreamOptions(options);
BlobInputStream inputStream = blockBlobClient.openInputStream(convertedOptions, context);
return new InternalDataLakeFileOpenInputStreamResult(inputStream,
@@ -1212,6 +1216,33 @@ public PathProperties readToFile(String filePath) {
return readToFile(filePath, false);
}
+ /**
+ * Reads the entire file into a file specified by the path.
+ *
+ * The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
+ * will be thrown.
+ *
+ * Code Samples
+ *
+ *
+ *
+ * client.readToFile(new ReadToFileOptions().setFilePath(file));
+ * System.out.println("Completed download to file");
+ *
+ *
+ *
+ * For more information, see the
+ * Azure Docs
+ *
+ * @param options {@link ReadToFileOptions}
+ * @return The file properties and metadata.
+ * @throws UncheckedIOException If an I/O error occurs
+ */
+ @ServiceMethod(returns = ReturnType.SINGLE)
+ public PathProperties readToFile(ReadToFileOptions options) {
+ return readToFileWithResponse(options, null, Context.NONE).getValue();
+ }
+
/**
* Reads the entire file into a file specified by the path.
*
@@ -1302,7 +1333,57 @@ public Response readToFileWithResponse(String filePath, FileRang
.setRequestConditions(Transforms.toBlobRequestConditions(requestConditions))
.setRetrieveContentRangeMd5(rangeGetContentMd5).setOpenOptions(openOptions), timeout,
context);
- return new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue()));
+ return new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response));
+ }, LOGGER);
+ }
+
+ /**
+ * Reads the entire file into a file specified by the path.
+ *
+ * By default the file will be created and must not exist, if the file already exists a
+ * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
+ * {@link OpenOption OpenOptions}
+ *
+ * Code Samples
+ *
+ *
+ *
+ * ReadToFileOptions options = new ReadToFileOptions();
+ * options.setFilePath(file);
+ * options.setRange(new FileRange(1024, 2048L));
+ * options.setDownloadRetryOptions(new DownloadRetryOptions().setMaxRetryRequests(5));
+ * options.setOpenOptions(new HashSet<>(Arrays.asList(StandardOpenOption.CREATE_NEW,
+ * StandardOpenOption.WRITE, StandardOpenOption.READ))); //Default options
+ * options.setParallelTransferOptions(new ParallelTransferOptions().setBlockSizeLong(4L * Constants.MB));
+ * options.setDataLakeRequestConditions(null);
+ * options.setRangeGetContentMd5(false);
+ *
+ * client.readToFileWithResponse(options, timeout, new Context(key2, value2));
+ * System.out.println("Completed download to file");
+ *
+ *
+ *
+ * @param options {@link ReadToFileOptions}
+ * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
+ * @param context Additional context that is passed through the Http pipeline during the service call.
+ * @return A response containing the file properties and metadata.
+ * @throws UncheckedIOException If an I/O error occurs.
+ */
+ @ServiceMethod(returns = ReturnType.SINGLE)
+ public Response readToFileWithResponse(ReadToFileOptions options, Duration timeout, Context context) {
+ context = BuilderHelper.addUpnHeader(() -> (options == null) ? null : options.isUserPrincipalName(), context);
+ Context finalContext = context;
+
+ return DataLakeImplUtils.returnOrConvertException(() -> {
+ Response response = blockBlobClient.downloadToFileWithResponse(
+ new BlobDownloadToFileOptions(options.getFilePath())
+ .setRange(Transforms.toBlobRange(options.getRange()))
+ .setParallelTransferOptions(options.getParallelTransferOptions())
+ .setDownloadRetryOptions(Transforms.toBlobDownloadRetryOptions(options.getDownloadRetryOptions()))
+ .setRequestConditions(Transforms.toBlobRequestConditions(options.getDataLakeRequestConditions()))
+ .setRetrieveContentRangeMd5(options.isRangeGetContentMd5())
+ .setOpenOptions(options.getOpenOptions()), timeout, finalContext);
+ return new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response));
}, LOGGER);
}
diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakePathAsyncClient.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakePathAsyncClient.java
index 39907c8438148..140584cc2ae27 100644
--- a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakePathAsyncClient.java
+++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakePathAsyncClient.java
@@ -38,6 +38,7 @@
import com.azure.storage.file.datalake.implementation.models.PathsSetAccessControlRecursiveHeaders;
import com.azure.storage.file.datalake.implementation.models.SetAccessControlRecursiveResponse;
import com.azure.storage.file.datalake.implementation.models.SourceModifiedAccessConditions;
+import com.azure.storage.file.datalake.implementation.util.BuilderHelper;
import com.azure.storage.file.datalake.implementation.util.DataLakeImplUtils;
import com.azure.storage.file.datalake.implementation.util.DataLakeSasImplUtil;
import com.azure.storage.file.datalake.implementation.util.ModelHelper;
@@ -62,6 +63,7 @@
import com.azure.storage.file.datalake.models.UserDelegationKey;
import com.azure.storage.file.datalake.options.DataLakePathCreateOptions;
import com.azure.storage.file.datalake.options.DataLakePathDeleteOptions;
+import com.azure.storage.file.datalake.options.PathGetPropertiesOptions;
import com.azure.storage.file.datalake.options.PathRemoveAccessControlRecursiveOptions;
import com.azure.storage.file.datalake.options.PathSetAccessControlRecursiveOptions;
import com.azure.storage.file.datalake.options.PathUpdateAccessControlRecursiveOptions;
@@ -874,6 +876,31 @@ public Mono getProperties() {
return getPropertiesWithResponse(null).flatMap(FluxUtil::toMono);
}
+ /**
+ * Returns the resource's metadata and properties.
+ *
+ * Code Samples
+ *
+ *
+ *
+ * PathGetPropertiesOptions options = new PathGetPropertiesOptions().setUserPrincipalName(true);
+ *
+ * client.getProperties(options).subscribe(response ->
+ * System.out.printf("Creation Time: %s, Size: %d%n", response.getCreationTime(), response.getFileSize()));
+ *
+ *
+ *
+ * For more information, see the
+ * Azure Docs
+ *
+ * @param options {@link PathGetPropertiesOptions}
+ * @return A reactive response containing the resource's properties and metadata.
+ */
+ @ServiceMethod(returns = ReturnType.SINGLE)
+ public Mono getProperties(PathGetPropertiesOptions options) {
+ return getPropertiesUsingOptionsWithResponse(options).flatMap(FluxUtil::toMono);
+ }
+
/**
* Returns the resource's metadata and properties.
*
@@ -902,6 +929,25 @@ public Mono> getPropertiesWithResponse(DataLakeRequestC
.map(response -> new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response)));
}
+ /**
+ * Returns the resource's metadata and properties.
+ *
+ * For more information, see the
+ * Azure Docs
+ *
+ * @param options {@link PathGetPropertiesOptions}
+ * @return A reactive response containing the resource's properties and metadata.
+ */
+ @ServiceMethod(returns = ReturnType.SINGLE)
+ private Mono> getPropertiesUsingOptionsWithResponse(PathGetPropertiesOptions options) {
+ Context context = BuilderHelper.addUpnHeader(() -> (options == null) ? null : options.isUserPrincipalName(), null);
+
+ return blockBlobAsyncClient.getPropertiesWithResponse(Transforms.toBlobRequestConditions(options.getRequestConditions()))
+ .contextWrite(FluxUtil.toReactorContext(context))
+ .onErrorMap(DataLakeImplUtils::transformBlobStorageException)
+ .map(response -> new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response)));
+ }
+
/**
* Determines if the path this client represents exists in the cloud.
* Note that this method does not guarantee that the path type (file/directory) matches expectations.
diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakePathClient.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakePathClient.java
index a8b3b7fac1292..5ed21113e680f 100644
--- a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakePathClient.java
+++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakePathClient.java
@@ -19,6 +19,7 @@
import com.azure.storage.file.datalake.implementation.models.CpkInfo;
import com.azure.storage.file.datalake.implementation.models.PathSetAccessControlRecursiveMode;
import com.azure.storage.file.datalake.implementation.util.DataLakeImplUtils;
+import com.azure.storage.file.datalake.implementation.util.BuilderHelper;
import com.azure.storage.file.datalake.models.AccessControlChangeResult;
import com.azure.storage.file.datalake.models.CustomerProvidedKey;
import com.azure.storage.file.datalake.models.DataLakeAclChangeFailedException;
@@ -34,6 +35,7 @@
import com.azure.storage.file.datalake.models.UserDelegationKey;
import com.azure.storage.file.datalake.options.DataLakePathCreateOptions;
import com.azure.storage.file.datalake.options.DataLakePathDeleteOptions;
+import com.azure.storage.file.datalake.options.PathGetPropertiesOptions;
import com.azure.storage.file.datalake.options.PathRemoveAccessControlRecursiveOptions;
import com.azure.storage.file.datalake.options.PathSetAccessControlRecursiveOptions;
import com.azure.storage.file.datalake.options.PathUpdateAccessControlRecursiveOptions;
@@ -1147,6 +1149,31 @@ public PathProperties getProperties() {
return getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
+ /**
+ * Returns the resource's metadata and properties.
+ *
+ * Code Samples
+ *
+ *
+ *
+ * PathGetPropertiesOptions options = new PathGetPropertiesOptions().setUserPrincipalName(true);
+ *
+ * System.out.printf("Creation Time: %s, Size: %d%n", client.getProperties(options).getCreationTime(),
+ * client.getProperties(options).getFileSize());
+ *
+ *
+ *
+ * For more information, see the
+ * Azure Docs
+ *
+ * @param options {@link PathGetPropertiesOptions}
+ * @return The resource properties and metadata.
+ */
+ @ServiceMethod(returns = ReturnType.SINGLE)
+ public PathProperties getProperties(PathGetPropertiesOptions options) {
+ return getPropertiesUsingOptionsWithResponse(options, null, Context.NONE).getValue();
+ }
+
/**
* Returns the resource's metadata and properties.
*
@@ -1182,6 +1209,31 @@ public Response getPropertiesWithResponse(DataLakeRequestConditi
}, LOGGER);
}
+ /**
+ * Returns the resource's metadata and properties.
+ *
+ * For more information, see the
+ * Azure Docs
+ *
+ * @param options {@link PathGetPropertiesOptions}
+ * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
+ * @param context Additional context that is passed through the Http pipeline during the service call.
+ * @return A response containing the resource properties and metadata.
+ */
+ @ServiceMethod(returns = ReturnType.SINGLE)
+ private Response getPropertiesUsingOptionsWithResponse(PathGetPropertiesOptions options, Duration timeout,
+ Context context) {
+ context = BuilderHelper.addUpnHeader(() -> (options == null) ? null : options.isUserPrincipalName(), context);
+ Context finalContext = context;
+
+ PathGetPropertiesOptions finalOptions = options;
+ return DataLakeImplUtils.returnOrConvertException(() -> {
+ Response response = blockBlobClient.getPropertiesWithResponse(
+ Transforms.toBlobRequestConditions(finalOptions.getRequestConditions()), timeout, finalContext);
+ return new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response));
+ }, LOGGER);
+ }
+
/**
* Gets if the path this client represents exists in the cloud.
* Note that this method does not guarantee that the path type (file/directory) matches expectations.
diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeServiceVersion.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeServiceVersion.java
index c59c1b53cb64f..1965e090323d9 100644
--- a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeServiceVersion.java
+++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeServiceVersion.java
@@ -112,7 +112,12 @@ public enum DataLakeServiceVersion implements ServiceVersion {
/**
* Service version {@code 2024-02-04}.
*/
- V2024_02_04("2024-02-04");
+ V2024_02_04("2024-02-04"),
+
+ /**
+ * Service version {@code 2024-05-04}.
+ */
+ V2024_05_04("2024-05-04");
private final String version;
@@ -134,6 +139,6 @@ public String getVersion() {
* @return the latest {@link DataLakeServiceVersion}
*/
public static DataLakeServiceVersion getLatest() {
- return V2024_02_04;
+ return V2024_05_04;
}
}
diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/Transforms.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/Transforms.java
index 1eeea285493ce..3a1df24ccc6a8 100644
--- a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/Transforms.java
+++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/Transforms.java
@@ -88,6 +88,7 @@
import com.azure.storage.file.datalake.models.LeaseStateType;
import com.azure.storage.file.datalake.models.LeaseStatusType;
import com.azure.storage.file.datalake.models.ListFileSystemsOptions;
+import com.azure.storage.file.datalake.models.PathAccessControlEntry;
import com.azure.storage.file.datalake.models.PathDeletedItem;
import com.azure.storage.file.datalake.models.PathHttpHeaders;
import com.azure.storage.file.datalake.models.PathItem;
@@ -126,6 +127,7 @@ class Transforms {
public static final HttpHeaderName X_MS_GROUP = HttpHeaderName.fromString("x-ms-group");
public static final HttpHeaderName X_MS_PERMISSIONS = HttpHeaderName.fromString("x-ms-permissions");
public static final HttpHeaderName X_MS_CONTINUATION = HttpHeaderName.fromString("x-ms-continuation");
+ public static final HttpHeaderName X_MS_ACL = HttpHeaderName.fromString("x-ms-acl");
static {
// https://docs.oracle.com/javase/8/docs/api/java/util/Date.html#getTime--
@@ -338,9 +340,10 @@ static PathProperties toPathProperties(BlobProperties properties, Response> r)
String owner = r.getHeaders().getValue(X_MS_OWNER);
String group = r.getHeaders().getValue(X_MS_GROUP);
String permissions = r.getHeaders().getValue(X_MS_PERMISSIONS);
+ String acl = r.getHeaders().getValue(X_MS_ACL);
return AccessorUtility.getPathPropertiesAccessor().setPathProperties(pathProperties,
- properties.getEncryptionScope(), encryptionContext, owner, group, permissions);
+ properties.getEncryptionScope(), encryptionContext, owner, group, permissions, acl);
}
}
}
@@ -452,10 +455,11 @@ static FileReadAsyncResponse toFileReadAsyncResponse(BlobDownloadAsyncResponse r
return null;
}
return new FileReadAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), r.getValue(),
- Transforms.toPathReadHeaders(r.getDeserializedHeaders(), r.getHeaders().getValue(X_MS_ENCRYPTION_CONTEXT)));
+ Transforms.toPathReadHeaders(r.getDeserializedHeaders(), r.getHeaders().getValue(X_MS_ENCRYPTION_CONTEXT),
+ r.getHeaders().getValue(X_MS_ACL)));
}
- private static FileReadHeaders toPathReadHeaders(BlobDownloadHeaders h, String encryptionContext) {
+ private static FileReadHeaders toPathReadHeaders(BlobDownloadHeaders h, String encryptionContext, String acl) {
if (h == null) {
return null;
}
@@ -491,7 +495,8 @@ private static FileReadHeaders toPathReadHeaders(BlobDownloadHeaders h, String e
.setContentCrc64(h.getContentCrc64())
.setErrorCode(h.getErrorCode())
.setCreationTime(h.getCreationTime())
- .setEncryptionContext(encryptionContext);
+ .setEncryptionContext(encryptionContext)
+ .setAccessControlList(PathAccessControlEntry.parseList(acl));
}
static List toBlobIdentifierList(List identifiers) {
diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/implementation/util/AccessorUtility.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/implementation/util/AccessorUtility.java
index d199cd3d98849..a81b335d94a20 100644
--- a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/implementation/util/AccessorUtility.java
+++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/implementation/util/AccessorUtility.java
@@ -26,7 +26,7 @@ private AccessorUtility() {
*/
public interface PathPropertiesAccessor {
PathProperties setPathProperties(PathProperties properties, String encryptionScope, String encryptionContext,
- String owner, String group, String permissions);
+ String owner, String group, String permissions, String acl);
}
/**
diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/implementation/util/BuilderHelper.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/implementation/util/BuilderHelper.java
index 5ce7459026ed7..47b064437f067 100644
--- a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/implementation/util/BuilderHelper.java
+++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/implementation/util/BuilderHelper.java
@@ -6,10 +6,12 @@
import com.azure.core.credential.AzureSasCredential;
import com.azure.core.credential.TokenCredential;
import com.azure.core.http.HttpClient;
+import com.azure.core.http.HttpHeaderName;
import com.azure.core.http.HttpHeaders;
import com.azure.core.http.HttpPipeline;
import com.azure.core.http.HttpPipelineBuilder;
import com.azure.core.http.policy.AddDatePolicy;
+import com.azure.core.http.policy.AddHeadersFromContextPolicy;
import com.azure.core.http.policy.AddHeadersPolicy;
import com.azure.core.http.policy.AzureSasCredentialPolicy;
import com.azure.core.http.policy.BearerTokenAuthenticationPolicy;
@@ -22,6 +24,7 @@
import com.azure.core.http.policy.UserAgentPolicy;
import com.azure.core.util.ClientOptions;
import com.azure.core.util.Configuration;
+import com.azure.core.util.Context;
import com.azure.core.util.CoreUtils;
import com.azure.core.util.TracingOptions;
import com.azure.core.util.logging.ClientLogger;
@@ -45,6 +48,7 @@
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
+import java.util.function.Supplier;
import static com.azure.storage.common.Utility.STORAGE_TRACING_NAMESPACE_VALUE;
@@ -56,6 +60,7 @@
public final class BuilderHelper {
private static final String CLIENT_NAME;
private static final String CLIENT_VERSION;
+ private static final HttpHeaderName X_MS_UPN = HttpHeaderName.fromString("x-ms-upn");
static {
Map properties = CoreUtils.getProperties("azure-storage-file-datalake.properties");
@@ -105,6 +110,8 @@ public static HttpPipeline buildPipeline(
policies.add(new AddDatePolicy());
+ policies.add(new AddHeadersFromContextPolicy());
+
// We need to place this policy right before the credential policy since headers may affect the string to sign
// of the request.
HttpHeaders headers = CoreUtils.createHttpHeadersFromClientOptions(clientOptions);
@@ -233,4 +240,20 @@ private static Tracer createTracer(ClientOptions clientOptions) {
return TracerProvider.getDefaultProvider()
.createTracer(CLIENT_NAME, CLIENT_VERSION, STORAGE_TRACING_NAMESPACE_VALUE, tracingOptions);
}
+
+ public static Context addUpnHeader(Supplier upnHeaderValue, Context context) {
+ Boolean value = upnHeaderValue.get();
+ if (value == null) {
+ return context;
+ }
+
+ HttpHeaders headers = new HttpHeaders();
+ headers.set(X_MS_UPN, Boolean.toString(value));
+ if (context == null) {
+ return new Context(AddHeadersFromContextPolicy.AZURE_REQUEST_HTTP_HEADERS_KEY, headers);
+ } else {
+ return context.addData(AddHeadersFromContextPolicy.AZURE_REQUEST_HTTP_HEADERS_KEY, headers);
+ }
+
+ }
}
diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/implementation/util/TransformUtils.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/implementation/util/TransformUtils.java
index 1949fc9574c88..fca9b07818bfd 100644
--- a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/implementation/util/TransformUtils.java
+++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/implementation/util/TransformUtils.java
@@ -54,6 +54,8 @@ public static BlobServiceVersion toBlobServiceVersion(DataLakeServiceVersion ver
return BlobServiceVersion.V2023_11_03;
} else if (DataLakeServiceVersion.V2024_02_04.ordinal() == version.ordinal()) {
return BlobServiceVersion.V2024_02_04;
+ } else if (DataLakeServiceVersion.V2024_05_04.ordinal() == version.ordinal()) {
+ return BlobServiceVersion.V2024_05_04;
}
return null;
diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/models/FileReadHeaders.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/models/FileReadHeaders.java
index c47e85503663a..7bd77ff3f959a 100644
--- a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/models/FileReadHeaders.java
+++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/models/FileReadHeaders.java
@@ -7,6 +7,7 @@
import com.azure.core.util.CoreUtils;
import com.azure.core.util.DateTimeRfc1123;
import java.time.OffsetDateTime;
+import java.util.List;
import java.util.Map;
/**
@@ -46,6 +47,7 @@ public final class FileReadHeaders {
private String errorCode;
private OffsetDateTime creationTime;
private String encryptionContext;
+ private List accessControlList;
/**
* Get the lastModified property: Returns the date and time the container
@@ -854,4 +856,24 @@ public FileReadHeaders setEncryptionContext(String encryptionContext) {
this.encryptionContext = encryptionContext;
return this;
}
+
+ /**
+ * Optional. The POSIX access control list for the file or directory.
+ *
+ * @return the access control list.
+ */
+ public List getAccessControlList() {
+ return accessControlList;
+ }
+
+ /**
+ * Optional. The POSIX access control list for the file or directory.
+ *
+ * @param accessControlList the access control list to be set on the file or directory.
+ * @return the FileReadHeaders object itself.
+ */
+ public FileReadHeaders setAccessControlList(List accessControlList) {
+ this.accessControlList = accessControlList;
+ return this;
+ }
}
diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/models/PathAccessControlEntry.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/models/PathAccessControlEntry.java
index 1122374a49295..a4b83f6653a6f 100644
--- a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/models/PathAccessControlEntry.java
+++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/models/PathAccessControlEntry.java
@@ -186,6 +186,9 @@ public static String serializeList(List acl) {
* @return The ACL deserialized into a {@code java.util.List}
*/
public static List parseList(String str) {
+ if (str == null) {
+ return null;
+ }
String[] strs = str.split(",");
List acl = new ArrayList<>(strs.length);
for (String entry : strs) {
diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/models/PathProperties.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/models/PathProperties.java
index f383ee2d49675..a3f57b8559936 100644
--- a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/models/PathProperties.java
+++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/models/PathProperties.java
@@ -8,6 +8,7 @@
import com.azure.storage.file.datalake.implementation.util.AccessorUtility;
import java.time.OffsetDateTime;
+import java.util.List;
import java.util.Map;
/**
@@ -47,15 +48,16 @@ public class PathProperties {
private String owner;
private String group;
private String permissions;
-
+ private List accessControlList;
static {
- AccessorUtility.setPathPropertiesAccessor((properties, encryptionScope, encryptionContext, owner, group, permissions) -> {
+ AccessorUtility.setPathPropertiesAccessor((properties, encryptionScope, encryptionContext, owner, group, permissions, AccessControlList) -> {
properties.encryptionScope = encryptionScope;
properties.encryptionContext = encryptionContext;
properties.owner = owner;
properties.group = group;
properties.permissions = permissions;
+ properties.accessControlList = PathAccessControlEntry.parseList(AccessControlList);
return properties;
});
@@ -426,4 +428,14 @@ public String getGroup() {
public String getPermissions() {
return permissions;
}
+
+ /**
+ * Optional. The POSIX access control list for the file or directory.
+ *
+ * @return the access control list.
+ */
+ public List getAccessControlList() {
+ return accessControlList;
+ }
+
}
diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/options/DataLakeFileInputStreamOptions.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/options/DataLakeFileInputStreamOptions.java
index a6371cf58dbf4..fa69204126809 100644
--- a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/options/DataLakeFileInputStreamOptions.java
+++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/options/DataLakeFileInputStreamOptions.java
@@ -18,6 +18,7 @@ public final class DataLakeFileInputStreamOptions {
private DataLakeRequestConditions requestConditions;
private Integer blockSize;
private ConsistentReadControl consistentReadControl;
+ private Boolean userPrincipalName;
/**
* @return {@link FileRange}
@@ -86,4 +87,20 @@ public DataLakeFileInputStreamOptions setConsistentReadControl(ConsistentReadCon
this.consistentReadControl = consistentReadControl;
return this;
}
+
+ /**
+ * @return The value for the x-ms-upn header.
+ */
+ public Boolean isUserPrincipalName() {
+ return userPrincipalName;
+ }
+
+ /**
+ * @param userPrincipalName The value for the x-ms-upn header.
+ * @return The updated options.
+ */
+ public DataLakeFileInputStreamOptions setUserPrincipalName(Boolean userPrincipalName) {
+ this.userPrincipalName = userPrincipalName;
+ return this;
+ }
}
diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/options/PathGetPropertiesOptions.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/options/PathGetPropertiesOptions.java
new file mode 100644
index 0000000000000..ad253933faf96
--- /dev/null
+++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/options/PathGetPropertiesOptions.java
@@ -0,0 +1,47 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package com.azure.storage.file.datalake.options;
+
+import com.azure.storage.file.datalake.models.DataLakeRequestConditions;
+import com.azure.storage.file.datalake.DataLakePathClient;
+
+/**
+ * Parameters when calling getProperties() on {@link DataLakePathClient}
+ */
+public class PathGetPropertiesOptions {
+ private DataLakeRequestConditions requestConditions;
+ private Boolean userPrincipalName;
+
+ /**
+ * @return {@link DataLakeRequestConditions}
+ */
+ public DataLakeRequestConditions getRequestConditions() {
+ return requestConditions;
+ }
+
+ /**
+ * @param requestConditions {@link DataLakeRequestConditions}
+ * @return The updated options.
+ */
+ public PathGetPropertiesOptions setRequestConditions(DataLakeRequestConditions requestConditions) {
+ this.requestConditions = requestConditions;
+ return this;
+ }
+
+ /**
+ * @return The value for the x-ms-upn header.
+ */
+ public Boolean isUserPrincipalName() {
+ return userPrincipalName;
+ }
+
+ /**
+ * @param userPrincipalName The value for the x-ms-upn header.
+ * @return The updated options.
+ */
+ public PathGetPropertiesOptions setUserPrincipalName(Boolean userPrincipalName) {
+ this.userPrincipalName = userPrincipalName;
+ return this;
+ }
+}
diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/options/ReadToFileOptions.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/options/ReadToFileOptions.java
new file mode 100644
index 0000000000000..04814b47f75a4
--- /dev/null
+++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/options/ReadToFileOptions.java
@@ -0,0 +1,160 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package com.azure.storage.file.datalake.options;
+
+import com.azure.storage.common.ParallelTransferOptions;
+import com.azure.storage.file.datalake.DataLakeFileClient;
+import com.azure.storage.file.datalake.models.DataLakeRequestConditions;
+import com.azure.storage.file.datalake.models.DownloadRetryOptions;
+import com.azure.storage.file.datalake.models.FileRange;
+
+import java.io.UncheckedIOException;
+import java.nio.file.OpenOption;
+import java.util.Set;
+
+
+/**
+ * Parameters when calling readToFile() on {@link DataLakeFileClient}
+ */
+public class ReadToFileOptions {
+ private String filePath;
+ private FileRange range;
+ private ParallelTransferOptions parallelTransferOptions;
+ private DownloadRetryOptions downloadRetryOptions;
+ private DataLakeRequestConditions dataLakeRequestConditions;
+ private boolean rangeGetContentMd5;
+ private Set<OpenOption> openOptions;
+ private Boolean userPrincipalName;
+
+ /**
+ * @return A {@link String} representing the filePath where the downloaded data will be written.
+ */
+ public String getFilePath() {
+ return filePath;
+ }
+
+ /**
+ * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
+ * @return The updated options.
+ */
+ public ReadToFileOptions setFilePath(String filePath) {
+ this.filePath = filePath;
+ return this;
+ }
+
+ /**
+ * @return {@link FileRange}
+ */
+ public FileRange getRange() {
+ return range;
+ }
+
+ /**
+ * @param range {@link FileRange}
+ * @return The updated options.
+ */
+ public ReadToFileOptions setRange(FileRange range) {
+ this.range = range;
+ return this;
+ }
+
+ /**
+ * @return {@link ParallelTransferOptions} to use to download to file. Number of parallel
+ * transfers parameter is ignored.
+ */
+ public ParallelTransferOptions getParallelTransferOptions() {
+ return parallelTransferOptions;
+ }
+
+ /**
+ * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
+ * transfers parameter is ignored.
+ * @return The updated options.
+ */
+ public ReadToFileOptions setParallelTransferOptions(ParallelTransferOptions parallelTransferOptions) {
+ this.parallelTransferOptions = parallelTransferOptions;
+ return this;
+ }
+
+ /**
+ * @return {@link DownloadRetryOptions}
+ */
+ public DownloadRetryOptions getDownloadRetryOptions() {
+ return downloadRetryOptions;
+ }
+
+ /**
+ * @param downloadRetryOptions {@link DownloadRetryOptions}
+ * @return The updated options.
+ */
+ public ReadToFileOptions setDownloadRetryOptions(DownloadRetryOptions downloadRetryOptions) {
+ this.downloadRetryOptions = downloadRetryOptions;
+ return this;
+ }
+
+ /**
+ * @return {@link DataLakeRequestConditions}
+ */
+ public DataLakeRequestConditions getDataLakeRequestConditions() {
+ return dataLakeRequestConditions;
+ }
+
+ /**
+ * @param dataLakeRequestConditions {@link DataLakeRequestConditions}
+ * @return The updated options.
+ *
+ */
+ public ReadToFileOptions setDataLakeRequestConditions(DataLakeRequestConditions dataLakeRequestConditions) {
+ this.dataLakeRequestConditions = dataLakeRequestConditions;
+ return this;
+ }
+
+ /**
+ * @return Whether the contentMD5 for the specified file range should be returned.
+ */
+ public boolean isRangeGetContentMd5() {
+ return rangeGetContentMd5;
+ }
+
+ /**
+ * @param rangeGetContentMd5 Whether the contentMD5 for the specified file range should be returned.
+ * @return The updated options.
+ */
+ public ReadToFileOptions setRangeGetContentMd5(boolean rangeGetContentMd5) {
+ this.rangeGetContentMd5 = rangeGetContentMd5;
+ return this;
+ }
+
+ /**
+ * @return {@link OpenOption OpenOptions} to use to configure how to open or create the file.
+ */
+ public Set<OpenOption> getOpenOptions() {
+ return openOptions;
+ }
+
+ /**
+ * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
+ * @return The updated options.
+ */
+ public ReadToFileOptions setOpenOptions(Set<OpenOption> openOptions) {
+ this.openOptions = openOptions;
+ return this;
+ }
+
+ /**
+ * @return The value for the x-ms-upn header.
+ */
+ public Boolean isUserPrincipalName() {
+ return userPrincipalName;
+ }
+
+ /**
+ * @param userPrincipalName The value for the x-ms-upn header.
+ * @return The updated options.
+ */
+ public ReadToFileOptions setUserPrincipalName(Boolean userPrincipalName) {
+ this.userPrincipalName = userPrincipalName;
+ return this;
+ }
+}
diff --git a/sdk/storage/azure-storage-file-datalake/src/samples/java/com/azure/storage/file/datalake/DataLakeFileAsyncClientJavaDocSamples.java b/sdk/storage/azure-storage-file-datalake/src/samples/java/com/azure/storage/file/datalake/DataLakeFileAsyncClientJavaDocSamples.java
index bebf5f7cb99f8..af2498b856e65 100644
--- a/sdk/storage/azure-storage-file-datalake/src/samples/java/com/azure/storage/file/datalake/DataLakeFileAsyncClientJavaDocSamples.java
+++ b/sdk/storage/azure-storage-file-datalake/src/samples/java/com/azure/storage/file/datalake/DataLakeFileAsyncClientJavaDocSamples.java
@@ -5,6 +5,7 @@
import com.azure.core.util.BinaryData;
import com.azure.storage.common.ParallelTransferOptions;
+import com.azure.storage.common.implementation.Constants;
import com.azure.storage.file.datalake.models.DataLakeRequestConditions;
import com.azure.storage.file.datalake.models.DownloadRetryOptions;
import com.azure.storage.file.datalake.models.FileQueryDelimitedSerialization;
@@ -20,6 +21,7 @@
import com.azure.storage.file.datalake.models.FileRange;
import com.azure.storage.file.datalake.models.PathHttpHeaders;
import com.azure.storage.file.datalake.options.FileScheduleDeletionOptions;
+import com.azure.storage.file.datalake.options.ReadToFileOptions;
import reactor.core.publisher.Flux;
import java.io.ByteArrayOutputStream;
@@ -129,14 +131,22 @@ public void readCodeSnippets() {
}
/**
- * Code snippets for {@link DataLakeFileAsyncClient#readToFile(String)} and {@link DataLakeFileAsyncClient#readToFileWithResponse(String,
+ * Code snippets for {@link DataLakeFileAsyncClient#readToFile(String)}
+ * and {@link DataLakeFileAsyncClient#readToFile(ReadToFileOptions)}
+ * and {@link DataLakeFileAsyncClient#readToFileWithResponse(String,
* FileRange, ParallelTransferOptions, DownloadRetryOptions, DataLakeRequestConditions, boolean, Set)}
+ * and {@link DataLakeFileAsyncClient#readToFileWithResponse(ReadToFileOptions)}
*/
public void downloadToFileCodeSnippet() {
// BEGIN: com.azure.storage.file.datalake.DataLakeFileAsyncClient.readToFile#String
client.readToFile(file).subscribe(response -> System.out.println("Completed download to file"));
// END: com.azure.storage.file.datalake.DataLakeFileAsyncClient.readToFile#String
+ // BEGIN: com.azure.storage.file.datalake.DataLakeFileAsyncClient.readToFile#ReadToFileOptions
+ client.readToFile(new ReadToFileOptions().setFilePath(file))
+ .subscribe(response -> System.out.println("Completed download to file"));
+ // END: com.azure.storage.file.datalake.DataLakeFileAsyncClient.readToFile#ReadToFileOptions
+
// BEGIN: com.azure.storage.file.datalake.DataLakeFileAsyncClient.readToFile#String-boolean
boolean overwrite = false; // Default value
client.readToFile(file, overwrite).subscribe(response -> System.out.println("Completed download to file"));
@@ -151,6 +161,21 @@ public void downloadToFileCodeSnippet() {
client.readToFileWithResponse(file, fileRange, null, downloadRetryOptions, null, false, openOptions)
.subscribe(response -> System.out.println("Completed download to file"));
// END: com.azure.storage.file.datalake.DataLakeFileAsyncClient.readToFileWithResponse#String-FileRange-ParallelTransferOptions-DownloadRetryOptions-DataLakeRequestConditions-boolean-Set
+
+ // BEGIN: com.azure.storage.file.datalake.DataLakeFileAsyncClient.readToFileWithResponse#ReadToFileOptions
+ ReadToFileOptions options = new ReadToFileOptions();
+ options.setFilePath(file);
+ options.setRange(new FileRange(1024, 2048L));
+ options.setDownloadRetryOptions(new DownloadRetryOptions().setMaxRetryRequests(5));
+ options.setOpenOptions(new HashSet<>(Arrays.asList(StandardOpenOption.CREATE_NEW,
+ StandardOpenOption.WRITE, StandardOpenOption.READ))); //Default options
+ options.setParallelTransferOptions(new ParallelTransferOptions().setBlockSizeLong(4L * Constants.MB));
+ options.setDataLakeRequestConditions(null);
+ options.setRangeGetContentMd5(false);
+
+ client.readToFileWithResponse(options)
+ .subscribe(response -> System.out.println("Completed download to file"));
+ // END: com.azure.storage.file.datalake.DataLakeFileAsyncClient.readToFileWithResponse#ReadToFileOptions
}
/**
diff --git a/sdk/storage/azure-storage-file-datalake/src/samples/java/com/azure/storage/file/datalake/DataLakeFileClientJavaDocSamples.java b/sdk/storage/azure-storage-file-datalake/src/samples/java/com/azure/storage/file/datalake/DataLakeFileClientJavaDocSamples.java
index b0d09bc7c6d8a..c79ec17584f71 100644
--- a/sdk/storage/azure-storage-file-datalake/src/samples/java/com/azure/storage/file/datalake/DataLakeFileClientJavaDocSamples.java
+++ b/sdk/storage/azure-storage-file-datalake/src/samples/java/com/azure/storage/file/datalake/DataLakeFileClientJavaDocSamples.java
@@ -27,6 +27,7 @@
import com.azure.storage.file.datalake.models.PathHttpHeaders;
import com.azure.storage.file.datalake.models.PathInfo;
import com.azure.storage.file.datalake.options.FileScheduleDeletionOptions;
+import com.azure.storage.file.datalake.options.ReadToFileOptions;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
@@ -129,8 +130,11 @@ public void readCodeSnippets() {
/**
* Code snippets for {@link DataLakeFileClient#readToFile(String)} and
+ * {@link DataLakeFileClient#readToFile(ReadToFileOptions)} and
+ * {@link DataLakeFileClient#readToFile(String, boolean)} and
* {@link DataLakeFileClient#readToFileWithResponse(String, FileRange, ParallelTransferOptions, DownloadRetryOptions, DataLakeRequestConditions,
- * boolean, Set, Duration, Context)}
+ * boolean, Set, Duration, Context)} and
+ * {@link DataLakeFileClient#readToFileWithResponse(ReadToFileOptions, Duration, Context)}
*/
public void downloadToFile() {
// BEGIN: com.azure.storage.file.datalake.DataLakeFileClient.readToFile#String
@@ -138,6 +142,11 @@ public void downloadToFile() {
System.out.println("Completed download to file");
// END: com.azure.storage.file.datalake.DataLakeFileClient.readToFile#String
+ // BEGIN: com.azure.storage.file.datalake.DataLakeFileClient.readToFile#ReadToFileOptions
+ client.readToFile(new ReadToFileOptions().setFilePath(file));
+ System.out.println("Completed download to file");
+ // END: com.azure.storage.file.datalake.DataLakeFileClient.readToFile#ReadToFileOptions
+
// BEGIN: com.azure.storage.file.datalake.DataLakeFileClient.readToFile#String-boolean
boolean overwrite = false; // Default value
client.readToFile(file, overwrite);
@@ -154,6 +163,23 @@ public void downloadToFile() {
downloadRetryOptions, null, false, openOptions, timeout, new Context(key2, value2));
System.out.println("Completed download to file");
// END: com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse#String-FileRange-ParallelTransferOptions-DownloadRetryOptions-DataLakeRequestConditions-boolean-Set-Duration-Context
+
+ // BEGIN: com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse#ReadToFileOptions-Duration-Context
+ ReadToFileOptions options = new ReadToFileOptions();
+ options.setFilePath(file);
+ options.setRange(new FileRange(1024, 2048L));
+ options.setDownloadRetryOptions(new DownloadRetryOptions().setMaxRetryRequests(5));
+ options.setOpenOptions(new HashSet<>(Arrays.asList(StandardOpenOption.CREATE_NEW,
+ StandardOpenOption.WRITE, StandardOpenOption.READ))); //Default options
+ options.setParallelTransferOptions(new ParallelTransferOptions().setBlockSizeLong(4L * Constants.MB));
+ options.setDataLakeRequestConditions(null);
+ options.setRangeGetContentMd5(false);
+
+ client.readToFileWithResponse(options, timeout, new Context(key2, value2));
+ System.out.println("Completed download to file");
+ // END: com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse#ReadToFileOptions-Duration-Context
+
+
}
/**
diff --git a/sdk/storage/azure-storage-file-datalake/src/samples/java/com/azure/storage/file/datalake/PathAsyncClientJavaDocCodeSamples.java b/sdk/storage/azure-storage-file-datalake/src/samples/java/com/azure/storage/file/datalake/PathAsyncClientJavaDocCodeSamples.java
index e985f27d25f32..74e99b89e3d0d 100644
--- a/sdk/storage/azure-storage-file-datalake/src/samples/java/com/azure/storage/file/datalake/PathAsyncClientJavaDocCodeSamples.java
+++ b/sdk/storage/azure-storage-file-datalake/src/samples/java/com/azure/storage/file/datalake/PathAsyncClientJavaDocCodeSamples.java
@@ -16,6 +16,7 @@
import com.azure.storage.file.datalake.models.UserDelegationKey;
import com.azure.storage.file.datalake.options.DataLakePathCreateOptions;
import com.azure.storage.file.datalake.options.DataLakePathDeleteOptions;
+import com.azure.storage.file.datalake.options.PathGetPropertiesOptions;
import com.azure.storage.file.datalake.options.PathRemoveAccessControlRecursiveOptions;
import com.azure.storage.file.datalake.options.PathSetAccessControlRecursiveOptions;
import com.azure.storage.file.datalake.options.PathUpdateAccessControlRecursiveOptions;
@@ -154,12 +155,22 @@ public void setHTTPHeadersWithResponseCodeSnippets() {
/**
* Code snippets for {@link DataLakePathAsyncClient#getProperties()}
+ * and {@link DataLakePathAsyncClient#getProperties(PathGetPropertiesOptions)}
*/
public void getPropertiesCodeSnippet() {
// BEGIN: com.azure.storage.file.datalake.DataLakePathAsyncClient.getProperties
client.getProperties().subscribe(response ->
System.out.printf("Creation Time: %s, Size: %d%n", response.getCreationTime(), response.getFileSize()));
// END: com.azure.storage.file.datalake.DataLakePathAsyncClient.getProperties
+
+ // BEGIN: com.azure.storage.file.datalake.DataLakePathAsyncClient.getProperties#PathGetPropertiesOptions
+ PathGetPropertiesOptions options = new PathGetPropertiesOptions().setUserPrincipalName(true);
+
+ client.getProperties(options).subscribe(response ->
+ System.out.printf("Creation Time: %s, Size: %d%n", response.getCreationTime(), response.getFileSize()));
+ // END: com.azure.storage.file.datalake.DataLakePathAsyncClient.getProperties#PathGetPropertiesOptions
+
+
}
/**
diff --git a/sdk/storage/azure-storage-file-datalake/src/samples/java/com/azure/storage/file/datalake/PathClientJavaDocCodeSamples.java b/sdk/storage/azure-storage-file-datalake/src/samples/java/com/azure/storage/file/datalake/PathClientJavaDocCodeSamples.java
index 1d069ce50f6c8..e38a6e3d67e5a 100644
--- a/sdk/storage/azure-storage-file-datalake/src/samples/java/com/azure/storage/file/datalake/PathClientJavaDocCodeSamples.java
+++ b/sdk/storage/azure-storage-file-datalake/src/samples/java/com/azure/storage/file/datalake/PathClientJavaDocCodeSamples.java
@@ -21,6 +21,7 @@
import com.azure.storage.file.datalake.models.UserDelegationKey;
import com.azure.storage.file.datalake.options.DataLakePathCreateOptions;
import com.azure.storage.file.datalake.options.DataLakePathDeleteOptions;
+import com.azure.storage.file.datalake.options.PathGetPropertiesOptions;
import com.azure.storage.file.datalake.options.PathRemoveAccessControlRecursiveOptions;
import com.azure.storage.file.datalake.options.PathSetAccessControlRecursiveOptions;
import com.azure.storage.file.datalake.options.PathUpdateAccessControlRecursiveOptions;
@@ -165,13 +166,21 @@ public void setHTTPHeadersWithResponseCodeSnippets() {
}
/**
- * Code snippets for {@link DataLakePathClient#getProperties()}
+ * Code snippets for {@link DataLakePathClient#getProperties()} and {@link DataLakePathClient#getProperties(PathGetPropertiesOptions)}
*/
public void getPropertiesCodeSnippet() {
// BEGIN: com.azure.storage.file.datalake.DataLakePathClient.getProperties
System.out.printf("Creation Time: %s, Size: %d%n", client.getProperties().getCreationTime(),
client.getProperties().getFileSize());
// END: com.azure.storage.file.datalake.DataLakePathClient.getProperties
+
+ // BEGIN: com.azure.storage.file.datalake.DataLakePathClient.getProperties#PathGetPropertiesOptions
+ PathGetPropertiesOptions options = new PathGetPropertiesOptions().setUserPrincipalName(true);
+
+ System.out.printf("Creation Time: %s, Size: %d%n", client.getProperties(options).getCreationTime(),
+ client.getProperties(options).getFileSize());
+ // END: com.azure.storage.file.datalake.DataLakePathClient.getProperties#PathGetPropertiesOptions
+
}
/**
diff --git a/sdk/storage/azure-storage-file-datalake/src/test/java/com/azure/storage/file/datalake/DirectoryApiTests.java b/sdk/storage/azure-storage-file-datalake/src/test/java/com/azure/storage/file/datalake/DirectoryApiTests.java
index 878641ee5c8ff..8c49534422a5b 100644
--- a/sdk/storage/azure-storage-file-datalake/src/test/java/com/azure/storage/file/datalake/DirectoryApiTests.java
+++ b/sdk/storage/azure-storage-file-datalake/src/test/java/com/azure/storage/file/datalake/DirectoryApiTests.java
@@ -2124,7 +2124,7 @@ public void renameWithResponse() {
Response resp = dc.renameWithResponse(null, generatePathName(), null, null, null, null);
DataLakeDirectoryClient renamedClient = resp.getValue();
- assertDoesNotThrow(renamedClient::getProperties);
+ assertDoesNotThrow(() -> renamedClient.getProperties());
assertThrows(DataLakeStorageException.class, () -> dc.getProperties());
}
@@ -2136,7 +2136,7 @@ public void renameFilesystemWithResponse() {
DataLakeDirectoryClient renamedClient = resp.getValue();
- assertDoesNotThrow(renamedClient::getProperties);
+ assertDoesNotThrow(() -> renamedClient.getProperties());
assertThrows(DataLakeStorageException.class, () -> dc.getProperties());
}
diff --git a/sdk/storage/azure-storage-file-datalake/src/test/java/com/azure/storage/file/datalake/DirectoryAsyncApiTests.java b/sdk/storage/azure-storage-file-datalake/src/test/java/com/azure/storage/file/datalake/DirectoryAsyncApiTests.java
index 05bb699c55e87..94a9755168c85 100644
--- a/sdk/storage/azure-storage-file-datalake/src/test/java/com/azure/storage/file/datalake/DirectoryAsyncApiTests.java
+++ b/sdk/storage/azure-storage-file-datalake/src/test/java/com/azure/storage/file/datalake/DirectoryAsyncApiTests.java
@@ -2423,7 +2423,7 @@ public void renameWithResponse() {
null))
.assertNext(r -> {
DataLakeDirectoryAsyncClient renamedClient = r.getValue();
- assertDoesNotThrow(renamedClient::getProperties);
+ assertDoesNotThrow(() -> renamedClient.getProperties());
})
.verifyComplete();
@@ -2439,7 +2439,7 @@ public void renameFilesystemWithResponse() {
StepVerifier.create(response)
.assertNext(r -> {
DataLakeDirectoryAsyncClient renamedClient = r.getValue();
- assertDoesNotThrow(renamedClient::getProperties);
+ assertDoesNotThrow(() -> renamedClient.getProperties());
})
.verifyComplete();
diff --git a/sdk/storage/azure-storage-file-datalake/src/test/java/com/azure/storage/file/datalake/FileApiTest.java b/sdk/storage/azure-storage-file-datalake/src/test/java/com/azure/storage/file/datalake/FileApiTest.java
index d8cba33277a04..f4b3b982ecad2 100644
--- a/sdk/storage/azure-storage-file-datalake/src/test/java/com/azure/storage/file/datalake/FileApiTest.java
+++ b/sdk/storage/azure-storage-file-datalake/src/test/java/com/azure/storage/file/datalake/FileApiTest.java
@@ -25,6 +25,7 @@
import com.azure.storage.file.datalake.models.AccessControlChangeResult;
import com.azure.storage.file.datalake.models.AccessTier;
import com.azure.storage.file.datalake.models.DataLakeAudience;
+import com.azure.storage.file.datalake.models.DataLakeFileOpenInputStreamResult;
import com.azure.storage.file.datalake.models.DataLakeRequestConditions;
import com.azure.storage.file.datalake.models.DataLakeStorageException;
import com.azure.storage.file.datalake.models.DownloadRetryOptions;
@@ -55,12 +56,15 @@
import com.azure.storage.file.datalake.models.PathRemoveAccessControlEntry;
import com.azure.storage.file.datalake.models.RolePermissions;
import com.azure.storage.file.datalake.options.DataLakeFileAppendOptions;
+import com.azure.storage.file.datalake.options.DataLakeFileInputStreamOptions;
import com.azure.storage.file.datalake.options.DataLakePathCreateOptions;
import com.azure.storage.file.datalake.options.DataLakePathDeleteOptions;
import com.azure.storage.file.datalake.options.DataLakePathScheduleDeletionOptions;
import com.azure.storage.file.datalake.options.FileParallelUploadOptions;
import com.azure.storage.file.datalake.options.FileQueryOptions;
import com.azure.storage.file.datalake.options.FileScheduleDeletionOptions;
+import com.azure.storage.file.datalake.options.PathGetPropertiesOptions;
+import com.azure.storage.file.datalake.options.ReadToFileOptions;
import com.azure.storage.file.datalake.sas.DataLakeServiceSasSignatureValues;
import com.azure.storage.file.datalake.sas.FileSystemSasPermission;
import com.azure.storage.file.datalake.specialized.DataLakeLeaseClient;
@@ -1800,7 +1804,7 @@ public void renameMin() {
public void renameWithResponse() {
DataLakeFileClient renamedClient = fc.renameWithResponse(null, generatePathName(), null, null, null, null).getValue();
- assertDoesNotThrow(renamedClient::getProperties);
+ assertDoesNotThrow(() -> renamedClient.getProperties());
assertThrows(DataLakeStorageException.class, fc::getProperties);
}
@@ -1811,7 +1815,7 @@ public void renameFilesystemWithResponse() {
DataLakeFileClient renamedClient = fc.renameWithResponse(newFileSystem.getFileSystemName(), generatePathName(),
null, null, null, null).getValue();
- assertDoesNotThrow(renamedClient::getProperties);
+ assertDoesNotThrow(() -> renamedClient.getProperties());
assertThrows(DataLakeStorageException.class, fc::getProperties);
}
@@ -1917,7 +1921,7 @@ public void renameSasToken() {
DataLakeFileClient destClient = client.rename(dataLakeFileSystemClient.getFileSystemName(), generatePathName());
- assertDoesNotThrow(destClient::getProperties);
+ assertDoesNotThrow(() -> destClient.getProperties());
}
@Test
@@ -1936,7 +1940,7 @@ public void renameSasTokenWithLeadingQuestionMark() {
DataLakeFileClient destClient = client.rename(dataLakeFileSystemClient.getFileSystemName(), generatePathName());
- assertDoesNotThrow(destClient::getProperties);
+ assertDoesNotThrow(() -> destClient.getProperties());
}
@Test
@@ -3403,4 +3407,99 @@ public void audienceFromString() {
assertTrue(aadFileClient.exists());
}
+
+ @RequiredServiceVersion(clazz = DataLakeServiceVersion.class, min = "2024-05-04")
+ @Test
+ public void aclHeaderTests() {
+ dataLakeFileSystemClient = primaryDataLakeServiceClient.getFileSystemClient(generateFileSystemName());
+ dataLakeFileSystemClient.create();
+ dataLakeFileSystemClient.getDirectoryClient(generatePathName()).create();
+ fc = dataLakeFileSystemClient.getFileClient(generatePathName());
+
+ DataLakePathCreateOptions options = new DataLakePathCreateOptions()
+ .setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES);
+ fc.createWithResponse(options, null, Context.NONE);
+
+ //getProperties
+ PathProperties getPropertiesResponse = fc.getProperties();
+ assertTrue(PATH_ACCESS_CONTROL_ENTRIES.containsAll(getPropertiesResponse.getAccessControlList()));
+
+ //readWithResponse
+ FileReadResponse readWithResponse = fc.readWithResponse(new ByteArrayOutputStream(), null,
+ null, null, false, null, Context.NONE);
+ assertTrue(PATH_ACCESS_CONTROL_ENTRIES.containsAll(readWithResponse.getDeserializedHeaders().getAccessControlList()));
+
+ //readToFileWithResponse
+ File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
+ outFile.deleteOnExit();
+ createdFiles.add(outFile);
+
+ if (outFile.exists()) {
+ assertTrue(outFile.delete());
+ }
+
+ Response<PathProperties> readToFileResponse = fc.readToFileWithResponse(outFile.getPath(), null,
+ null, null, null, false, null, null,
+ null);
+ assertTrue(PATH_ACCESS_CONTROL_ENTRIES.containsAll(readToFileResponse.getValue().getAccessControlList()));
+ }
+
+ @RequiredServiceVersion(clazz = DataLakeServiceVersion.class, min = "2024-05-04")
+ @ParameterizedTest
+ @MethodSource("upnHeaderTestSupplier")
+ public void upnHeaderTest(Boolean upnHeader) {
+ //feature currently doesn't work in preprod - test uses methods that send the request header. verified in fiddler
+ //that the header is being sent and is properly assigned.
+ dataLakeFileSystemClient = primaryDataLakeServiceClient.getFileSystemClient(generateFileSystemName());
+ dataLakeFileSystemClient.create();
+ dataLakeFileSystemClient.getDirectoryClient(generatePathName()).create();
+ fc = dataLakeFileSystemClient.getFileClient(generatePathName());
+
+ DataLakePathCreateOptions options = new DataLakePathCreateOptions()
+ .setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES);
+ fc.createWithResponse(options, null, Context.NONE);
+
+ //getProperties
+ PathGetPropertiesOptions propertiesOptions = new PathGetPropertiesOptions().setUserPrincipalName(upnHeader);
+
+ PathProperties getPropertiesResponse = fc.getProperties(propertiesOptions);
+ assertNotNull(getPropertiesResponse.getAccessControlList());
+
+ //readToFile
+ File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
+ outFile.deleteOnExit();
+ createdFiles.add(outFile);
+
+ if (outFile.exists()) {
+ assertTrue(outFile.delete());
+ }
+ ReadToFileOptions readToFileOptions = new ReadToFileOptions();
+ readToFileOptions.setUserPrincipalName(upnHeader).setFilePath(outFile.getPath()).setRange(null)
+ .setParallelTransferOptions(null).setDownloadRetryOptions(null).setDataLakeRequestConditions(null)
+ .setRangeGetContentMd5(false).setOpenOptions(null);
+
+ PathProperties readToFileResponse = fc.readToFile(readToFileOptions);
+ assertNotNull(readToFileResponse.getAccessControlList());
+
+ if (outFile.exists()) {
+ assertTrue(outFile.delete());
+ }
+ Response<PathProperties> readToFileWithResponse = fc.readToFileWithResponse(readToFileOptions, null, null);
+ assertNotNull(readToFileWithResponse.getValue().getAccessControlList());
+
+ //openInputStream
+ DataLakeFileInputStreamOptions openInputStreamOptions = new DataLakeFileInputStreamOptions().setUserPrincipalName(upnHeader);
+
+ DataLakeFileOpenInputStreamResult openInputStreamResponse = fc.openInputStream(openInputStreamOptions);
+ //no way to pull acl from properties in openInputStream
+ //assertNotNull(openInputStreamResponse.getProperties().getAccessControlList());
+
+ }
+
+ private static Stream<Arguments> upnHeaderTestSupplier() {
+ return Stream.of(
+ Arguments.of(false),
+ Arguments.of(true),
+ Arguments.of((Boolean) null));
+ }
}
diff --git a/sdk/storage/azure-storage-file-datalake/src/test/java/com/azure/storage/file/datalake/FileAsyncApiTests.java b/sdk/storage/azure-storage-file-datalake/src/test/java/com/azure/storage/file/datalake/FileAsyncApiTests.java
index 6e13cb172eda4..510878bf9e400 100644
--- a/sdk/storage/azure-storage-file-datalake/src/test/java/com/azure/storage/file/datalake/FileAsyncApiTests.java
+++ b/sdk/storage/azure-storage-file-datalake/src/test/java/com/azure/storage/file/datalake/FileAsyncApiTests.java
@@ -62,6 +62,8 @@
import com.azure.storage.file.datalake.options.FileParallelUploadOptions;
import com.azure.storage.file.datalake.options.FileQueryOptions;
import com.azure.storage.file.datalake.options.FileScheduleDeletionOptions;
+import com.azure.storage.file.datalake.options.PathGetPropertiesOptions;
+import com.azure.storage.file.datalake.options.ReadToFileOptions;
import com.azure.storage.file.datalake.sas.DataLakeServiceSasSignatureValues;
import com.azure.storage.file.datalake.sas.FileSystemSasPermission;
import com.azure.storage.file.datalake.specialized.DataLakeLeaseAsyncClient;
@@ -4623,6 +4625,98 @@ public void audienceFromString() {
.verifyComplete();
}
+ @RequiredServiceVersion(clazz = DataLakeServiceVersion.class, min = "2024-05-04")
+ @Test
+ public void aclHeaderTests() {
+ dataLakeFileSystemClient = primaryDataLakeServiceClient.getFileSystemClient(generateFileSystemName());
+ dataLakeFileSystemClient.create();
+ dataLakeFileSystemClient.getDirectoryClient(generatePathName()).create();
+ fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
+
+ DataLakePathCreateOptions options = new DataLakePathCreateOptions()
+ .setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES);
+
+ //getProperties
+ StepVerifier.create(fc.createWithResponse(options).then(fc.getProperties()))
+ .assertNext(r -> assertTrue(PATH_ACCESS_CONTROL_ENTRIES.containsAll(r.getAccessControlList())))
+ .verifyComplete();
+
+ //readWithResponse
+ StepVerifier.create(fc.readWithResponse(null, null, null, false))
+ .assertNext(r -> assertTrue(PATH_ACCESS_CONTROL_ENTRIES.containsAll(r.getDeserializedHeaders().getAccessControlList())))
+ .verifyComplete();
+
+ //readToFileWithResponse
+ File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
+ outFile.deleteOnExit();
+ createdFiles.add(outFile);
+
+ if (outFile.exists()) {
+ assertTrue(outFile.delete());
+ }
+
+ StepVerifier.create(fc.readToFileWithResponse(outFile.getPath(), null,
+ null, null, null, false, null))
+ .assertNext(r -> assertTrue(PATH_ACCESS_CONTROL_ENTRIES.containsAll(r.getValue().getAccessControlList())))
+ .verifyComplete();
+
+ }
+
+ @RequiredServiceVersion(clazz = DataLakeServiceVersion.class, min = "2024-05-04")
+ @ParameterizedTest
+ @MethodSource("upnHeaderTestSupplier")
+ public void upnHeaderTest(Boolean upnHeader) {
+ //feature currently doesn't work in preprod - test uses methods that send the request header. verified in fiddler
+ //that the header is being sent and is properly assigned.
+ dataLakeFileSystemAsyncClient = primaryDataLakeServiceAsyncClient.getFileSystemAsyncClient(generateFileSystemName());
+ dataLakeFileSystemAsyncClient.create().block();
+ dataLakeFileSystemAsyncClient.getDirectoryAsyncClient(generatePathName()).create().block();
+ fc = dataLakeFileSystemAsyncClient.getFileAsyncClient(generatePathName());
+
+ DataLakePathCreateOptions options = new DataLakePathCreateOptions()
+ .setAccessControlList(PATH_ACCESS_CONTROL_ENTRIES);
+ fc.createWithResponse(options).block();
+
+ //getProperties
+ PathGetPropertiesOptions propertiesOptions = new PathGetPropertiesOptions().setUserPrincipalName(upnHeader);
+
+ StepVerifier.create(fc.getProperties(propertiesOptions))
+ .assertNext(r -> assertNotNull(r.getAccessControlList()))
+ .verifyComplete();
+
+ //readToFile
+ File outFile = new File(testResourceNamer.randomName("", 60) + ".txt");
+ outFile.deleteOnExit();
+ createdFiles.add(outFile);
+
+ if (outFile.exists()) {
+ assertTrue(outFile.delete());
+ }
+ ReadToFileOptions readToFileOptions = new ReadToFileOptions();
+ readToFileOptions.setUserPrincipalName(upnHeader).setFilePath(outFile.getPath()).setRange(null)
+ .setParallelTransferOptions(null).setDownloadRetryOptions(null).setDataLakeRequestConditions(null)
+ .setRangeGetContentMd5(false).setOpenOptions(null);
+
+ StepVerifier.create(fc.readToFile(readToFileOptions))
+ .assertNext(r -> assertNotNull(r.getAccessControlList()))
+ .verifyComplete();
+
+ if (outFile.exists()) {
+ assertTrue(outFile.delete());
+ }
+
+ StepVerifier.create(fc.readToFileWithResponse(readToFileOptions))
+ .assertNext(r -> assertNotNull(r.getValue().getAccessControlList()))
+ .verifyComplete();
+ }
+
+ private static Stream<Arguments> upnHeaderTestSupplier() {
+ return Stream.of(
+ Arguments.of(false),
+ Arguments.of(true),
+ Arguments.of((Boolean) null));
+ }
+
}
diff --git a/sdk/storage/azure-storage-file-datalake/src/test/java/com/azure/storage/file/datalake/SasTests.java b/sdk/storage/azure-storage-file-datalake/src/test/java/com/azure/storage/file/datalake/SasTests.java
index c6f23cac5a8f3..8d774b0e5a2de 100644
--- a/sdk/storage/azure-storage-file-datalake/src/test/java/com/azure/storage/file/datalake/SasTests.java
+++ b/sdk/storage/azure-storage-file-datalake/src/test/java/com/azure/storage/file/datalake/SasTests.java
@@ -139,7 +139,7 @@ public void directorySasPermission() {
DataLakeDirectoryClient client = getDirectoryClient(sas, getFileSystemUrl(), pathName);
- PathProperties properties = assertDoesNotThrow(client::getProperties);
+ PathProperties properties = assertDoesNotThrow(() -> client.getProperties());
validateSasProperties(properties);
assertDoesNotThrow(() -> client.createSubdirectory(generatePathName()));
@@ -257,7 +257,7 @@ public void directoryUserDelegation() {
DataLakeDirectoryClient client = getDirectoryClient(sas, getFileSystemUrl(), pathName);
- PathProperties properties = assertDoesNotThrow(client::getProperties);
+ PathProperties properties = assertDoesNotThrow(() -> client.getProperties());
validateSasProperties(properties);
assertDoesNotThrow(() -> client.createSubdirectory(generatePathName()));
diff --git a/sdk/storage/azure-storage-file-share/assets.json b/sdk/storage/azure-storage-file-share/assets.json
index fe478573e605e..f13e44c89d35a 100644
--- a/sdk/storage/azure-storage-file-share/assets.json
+++ b/sdk/storage/azure-storage-file-share/assets.json
@@ -2,5 +2,5 @@
"AssetsRepo": "Azure/azure-sdk-assets",
"AssetsRepoPrefixPath": "java",
"TagPrefix": "java/storage/azure-storage-file-share",
- "Tag": "java/storage/azure-storage-file-share_f461f13849"
+ "Tag": "java/storage/azure-storage-file-share_dfe3610108"
}
diff --git a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareFileAsyncClient.java b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareFileAsyncClient.java
index 57fe1707834d3..07e7f6defd6b5 100644
--- a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareFileAsyncClient.java
+++ b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareFileAsyncClient.java
@@ -2722,7 +2722,7 @@ public Mono> listRangesDiffWithResponse(ShareFileLi
try {
StorageImplUtils.assertNotNull("options", options);
return listRangesWithResponse(options.getRange(), options.getRequestConditions(),
- options.getPreviousSnapshot(), Context.NONE);
+ options.getPreviousSnapshot(), options.isRenameIncluded(), Context.NONE);
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
@@ -2734,7 +2734,7 @@ PagedFlux listRangesWithOptionalTimeout(ShareFileRange range,
Function>> retriever =
marker -> StorageImplUtils.applyOptionalTimeout(
- this.listRangesWithResponse(range, requestConditions, null, context), timeout)
+ this.listRangesWithResponse(range, requestConditions, null, null, context), timeout)
.map(response -> new PagedResponseBase<>(response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
@@ -2748,7 +2748,7 @@ PagedFlux listRangesWithOptionalTimeout(ShareFileRange range,
}
Mono> listRangesWithResponse(ShareFileRange range,
- ShareRequestConditions requestConditions, String previousSnapshot, Context context) {
+ ShareRequestConditions requestConditions, String previousSnapshot, Boolean supportRename, Context context) {
ShareRequestConditions finalRequestConditions = requestConditions == null
? new ShareRequestConditions() : requestConditions;
@@ -2756,7 +2756,7 @@ Mono> listRangesWithResponse(ShareFileRange range,
context = context == null ? Context.NONE : context;
return this.azureFileStorageClient.getFiles().getRangeListWithResponseAsync(shareName, filePath, snapshot,
- previousSnapshot, null, rangeString, finalRequestConditions.getLeaseId(), context)
+ previousSnapshot, null, rangeString, finalRequestConditions.getLeaseId(), supportRename, context)
.map(response -> new SimpleResponse<>(response, response.getValue()));
}
diff --git a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareFileClient.java b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareFileClient.java
index 0f33a54f486f2..1da3ca832c26f 100644
--- a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareFileClient.java
+++ b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareFileClient.java
@@ -2159,7 +2159,7 @@ public Response listRangesDiffWithResponse(ShareFileListRang
Duration timeout, Context context) {
StorageImplUtils.assertNotNull("options", options);
Mono> response = shareFileAsyncClient.listRangesWithResponse(options.getRange(),
- options.getRequestConditions(), options.getPreviousSnapshot(), context);
+ options.getRequestConditions(), options.getPreviousSnapshot(), options.isRenameIncluded(), context);
return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
}
diff --git a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareServiceVersion.java b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareServiceVersion.java
index 719bccf5836ab..23f6261bc44fb 100644
--- a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareServiceVersion.java
+++ b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareServiceVersion.java
@@ -113,7 +113,12 @@ public enum ShareServiceVersion implements ServiceVersion {
/**
* Service version {@code 2024-02-04}.
*/
- V2024_02_04("2024-02-04");
+ V2024_02_04("2024-02-04"),
+
+ /**
+ * Service version {@code 2024-05-04}.
+ */
+ V2024_05_04("2024-05-04");
private final String version;
@@ -135,6 +140,6 @@ public String getVersion() {
* @return the latest {@link ShareServiceVersion}
*/
public static ShareServiceVersion getLatest() {
- return V2024_02_04;
+ return V2024_05_04;
}
}
diff --git a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/implementation/FilesImpl.java b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/implementation/FilesImpl.java
index 8271aa70b8d74..9a4a9a057626c 100644
--- a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/implementation/FilesImpl.java
+++ b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/implementation/FilesImpl.java
@@ -641,6 +641,7 @@ Mono> getRangeList(
@HeaderParam("x-ms-lease-id") String leaseId,
@HeaderParam("x-ms-allow-trailing-dot") Boolean allowTrailingDot,
@HeaderParam("x-ms-file-request-intent") ShareTokenIntent fileRequestIntent,
+ @HeaderParam("x-ms-file-support-rename") Boolean supportRename,
@HeaderParam("Accept") String accept,
Context context);
@@ -660,6 +661,7 @@ Mono> getRangeListNoCustomHeaders(
@HeaderParam("x-ms-lease-id") String leaseId,
@HeaderParam("x-ms-allow-trailing-dot") Boolean allowTrailingDot,
@HeaderParam("x-ms-file-request-intent") ShareTokenIntent fileRequestIntent,
+ @HeaderParam("x-ms-file-support-rename") Boolean supportRename,
@HeaderParam("Accept") String accept,
Context context);
@@ -4974,6 +4976,11 @@ public Mono> uploadRangeFromURLNoCustomHeadersWithResponseAsync(
* Timeouts for File Service Operations.</a>.
* @param range Specifies the range of bytes over which to list ranges, inclusively.
* @param leaseId If specified, the operation only succeeds if the resource's lease is active and matches this ID.
+ * @param supportRename This header is allowed only when PrevShareSnapshot query parameter is set. Determines
+ * whether the changed ranges for a file that has been renamed or moved between the target snapshot (or the live
+ * file) and the previous snapshot should be listed. If the value is true, the valid changed ranges for the file
+ * will be returned. If the value is false, the operation will result in a failure with 409 (Conflict) response.
+ * The default value is false.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ShareStorageException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
@@ -4987,7 +4994,8 @@ public Mono> getRange
String prevsharesnapshot,
Integer timeout,
String range,
- String leaseId) {
+ String leaseId,
+ Boolean supportRename) {
final String comp = "rangelist";
final String accept = "application/xml";
return FluxUtil.withContext(
@@ -5005,6 +5013,7 @@ public Mono> getRange
leaseId,
this.client.isAllowTrailingDot(),
this.client.getFileRequestIntent(),
+ supportRename,
accept,
context));
}
@@ -5023,6 +5032,11 @@ public Mono> getRange
* Timeouts for File Service Operations.</a>.
* @param range Specifies the range of bytes over which to list ranges, inclusively.
* @param leaseId If specified, the operation only succeeds if the resource's lease is active and matches this ID.
+ * @param supportRename This header is allowed only when PrevShareSnapshot query parameter is set. Determines
+ * whether the changed ranges for a file that has been renamed or moved between the target snapshot (or the live
+ * file) and the previous snapshot should be listed. If the value is true, the valid changed ranges for the file
+ * will be returned. If the value is false, the operation will result in a failure with 409 (Conflict) response.
+ * The default value is false.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ShareStorageException thrown if the request is rejected by server.
@@ -5038,6 +5052,7 @@ public Mono> getRange
Integer timeout,
String range,
String leaseId,
+ Boolean supportRename,
Context context) {
final String comp = "rangelist";
final String accept = "application/xml";
@@ -5054,6 +5069,7 @@ public Mono> getRange
leaseId,
this.client.isAllowTrailingDot(),
this.client.getFileRequestIntent(),
+ supportRename,
accept,
context);
}
@@ -5072,6 +5088,11 @@ public Mono> getRange
* Timeouts for File Service Operations.</a>.
* @param range Specifies the range of bytes over which to list ranges, inclusively.
* @param leaseId If specified, the operation only succeeds if the resource's lease is active and matches this ID.
+ * @param supportRename This header is allowed only when PrevShareSnapshot query parameter is set. Determines
+ * whether the changed ranges for a file that has been renamed or moved between the target snapshot (or the live
+ * file) and the previous snapshot should be listed. If the value is true, the valid changed ranges for the file
+ * will be returned. If the value is false, the operation will result in a failure with 409 (Conflict) response.
+ * The default value is false.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ShareStorageException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
@@ -5085,9 +5106,10 @@ public Mono getRangeListAsync(
String prevsharesnapshot,
Integer timeout,
String range,
- String leaseId) {
+ String leaseId,
+ Boolean supportRename) {
return getRangeListWithResponseAsync(
- shareName, fileName, sharesnapshot, prevsharesnapshot, timeout, range, leaseId)
+ shareName, fileName, sharesnapshot, prevsharesnapshot, timeout, range, leaseId, supportRename)
.flatMap(res -> Mono.justOrEmpty(res.getValue()));
}
@@ -5105,6 +5127,11 @@ public Mono getRangeListAsync(
* Timeouts for File Service Operations.</a>.
* @param range Specifies the range of bytes over which to list ranges, inclusively.
* @param leaseId If specified, the operation only succeeds if the resource's lease is active and matches this ID.
+ * @param supportRename This header is allowed only when PrevShareSnapshot query parameter is set. Determines
+ * whether the changed ranges for a file that has been renamed or moved between the target snapshot (or the live
+ * file) and the previous snapshot should be listed. If the value is true, the valid changed ranges for the file
+ * will be returned. If the value is false, the operation will result in a failure with 409 (Conflict) response.
+ * The default value is false.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ShareStorageException thrown if the request is rejected by server.
@@ -5120,9 +5147,18 @@ public Mono getRangeListAsync(
Integer timeout,
String range,
String leaseId,
+ Boolean supportRename,
Context context) {
return getRangeListWithResponseAsync(
- shareName, fileName, sharesnapshot, prevsharesnapshot, timeout, range, leaseId, context)
+ shareName,
+ fileName,
+ sharesnapshot,
+ prevsharesnapshot,
+ timeout,
+ range,
+ leaseId,
+ supportRename,
+ context)
.flatMap(res -> Mono.justOrEmpty(res.getValue()));
}
@@ -5140,6 +5176,11 @@ public Mono getRangeListAsync(
* Timeouts for File Service Operations.</a>.
* @param range Specifies the range of bytes over which to list ranges, inclusively.
* @param leaseId If specified, the operation only succeeds if the resource's lease is active and matches this ID.
+ * @param supportRename This header is allowed only when PrevShareSnapshot query parameter is set. Determines
+ * whether the changed ranges for a file that has been renamed or moved between the target snapshot (or the live
+ * file) and the previous snapshot should be listed. If the value is true, the valid changed ranges for the file
+ * will be returned. If the value is false, the operation will result in a failure with 409 (Conflict) response.
+ * The default value is false.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ShareStorageException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
@@ -5153,7 +5194,8 @@ public Mono> getRangeListNoCustomHeadersWithRespons
String prevsharesnapshot,
Integer timeout,
String range,
- String leaseId) {
+ String leaseId,
+ Boolean supportRename) {
final String comp = "rangelist";
final String accept = "application/xml";
return FluxUtil.withContext(
@@ -5171,6 +5213,7 @@ public Mono> getRangeListNoCustomHeadersWithRespons
leaseId,
this.client.isAllowTrailingDot(),
this.client.getFileRequestIntent(),
+ supportRename,
accept,
context));
}
@@ -5189,6 +5232,11 @@ public Mono> getRangeListNoCustomHeadersWithRespons
* Timeouts for File Service Operations.</a>.
* @param range Specifies the range of bytes over which to list ranges, inclusively.
* @param leaseId If specified, the operation only succeeds if the resource's lease is active and matches this ID.
+ * @param supportRename This header is allowed only when PrevShareSnapshot query parameter is set. Determines
+ * whether the changed ranges for a file that has been renamed or moved between the target snapshot (or the live
+ * file) and the previous snapshot should be listed. If the value is true, the valid changed ranges for the file
+ * will be returned. If the value is false, the operation will result in a failure with 409 (Conflict) response.
+ * The default value is false.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ShareStorageException thrown if the request is rejected by server.
@@ -5204,6 +5252,7 @@ public Mono> getRangeListNoCustomHeadersWithRespons
Integer timeout,
String range,
String leaseId,
+ Boolean supportRename,
Context context) {
final String comp = "rangelist";
final String accept = "application/xml";
@@ -5220,6 +5269,7 @@ public Mono> getRangeListNoCustomHeadersWithRespons
leaseId,
this.client.isAllowTrailingDot(),
this.client.getFileRequestIntent(),
+ supportRename,
accept,
context);
}
diff --git a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/implementation/models/HandleItem.java b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/implementation/models/HandleItem.java
index 1f8e5d84bfd39..e541a9994a10a 100644
--- a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/implementation/models/HandleItem.java
+++ b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/implementation/models/HandleItem.java
@@ -218,7 +218,7 @@ public HandleItem setClientIp(String clientIp) {
*
* @return the clientName value.
*/
- private String getClientName() {
+ public String getClientName() {
return this.clientName;
}
@@ -228,7 +228,7 @@ private String getClientName() {
* @param clientName the clientName value to set.
* @return the HandleItem object itself.
*/
- private HandleItem setClientName(String clientName) {
+ public HandleItem setClientName(String clientName) {
this.clientName = clientName;
return this;
}
diff --git a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/implementation/util/ModelHelper.java b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/implementation/util/ModelHelper.java
index 4ee6ce77eaa1b..f30ffd5806696 100644
--- a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/implementation/util/ModelHelper.java
+++ b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/implementation/util/ModelHelper.java
@@ -218,8 +218,8 @@ public static HandleItem transformHandleItem(com.azure.storage.file.share.implem
.setParentId(handleItem.getParentId())
.setLastReconnectTime(handleItem.getLastReconnectTime())
.setOpenTime(handleItem.getOpenTime())
- .setAccessRights(handleItem.getAccessRightList());
- //.setClientName(handleItem.getClientName()); Re-enable with STG93
+ .setAccessRights(handleItem.getAccessRightList())
+ .setClientName(handleItem.getClientName());
}
public static List transformHandleItems(List handleItems) {
diff --git a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/models/HandleItem.java b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/models/HandleItem.java
index 872d8d51e335e..122738bec023c 100644
--- a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/models/HandleItem.java
+++ b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/models/HandleItem.java
@@ -294,7 +294,7 @@ public HandleItem setAccessRights(List accessRights
*
* @return the clientName value.
*/
- private String getClientName() {
+ public String getClientName() {
return this.clientName;
}
@@ -304,7 +304,7 @@ private String getClientName() {
* @param clientName the clientName value to set.
* @return the HandleItem object itself.
*/
- private HandleItem setClientName(String clientName) {
+ public HandleItem setClientName(String clientName) {
this.clientName = clientName;
return this;
}
diff --git a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/models/ShareErrorCode.java b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/models/ShareErrorCode.java
index 670d521d70b9e..b9e1ec117d219 100644
--- a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/models/ShareErrorCode.java
+++ b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/models/ShareErrorCode.java
@@ -190,6 +190,9 @@ public final class ShareErrorCode extends ExpandableStringEnum {
/** Static value ShareHasSnapshots for ShareErrorCode. */
public static final ShareErrorCode SHARE_HAS_SNAPSHOTS = fromString("ShareHasSnapshots");
+ /** Static value PreviousSnapshotNotFound for ShareErrorCode. */
+ public static final ShareErrorCode PREVIOUS_SNAPSHOT_NOT_FOUND = fromString("PreviousSnapshotNotFound");
+
/** Static value ContainerQuotaDowngradeNotAllowed for ShareErrorCode. */
public static final ShareErrorCode CONTAINER_QUOTA_DOWNGRADE_NOT_ALLOWED =
fromString("ContainerQuotaDowngradeNotAllowed");
diff --git a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/options/ShareFileListRangesDiffOptions.java b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/options/ShareFileListRangesDiffOptions.java
index 720b39deabcee..cec098d946c3e 100644
--- a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/options/ShareFileListRangesDiffOptions.java
+++ b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/options/ShareFileListRangesDiffOptions.java
@@ -17,6 +17,7 @@ public class ShareFileListRangesDiffOptions {
private ShareFileRange range;
private final String previousSnapshot;
private ShareRequestConditions requestConditions;
+ private Boolean supportRename;
/**
* @param previousSnapshot Specifies that the response will contain only ranges that were changed between target
@@ -66,4 +67,27 @@ public ShareFileListRangesDiffOptions setRequestConditions(ShareRequestCondition
this.requestConditions = requestConditions;
return this;
}
+
+ /**
+ * Gets the supportRename value.
+ *
+ * @return Whether the changed ranges for a file that has been renamed or moved between the target snapshot
+ * (or the live file) and the previous snapshot should be listed.
+ */
+ public Boolean isRenameIncluded() {
+ return supportRename;
+ }
+
+ /**
+ * Sets the value for whether the changed ranges for a file that has been renamed or moved should appear in the
+ * snapshot diff.
+ *
+ * @param renameIncluded Whether the changed ranges for a file that has been renamed or moved between the target
+ * snapshot (or the live file) and the previous snapshot should be listed.
+ * @return The updated options.
+ */
+ public ShareFileListRangesDiffOptions setRenameIncluded(Boolean renameIncluded) {
+ this.supportRename = renameIncluded;
+ return this;
+ }
}
diff --git a/sdk/storage/azure-storage-file-share/src/test/java/com/azure/storage/file/share/FileApiTests.java b/sdk/storage/azure-storage-file-share/src/test/java/com/azure/storage/file/share/FileApiTests.java
index 5d3ff2fcc7983..382cdf324f595 100644
--- a/sdk/storage/azure-storage-file-share/src/test/java/com/azure/storage/file/share/FileApiTests.java
+++ b/sdk/storage/azure-storage-file-share/src/test/java/com/azure/storage/file/share/FileApiTests.java
@@ -2261,6 +2261,55 @@ public void listRangesDiffLease() throws IOException {
FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}
+ @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2024-05-04")
+ @ParameterizedTest
+ @MethodSource("listRangesDiffWithRenameSupplier")
+ public void listRangesDiffWithRename(Boolean renameSupport) throws IOException {
+ //create a file
+ String fileName = generateShareName();
+ primaryFileClient.create(Constants.MB);
+
+ //upload some content
+ ByteArrayInputStream content = new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer(Constants.KB));
+ String uploadFile = FileShareTestHelper.createRandomFileWithLength(Constants.KB, testFolder, fileName);
+ primaryFileClient.uploadFromFile(uploadFile);
+ primaryFileClient.uploadRange(content, Constants.KB);
+
+ //take snapshot
+ ShareSnapshotInfo previousSnapshot = shareClient.createSnapshot();
+
+ //rename file
+ ShareFileClient destFile = primaryFileClient.rename(generatePathName());
+
+ //take another snapshot
+ shareClient.createSnapshot();
+
+ //setup options
+ ShareFileListRangesDiffOptions options = new ShareFileListRangesDiffOptions(previousSnapshot.getSnapshot());
+ options.setRenameIncluded(renameSupport);
+
+ //call
+ if (renameSupport == null || !renameSupport) {
+ ShareStorageException e = assertThrows(ShareStorageException.class,
+ () -> destFile.listRangesDiffWithResponse(options, null, null));
+ assertEquals(ShareErrorCode.PREVIOUS_SNAPSHOT_NOT_FOUND, e.getErrorCode());
+ } else {
+ Response response = destFile.listRangesDiffWithResponse(options, null, null);
+ assertEquals(200, response.getStatusCode());
+ assertEquals(0, response.getValue().getRanges().size());
+ }
+
+ FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
+ destFile.delete();
+ }
+
+ private static Stream listRangesDiffWithRenameSupplier() {
+ return Stream.of(
+ Arguments.of(true),
+ Arguments.of(false),
+ Arguments.of((Boolean) null));
+ }
+
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void listRangesDiffTrailingDot() throws IOException {
@@ -2798,7 +2847,6 @@ public void audienceFromString() {
assertTrue(aadFileClient.exists());
}
- /* Uncomment this test when Client Name is enabled with STG 93.
@PlaybackOnly
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2024-02-04")
@Test
@@ -2808,7 +2856,5 @@ public void listHandlesClientName() {
ShareFileClient fileClient = directoryClient.getFileClient("test.txt");
List list = fileClient.listHandles().stream().collect(Collectors.toList());
assertNotNull(list.get(0).getClientName());
-
}
- */
}
diff --git a/sdk/storage/azure-storage-file-share/src/test/java/com/azure/storage/file/share/FileAsyncApiTests.java b/sdk/storage/azure-storage-file-share/src/test/java/com/azure/storage/file/share/FileAsyncApiTests.java
index 7062c996cf2e3..b374c62f688bc 100644
--- a/sdk/storage/azure-storage-file-share/src/test/java/com/azure/storage/file/share/FileAsyncApiTests.java
+++ b/sdk/storage/azure-storage-file-share/src/test/java/com/azure/storage/file/share/FileAsyncApiTests.java
@@ -10,10 +10,12 @@
import com.azure.storage.common.StorageSharedKeyCredential;
import com.azure.storage.common.implementation.Constants;
import com.azure.storage.common.test.shared.extensions.LiveOnly;
+import com.azure.storage.common.test.shared.extensions.PlaybackOnly;
import com.azure.storage.common.test.shared.extensions.RequiredServiceVersion;
import com.azure.storage.file.share.models.ClearRange;
import com.azure.storage.file.share.models.CopyableFileSmbPropertiesList;
import com.azure.storage.file.share.models.FileRange;
+import com.azure.storage.file.share.models.HandleItem;
import com.azure.storage.file.share.models.NtfsFileAttributes;
import com.azure.storage.file.share.models.PermissionCopyModeType;
import com.azure.storage.file.share.models.ShareAudience;
@@ -30,6 +32,7 @@
import com.azure.storage.file.share.models.ShareStorageException;
import com.azure.storage.file.share.models.ShareTokenIntent;
import com.azure.storage.file.share.options.ShareFileCopyOptions;
+import com.azure.storage.file.share.options.ShareFileListRangesDiffOptions;
import com.azure.storage.file.share.sas.ShareFileSasPermission;
import com.azure.storage.file.share.sas.ShareServiceSasSignatureValues;
import org.junit.jupiter.api.BeforeEach;
@@ -1403,6 +1406,56 @@ public void listRangesDiff(List rangesToUpdate, List range
}).verifyComplete();
}
+ @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2024-05-04")
+ @ParameterizedTest
+ @MethodSource("listRangesDiffWithRenameSupplier")
+ public void listRangesDiffWithRename(Boolean renameSupport) throws IOException {
+ //create a file
+ String fileName = generateShareName();
+ //upload some content and take snapshot
+ ShareSnapshotInfo previousSnapshot = primaryFileAsyncClient.create(Constants.MB)
+ .then(primaryFileAsyncClient.uploadFromFile(FileShareTestHelper.createRandomFileWithLength(Constants.KB, testFolder, fileName)))
+ .then(primaryFileAsyncClient.uploadRange(Flux.just(FileShareTestHelper.getRandomByteBuffer(Constants.KB)), Constants.KB))
+ .then(primaryFileServiceAsyncClient.getShareAsyncClient(primaryFileAsyncClient.getShareName()).createSnapshot())
+ .block();
+
+ //rename file
+ ShareFileAsyncClient destFile = primaryFileAsyncClient.rename(generatePathName()).block();
+
+ //take another snapshot
+ primaryFileServiceAsyncClient.getShareAsyncClient(primaryFileAsyncClient.getShareName()).createSnapshot().block();
+
+ //setup options
+ ShareFileListRangesDiffOptions options = new ShareFileListRangesDiffOptions(previousSnapshot.getSnapshot());
+ options.setRenameIncluded(renameSupport);
+
+ //call
+ if (renameSupport == null || !renameSupport) {
+ StepVerifier.create(destFile.listRangesDiffWithResponse(options))
+ .verifyErrorSatisfies(r -> {
+ ShareStorageException e = assertInstanceOf(ShareStorageException.class, r);
+ assertEquals(ShareErrorCode.PREVIOUS_SNAPSHOT_NOT_FOUND, e.getErrorCode());
+ });
+ } else {
+ StepVerifier.create(destFile.listRangesDiffWithResponse(options))
+ .assertNext(r -> {
+ assertEquals(200, r.getStatusCode());
+ assertEquals(0, r.getValue().getRanges().size());
+ })
+ .verifyComplete();
+ }
+
+ FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
+ destFile.delete().block();
+ }
+
+ private static Stream listRangesDiffWithRenameSupplier() {
+ return Stream.of(
+ Arguments.of(true),
+ Arguments.of(false),
+ Arguments.of((Boolean) null));
+ }
+
@Test
public void listHandles() {
primaryFileAsyncClient.create(1024).block();
@@ -1535,7 +1588,6 @@ public void audienceFromString() {
.verifyComplete();
}
- /* Uncomment this test when Client Name is enabled with STG 93.
@PlaybackOnly
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2024-02-04")
@Test
@@ -1545,7 +1597,5 @@ public void listHandlesClientName() {
ShareFileAsyncClient fileClient = directoryClient.getFileClient("test.txt");
List list = fileClient.listHandles().collectList().block();
assertNotNull(list.get(0).getClientName());
-
}
- */
}
diff --git a/sdk/storage/azure-storage-file-share/swagger/README.md b/sdk/storage/azure-storage-file-share/swagger/README.md
index 9ecd5dde3d6e3..83f93756a3963 100644
--- a/sdk/storage/azure-storage-file-share/swagger/README.md
+++ b/sdk/storage/azure-storage-file-share/swagger/README.md
@@ -16,7 +16,7 @@ autorest
### Code generation settings
``` yaml
use: '@autorest/java@4.1.16'
-input-file: https://raw.githubusercontent.com/tasherif-msft/azure-rest-api-specs/6aa9b7675e9f306a077a60f3d9405e7fc5e809e1/specification/storage/data-plane/Microsoft.FileStorage/preview/2024-02-04/file.json
+input-file: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/3f9cca0301ffbb8856826d196c567d821ae190d7/specification/storage/data-plane/Microsoft.FileStorage/preview/2024-05-04/file.json
java: true
output-folder: ../
namespace: com.azure.storage.file.share
diff --git a/sdk/storage/azure-storage-queue/src/main/java/com/azure/storage/queue/QueueServiceVersion.java b/sdk/storage/azure-storage-queue/src/main/java/com/azure/storage/queue/QueueServiceVersion.java
index cc8cd201dfbef..af7b97f964b16 100644
--- a/sdk/storage/azure-storage-queue/src/main/java/com/azure/storage/queue/QueueServiceVersion.java
+++ b/sdk/storage/azure-storage-queue/src/main/java/com/azure/storage/queue/QueueServiceVersion.java
@@ -112,7 +112,12 @@ public enum QueueServiceVersion implements ServiceVersion {
/**
* Service version {@code 2024-02-04}.
*/
- V2024_02_04("2024-02-04");
+ V2024_02_04("2024-02-04"),
+
+ /**
+ * Service version {@code 2024-05-04}.
+ */
+ V2024_05_04("2024-05-04");
private final String version;
@@ -134,6 +139,6 @@ public String getVersion() {
* @return the latest {@link QueueServiceVersion}
*/
public static QueueServiceVersion getLatest() {
- return V2024_02_04;
+ return V2024_05_04;
}
}