Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Api Review Changes #38587

Merged
merged 9 commits into from
Feb 26, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion sdk/storage/azure-storage-blob/assets.json
Original file line number Diff line number Diff line change
Expand Up @@ -2,5 +2,5 @@
"AssetsRepo": "Azure/azure-sdk-assets",
"AssetsRepoPrefixPath": "java",
"TagPrefix": "java/storage/azure-storage-blob",
"Tag": "java/storage/azure-storage-blob_e8d223adb0"
"Tag": "java/storage/azure-storage-blob_7e4a0926e9"
}
Original file line number Diff line number Diff line change
Expand Up @@ -258,7 +258,7 @@ protected BlobAsyncClientBase(HttpPipeline pipeline, String url, BlobServiceVers
*
* @return the encryption scope used for encryption.
*/
public String getEncryptionScope() {
protected String getEncryptionScope() {
ibrahimrabab marked this conversation as resolved.
Show resolved Hide resolved
if (encryptionScope == null) {
return null;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
import com.azure.storage.blob.sas.BlobServiceSasSignatureValues;
import com.azure.storage.blob.specialized.AppendBlobAsyncClient;
import com.azure.storage.blob.specialized.BlobAsyncClientBase;
import com.azure.storage.blob.specialized.BlobBaseTestHelper;
import com.azure.storage.blob.specialized.BlockBlobAsyncClient;
import com.azure.storage.blob.specialized.PageBlobAsyncClient;
import com.azure.storage.blob.specialized.PageBlobClient;
Expand Down Expand Up @@ -375,28 +376,33 @@ public void getEncryptionScopeClient() {
// when: "AppendBlob"
AppendBlobAsyncClient newCpknAppendBlob = cpknAppendBlob.getEncryptionScopeAsyncClient(newEncryptionScope);
assertInstanceOf(AppendBlobAsyncClient.class, newCpknAppendBlob);
assertNotEquals(cpknAppendBlob.getEncryptionScope(), newCpknAppendBlob.getEncryptionScope());
assertNotEquals(BlobBaseTestHelper.getEncryptionScope(cpknAppendBlob),
BlobBaseTestHelper.getEncryptionScope(newCpknAppendBlob));

// when: "BlockBlob"
BlockBlobAsyncClient newCpknBlockBlob = cpknBlockBlob.getEncryptionScopeAsyncClient(newEncryptionScope);
assertInstanceOf(BlockBlobAsyncClient.class, newCpknBlockBlob);
assertNotEquals(cpknBlockBlob.getEncryptionScope(), newCpknBlockBlob.getEncryptionScope());
assertNotEquals(BlobBaseTestHelper.getEncryptionScope(cpknBlockBlob),
BlobBaseTestHelper.getEncryptionScope(newCpknBlockBlob));

// when: "PageBlob"
PageBlobAsyncClient newCpknPageBlob = cpknPageBlob.getEncryptionScopeAsyncClient(newEncryptionScope);
assertInstanceOf(PageBlobAsyncClient.class, newCpknPageBlob);
assertNotEquals(cpknPageBlob.getEncryptionScope(), newCpknPageBlob.getEncryptionScope());
assertNotEquals(BlobBaseTestHelper.getEncryptionScope(cpknPageBlob),
BlobBaseTestHelper.getEncryptionScope(newCpknPageBlob));

// when: "BlobClient"
BlobAsyncClient cpkBlobClient = cpknContainer.getBlobAsyncClient(generateBlobName()); // Inherits container's CPK
BlobAsyncClient newCpknBlobClient = cpkBlobClient.getEncryptionScopeAsyncClient(newEncryptionScope);
assertInstanceOf(BlobAsyncClient.class, newCpknBlobClient);
assertNotEquals(cpkBlobClient.getEncryptionScope(), newCpknBlobClient.getEncryptionScope());
assertNotEquals(BlobBaseTestHelper.getEncryptionScope(cpkBlobClient),
BlobBaseTestHelper.getEncryptionScope(newCpknBlobClient));

// when: "BlobClientBase"
BlobAsyncClientBase newCpknBlobClientBase = ((BlobAsyncClientBase) cpkBlobClient)
.getEncryptionScopeAsyncClient(newEncryptionScope);
assertInstanceOf(BlobAsyncClientBase.class, newCpknBlobClientBase);
assertNotEquals(cpkBlobClient.getEncryptionScope(), newCpknBlobClientBase.getEncryptionScope());
assertNotEquals(BlobBaseTestHelper.getEncryptionScope(cpkBlobClient),
BlobBaseTestHelper.getEncryptionScope(newCpknBlobClientBase));
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

package com.azure.storage.blob.specialized;

public class BlobBaseTestHelper {
    /**
     * Exposes the package-restricted {@code getEncryptionScope} accessor of {@link BlobAsyncClientBase}
     * so that tests living outside the {@code specialized} package can still read it.
     *
     * @param client the blob client whose encryption scope should be read
     * @return the encryption scope configured on the client (may be {@code null})
     */
    public static String getEncryptionScope(BlobAsyncClientBase client) {
        return client.getEncryptionScope();
    }
}
2 changes: 1 addition & 1 deletion sdk/storage/azure-storage-file-datalake/assets.json
Original file line number Diff line number Diff line change
Expand Up @@ -2,5 +2,5 @@
"AssetsRepo": "Azure/azure-sdk-assets",
"AssetsRepoPrefixPath": "java",
"TagPrefix": "java/storage/azure-storage-file-datalake",
"Tag": "java/storage/azure-storage-file-datalake_3bdc50f65a"
"Tag": "java/storage/azure-storage-file-datalake_aa5a6082f1"
}
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@
import com.azure.storage.file.datalake.implementation.models.ModifiedAccessConditions;
import com.azure.storage.file.datalake.implementation.models.PathExpiryOptions;
import com.azure.storage.file.datalake.implementation.models.PathResourceType;
import com.azure.storage.file.datalake.implementation.util.BuilderHelper;
import com.azure.storage.file.datalake.implementation.util.DataLakeImplUtils;
import com.azure.storage.file.datalake.implementation.util.ModelHelper;
import com.azure.storage.file.datalake.models.CustomerProvidedKey;
Expand All @@ -55,6 +56,7 @@
import com.azure.storage.file.datalake.options.FileParallelUploadOptions;
import com.azure.storage.file.datalake.options.FileQueryOptions;
import com.azure.storage.file.datalake.options.FileScheduleDeletionOptions;
import com.azure.storage.file.datalake.options.ReadToFileOptions;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.util.function.Tuples;
Expand Down Expand Up @@ -1465,6 +1467,32 @@ public Mono<PathProperties> readToFile(String filePath) {
return readToFile(filePath, false);
}

/**
 * Downloads this file's full contents into a local file at the configured path.
 *
 * <p>The destination file is created by this call and must not already exist; if it does, a
 * {@link FileAlreadyExistsException} is thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <!-- src_embed com.azure.storage.file.datalake.DataLakeFileAsyncClient.readToFile#ReadToFileOptions -->
 * <pre>
 * client.readToFile&#40;new ReadToFileOptions&#40;&#41;.setFilePath&#40;file&#41;&#41;
 *     .subscribe&#40;response -&gt; System.out.println&#40;&quot;Completed download to file&quot;&#41;&#41;;
 * </pre>
 * <!-- end com.azure.storage.file.datalake.DataLakeFileAsyncClient.readToFile#ReadToFileOptions -->
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param options {@link ReadToFileOptions}
 * @return A reactive response containing the file properties and metadata.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<PathProperties> readToFile(ReadToFileOptions options) {
    // Delegate to the WithResponse overload and unwrap the Response envelope.
    return readToFileWithResponse(options).flatMap(response -> FluxUtil.toMono(response));
}

/**
* Reads the entire file into a file specified by the path.
*
Expand Down Expand Up @@ -1551,6 +1579,54 @@ public Mono<Response<PathProperties>> readToFileWithResponse(String filePath, Fi
.map(response -> new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response)));
}

/**
 * Reads the entire file into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions} </p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <!-- src_embed com.azure.storage.file.datalake.DataLakeFileAsyncClient.readToFileWithResponse#ReadToFileOptions -->
 * <pre>
 * ReadToFileOptions options = new ReadToFileOptions&#40;&#41;;
 * options.setFilePath&#40;file&#41;;
 * options.setRange&#40;new FileRange&#40;1024, 2048L&#41;&#41;;
 * options.setDownloadRetryOptions&#40;new DownloadRetryOptions&#40;&#41;.setMaxRetryRequests&#40;5&#41;&#41;;
 * options.setOpenOptions&#40;new HashSet&lt;&gt;&#40;Arrays.asList&#40;StandardOpenOption.CREATE_NEW,
 *     StandardOpenOption.WRITE, StandardOpenOption.READ&#41;&#41;&#41;; &#47;&#47;Default options
 * options.setParallelTransferOptions&#40;new ParallelTransferOptions&#40;&#41;.setBlockSizeLong&#40;4L * Constants.MB&#41;&#41;;
 * options.setDataLakeRequestConditions&#40;null&#41;;
 * options.setRangeGetContentMd5&#40;false&#41;;
 *
 * client.readToFileWithResponse&#40;options&#41;
 *     .subscribe&#40;response -&gt; System.out.println&#40;&quot;Completed download to file&quot;&#41;&#41;;
 * </pre>
 * <!-- end com.azure.storage.file.datalake.DataLakeFileAsyncClient.readToFileWithResponse#ReadToFileOptions -->
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param options {@link ReadToFileOptions}
 * @return A reactive response containing the file properties and metadata.
 * @throws NullPointerException If {@code options} is null.
 * @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 100MB.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<PathProperties>> readToFileWithResponse(ReadToFileOptions options) {
    if (options == null) {
        // Fail fast with a clear message: without this guard, the UPN header supplier below tolerates
        // null but options.getFilePath() would throw an anonymous NPE a line later.
        throw new NullPointerException("'options' cannot be null.");
    }
    // Attach the x-ms-upn header preference to the reactor context for the downstream call.
    Context context = BuilderHelper.addUpnHeader(options::isUserPrincipalName, null);

    // Translate the Data Lake options onto the underlying blob download, then map blob
    // exceptions/properties back into their Data Lake equivalents.
    return blockBlobAsyncClient.downloadToFileWithResponse(new BlobDownloadToFileOptions(options.getFilePath())
        .setRange(Transforms.toBlobRange(options.getRange())).setParallelTransferOptions(options.getParallelTransferOptions())
        .setDownloadRetryOptions(Transforms.toBlobDownloadRetryOptions(options.getDownloadRetryOptions()))
        .setRequestConditions(Transforms.toBlobRequestConditions(options.getDataLakeRequestConditions()))
        .setRetrieveContentRangeMd5(options.isRangeGetContentMd5()).setOpenOptions(options.getOpenOptions()))
        .contextWrite(FluxUtil.toReactorContext(context))
        .onErrorMap(DataLakeImplUtils::transformBlobStorageException)
        .map(response -> new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response)));
}

/**
* Moves the file to another location within the file system.
* For more information see the
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1137,7 +1137,7 @@ public DataLakeFileOpenInputStreamResult openInputStream(DataLakeFileInputStream
* @throws DataLakeStorageException If a storage service error occurred.
*/
public DataLakeFileOpenInputStreamResult openInputStream(DataLakeFileInputStreamOptions options, Context context) {
context = BuilderHelper.addUpnHeader(() -> (options == null) ? null : options.isUpn(), context);
context = BuilderHelper.addUpnHeader(() -> (options == null) ? null : options.isUserPrincipalName(), context);

BlobInputStreamOptions convertedOptions = Transforms.toBlobInputStreamOptions(options);
BlobInputStream inputStream = blockBlobClient.openInputStream(convertedOptions, context);
Expand Down Expand Up @@ -1240,7 +1240,7 @@ public PathProperties readToFile(String filePath) {
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(ReadToFileOptions options) {
    // Synchronous convenience overload: no timeout, ambient context, envelope discarded.
    Response<PathProperties> response = readToFileWithResponse(options, null, Context.NONE);
    return response.getValue();
}

/**
Expand Down Expand Up @@ -1282,44 +1282,6 @@ public PathProperties readToFile(String filePath, boolean overwrite) {
.getValue();
}

/**
 * Downloads this file's full contents into a local file at the configured path.
 *
 * <p>When {@code overwrite} is false, the destination file is created by this call and must not
 * already exist; otherwise a {@link FileAlreadyExistsException} is thrown.</p>
 *
 * <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile#ReadToFileOptions-boolean -->
 * <pre>
 * boolean overwrite1 = false; &#47;&#47; Default value
 * client.readToFile&#40;new ReadToFileOptions&#40;&#41;.setFilePath&#40;file&#41;, overwrite1&#41;;
 * System.out.println&#40;&quot;Completed download to file&quot;&#41;;
 * </pre>
 * <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile#ReadToFileOptions-boolean -->
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param options {@link ReadToFileOptions}
 * @param overwrite Whether to overwrite the file, should the file exist.
 * @return The file properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(ReadToFileOptions options, boolean overwrite) {
    if (overwrite) {
        // Replace the default CREATE_NEW behavior: open an existing destination for WRITE access,
        // which truncates its length to 0 before the download is written.
        Set<OpenOption> openOptions = new HashSet<>();
        openOptions.add(StandardOpenOption.CREATE);
        openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        openOptions.add(StandardOpenOption.READ);
        openOptions.add(StandardOpenOption.WRITE);
        options.setOpenOptions(openOptions);
    }
    return readToFileWithResponse(options, null, Context.NONE).getValue();
}

/**
* Reads the entire file into a file specified by the path.
*
Expand Down Expand Up @@ -1387,6 +1349,7 @@ public Response<PathProperties> readToFileWithResponse(String filePath, FileRang
* <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse#ReadToFileOptions-Duration-Context -->
* <pre>
* ReadToFileOptions options = new ReadToFileOptions&#40;&#41;;
* options.setFilePath&#40;file&#41;;
* options.setRange&#40;new FileRange&#40;1024, 2048L&#41;&#41;;
* options.setDownloadRetryOptions&#40;new DownloadRetryOptions&#40;&#41;.setMaxRetryRequests&#40;5&#41;&#41;;
* options.setOpenOptions&#40;new HashSet&lt;&gt;&#40;Arrays.asList&#40;StandardOpenOption.CREATE_NEW,
Expand All @@ -1408,7 +1371,7 @@ public Response<PathProperties> readToFileWithResponse(String filePath, FileRang
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathProperties> readToFileWithResponse(ReadToFileOptions options, Duration timeout, Context context) {
context = BuilderHelper.addUpnHeader(() -> (options == null) ? null : options.isUpn(), context);
context = BuilderHelper.addUpnHeader(() -> (options == null) ? null : options.isUserPrincipalName(), context);
Context finalContext = context;

return DataLakeImplUtils.returnOrConvertException(() -> {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@
import com.azure.storage.file.datalake.implementation.models.PathsSetAccessControlRecursiveHeaders;
import com.azure.storage.file.datalake.implementation.models.SetAccessControlRecursiveResponse;
import com.azure.storage.file.datalake.implementation.models.SourceModifiedAccessConditions;
import com.azure.storage.file.datalake.implementation.util.BuilderHelper;
import com.azure.storage.file.datalake.implementation.util.DataLakeImplUtils;
import com.azure.storage.file.datalake.implementation.util.DataLakeSasImplUtil;
import com.azure.storage.file.datalake.implementation.util.ModelHelper;
Expand All @@ -62,6 +63,7 @@
import com.azure.storage.file.datalake.models.UserDelegationKey;
import com.azure.storage.file.datalake.options.DataLakePathCreateOptions;
import com.azure.storage.file.datalake.options.DataLakePathDeleteOptions;
import com.azure.storage.file.datalake.options.PathGetPropertiesOptions;
import com.azure.storage.file.datalake.options.PathRemoveAccessControlRecursiveOptions;
import com.azure.storage.file.datalake.options.PathSetAccessControlRecursiveOptions;
import com.azure.storage.file.datalake.options.PathUpdateAccessControlRecursiveOptions;
Expand Down Expand Up @@ -874,6 +876,31 @@ public Mono<PathProperties> getProperties() {
return getPropertiesWithResponse(null).flatMap(FluxUtil::toMono);
}

/**
 * Fetches this resource's properties and metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <!-- src_embed com.azure.storage.file.datalake.DataLakePathAsyncClient.getProperties#PathGetPropertiesOptions -->
 * <pre>
 * PathGetPropertiesOptions options = new PathGetPropertiesOptions&#40;&#41;.setUserPrincipalName&#40;true&#41;;
 *
 * client.getProperties&#40;options&#41;.subscribe&#40;response -&gt;
 *     System.out.printf&#40;&quot;Creation Time: %s, Size: %d%n&quot;, response.getCreationTime&#40;&#41;, response.getFileSize&#40;&#41;&#41;&#41;;
 * </pre>
 * <!-- end com.azure.storage.file.datalake.DataLakePathAsyncClient.getProperties#PathGetPropertiesOptions -->
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob-properties">Azure Docs</a></p>
 *
 * @param options {@link PathGetPropertiesOptions}
 * @return A reactive response containing the resource's properties and metadata.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<PathProperties> getProperties(PathGetPropertiesOptions options) {
    // Delegate to the WithResponse variant and unwrap the Response envelope.
    return getPropertiesUsingOptionsWithResponse(options).flatMap(response -> FluxUtil.toMono(response));
}

/**
* Returns the resource's metadata and properties.
*
Expand Down Expand Up @@ -902,6 +929,25 @@ public Mono<Response<PathProperties>> getPropertiesWithResponse(DataLakeRequestC
.map(response -> new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response)));
}

/**
 * Returns the resource's metadata and properties.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob-properties">Azure Docs</a></p>
 *
 * @param options {@link PathGetPropertiesOptions}. May be null, in which case defaults are used
 * (no request conditions, no user principal name header).
 * @return A reactive response containing the resource's properties and metadata.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<Response<PathProperties>> getPropertiesUsingOptionsWithResponse(PathGetPropertiesOptions options) {
    // Attach the x-ms-upn header preference to the reactor context for the downstream call.
    Context context = BuilderHelper.addUpnHeader(() -> (options == null) ? null : options.isUserPrincipalName(), null);

    // Guard the second dereference as well: the supplier above already tolerates a null options,
    // so getRequestConditions() must not be the one place that throws an NPE.
    return blockBlobAsyncClient
        .getPropertiesWithResponse(
            Transforms.toBlobRequestConditions((options == null) ? null : options.getRequestConditions()))
        .contextWrite(FluxUtil.toReactorContext(context))
        .onErrorMap(DataLakeImplUtils::transformBlobStorageException)
        .map(response -> new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response)));
}

/**
* Determines if the path this client represents exists in the cloud.
* <p>Note that this method does not guarantee that the path type (file/directory) matches expectations.</p>
Expand Down
Loading
Loading