From 50bb6eb5efe5ed7372d9c5d2908740209ae16c54 Mon Sep 17 00:00:00 2001 From: Christian Freitas Date: Thu, 29 Jun 2023 11:28:27 -0400 Subject: [PATCH 1/7] WX-1114 Inital inclusion of Azure NIO 'fork' --- .../com/azure/storage/blob/nio/CHANGELOG.md | 134 ++ .../com/azure/storage/blob/nio/DesignDoc.md | 235 +++ .../java/com/azure/storage/blob/nio/README.md | 333 ++++ .../com/azure/storage/blob/nio/assets.json | 6 + .../blob/nio/AzureBasicFileAttributeView.java | 69 + .../blob/nio/AzureBasicFileAttributes.java | 165 ++ .../blob/nio/AzureBlobFileAttributeView.java | 157 ++ .../blob/nio/AzureBlobFileAttributes.java | 369 +++++ .../blob/nio/AzureDirectoryStream.java | 189 +++ .../storage/blob/nio/AzureFileStore.java | 194 +++ .../storage/blob/nio/AzureFileSystem.java | 492 ++++++ .../blob/nio/AzureFileSystemProvider.java | 1182 ++++++++++++++ .../com/azure/storage/blob/nio/AzurePath.java | 836 ++++++++++ .../azure/storage/blob/nio/AzureResource.java | 284 ++++ .../blob/nio/AzureSeekableByteChannel.java | 245 +++ .../storage/blob/nio/DirectoryStatus.java | 23 + .../storage/blob/nio/LoggingUtility.java | 16 + .../storage/blob/nio/NioBlobInputStream.java | 211 +++ .../storage/blob/nio/NioBlobOutputStream.java | 99 ++ .../azure/storage/blob/nio/package-info.java | 7 + .../java.nio.file.spi.FileSystemProvider | 1 + .../azure-storage-blob-nio.properties | 2 + .../azure/storage/blob/nio/ReadmeSamples.java | 129 ++ .../storage/blob/nio/AttributeViewTests.java | 290 ++++ .../blob/nio/AzureDirectoryStreamTests.java | 215 +++ .../storage/blob/nio/AzureFileStoreTests.java | 96 ++ .../nio/AzureFileSystemProviderTests.java | 1437 +++++++++++++++++ .../blob/nio/AzureFileSystemTests.java | 216 +++ .../storage/blob/nio/AzurePathTests.java | 285 ++++ .../storage/blob/nio/AzureResourceTests.java | 291 ++++ .../nio/AzureSeekableByteChannelTests.java | 412 +++++ .../storage/blob/nio/BlobNioTestBase.java | 416 +++++ .../storage/blob/nio/CompositeTests.java | 198 +++ 
.../blob/nio/NioBlobInputStreamTests.java | 243 +++ .../blob/nio/NioBlobOutputStreamTests.java | 219 +++ .../blob/nio/NioClientBuilderTests.java | 108 ++ project/Dependencies.scala | 26 +- 37 files changed, 9825 insertions(+), 5 deletions(-) create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/CHANGELOG.md create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/DesignDoc.md create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/README.md create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/assets.json create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureBasicFileAttributeView.java create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureBasicFileAttributes.java create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureBlobFileAttributeView.java create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureBlobFileAttributes.java create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureDirectoryStream.java create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureFileStore.java create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureFileSystem.java create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureFileSystemProvider.java create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzurePath.java create mode 100644 
filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureResource.java create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureSeekableByteChannel.java create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/DirectoryStatus.java create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/LoggingUtility.java create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/NioBlobInputStream.java create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/NioBlobOutputStream.java create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/package-info.java create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/resources/META-INF/services/java.nio.file.spi.FileSystemProvider create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/resources/azure-storage-blob-nio.properties create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/samples/java/com/azure/storage/blob/nio/ReadmeSamples.java create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AttributeViewTests.java create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureDirectoryStreamTests.java create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureFileStoreTests.java create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureFileSystemProviderTests.java create mode 100644 
filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureFileSystemTests.java create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzurePathTests.java create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureResourceTests.java create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureSeekableByteChannelTests.java create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/BlobNioTestBase.java create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/CompositeTests.java create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/NioBlobInputStreamTests.java create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/NioBlobOutputStreamTests.java create mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/NioClientBuilderTests.java diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/CHANGELOG.md b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/CHANGELOG.md new file mode 100644 index 00000000000..af31b4e0c74 --- /dev/null +++ b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/CHANGELOG.md @@ -0,0 +1,134 @@ +# Release History + +## 12.0.0-beta.20 (Unreleased) + +### Features Added +- Added support for 2021-12-02 service version. + +### Breaking Changes + +### Bugs Fixed + +### Other Changes +- Migrate test recordings to assets repo. + +## 12.0.0-beta.19 (2022-05-06) + +### Features Added +- Added support for 2021-06-08 service version. 
+ +## 12.0.0-beta.18 (2022-04-07) + +### Other Changes +#### Dependency Updates +- Updated blob dependency to 12.16.0 + +## 12.0.0-beta.17 (2022-03-09) + +### Features Added +- Enabled support for Files.exists() +- Enabled support for Files.walkFileTree() + +### Breaking Changes +- `AzureFileSystemProvider.readAttributes()` no longer throws an IOException for virtual directories and instead returns a set of attributes that are all empty except for an `isVirtual` property set to true. + +### Other Changes +- Enabling support for Files.exists() to support virtual directories required supporting virtual directories in reading file attributes. This required introducing a perf hit in the way of an extra getProps request + +#### Dependency Updates + +- Updated blob dependency to 12.15.0 + +## 12.0.0-beta.16 (2022-02-11) + +### Other Changes + +#### Dependency Updates + +- Upgraded `azure-storage-blob` from `12.15.0-beta.3` to version `12.14.4`. + +## 12.0.0-beta.15 (2022-02-09) + +### Features Added +- Added support for 2021-04-10 service version. +- Added `AzurePath.fromBlobUrl` to help convert from a blob url to an AzurePath +- Added a configuration option `AZURE_STORAGE_SKIP_INITIAL_CONTAINER_CHECK` to skip the initial container check in cases where the authentication method used will not have necessary permissions. + +### Bugs Fixed +- Fixed a bug that would prevent deleting an empty directory in the case where one directory name was a prefix of the other. + + +## 12.0.0-beta.14 (2022-01-14) + +### Other Changes + +#### Dependency Updates + +- Upgraded `azure-core` from `1.23.0` to version `1.24.1`. +- Upgraded `azure-core-http-netty` from `1.11.3` to version `1.11.6`. +- Upgraded `azure-storage-blob` from `12.15.0-beta.2` to version `12.14.3`. + +## 12.0.0-beta.13 (2021-12-07) + +### Features Added +- Added support for 2021-02-12 service version. 
+ +## 12.0.0-beta.12 (2021-11-10) + +### Other Changes + +#### Dependency Updates + +- Upgraded `azure-core` from `1.21.0` to version `1.22.0`. +- Upgraded `azure-core-http-netty` from `1.11.1` to version `1.11.2`. +- Upgraded `azure-storage-blob` from `12.15.0-beta.1` to version `12.14.2`. + +## 12.0.0-beta.11 (2021-11-05) + +### Features Added +- Added support for the 2020-12-06 service version. + +### Bugs Fixed +- Fixes an off-by-one error where read() returns 0 bytes read instead of -1 (EOF) when reading at channel position == size. +- Fixes a bug where read() (and write()) do not respect initial position (and limit) of provided ByteBuffer when backed by an array + +## 12.0.0-beta.10 (2021-10-12) + +### Other Changes +#### Dependency Updates +- Updated `azure-storage-blob` to version `12.14.1` + +## 12.0.0-beta.9 (2021-09-15) + +### Other Changes +- Updated `azure-storage-blob` to version `12.14.0` + +## 12.0.0-beta.8 (2021-07-28) +- Added support for the 2020-10-02 service version. + +## 12.0.0-beta.7 (2021-06-09) +### Dependency Updates +- Updated `azure-storage-blob` to version `12.12.0` + +## 12.0.0-beta.6 (2021-04-29) +- Updated `azure-storage-blob` to version `12.11.0` + +## 12.0.0-beta.5 (2021-04-16) +- Fixed a bug where a file would be determined to be a directory if another file with the same prefix exists + +## 12.0.0-beta.4 (2021-03-29) +- Made AzurePath.toBlobClient public +- Added support for Azurite +- Changed FileSystem configuration to accept an endpoint and credential types instead of a string for the account name, key, and token + +## 12.0.0-beta.3 (2021-02-10) +- Added support for FileSystemProvider.checkAccess method +- Added support for file key on AzureBasicFileAttributes and AzureBlobFileAttributes +- Added support for SeekableByteChannel +- When an operation is performed on a closed FileSystem, a ClosedFileSystemException is thrown instead of an IOException +- Adjusted the required flags for opening an OutputStream + +## 12.0.0-beta.2 
(2020-08-13) +- Added checks to ensure file system has not been closed before operating on data + +## 12.0.0-beta.1 (2020-07-17) +- Initial Release. Please see the README for more information. diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/DesignDoc.md b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/DesignDoc.md new file mode 100644 index 00000000000..583d89c1280 --- /dev/null +++ b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/DesignDoc.md @@ -0,0 +1,235 @@ +# Azure Storage NIO Design Doc + +# Background + +Please refer to the [Project Overview](https://microsoft-my.sharepoint.com/:w:/p/frley/EQfMXjgWA4NPrAE9IIt7PUsBC-sahzFdMkc6im0Y4R4cww) for high-level background on this project. + +## NIO + +The [nio package](https://docs.oracle.com/javase/7/docs/api/java/nio/file/package-summary.html) is reasonably large and has several subpackages. The docs are quite thorough in specifying expected behavior for implementing the interfaces and extending the abstract types. + +Oracle has written a [tutorial](https://docs.oracle.com/javase/tutorial/essential/io/fileio.html) on this package that can be helpful for getting started and understanding the fundamentals of how customers may use the FileSystem APIs. + +## Providers + +Java frequently works with a Service Provider Interface (SPI) architecture. This architecture is largely built on the [ServiceLoader](https://docs.oracle.com/javase/8/docs/api/java/util/ServiceLoader.html) type. In short, the JDK will define a static factory type, in this case [FileSystems](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystems.html), that is used to instantiate providers, or implementations of the given service. When a client issues a request to the factory for a new instance, the ServiceLoader is invoked to search for installed implementations of this type.
The requirements for installation are somewhat specific to the service, but in this case the type must be on the classpath and the package must have a resource file pointing to the implementation type. Once the ServiceLoader loads all available instances, it will query each to see if it fits the criteria that satisfies the client's request. In the case of FileSystems, it will look for a FileSystemProvider that uses a scheme which matches that of the passed URI. Upon finding the appropriate implementation, the service API is interacted with as normal. + +# Entry, Configuration, and Authentication + +## Entry + +The JVM relies on the [FileSystems](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystems.html) API to dynamically load FileSystems. Assuming our package is [properly configured](https://docs.oracle.com/javase/8/docs/api/java/util/ServiceLoader.html) and loaded on the classpath (probably via a Maven dependency), a customer need only call [newFileSystem](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystems.html#newFileSystem(java.net.URI,%20java.util.Map)) to create a new FileSystem backed by Azure Blob Storage or [getFileSystem](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystems.html#getFileSystem(java.net.URI)) to retrieve an existing instance. + +A FileSystem is an abstract concept that may be distributed across one or more accounts. In the simple case, a FileSystem corresponds to an account and will be uniquely identified by the account name. E.g. a FileSystem backed by Azure Storage account "xclientdev" will be identified by the URI "azb://?account=xclientdev". Using the account as the analog for a FileSystem allows containers to be used as different FileStores (the equivalent of different drives on Windows). + +If data required by the FileSystem is spread across multiple accounts, the FileSystem will be uniquely identified by a UUID. In this case, the URI must be of the format "azb://?fileSystemId=\<UUID\>". 
The difference in query parameter will signal to the FileSystem that its storage is distributed across accounts. The account name and fileSystemId will be used to index the open FileSystems in the same way, so these values cannot be shared between two different FileSystems. The difference in query parameter is only a hint to the FileSystem. (See "Configuration and Authentication" below for further information on how this affects configuration). + +The scheme used for Azure Storage's implementation will be "azb". We specify 'b' as it is more flexible. This will leave room for later implementations to be built on top of Datalake ("azd") which will enable scenarios like working with [POSIX permissions](https://docs.oracle.com/javase/tutorial/essential/io/fileAttr.html#posix). It could also allow for loading a provider backed by Azure Share Files ("azs") for a fuller set of FileSystem features. + +A best effort attempt to make a request to the storage account will be made upon initialization by making a call to [getContainerProperties](https://docs.microsoft.com/rest/api/storageservices/get-container-properties) for each container specified (See "Configuration and Authentication" below). Failure to complete this connection on any container will result in an IOException and failure to load the FileSystem. Because this is a best effort check, it merely validates the existence of and minimal authorization to the FileSystem. It does not guarantee that there are sufficient permissions for all FileSystem operations. + +Once a FileSystem instance has been loaded and returned, a customer may perform their normal FileSystem operations backed by Azure Blob Storage. 
+ +## Configuration and Authentication + +A FileSystem will be configured and authenticated via the options map available on [newFile](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystems.html#newFileSystem(java.net.URI,%20java.util.Map))[S](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystems.html#newFileSystem(java.net.URI,%20java.util.Map))[ystem](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystems.html#newFileSystem(java.net.URI,%20java.util.Map)). It is left to the customer how to build this map (e.g. querying another source, reading a file, etc.), but if only one account is used to back the FileSystem, it must specify one of the following keys with an appropriate value for authentication: + +- AzureStorageAccountKey: String +- AzureStorageSasToken: String + +The map is queried in the above order, and the first one found is the authentication method used. If a Sas token is used, the customer must take care that it has appropriate permissions to perform the actions demanded of the FileSystem in a given workflow, including the initial connection check specified above. Furthermore, it must have an expiry time set after the client is expected to finish using the FileSystem. No token refresh is currently offered by the FileSystem implementation, though it is possible one may be offered in the future through some means of specifying a refresh period and location to read the new token at the correct time in the options. If the FileSystem is backed by multiple accounts, a SasToken must be attached to each container as specified below. + +A client must also specify the FileStores that they would like to configure. FileStores will correspond to containers, and the listed containers will be created if they do not already exist. 
Existing data will be preserved and if it is in one of the listed containers may be accessed via the FileSystem APIs, though care should be taken to ensure that the hierarchy is structured in a way intelligible to this implementation or behavior will be undefined (See "Directories" below). Any containers otherwise existing in the account will be ignored. The list of specified containers will be the return value for the name property on each value returned from [getFileStores](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystem.html#getFileStores()). The result of [getRootDirectories](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystem.html#getRootDirectories()) will be "\<containerName\>:/". This implies that all paths in this FileSystem will be prefixed with "\<containerName\>:/", or, more completely, a URI to a file in a FileSystem backed by Azure Blob Storage will always have the prefix "azb://\<containerName\>:/". The colon indicates a FileStore and is therefore disallowed in path elements other than the root directory. + +This entry must use the key "AzureStorageFileStores" and the value is an Iterable\<String\>. The format of each entry depends on the URI used to create the FileSystem. If the "account" parameter was used, the strings are simply container names. The same credentials will be applied to each container. If the "fileSystemId" parameter was used, the auth parameters will be ignored, and each container name must be fully qualified with the host and include a sas token that can access the container. E.g. "account.blob.core.windows.net/c1?\<sasToken\>". In either case, the first container listed will be considered the default and hence its root directory will be the default directory for the FileSystem. + +The following options allow for configuring the underlying blob client. 
If they are not specified, defaults from the blob sdk will be used: + +- AzureStorageHttpLogDetailLevel: com.azure.core.http.policy.HttpLogLevelDetail +- AzureStorageMaxTries: Integer +- AzureStorageTryTimeout: Integer +- AzureStorageRetryDelayInMs: Long +- AzureStorageMaxRetryDelayInMs: Long +- AzureStorageRetryPolicyType: com.azure.storage.common.policy.RetryPolicyType +- AzureStorageSecondaryHost: String +- AzureStorageUploadBlockSize: Long +- AzureStorageDownloadResumeRetries: Integer +- AzureStorageUseHttps: Boolean + +Using this map for configuration will allow for future extensibility. See the "Open Questions/Future Development" section below for more details. + +# Technical Details + +## Concurrent Use of Account and Containers by Other Applications + +Taken from the java.nio [package overview](https://docs.oracle.com/javase/7/docs/api/java/nio/file/package-summary.html): + +The view of the files and file system provided by classes in this package are guaranteed to be consistent with other views provided by other instances in the same Java virtual machine. The view may or may not, however, be consistent with the view of the file system as seen by other concurrently running programs due to caching performed by the underlying operating system and delays induced by network-filesystem protocols. This is true regardless of the language in which these other programs are written, and whether they are running on the same machine or on some other machine. The exact nature of any such inconsistencies are system-dependent and are therefore unspecified. + +Likewise for the AzureFileSystem, the view of the FileSystem from within an instance of the JVM will be consistent, but the AzureFileSystem makes no guarantees on behavior or state should other processes operate on the same data. The AzureFileSystem will assume that it has exclusive access to the resources stored in Azure Blob Storage and will behave without regard for potential interfering applications. 
+ +Moreover, even from within a given application, it should be remembered that using a remote FileSystem introduces higher latency. Because of this, particular care must be taken when managing concurrency. Race conditions are more likely to manifest, network failures occur more frequently than disk failures, and other such distributed application scenarios must be considered when working with this FileSystem. While the AzureFileSystem will ensure it takes appropriate steps towards robustness and reliability, the application developer must also design around these failure scenarios and have fallback and retry options available. + +## Limitations + +It is important to recognize that Azure Blob Storage is not a true FileSystem, nor is it the goal of this project to force Azure Blob Storage to act like a full-fledged FileSystem. While providing FileSystem APIs on top of Azure Blob Storage can offer convenience and ease of access in certain cases, trying to force the Storage service to work in scenarios it is not designed for is bound to introduce performance and stability problems. To that end, this project will only offer APIs that can be sensibly and cleanly built on top of Azure Blob Storage APIs. We recognize that this will leave some scenarios unsupported indefinitely, but we would rather offer a product that works predictably and reliably in its well defined scenarios than eagerly support all possible scenarios at the expense of quality. + +Azure Storage has other storage offerings, such as Azure Datalake and Azure Files. Each of these has semantics that approach a traditional FileSystem more closely than Azure Blobs. Should there arise a need for greater nio support on top of Azure Storage, we may choose to implement these APIs on top of one of those services as well. + +## File Open Options + +Due to the above limitations, not all file I/O operations can be supported. In particular, random writes on existing data are not feasible on top of Azure Blob Storage. 
(See the "Open Questions/Future Development" section for a discussion on random IO. See the write()/close() operation notes in the "API" section below for more information on the implementation of writing). + +Due to these constraints, writing is only permitted in very specific scenarios. The supported [StandardOpenOptions](https://docs.oracle.com/javase/7/docs/api/java/nio/file/StandardOpenOption.html) are as follows: + +- APPEND: It should be possible to append to existing blobs by writing new blocks, retrieving the existing block list, and appending the new blocks to the list before committing. +- CREATE +- CREATE\_NEW +- DELETE\_ON\_CLOSE +- DSYNC: Every write requires a getBlockList + commitBlockList +- READ: Random reads are supported and fairly straightforward with Azure Blob Storage. +- SYNC +- TRUNCATE\_EXISTING: We would not follow the specified behavior exactly as we would simply commit a block list over the existing blob. This has the same result upon closing but does not actually involve a truncate operation. +- WRITE: Must be specified with APPEND to ensure that any write operations will not be random. If TRUNCATE\_EXISTING is specified, we will write as normal and blow away the old blob with a commitBlockList upon closing. + +## Directories + +Azure Blob Storage does not support actual directories. Virtual directories are supported by specifying a blob name that includes one or more path separators. Blobs may then be listed using a prefix and by specifying the delimiter to approximate a directory structure. The delimiter in this case is '/'. + +This project will use the same directory notation as blobFuse and other existing tools. Specifically, when creating a directory a zero-length blob whose name is the desired path and has a metadata value of "is\_hdi\_folder=true" will be created. Operations targeting directories will target blobs with these properties. 
In cases where there is existing data in the containers that appears to use virtual directories (determined by the presence of path separators) but does not have the empty blob and metadata markers, behavior will be undefined as specified above. One notable example is the case where deleting the only blob in a "directory" that does not have this marker will actually delete the directory because there will be no marker blob present to persist the path. + +## Optimistic Concurrency + +Though there are limitations on how much safety we can guarantee because of the limitations of a remote Storage system, we should attempt to be safe wherever possible and use ETag-locking to ensure we are giving a consistent view of a given file when required. + +# Release Criteria and Timeline + +## Preview Criteria + +In order to preview, the AzureFileSystem must implement the full set of features necessary to support the [Cromwell](https://github.com/broadinstitute/cromwell) scientific engine. Integration into this engine represents our MVP scenario and will give us a live environment in which we can exercise the preview for stress and perf. The set of APIs that must be included are listed below. Unless otherwise specified, their behavior will be as defined in the Oracle javadocs for the given type. Notes about behavior particular to our implementation are included inline. Anything not included in this list but included in the java.nio package will throw an UnsupportedOperationException unless otherwise specified by the Oracle docs. Release of the first preview should be targeted for the end of April. + +## GA Criteria + +In order to release a GA version, the AzureFileSystem must: + +- Be fully integrated into the Azure Sdk Repo. This includes CI checks, docs, samples, and any other infrastructure specified in the repo guidelines. +- Have a fully functional and thorough test suite with sufficient test coverage. 
Testing should include unit testing on any internal types and scenarios tests that include loading the FileSystemProvider and interacting with it as if it were a production environment (this may require a separate package that simply runs an end to end test). +- A CaseRunner should be written and tested on the Developer Experience team's perf framework. +- At least two extra customers of reasonable size should have engaged with the product in a meaningful way. We should engage the customers who requested this project on the azure-storage-java repo. + +Per Microsoft's guidelines and assuming all criteria are met, the product should GA no later than six months after preview. Additional time may be required for customer adoption, however. + +# APIs for Preview + +## [FileSystemProvider](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html) + +Note that this type contains the core implementations for the FileSystem apis and [Files](https://docs.oracle.com/javase/7/docs/api/java/nio/file/Files.html) methods delegate here. It is also important that these methods are threadsafe. + +- [checkAccess](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#checkAccess(java.nio.file.Path,%20java.nio.file.AccessMode...)): AccessDeniedException is thrown in all cases where the Execute option is passed. In all other cases, no AccessDeniedException will ever be thrown as Azure Blob Storage does not keep track of permissions on a per-blob basis, and it is assumed that the authentication method provided is sufficient for accessing the blobs in the desired manner. While it would be feasible to test read access by attempting a read, it would not be safe to do the same for write access, and in this case it is preferable to keep the assumption consistent, so we check neither. 
Similarly, we could check the query string of a sas token for permissions, but we cannot do the same for token authentication, and we choose here to be consistent in our assumption for clarity. +- [copy](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#copy(java.nio.file.Path,%20java.nio.file.Path,%20java.nio.file.CopyOption...)): COPY\_ATTRIBUTES must be true as it is impossible not to copy blob properties; if this option is not passed, an UnsupportedOperationException (UOE) will be thrown. All copies within an account are atomic, so ATOMIC\_MOVE should be supported and in fact will always be the case even when unspecified for a FileSystem using one account. If the FileSystem uses multiple accounts, the account name of the source and destination will be compared, and an IOException will be thrown if they do not match. If REPLACE\_EXISTING is not passed, we will use an If-None-Match:"\*" condition on the destination to prevent overwrites. The authentication method used on each will be the same as configured on entry. Note that copies between accounts are implicitly disallowed because we cannot copy from outside the FileSystem. +- [createDirectory](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#createDirectory(java.nio.file.Path,%20java.nio.file.attribute.FileAttribute...)): Use Etag conditions to fulfill the required atomicity of check + create. See the section on directory behavior above. +- [delete](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#delete(java.nio.file.Path)) +- [deleteIfExists](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#deleteIfExists(java.nio.file.Path)) +- [getFileAttributeView](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#getFileAttributeView(java.nio.file.Path,%20java.lang.Class,%20java.nio.file.LinkOption...)): Please see the AttributeView section below. 
+- [getFileStore](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#getFileStore(java.nio.file.Path)): The FileStore (container) does not depend on the existence of the file (blob). See the [FileStore](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileStore.html) section below. +- [getFileSystem](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#getFileSystem(java.net.URI)): Once a FileSystem is closed, it will be removed from the FileSystemProvider's internal map. Therefore, trying to retrieve a closed FileSystem will throw a FileSystemNotFoundException. Note that it is possible to create a second instance of a FileSystem with the same URI if the first one was closed. +- [getPath](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#getPath(java.net.URI)): See the Path section below. +- [getScheme](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#getScheme()): Returns "azb". +- [isSameFile](https://docs.oracle.com/javase/7/docs/api/java/nio/file/Files.html#isSameFile(java.nio.file.Path,%20java.nio.file.Path)) +- [move](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#move(java.nio.file.Path,%20java.nio.file.Path,%20java.nio.file.CopyOption...)): Implemented as a copy and a delete. An AtomicMoveNotSupportedException will be thrown if the ATOMIC\_MOVE flag is passed. The same authentication method will be applied to both the source and the destination. We cannot copy the LMT of the source; the LMT will be updated as the copy time on the new blob, which is in violation of the javadocs but we do not have an alternative. +- [newDirectoryStream](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#newDirectoryStream(java.nio.file.Path,%20java.nio.file.DirectoryStream.Filter)): See the DirectoryStream section below. 
+- [newFileSystem](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#newFileSystem(java.net.URI,%20java.util.Map)): See the FileSystem section below. +- [newInputStream](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#newInputStream(java.nio.file.Path,%20java.nio.file.OpenOption...)): See the InputStream section below. +- [newOutputStream](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#newOutputStream(java.nio.file.Path,%20java.nio.file.OpenOption...)): See the OutputStream section below. +- [readAttributes](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#readAttributes(java.nio.file.Path,%20java.lang.Class,%20java.nio.file.LinkOption...)) +- [readAttributes](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#readAttributes(java.nio.file.Path,%20java.lang.String,%20java.nio.file.LinkOption...)) +- [setAttribute](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#setAttribute(java.nio.file.Path,%20java.lang.String,%20java.lang.Object,%20java.nio.file.LinkOption...)) + +## [Path](https://docs.oracle.com/javase/7/docs/api/java/nio/file/Path.html) + +Note the need to support empty paths. Most methods in this type are straightforward and do not need further commentary. In this section we list only the methods that will **NOT** be supported.
+ +- [register](https://docs.oracle.com/javase/7/docs/api/java/nio/file/Path.html#register(java.nio.file.WatchService,%20java.nio.file.WatchEvent.Kind...)) (both overloads; support may come at a later date) +- [toRealPath](https://docs.oracle.com/javase/7/docs/api/java/nio/file/Path.html#toRealPath(java.nio.file.LinkOption...)) (pending sym link support) + +## [InputStream](https://docs.oracle.com/javase/7/docs/api/java/io/InputStream.html)/[OutputStream](https://docs.oracle.com/javase/7/docs/api/java/io/OutputStream.html) + +We should be able to reuse BlobInputStream and BlobOutputStream from the blob package for these types. See above notes on OpenOptions for details on which options may be passed. + +## [DirectoryStream](https://docs.oracle.com/javase/7/docs/api/java/nio/file/DirectoryStream.html) + +A blob listing with a prefix and delimiter should suffice as we already return an Iterable. + +## [FileSystem](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystem.html) + +A FileSystem is backed by an account. + +- [close](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystem.html#close()): The need for throwing a possible exception will require maintaining a "closed" Boolean. Because this closes all associated channels, etc., child objects will need to maintain a reference to their parent FileSystem and query it performing any operations. Because we don't hold onto any system resources outside of making network requests, outstanding operations can be allowed to finish and the channel will be considered closed upon the next attempted operation when the parent FileSystem is queried. +- [getFileStores](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystem.html#getFileStores()): No permissions are checked. The list of FileStores will be the list passed in upon configuration. An exists call will be made on the container before returning it to ensure it is still viable. 
+- [getPath](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystem.html#getPath(java.lang.String,%20java.lang.String...)): See the Path section above. +- [getRootDirectories](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystem.html#getRootDirectories()): Returns the same list as getFileStores, but each element has a '/' appended to it. +- [getSeparator](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystem.html#getSeparator()): Returns '/'. +- [isOpen](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystem.html#isOpen()) +- [isReadOnly](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystem.html#isReadOnly()): Returns false. +- [provider](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystem.html#provider()) +- [supportedFileAttributeViews](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystem.html#supportedFileAttributeViews()): See the AttributeViews section below. + +## [FileStore](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileStore.html) + +A FileStore is backed by a container. As mentioned above, a list of containers is passed in upon initialization of the FileSystem. Because there is no limit to the storage space of a container, unallocated/usable/total space is MAX\_LONG. Other methods are self-evident. + +## AttributeViews + +- [BasicFileAttributeView](https://docs.oracle.com/javase/7/docs/api/java/nio/file/attribute/BasicFileAttributeView.html): + - [setTimes](https://docs.oracle.com/javase/7/docs/api/java/nio/file/attribute/BasicFileAttributeView.html#setTimes(java.nio.file.attribute.FileTime,%20java.nio.file.attribute.FileTime,%20java.nio.file.attribute.FileTime)): a copy in place can be used to update the LMT.
UnsupportedOperationException thrown for other time values + - Symlink support pending +- [UserDefinedFileAttributeView](https://docs.oracle.com/javase/7/docs/api/java/nio/file/attribute/UserDefinedFileAttributeView.html): Stored as metadata on the blob. Both keys and values are Strings. RuntimePermission("accessUserDefinedAttributes") is not required. +- AzureStorageFileAttributeView: A new type that will allow clients to set Storage related properties such as tier. + +## [File](https://docs.oracle.com/javase/7/docs/api/java/io/File.html) + +Many of these methods are implemented by deferring to the Files implementation (paying attention to differences in behavior). Here again, only the methods that are NOT implemented are listed as most of these methods can be deferred to another type and are therefore fairly transparent to implement. + +- [isHidden](https://docs.oracle.com/javase/7/docs/api/java/io/File.html#isHidden()) +- [setWritable](https://docs.oracle.com/javase/7/docs/api/java/io/File.html#setWritable(boolean,%20boolean))/[setReadable](https://docs.oracle.com/javase/7/docs/api/java/io/File.html#setReadable(boolean,%20boolean))/[setExecutable](https://docs.oracle.com/javase/7/docs/api/java/io/File.html#setExecutable(boolean,%20boolean))/[setLastModified](https://docs.oracle.com/javase/7/docs/api/java/io/File.html#setLastModified(long))/[setReadOnly](https://docs.oracle.com/javase/7/docs/api/java/io/File.html#setReadOnly()) + +## [AsynchronousFileChannel](https://docs.oracle.com/javase/7/docs/api/java/nio/channels/AsynchronousFileChannel.html) + +- [force](https://docs.oracle.com/javase/7/docs/api/java/nio/channels/AsynchronousFileChannel.html#force(boolean)): No-op as we don't keep a local cache, so all writes go directly to the service.
+- [open](https://docs.oracle.com/javase/7/docs/api/java/nio/channels/AsynchronousFileChannel.html#open(java.nio.file.Path,%20java.nio.file.OpenOption...)): See the above OpenOptions section for more information. Opening with the ExecutorService is not initially supported. +- [read](https://docs.oracle.com/javase/7/docs/api/java/nio/channels/AsynchronousFileChannel.html#read(java.nio.ByteBuffer,%20long)): CompletionEventHandler not initially supported. +- [size](https://docs.oracle.com/javase/7/docs/api/java/nio/channels/AsynchronousFileChannel.html#size()) +- [write](https://docs.oracle.com/javase/7/docs/api/java/nio/channels/AsynchronousFileChannel.html#write(java.nio.ByteBuffer,%20long,%20A,%20java.nio.channels.CompletionHandler)): CompletionEventHandler not initially supported. Additional checks are required before closing. Each write will add an entry to a (threadsafe) set of Strings that represent the range. At the time of closing, the set will be examined to ensure it forms a continuous range from 0 to the size of the blob. If it does not, an IOException will be thrown. If it does, the ranges will be converted to blockIDs and the list will be committed. This will enable parallel write scenarios for writing an entire file while ensuring that there is no random IO happening. Note that the docs do not specify the APPEND option for the open API. In this case, TRUNCATE\_EXISTING must be specified. + +## [SeekableByteChannel](https://docs.oracle.com/javase/7/docs/api/java/nio/channels/SeekableByteChannel.html) + +See the above OpenOptions section for more information. + +- [position](https://docs.oracle.com/javase/7/docs/api/java/nio/channels/SeekableByteChannel.html#position(long)): If the position is set to any value other than the current size of the file, attempting to write will throw an UnsupportedOperationException. In read-only workloads, the position may be set wherever the client desires. 
Reading may fail even after a write if the channel is opened to a new blob because the data will not have been committed yet. +- [read](https://docs.oracle.com/javase/7/docs/api/java/nio/channels/SeekableByteChannel.html#read(java.nio.ByteBuffer)) +- [size](https://docs.oracle.com/javase/7/docs/api/java/nio/channels/SeekableByteChannel.html#size()) +- [write](https://docs.oracle.com/javase/7/docs/api/java/nio/channels/SeekableByteChannel.html#write(java.nio.ByteBuffer)) + +## [FileChannel](https://docs.oracle.com/javase/7/docs/api/java/nio/channels/FileChannel.html) + +Note that this implements a SeekableByteChannel. Many of the methods should be deferred to an internal instance or this type should extend from our implementation of SeekableByteChannel. As such, its seeking and writing behavior is the same as SeekableByteChannel. Mapping is not supported. Locking is not supported. Note the possible connection between a FileChannel and the stream that created it; where possible, a FileChannel should reference the position of the stream rather than maintaining its own pointer. + +# Open Questions/Further Development + +The following are not immediately necessary but may reasonably be implemented at a later time. + +- Symbolic links (createSymbolicLink) could be an empty blob with metadata field like x-ms-meta-link-target:path. Must be wary of link chains and circular links. +- Hard links (createLink) +- Hidden files (isHidden) could be a metadata field like x-ms-meta-is-hidden:true +- Random IO (newAsynchronousFileChannel, newSeekableByteChannel, newFileChannel). It would be theoretically possible to implement this functionality by downloading the file, working on a locally cached copy, and reuploading the file, but that incurs significant performance costs on large files (caching also introduces a significant amount of complexity when trying to maintain cache correctness and consistency in multithreaded environments).
Because our MVP scenario is in workloads with large files, there is not much benefit to this option. Another alternative would be to use system wherein blocks roughly correlate to pages in traditional file I/O: the blockIds correspond to the range of data they hold. A random write would require downloading only a few blocks containing that range, making the desired edits, uploading the edited blocks, and re-committing the block list. This, too, introduces a large amount of complexity, a high number of round trip requests, and can be unsafe in multithreaded environments. +- Watches on directory events. +- PathMatcher (glob syntax?) +- File locks (leases? Can only be over the whole file. Can only be exclusive.) +- Read only FileSystem/Containers. Marking certain stores as read only could be configured in the initialization step if necessary. It could be a locally maintained list or we could require that the customer preconfigure the containers to be WORM containers. +- Opening AsyncFileChannel with the ExecutorService; reading with a CompletionEventHandler +- FileOwnership and POSIX permissions require ADLS. Random I/O may be improved with the use of Azure Files. +- Should we support AAD tokens? If so, we should probably look at azcopy for token refresh strategies. +- Which version should we release for GA? Should we jump to v12 to be consistent with other Storage offerings? +- Allowing customers access to single blobs. It is possible that customers may only need to access one blob from a given account. If that is the case, their credentials will likely be scoped just to that blob, and even checking the existence of a container upon initialization will be too restrictive. We can add an AzureStorageSkipInitialConnectionCheck parameter that bypasses this check and trusts the users credentials, allowing them access just to that blob. +- Providers built on other services. See comments in the "Entry" section. 
+- Some possible options for new flags include flags to optimize for certain behavior, to allow the filesystem to use all extant containers as FileStores rather than being restricted to the specified list, toggle the creation of specified containers, or to allow for specifying a CDN that can override the account name found in the URI. diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/README.md b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/README.md new file mode 100644 index 00000000000..8bd92fcac36 --- /dev/null +++ b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/README.md @@ -0,0 +1,333 @@ +# Azure Storage Blob NIO FileSystemProvider + +This package allows you to interact with Azure Blob Storage through the standard Java NIO Filesystem APIs. + +[Source code][source] | [API reference documentation][docs] | [REST API documentation][rest_docs] | [Product documentation][product_docs] | [Samples][samples] + +## Getting started + +### Prerequisites + +- [Java Development Kit (JDK)][jdk] with version 8 or above +- [Azure Subscription][azure_subscription] +- [Create Storage Account][storage_account] + +### Include the package + +[//]: # ({x-version-update-start;com.azure:azure-storage-blob-nio;current}) +```xml + + com.azure + azure-storage-blob-nio + 12.0.0-beta.19 + +``` +[//]: # ({x-version-update-end}) + +### Create a Storage Account +To create a Storage Account you can use the [Azure Portal][storage_account_create_portal] or [Azure CLI][storage_account_create_cli]. + +```bash +az storage account create \ + --resource-group \ + --name \ + --location +``` + +### Authenticate the client + +The simplest way to interact with the Storage Service is to create an instance of the [FileSystem][file_system] class using the [FileSystems][file_systems] API. +To make this possible you'll need the Account SAS (shared access signature) string of the Storage Account or a Shared Key. 
Learn more at [SAS Token][sas_token] and [Shared Key][shared_key] + +#### Get credentials + +##### SAS Token + +a. Use the Azure CLI snippet below to get the SAS token from the Storage Account. + +```bash +az storage blob generate-sas \ + --account-name {Storage Account name} \ + --container-name {container name} \ + --name {blob name} \ + --permissions {permissions to grant} \ + --expiry {datetime to expire the SAS token} \ + --services {storage services the SAS allows} \ + --resource-types {resource types the SAS allows} +``` + +Example: + +```bash +CONNECTION_STRING= + +az storage blob generate-sas \ + --account-name MyStorageAccount \ + --container-name MyContainer \ + --name MyBlob \ + --permissions racdw \ + --expiry 2020-06-15 +``` + +b. Alternatively, get the Account SAS Token from the Azure Portal. + +1. Go to your Storage Account +2. Select `Shared access signature` from the menu on the left +3. Click on `Generate SAS and connection string` (after setup) + +##### **Shared Key Credential** + +Use Account name and Account key. Account name is your Storage Account name. + +1. Go to your Storage Account +2. Select `Access keys` from the menu on the left +3. Under `key1`/`key2` copy the contents of the `Key` field + +## Key concepts + +NIO on top of Blob Storage is designed for: + +- Working with Blob Storage as though it were a local file system +- Random access reads on large blobs without downloading the entire blob +- Uploading full files as blobs +- Creating and navigating a directory structure within an account +- Reading and setting attributes on blobs + +## Design Notes +It is important to recognize that Azure Blob Storage is not a true FileSystem, nor is it the goal of this project to +force Azure Blob Storage to act like a full-fledged FileSystem. 
While providing FileSystem APIs on top of Azure Blob +Storage can offer convenience and ease of access in certain cases, trying to force the Storage service to work in +scenarios it is not designed for is bound to introduce performance and stability problems. + +To that end, this project will only offer APIs that can be sensibly and cleanly built on top of Azure Blob Storage APIs. +We recognize that this will leave some scenarios unsupported indefinitely, but we would rather offer a product that +works predictably and reliably in its well defined scenarios than eagerly support all possible scenarios at the expense +of quality. Even still, supporting some fundamentally required use cases, such as directories, can result in unexpected +behavior due to the difference between blob storage and a file system. The javadocs on each type and method should +therefore be read and understood for ways in which they may diverge from the standard specified by the JDK. + +Moreover, even from within a given application, it should be remembered that using a remote FileSystem introduces higher +latency. Because of this, particular care must be taken when managing concurrency. Race conditions are more likely to +manifest, network failures occur more frequently than disk failures, and other such distributed application scenarios +must be considered when working with this FileSystem. While the AzureFileSystem will ensure it takes appropriate steps +towards robustness and reliability, the application developer must also design around these failure scenarios and have +fallback and retry options available. + +The view of the FileSystem from within an instance of the JVM will be consistent, but the AzureFileSystem makes no +guarantees on behavior or state should other processes operate on the same data. The AzureFileSystem will assume that it +has exclusive access to the resources stored in Azure Blob Storage and will behave without regard for potential +interfering applications. 
+ +Finally, this implementation has currently chosen to always read/write directly to/from Azure Storage without a local +cache. Our team has determined that with the tradeoffs of complexity, correctness, safety, performance, debuggability, +etc. one option is not inherently better than the other and that this choice most directly addresses the current known +use cases for this project. While this has consequences for every API, of particular note is the limitations on writing +data. Data may only be written as an entire file (i.e. random IO or appends are not supported), and data is not +committed or available to be read until the write stream is closed. + +## Examples + +The following sections provide several code snippets covering some of the most common Azure Storage Blob NIO tasks, including: + +- [URI format](#uri-format) +- [Create a `FileSystem`](#create-a-filesystem) +- [Create a directory](#create-a-directory) +- [Iterate over directory contents](#iterate-over-directory-contents) +- [Read a file](#read-a-file) +- [Write to a file](#write-to-a-file) +- [Copy a file](#copy-a-file) +- [Delete a file](#delete-a-file) +- [Read attributes on a file](#read-attributes-on-a-file) +- [Write attributes to a file](#write-attributes-to-a-file) + +### URI format +URIs are the fundamental way of identifying a resource. This package defines its URI format as follows: + +The scheme for this provider is `"azb"`, and the format of the URI to identify an `AzureFileSystem` is +`"azb://?endpoint="`. The endpoint of the Storage account is used to uniquely identify the filesystem. + +The root component, if it is present, is the first element of the path and is denoted by a `':'` as the last character. +Hence, only one instance of `':'` may appear in a path string, and it may only be the last character of the first +element in the path. The root component is used to identify which container a path belongs to. 
+ +All other path elements, including separators, are considered as the blob name. `AzurePath#fromBlobUrl` +may be used to convert a typical http url pointing to a blob into an `AzurePath` object pointing to the same resource. + +### Create a `FileSystem` + +Create a `FileSystem` using the [`shared key`](#get-credentials) retrieved above. + +Note that you can further configure the file system using constants available in `AzureFileSystem`. +Please see the docs for `AzureFileSystemProvider` for a full explanation of initializing and configuring a filesystem + +```java readme-sample-createAFileSystem +Map config = new HashMap<>(); +String stores = ","; // A comma separated list of container names +StorageSharedKeyCredential credential = new StorageSharedKeyCredential(" attributes = Files.readAttributes(filePath, "azureBlob:metadata,headers"); +``` + +### Write attributes to a file + +Set attributes of a file through the `AzureBlobFileAttributeView`. + +```java readme-sample-writeAttributesToAFile +AzureBlobFileAttributeView view = Files.getFileAttributeView(filePath, AzureBlobFileAttributeView.class); +view.setMetadata(Collections.emptyMap()); +``` + +Or set an attribute dynamically by specifying the attribute as a string. + +```java readme-sample-writeAttributesToAFileString +Files.setAttribute(filePath, "azureBlob:blobHttpHeaders", new BlobHttpHeaders()); +``` + +## Troubleshooting + +When using the NIO implementation for Azure Blob Storage, errors returned by the service are manifested as an +`IOException` which wraps a `BlobStorageException` having the same HTTP status codes returned for +[REST API][error_codes] requests. For example, if you try to read a file that doesn't exist in your Storage Account, a +`404` error is returned, indicating `Not Found`. + +### Default HTTP Client +All client libraries by default use the Netty HTTP client. Adding the above dependency will automatically configure +the client library to use the Netty HTTP client. 
Configuring or changing the HTTP client is detailed in the +[HTTP clients wiki](https://github.com/Azure/azure-sdk-for-java/wiki/HTTP-clients). + +### Default SSL library +All client libraries, by default, use the Tomcat-native Boring SSL library to enable native-level performance for SSL +operations. The Boring SSL library is an uber jar containing native libraries for Linux / macOS / Windows, and provides +better performance compared to the default SSL implementation within the JDK. For more information, including how to +reduce the dependency size, refer to the [performance tuning][performance_tuning] section of the wiki. + +## Continued development + +This project is still actively being developed in an effort to move from preview to GA. Below is a list of features that +are not currently supported but are under consideration and may be added before GA. We welcome feedback and input on +which of these may be most useful and are open to suggestions for items not included in this list. While all of these +items are being considered, they have not been investigated and designed and therefore we cannot confirm their +feasibility within Azure Blob Storage. Therefore, it may be the case that further investigation reveals a feature may +not be possible or otherwise may conflict with established design goals and therefore will not ultimately be supported. + +- Symbolic links +- Hard links +- Hidden files +- Random writes +- File locks +- Read only files or file stores +- Watches on directory events +- Support for other Azure Storage services such as ADLS Gen 2 (Datalake) and Azure Files (shares) +- Token authentication +- Multi-account filesystems +- Delegating access to single files +- Normalizing directory structure of data upon loading a FileSystem +- Local caching +- Other `OpenOptions` such as append or dsync +- Flags to toggle certain behaviors such as FileStore (container) creation, etc. + +## Contributing + +This project welcomes contributions and suggestions.
Most contributions require you to agree to a [Contributor License Agreement (CLA)][cla] declaring that you have the right to, and actually do, grant us the rights to use your contribution. + +When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. + +This project has adopted the [Microsoft Open Source Code of Conduct][coc]. For more information see the [Code of Conduct FAQ][coc_faq] or contact [opencode@microsoft.com][coc_contact] with any additional questions or comments. + + +[source]: https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/storage/azure-storage-blob-nio/src +[samples_readme]: https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/storage/azure-storage-blob-nio/src/samples/README.md +[docs]: https://azure.github.io/azure-sdk-for-java/ +[rest_docs]: https://docs.microsoft.com/rest/api/storageservices/blob-service-rest-api +[product_docs]: https://docs.microsoft.com/azure/storage/blobs/storage-blobs-overview +[sas_token]: https://docs.microsoft.com/azure/storage/common/storage-dotnet-shared-access-signature-part-1 +[shared_key]: https://docs.microsoft.com/rest/api/storageservices/authorize-with-shared-key +[jdk]: https://docs.microsoft.com/java/azure/jdk/ +[azure_subscription]: https://azure.microsoft.com/free/ +[storage_account]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-portal +[storage_account_create_cli]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-cli +[storage_account_create_portal]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-portal +[identity]: https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/identity/azure-identity/README.md +[error_codes]: 
https://docs.microsoft.com/rest/api/storageservices/blob-service-error-codes +[samples]: https://docs.oracle.com/javase/tutorial/essential/io/fileio.html +[cla]: https://cla.microsoft.com +[coc]: https://opensource.microsoft.com/codeofconduct/ +[coc_faq]: https://opensource.microsoft.com/codeofconduct/faq/ +[coc_contact]: mailto:opencode@microsoft.com +[performance_tuning]: https://github.com/Azure/azure-sdk-for-java/wiki/Performance-Tuning +[file_system]: https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystem.html +[file_systems]: https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystems.html + +![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-java%2Fsdk%2Fstorage%2Fazure-storage-blob%2FREADME.png) diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/assets.json b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/assets.json new file mode 100644 index 00000000000..c262f7ebafc --- /dev/null +++ b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/assets.json @@ -0,0 +1,6 @@ +{ + "AssetsRepo": "Azure/azure-sdk-assets", + "AssetsRepoPrefixPath": "java", + "TagPrefix": "java/storage/azure-storage-blob-nio", + "Tag": "java/storage/azure-storage-blob-nio_b2a0ce219e" +} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureBasicFileAttributeView.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureBasicFileAttributeView.java new file mode 100644 index 00000000000..43744893ccb --- /dev/null +++ b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureBasicFileAttributeView.java @@ -0,0 +1,69 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package com.azure.storage.blob.nio; + +import com.azure.core.util.logging.ClientLogger; + +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.attribute.BasicFileAttributeView; +import java.nio.file.attribute.FileTime; + +/** + * Provides support for basic file attributes. + *

+ * The operations supported by this view and the attributes it reads are a strict subset of + * {@link AzureBlobFileAttributeView} and has the same network behavior. Therefore, while this type is offered for + * compliance with the NIO spec, {@link AzureBlobFileAttributeView} is generally preferred. + *

+ * {@link #setTimes(FileTime, FileTime, FileTime)} is not supported. + */ +public final class AzureBasicFileAttributeView implements BasicFileAttributeView { + private static final ClientLogger LOGGER = new ClientLogger(AzureBasicFileAttributeView.class); + + static final String NAME = "azureBasic"; + + private final Path path; + + AzureBasicFileAttributeView(Path path) { + this.path = path; + } + + /** + * Returns the name of the attribute view: {@code "azureBasic"} + * + * @return the name of the attribute view: {@code "azureBasic"} + */ + @Override + public String name() { + return NAME; + } + + /** + * Reads the basic file attributes as a bulk operation. + *

+ * All file attributes are read as an atomic operation with respect to other file system operations. + * + * @return {@link AzureBasicFileAttributes} + */ + @Override + public AzureBasicFileAttributes readAttributes() throws IOException { + AzurePath.ensureFileSystemOpen(path); + return new AzureBasicFileAttributes(path); + } + + /** + * Unsupported. + * + * @param lastModifiedTime the new last modified time, or null to not change the value + * @param lastAccessTime the last access time, or null to not change the value + * @param createTime the file's create time, or null to not change the value + * @throws UnsupportedOperationException Operation not supported. + * @throws IOException never + */ + @Override + public void setTimes(FileTime lastModifiedTime, FileTime lastAccessTime, FileTime createTime) throws IOException { + throw LoggingUtility.logError(LOGGER, new UnsupportedOperationException()); + } +} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureBasicFileAttributes.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureBasicFileAttributes.java new file mode 100644 index 00000000000..d1ab6d28562 --- /dev/null +++ b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureBasicFileAttributes.java @@ -0,0 +1,165 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob.nio; + +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.attribute.BasicFileAttributes; +import java.nio.file.attribute.FileAttribute; +import java.nio.file.attribute.FileTime; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +/** + * Provides support for basic file attributes. + *

+ * The properties available on this type are a strict subset of {@link AzureBlobFileAttributes}, and the two types have + * the same network behavior. Therefore, while this type is offered for compliance with the NIO spec, + * {@link AzureBlobFileAttributes} is generally preferred. + *

+ * Some attributes are not supported. Refer to the javadocs on each method for more information. + *

+ * If the target file is a virtual directory, most attributes will be set to null. + */ +public final class AzureBasicFileAttributes implements BasicFileAttributes { + // For verifying parameters on FileSystemProvider.readAttributes + static final Set ATTRIBUTE_STRINGS; + static { + Set set = new HashSet<>(); + set.add("lastModifiedTime"); + set.add("isRegularFile"); + set.add("isDirectory"); + set.add("isVirtualDirectory"); + set.add("isSymbolicLink"); + set.add("isOther"); + set.add("size"); + set.add("creationTime"); + ATTRIBUTE_STRINGS = Collections.unmodifiableSet(set); + } + + private final AzureBlobFileAttributes internalAttributes; + + /* + In order to support Files.exist() and other methods like Files.walkFileTree() which depend on it, we have had to add + support for virtual directories. This is not ideal as customers will have to now perform null checks when inspecting + attributes (or at least check if it is a virtual directory before inspecting properties). It also incurs extra + network requests as we have to call a checkDirectoryExists() after receiving the initial 404. This is two + additional network requests, though they only happen in the case when a file doesn't exist or is virtual, so it + shouldn't happen in the majority of api calls. + */ + AzureBasicFileAttributes(Path path) throws IOException { + this.internalAttributes = new AzureBlobFileAttributes(path); + } + + /** + * Returns the time of last modification or null if this is a virtual directory. + * + * @return the time of last modification or null if this is a virtual directory + */ + @Override + public FileTime lastModifiedTime() { + return this.internalAttributes.lastModifiedTime(); + } + + /** + * Returns the time of last modification or null if this is a virtual directory + *

+ * Last access time is not supported by the blob service. In this case, it is typical for implementations to return + * the {@link #lastModifiedTime()}. + * + * @return the time of last modification or null if this is a virtual directory + */ + @Override + public FileTime lastAccessTime() { + return this.internalAttributes.lastAccessTime(); + } + + /** + * Returns the creation time. The creation time is the time that the file was created. Returns null if this is a + * virtual directory. + * + * @return The creation time or null if this is a virtual directory + */ + @Override + public FileTime creationTime() { + return this.internalAttributes.creationTime(); + } + + /** + * Tells whether the file is a regular file with opaque content. + * + * @return whether the file is a regular file. + */ + @Override + public boolean isRegularFile() { + return this.internalAttributes.isRegularFile(); + } + + /** + * Tells whether the file is a directory. + *

+ * Will only return true if the directory is a concrete directory. See + * {@link AzureFileSystemProvider#createDirectory(Path, FileAttribute[])} for more information on virtual and + * concrete directories. + * + * @return whether the file is a directory + */ + @Override + public boolean isDirectory() { + return this.internalAttributes.isDirectory(); + } + + /** + * Tells whether the file is a virtual directory. + *

+ * See {@link AzureFileSystemProvider#createDirectory(Path, FileAttribute[])} for more information on virtual and + * concrete directories. + * + * @return whether the file is a virtual directory + */ + public boolean isVirtualDirectory() { + return this.internalAttributes.isVirtualDirectory(); + } + + /** + * Tells whether the file is a symbolic link. + * + * @return false. Symbolic links are not supported. + */ + @Override + public boolean isSymbolicLink() { + return this.internalAttributes.isSymbolicLink(); + } + + /** + * Tells whether the file is something other than a regular file, directory, or symbolic link. + * + * @return false. No other object types are supported. + */ + @Override + public boolean isOther() { + return this.internalAttributes.isOther(); + } + + /** + * Returns the size of the file (in bytes). + * + * @return the size of the file + */ + @Override + public long size() { + return this.internalAttributes.size(); + } + + /** + * Returns the url of the resource. + * + * @return The file key, which is the url. + */ + @Override + public Object fileKey() { + return this.internalAttributes.fileKey(); + } +} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureBlobFileAttributeView.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureBlobFileAttributeView.java new file mode 100644 index 00000000000..d9366e22417 --- /dev/null +++ b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureBlobFileAttributeView.java @@ -0,0 +1,157 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
package com.azure.storage.blob.nio;

import com.azure.core.util.logging.ClientLogger;
import com.azure.storage.blob.models.AccessTier;
import com.azure.storage.blob.models.BlobHttpHeaders;
import com.azure.storage.blob.models.BlobStorageException;
import com.azure.storage.blob.specialized.BlobClientBase;

import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Path;
import java.nio.file.attribute.BasicFileAttributeView;
import java.nio.file.attribute.FileTime;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Consumer;

/**
 * A file attribute view that provides a view of attributes specific to files stored as blobs in Azure Storage.
 * <p>
 * All attributes are retrieved from the file system as a bulk operation.
 * <p>
 * {@link #setTimes(FileTime, FileTime, FileTime)} is not supported.
 */
public final class AzureBlobFileAttributeView implements BasicFileAttributeView {
    private static final ClientLogger LOGGER = new ClientLogger(AzureBlobFileAttributeView.class);

    static final String ATTR_CONSUMER_ERROR = "Exception thrown by attribute consumer";
    static final String NAME = "azureBlob";

    private final Path path;

    AzureBlobFileAttributeView(Path path) {
        this.path = path;
    }

    // A setter operation on this view that may fail with an IOException.
    @FunctionalInterface
    private interface AttributeSetter {
        void set(Object value) throws IOException;
    }

    // Adapts a setter into a Consumer, translating IOExceptions into UncheckedIOExceptions for the map below.
    private static Consumer<Object> toConsumer(AttributeSetter setter) {
        return value -> {
            try {
                setter.set(value);
            } catch (IOException e) {
                throw LoggingUtility.logError(LOGGER, new UncheckedIOException(ATTR_CONSUMER_ERROR, e));
            }
        };
    }

    // Used by the provider's setAttribute to dispatch a string attribute name to the matching setter on this view.
    @SuppressWarnings("unchecked")
    static Map<String, Consumer<Object>> setAttributeConsumers(AzureBlobFileAttributeView view) {
        Map<String, Consumer<Object>> map = new HashMap<>();
        map.put("blobHttpHeaders", toConsumer(value -> view.setBlobHttpHeaders((BlobHttpHeaders) value)));
        map.put("metadata", toConsumer(value -> {
            Map<String, String> metadata = (Map<String, String>) value;
            if (metadata == null) {
                // A null value can never be a valid metadata map; surface it as the cast failure it represents.
                throw LoggingUtility.logError(LOGGER, new ClassCastException());
            }
            view.setMetadata(metadata);
        }));
        map.put("tier", toConsumer(value -> view.setTier((AccessTier) value)));
        return map;
    }

    /**
     * Returns the name of the attribute view: {@code "azureBlob"}
     *
     * @return the name of the attribute view: {@code "azureBlob"}
     */
    @Override
    public String name() {
        return NAME;
    }

    /**
     * Reads the file attributes as a bulk operation.
     * <p>
     * All file attributes are read as an atomic operation with respect to other file system operations. A fresh copy is
     * retrieved every time this method is called.
     * @return {@link AzureBlobFileAttributes}
     * @throws IOException if an IOException occurs.
     */
    @Override
    public AzureBlobFileAttributes readAttributes() throws IOException {
        AzurePath.ensureFileSystemOpen(path);
        return new AzureBlobFileAttributes(path);
    }

    // An operation against the backing blob; BlobStorageException is unchecked and handled by runOnBlob.
    @FunctionalInterface
    private interface BlobOperation {
        void apply(BlobClientBase client);
    }

    // Common plumbing for the setters: validate the file system is open, resolve the blob, and wrap service errors.
    private void runOnBlob(BlobOperation operation) throws IOException {
        AzurePath.ensureFileSystemOpen(this.path);
        try {
            operation.apply(new AzureResource(this.path).getBlobClient());
        } catch (BlobStorageException e) {
            throw LoggingUtility.logError(LOGGER, new IOException(e));
        }
    }

    /**
     * Sets the {@link BlobHttpHeaders} as an atomic operation.
     * <p>
     * See {@link BlobClientBase#setHttpHeaders(BlobHttpHeaders)} for more information.
     * @param headers {@link BlobHttpHeaders}
     * @throws IOException if an IOException occurs.
     */
    public void setBlobHttpHeaders(BlobHttpHeaders headers) throws IOException {
        runOnBlob(client -> client.setHttpHeaders(headers));
    }

    /**
     * Sets the metadata as an atomic operation.
     * <p>
     * See {@link BlobClientBase#setMetadata(Map)} for more information.
     * @param metadata The metadata to associate with the blob
     * @throws IOException if an IOException occurs.
     */
    public void setMetadata(Map<String, String> metadata) throws IOException {
        runOnBlob(client -> client.setMetadata(metadata));
    }

    /**
     * Sets the {@link AccessTier} on the file.
     * <p>
     * See {@link BlobClientBase#setAccessTier(AccessTier)} for more information.
     * @param tier {@link AccessTier}
     * @throws IOException if an IOException occurs.
     */
    public void setTier(AccessTier tier) throws IOException {
        runOnBlob(client -> client.setAccessTier(tier));
    }

    /**
     * Unsupported.
     *
     * @param lastModifiedTime the new last modified time, or null to not change the value
     * @param lastAccessTime the last access time, or null to not change the value
     * @param createTime the file's create time, or null to not change the value
     * @throws UnsupportedOperationException Operation not supported.
     * @throws IOException never
     */
    @Override
    public void setTimes(FileTime lastModifiedTime, FileTime lastAccessTime, FileTime createTime) throws IOException {
        throw LoggingUtility.logError(LOGGER, new UnsupportedOperationException());
    }
}
package com.azure.storage.blob.nio;

import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.attribute.BasicFileAttributes;
import java.nio.file.attribute.FileAttribute;
import java.nio.file.attribute.FileTime;
import java.time.OffsetDateTime;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;

import com.azure.core.util.logging.ClientLogger;
import com.azure.storage.blob.models.AccessTier;
import com.azure.storage.blob.models.ArchiveStatus;
import com.azure.storage.blob.models.BlobHttpHeaders;
import com.azure.storage.blob.models.BlobProperties;
import com.azure.storage.blob.models.BlobStorageException;
import com.azure.storage.blob.models.BlobType;
import com.azure.storage.blob.models.CopyStatusType;

/**
 * Provides support for attributes associated with a file stored as a blob in Azure Storage.
 * <p>
 * Some of the attributes inherited from {@link BasicFileAttributes} are not supported. See the docs on each method for
 * more information.
 * <p>
 * If the target file is a virtual directory, most attributes will be set to null.
 */
public final class AzureBlobFileAttributes implements BasicFileAttributes {
    /*
    Some blob properties do not have getters as they do not make sense in the context of nio. These properties are:
    - incremental snapshot related properties (only for page blobs)
    - lease related properties (leases not currently supported)
    - sequence number (only for page blobs)
    - encryption key sha256 (cpk not supported)
    - committed block count (only for append blobs)
    */

    private static final ClientLogger LOGGER = new ClientLogger(AzureBlobFileAttributes.class);

    // Null when the target is a virtual directory: there is no backing blob to fetch properties from.
    private final BlobProperties properties;
    private final AzureResource resource;
    private final boolean isVirtualDirectory;

    /**
     * Eagerly fetches the blob's properties. If the blob does not exist (404) but a virtual directory exists at this
     * path, the instance represents that virtual directory and most attribute getters return null.
     *
     * @param path the path whose attributes to read
     * @throws IOException if properties cannot be retrieved, other than the virtual-directory case above
     */
    AzureBlobFileAttributes(Path path) throws IOException {
        this.resource = new AzureResource(path);
        BlobProperties props = null;
        try {
            props = resource.getBlobClient().getProperties();
        } catch (BlobStorageException e) {
            if (e.getStatusCode() == 404 && this.resource.checkVirtualDirectoryExists()) {
                this.isVirtualDirectory = true;
                this.properties = null;
                return;
            } else {
                throw LoggingUtility.logError(LOGGER, new IOException("Path: " + path.toString(), e));
            }
        }
        this.properties = props;
        this.isVirtualDirectory = false;
    }

    /*
     * Collapses the null-for-virtual-directory convention shared by most getters on this type: returns the supplied
     * value for a real blob and null for a virtual directory. The supplier defers dereferencing {@code properties},
     * which is null in the virtual-directory case.
     */
    private <T> T unlessVirtualDirectory(Supplier<T> value) {
        return this.isVirtualDirectory ? null : value.get();
    }

    // For AzureFileSystemProvider.readAttributes(Path, String, ...): maps each attribute name to its getter.
    static Map<String, Supplier<Object>> getAttributeSuppliers(AzureBlobFileAttributes attributes) {
        Map<String, Supplier<Object>> map = new HashMap<>();
        map.put("creationTime", attributes::creationTime);
        map.put("lastModifiedTime", attributes::lastModifiedTime);
        map.put("eTag", attributes::eTag);
        map.put("blobHttpHeaders", attributes::blobHttpHeaders);
        map.put("blobType", attributes::blobType);
        map.put("copyId", attributes::copyId);
        map.put("copyStatus", attributes::copyStatus);
        map.put("copySource", attributes::copySource);
        map.put("copyProgress", attributes::copyProgress);
        map.put("copyCompletionTime", attributes::copyCompletionTime);
        map.put("copyStatusDescription", attributes::copyStatusDescription);
        map.put("isServerEncrypted", attributes::isServerEncrypted);
        map.put("accessTier", attributes::accessTier);
        map.put("isAccessTierInferred", attributes::isAccessTierInferred);
        map.put("archiveStatus", attributes::archiveStatus);
        map.put("accessTierChangeTime", attributes::accessTierChangeTime);
        map.put("metadata", attributes::metadata);
        map.put("isRegularFile", attributes::isRegularFile);
        map.put("isDirectory", attributes::isDirectory);
        map.put("isVirtualDirectory", attributes::isVirtualDirectory);
        map.put("isSymbolicLink", attributes::isSymbolicLink);
        map.put("isOther", attributes::isOther);
        map.put("size", attributes::size);
        return map;
    }

    /**
     * Returns the creation time. The creation time is the time that the file was created. Returns null if this is a
     * virtual directory.
     *
     * @return The creation time or null if this is a virtual directory
     */
    @Override
    public FileTime creationTime() {
        return unlessVirtualDirectory(() -> FileTime.from(this.properties.getCreationTime().toInstant()));
    }

    /**
     * Returns the time of last modification. Returns null if this is a virtual directory
     *
     * @return the time of last modification or null if this is a virtual directory
     */
    @Override
    public FileTime lastModifiedTime() {
        return unlessVirtualDirectory(() -> FileTime.from(this.properties.getLastModified().toInstant()));
    }

    /**
     * Returns the eTag of the blob or null if this is a virtual directory
     *
     * @return the eTag of the blob or null if this is a virtual directory
     */
    public String eTag() {
        return unlessVirtualDirectory(this.properties::getETag);
    }

    /**
     * Returns the {@link BlobHttpHeaders} of the blob or null if this is a virtual directory.
     *
     * @return {@link BlobHttpHeaders} or null if this is a virtual directory
     */
    public BlobHttpHeaders blobHttpHeaders() {
        /*
        We return these all as one value, so it's consistent with the way of setting, especially the setAttribute method
        that accepts a string argument for the name of the property. Returning them individually would mean we have to
        support setting them individually as well, which is not possible due to service constraints.
        */
        return unlessVirtualDirectory(() -> new BlobHttpHeaders()
            .setContentType(this.properties.getContentType())
            .setContentLanguage(this.properties.getContentLanguage())
            .setContentMd5(this.properties.getContentMd5())
            .setContentDisposition(this.properties.getContentDisposition())
            .setContentEncoding(this.properties.getContentEncoding())
            .setCacheControl(this.properties.getCacheControl()));
    }

    /**
     * Returns the type of the blob or null if this is a virtual directory
     *
     * @return the type of the blob or null if this is a virtual directory
     */
    public BlobType blobType() {
        return unlessVirtualDirectory(this.properties::getBlobType);
    }

    /**
     * Returns the identifier of the last copy operation. If this blob hasn't been the target of a copy operation or has
     * been modified since this won't be set. Returns null if this is a virtual directory
     *
     * @return the identifier of the last copy operation or null if this is a virtual directory
     */
    public String copyId() {
        return unlessVirtualDirectory(this.properties::getCopyId);
    }

    /**
     * Returns the status of the last copy operation. If this blob hasn't been the target of a copy operation or has
     * been modified since this won't be set. Returns null if this is a virtual directory
     *
     * @return the status of the last copy operation or null if this is a virtual directory
     */
    public CopyStatusType copyStatus() {
        return unlessVirtualDirectory(this.properties::getCopyStatus);
    }

    /**
     * Returns the source blob URL from the last copy operation. If this blob hasn't been the target of a copy operation
     * or has been modified since this won't be set. Returns null if this is a virtual directory
     *
     * @return the source blob URL from the last copy operation or null if this is a virtual directory
     */
    public String copySource() {
        return unlessVirtualDirectory(this.properties::getCopySource);
    }

    /**
     * Returns the number of bytes copied and total bytes in the source from the last copy operation (bytes copied/total
     * bytes). If this blob hasn't been the target of a copy operation or has been modified since this won't be set.
     * Returns null if this is a virtual directory
     *
     * @return the number of bytes copied and total bytes in the source from the last copy operation null if this is a
     * virtual directory
     */
    public String copyProgress() {
        return unlessVirtualDirectory(this.properties::getCopyProgress);
    }

    /**
     * Returns the completion time of the last copy operation. If this blob hasn't been the target of a copy operation
     * or has been modified since this won't be set. Returns null if this is a virtual directory.
     *
     * @return the completion time of the last copy operation or null if this is a virtual directory
     */
    public OffsetDateTime copyCompletionTime() {
        return unlessVirtualDirectory(this.properties::getCopyCompletionTime);
    }

    /**
     * Returns the description of the last copy failure, this is set when the {@link #copyStatus() getCopyStatus} is
     * {@link CopyStatusType#FAILED failed} or {@link CopyStatusType#ABORTED aborted}. If this blob hasn't been the
     * target of a copy operation or has been modified since this won't be set. Returns null if this is a virtual
     * directory.
     *
     * @return the description of the last copy failure or null if this is a virtual directory
     */
    public String copyStatusDescription() {
        return unlessVirtualDirectory(this.properties::getCopyStatusDescription);
    }

    /**
     * Returns the status of the blob being encrypted on the server or null if this is a virtual directory.
     *
     * @return the status of the blob being encrypted on the server or null if this is a virtual directory
     */
    public Boolean isServerEncrypted() {
        return unlessVirtualDirectory(this.properties::isServerEncrypted);
    }

    /**
     * Returns the tier of the blob. This is only set for Page blobs on a premium storage account or for Block blobs on
     * blob storage or general purpose V2 account. Returns null if this is a virtual directory.
     *
     * @return the tier of the blob or null if this is a virtual directory
     */
    public AccessTier accessTier() {
        return unlessVirtualDirectory(this.properties::getAccessTier);
    }

    /**
     * Returns the status of the tier being inferred for the blob. This is only set for Page blobs on a premium storage
     * account or for Block blobs on blob storage or general purpose V2 account. Returns null if this is a virtual
     * directory.
     *
     * @return the status of the tier being inferred for the blob or null if this is a virtual directory
     */
    public Boolean isAccessTierInferred() {
        return unlessVirtualDirectory(this.properties::isAccessTierInferred);
    }

    /**
     * Returns the archive status of the blob. This is only for blobs on a blob storage and general purpose v2 account.
     * Returns null if this is a virtual directory.
     *
     * @return the archive status of the blob or null if this is a virtual directory
     */
    public ArchiveStatus archiveStatus() {
        return unlessVirtualDirectory(this.properties::getArchiveStatus);
    }

    /**
     * Returns the time when the access tier for the blob was last changed or null if this is a virtual directory.
     *
     * @return the time when the access tier for the blob was last changed or null if this is a virtual directory
     */
    public OffsetDateTime accessTierChangeTime() {
        return unlessVirtualDirectory(this.properties::getAccessTierChangeTime);
    }

    /**
     * Returns the metadata associated with this blob or null if this is a virtual directory.
     *
     * @return the metadata associated with this blob or null if this is a virtual directory
     */
    public Map<String, String> metadata() {
        return unlessVirtualDirectory(() -> Collections.unmodifiableMap(this.properties.getMetadata()));
    }

    /**
     * Returns the time of last modification or null if this is a virtual directory.
     * <p>
     * Last access time is not supported by the blob service. In this case, it is typical for implementations to return
     * the {@link #lastModifiedTime()}.
     *
     * @return the time of last modification or null if this is a virtual directory
     */
    @Override
    public FileTime lastAccessTime() {
        return unlessVirtualDirectory(() -> FileTime.from(this.properties.getLastAccessedTime().toInstant()));
    }

    /**
     * Tells whether the file is a regular file with opaque content.
     *
     * @return whether the file is a regular file.
     */
    @Override
    public boolean isRegularFile() {
        // A blob carrying the directory metadata marker is a concrete directory, not a regular file.
        return !this.isVirtualDirectory
            && !this.properties.getMetadata().getOrDefault(AzureResource.DIR_METADATA_MARKER, "false").equals("true");
    }

    /**
     * Tells whether the file is a directory.
     * <p>
     * Will return true if the directory is a concrete or virtual directory. See
     * {@link AzureFileSystemProvider#createDirectory(Path, FileAttribute[])} for more information on virtual and
     * concrete directories.
     *
     * @return whether the file is a directory
     */
    @Override
    public boolean isDirectory() {
        return !this.isRegularFile();
    }

    /**
     * Tells whether the file is a virtual directory.
     * <p>
     * See {@link AzureFileSystemProvider#createDirectory(Path, FileAttribute[])} for more information on virtual and
     * concrete directories.
     *
     * @return whether the file is a virtual directory
     */
    public boolean isVirtualDirectory() {
        return this.isVirtualDirectory;
    }

    /**
     * Tells whether the file is a symbolic link.
     *
     * @return false. Symbolic links are not supported.
     */
    @Override
    public boolean isSymbolicLink() {
        return false;
    }

    /**
     * Tells whether the file is something other than a regular file, directory, or symbolic link.
     *
     * @return false. No other object types are supported.
     */
    @Override
    public boolean isOther() {
        return false;
    }

    /**
     * Returns the size of the file (in bytes).
     *
     * @return the size of the file, or 0 if this is a virtual directory
     */
    @Override
    public long size() {
        // Primitive return type: a virtual directory reports size 0 rather than null.
        return !this.isVirtualDirectory ? properties.getBlobSize() : 0;
    }

    /**
     * Returns the url of the resource.
     *
     * @return The file key, which is the url.
     */
    @Override
    public Object fileKey() {
        return resource.getBlobClient().getBlobUrl();
    }
}
package com.azure.storage.blob.nio;

import com.azure.core.util.logging.ClientLogger;
import com.azure.storage.blob.BlobContainerClient;
import com.azure.storage.blob.models.BlobItem;
import com.azure.storage.blob.models.BlobListDetails;
import com.azure.storage.blob.models.ListBlobsOptions;

import java.io.IOException;
import java.nio.file.DirectoryIteratorException;
import java.nio.file.DirectoryStream;
import java.nio.file.Path;
import java.util.HashSet;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.Set;

/**
 * A type for iterating over the contents of a directory.
 *
 * This type is asynchronously closeable, i.e. closing the stream from any thread will cause the stream to stop
 * returning elements at that point.
 *
 * {@inheritDoc}
 */
public final class AzureDirectoryStream implements DirectoryStream<Path> {
    private static final ClientLogger LOGGER = new ClientLogger(AzureDirectoryStream.class);

    private final AzurePath path;
    private final DirectoryStream.Filter<? super Path> filter;
    private final AzureDirectoryIterator iterator;
    private boolean iteratorRequested = false;
    boolean closed = false; // Read by the iterator so that closing halts iteration from any thread.

    AzureDirectoryStream(AzurePath path, DirectoryStream.Filter<? super Path> filter) throws IOException {
        this.path = path;
        this.filter = filter;
        this.iterator = new AzureDirectoryIterator(this, this.path, this.filter);
    }

    /**
     * Returns this stream's single iterator; the NIO contract allows only one per stream.
     */
    @Override
    public Iterator<Path> iterator() {
        if (this.iteratorRequested) {
            throw LoggingUtility.logError(LOGGER,
                new IllegalStateException("Only one iterator may be requested from a given directory stream"));
        }
        this.iteratorRequested = true;
        return this.iterator;
    }

    @Override
    public void close() throws IOException {
        this.closed = true;
    }

    private static class AzureDirectoryIterator implements Iterator<Path> {
        private static final ClientLogger LOGGER = new ClientLogger(AzureDirectoryIterator.class);

        private final AzureDirectoryStream parentStream;
        private final DirectoryStream.Filter<? super Path> filter;
        private final Iterator<BlobItem> blobResults;
        private final AzurePath path;
        private final Path withoutRoot;
        private Path nextElement = null; // Look-ahead buffer filled by hasNext(), drained by next().
        private final Set<String> seenDirectories;

        AzureDirectoryIterator(AzureDirectoryStream parentStream, AzurePath path,
            DirectoryStream.Filter<? super Path> filter) throws IOException {
            this.parentStream = parentStream;
            this.filter = filter;
            this.path = path;

            /*
            Resolving two paths requires that either both have a root or neither does. Because the paths returned from
            listing will never have a root, we prepare a copy of the list path without a root for quick resolving later.
            */
            Path root = this.path.getRoot();
            this.withoutRoot = root == null ? this.path : root.relativize(this.path);

            this.seenDirectories = new HashSet<>();

            // Metadata is needed so concrete-directory marker blobs can be recognized for de-duplication.
            ListBlobsOptions listOptions = new ListBlobsOptions()
                .setDetails(new BlobListDetails().setRetrieveMetadata(true));

            BlobContainerClient containerClient;
            if (path.isRoot()) {
                // A root path lists the whole container; strip the trailing separator to get the container name.
                String containerName = path.toString().substring(0, path.toString().length() - 1);
                AzureFileSystem afs = ((AzureFileSystem) path.getFileSystem());
                containerClient = ((AzureFileStore) afs.getFileStore(containerName)).getContainerClient();
            } else {
                // A non-root path lists only blobs under its own prefix.
                AzureResource azureResource = new AzureResource(path);
                listOptions.setPrefix(azureResource.getBlobClient().getBlobName() + AzureFileSystem.PATH_SEPARATOR);
                containerClient = azureResource.getContainerClient();
            }
            this.blobResults = containerClient
                .listBlobsByHierarchy(AzureFileSystem.PATH_SEPARATOR, listOptions, null).iterator();
        }

        @Override
        public boolean hasNext() {
            AzurePath.ensureFileSystemOpen(path);

            // Closing the parent stream halts iteration.
            if (parentStream.closed) {
                return false;
            }

            // A previous hasNext() call may already have buffered an element.
            if (this.nextElement != null) {
                return true;
            }

            // Advance through listing results until one passes the filter and the duplicate check.
            while (this.blobResults.hasNext()) {
                BlobItem item = this.blobResults.next();
                Path candidate = toChildPath(item);
                try {
                    if (this.filter.accept(candidate) && notSeenBefore(candidate, item)) {
                        this.nextElement = candidate;
                        return true;
                    }
                } catch (IOException e) {
                    // The NIO contract requires filter failures to surface as DirectoryIteratorException.
                    throw LoggingUtility.logError(LOGGER, new DirectoryIteratorException(e));
                }
            }
            return false;
        }

        @Override
        public Path next() {
            if (this.nextElement == null && !this.hasNext()) { // hasNext() populates nextElement when one exists.
                throw LoggingUtility.logError(LOGGER, new NoSuchElementException());
            }
            Path result = this.nextElement;
            this.nextElement = null;
            return result;
        }

        @Override
        public void remove() {
            throw LoggingUtility.logError(LOGGER, new UnsupportedOperationException());
        }

        /*
        Listing results return the full blob path, and we don't want to duplicate the path we listed off of, so we
        relativize to remove it, then resolve against the original path for the final result.
        */
        private Path toChildPath(BlobItem blobItem) {
            Path relativeResult = this.withoutRoot.relativize(
                this.path.getFileSystem().getPath(blobItem.getName()));
            return this.path.resolve(relativeResult);
        }

        /*
        If there is a concrete directory with children, a given path will be returned twice: once as the marker blob
        and once as the prefix for its children. We don't want to return the item twice, and we have no guarantees on
        result ordering, so we have to maintain a cache of directory paths we've seen in order to de-dup.
        */
        private boolean notSeenBefore(Path candidate, BlobItem blob) {
            boolean isPrefix = blob.isPrefix() != null && blob.isPrefix();
            boolean hasDirMarker = blob.getMetadata() != null
                && blob.getMetadata().containsKey(AzureResource.DIR_METADATA_MARKER);

            // A plain blob (neither a prefix nor a directory marker) can only appear once in the results.
            if (!isPrefix && !hasDirMarker) {
                return true;
            }

            // Set.add returns false when the path was already recorded, i.e. this is the duplicate occurrence.
            return this.seenDirectories.add(candidate.toString());
        }
    }
}
+ */ +public final class AzureFileStore extends FileStore { + private static final ClientLogger LOGGER = new ClientLogger(AzureFileStore.class); + + private static final String AZURE_FILE_STORE_TYPE = "AzureBlobContainer"; + + private final AzureFileSystem parentFileSystem; + private final BlobContainerClient containerClient; + + + AzureFileStore(AzureFileSystem parentFileSystem, String containerName, Boolean skipConnectionCheck) + throws IOException { + // A FileStore should only ever be created by a FileSystem. + if (Objects.isNull(parentFileSystem)) { + throw LoggingUtility.logError(LOGGER, new IllegalStateException("AzureFileStore cannot be instantiated " + + "without a parent FileSystem")); + } + this.parentFileSystem = parentFileSystem; + this.containerClient = this.parentFileSystem.getBlobServiceClient().getBlobContainerClient(containerName); + + if (skipConnectionCheck == null || !skipConnectionCheck) { + try { + // This also serves as our connection check. + if (!this.containerClient.exists()) { + this.containerClient.create(); + } + } catch (Exception e) { + throw LoggingUtility.logError(LOGGER, new IOException("There was an error in establishing the existence of " + + "container: " + containerName, e)); + } + } + } + + /** + * Returns the name of the container that underlies this file store. + * + * @return the name of the container that underlies this file store. + */ + @Override + public String name() { + return this.containerClient.getBlobContainerName(); + } + + /** + * Returns the {@code String "AzureBlobContainer"} to indicate that the file store is backed by a remote blob + * container in Azure Storage. + * + * @return {@code "AzureBlobContainer"} + */ + @Override + public String type() { + return AZURE_FILE_STORE_TYPE; + } + + /** + * Always returns false. + *

+ * It may be the case that the authentication method provided to this file system only + * supports read operations and hence the file store is implicitly read only in this view, but that does not + * imply the underlying container/file store is inherently read only. Creating/specifying read only file stores + * is not currently supported. + * + * @return false. + */ + @Override + public boolean isReadOnly() { + return false; + } + + /** + * Returns the size, in bytes, of the file store. + *

+ * Containers do not limit the amount of data stored. This method will always return max long. + * + * @return the size of the file store. + * @throws IOException If an I/O error occurs. + */ + @Override + public long getTotalSpace() throws IOException { + return Long.MAX_VALUE; + } + + /** + * Returns the number of bytes available to this Java virtual machine on the file store. + *

+ * Containers do not limit the amount of data stored. This method will always return max long. + * + * @return the number of bytes available on the file store. + * @throws IOException If an I/O error occurs. + */ + @Override + public long getUsableSpace() throws IOException { + return Long.MAX_VALUE; + } + + /** + * Returns the number of unallocated bytes in the file store. + *

+ * Containers do not limit the amount of data stored. This method will always return max long. + * + * @return the number of unallocated bytes in the file store. + * @throws IOException If an I/O error occurs. + */ + @Override + public long getUnallocatedSpace() throws IOException { + return Long.MAX_VALUE; + } + + /** + * Tells whether this file store supports the file attributes identified by the given file attribute view. + *

+ * All file stores in this file system support the following views: + *

    + *
  • {@link java.nio.file.attribute.BasicFileAttributeView}
  • + *
  • {@link AzureBasicFileAttributeView}
  • + *
  • {@link AzureBlobFileAttributeView}
  • + *
+ * + * @param type the file attribute view type + * @return Whether the file attribute view is supported. + */ + @Override + public boolean supportsFileAttributeView(Class type) { + return AzureFileSystem.SUPPORTED_ATTRIBUTE_VIEWS.containsKey(type); + } + + /** + * Tells whether this file store supports the file attributes identified by the given file attribute view. + *

+ * All file stores in this file system support the following views: + *

    + *
  • {@link java.nio.file.attribute.BasicFileAttributeView}
  • + *
  • {@link AzureBasicFileAttributeView}
  • + *
  • {@link AzureBlobFileAttributeView}
  • + *
+ * + * @param name the name of the file attribute view + * @return whether the file attribute view is supported. + */ + @Override + public boolean supportsFileAttributeView(String name) { + return AzureFileSystem.SUPPORTED_ATTRIBUTE_VIEWS.containsValue(name); + } + + /** + * Returns a FileStoreAttributeView of the given type. + *

+ * This method always returns null as no {@link FileStoreAttributeView} is currently supported. + * + * @param aClass a class + * @return null + */ + @Override + public V getFileStoreAttributeView(Class aClass) { + return null; + } + + /** + * Unsupported. + *

+ * This method always throws an {@code UnsupportedOperationException} as no {@link FileStoreAttributeView} is + * currently supported. + * + * @param s a string + * @return The attribute value. + * @throws UnsupportedOperationException unsupported + * @throws IOException never + */ + @Override + public Object getAttribute(String s) throws IOException { + throw LoggingUtility.logError(LOGGER, new UnsupportedOperationException("FileStoreAttributeViews aren't" + + " supported.")); + } + + BlobContainerClient getContainerClient() { + return this.containerClient; + } +} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureFileSystem.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureFileSystem.java new file mode 100644 index 00000000000..6f981b1b45e --- /dev/null +++ b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureFileSystem.java @@ -0,0 +1,492 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package com.azure.storage.blob.nio; + +import com.azure.core.credential.AzureSasCredential; +import com.azure.core.http.HttpClient; +import com.azure.core.http.policy.HttpLogDetailLevel; +import com.azure.core.http.policy.HttpPipelinePolicy; +import com.azure.core.util.CoreUtils; +import com.azure.core.util.logging.ClientLogger; +import com.azure.storage.blob.BlobServiceClient; +import com.azure.storage.blob.BlobServiceClientBuilder; +import com.azure.storage.blob.implementation.util.BlobUserAgentModificationPolicy; +import com.azure.storage.common.StorageSharedKeyCredential; +import com.azure.storage.common.policy.RequestRetryOptions; +import com.azure.storage.common.policy.RetryPolicyType; + +import java.io.IOException; +import java.nio.file.FileStore; +import java.nio.file.FileSystem; +import java.nio.file.InvalidPathException; +import java.nio.file.Path; +import java.nio.file.PathMatcher; +import java.nio.file.WatchService; +import java.nio.file.attribute.BasicFileAttributeView; +import java.nio.file.attribute.FileAttributeView; +import java.nio.file.attribute.UserPrincipalLookupService; +import java.nio.file.spi.FileSystemProvider; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.regex.PatternSyntaxException; +import java.util.stream.Collectors; + +/** + * Implements Java's {@link FileSystem} interface for Azure Blob Storage. + *

+ * The following behavior is specific to this FileSystem: + *

+ * In the hierarchy of this file system, an {@code AzureFileSystem} corresponds to an Azure Blob Storage account. A + * file store is represented by a container in the storage account. Each container has one root directory. + *

+ * Closing the file system will not block on outstanding operations. Any operations in progress will be allowed to + * terminate naturally after the file system is closed, though no further operations may be started after the parent + * file system is closed. + *

+ * All instances of {@code AzureFileSystem} are opened for read-write access. + *

+ * For a more complete description of the uses for the constants described here, please see the instructions for opening + * and configuring a FileSystem in the docs of {@link FileSystemProvider}. + */ +public final class AzureFileSystem extends FileSystem { + private static final ClientLogger LOGGER = new ClientLogger(AzureFileSystem.class); + + // Configuration constants for blob clients. + /** + * Expected type: String + */ + public static final String AZURE_STORAGE_SHARED_KEY_CREDENTIAL = "AzureStorageSharedKeyCredential"; + + /** + * Expected type: String + */ + public static final String AZURE_STORAGE_SAS_TOKEN_CREDENTIAL = "AzureStorageSasTokenCredential"; + + /** + * Expected type: com.azure.core.http.policy.HttpLogLevelDetail + */ + public static final String AZURE_STORAGE_HTTP_LOG_DETAIL_LEVEL = "AzureStorageHttpLogDetailLevel"; + + /** + * Expected type: Integer + */ + public static final String AZURE_STORAGE_MAX_TRIES = "AzureStorageMaxTries"; + + /** + * Expected type: Integer + */ + public static final String AZURE_STORAGE_TRY_TIMEOUT = "AzureStorageTryTimeout"; + + /** + * Expected type: Long + */ + public static final String AZURE_STORAGE_RETRY_DELAY_IN_MS = "AzureStorageRetryDelayInMs"; + + /** + * Expected type: Long + */ + public static final String AZURE_STORAGE_MAX_RETRY_DELAY_IN_MS = "AzureStorageMaxRetryDelayInMs"; + + /** + * Expected type: com.azure.storage.common.policy.RetryPolicyType + */ + public static final String AZURE_STORAGE_RETRY_POLICY_TYPE = "AzureStorageRetryPolicyType"; + + /** + * Expected type: String + */ + public static final String AZURE_STORAGE_SECONDARY_HOST = "AzureStorageSecondaryHost"; + + /** + * Expected type: Long + */ + public static final String AZURE_STORAGE_UPLOAD_BLOCK_SIZE = "AzureStorageUploadBlockSize"; + + /** + * Expected type: Integer + */ + public static final String AZURE_STORAGE_MAX_CONCURRENCY_PER_REQUEST = "AzureStorageMaxConcurrencyPerRequest"; + + /** + * Expected type: Long + */ + public static 
final String AZURE_STORAGE_PUT_BLOB_THRESHOLD = "AzureStoragePutBlobThreshold"; + + /** + * Expected type: Integer + */ + public static final String AZURE_STORAGE_DOWNLOAD_RESUME_RETRIES = "AzureStorageDownloadResumeRetries"; + + static final String AZURE_STORAGE_HTTP_CLIENT = "AzureStorageHttpClient"; // undocumented; for test. + static final String AZURE_STORAGE_HTTP_POLICIES = "AzureStorageHttpPolicies"; // undocumented; for test. + + /** + * Expected type: String + */ + public static final String AZURE_STORAGE_FILE_STORES = "AzureStorageFileStores"; + + /** + * Expected type: Boolean + */ + public static final String AZURE_STORAGE_SKIP_INITIAL_CONTAINER_CHECK = "AzureStorageSkipInitialContainerCheck"; + + static final String PATH_SEPARATOR = "/"; + + private static final Map PROPERTIES = + CoreUtils.getProperties("azure-storage-blob-nio.properties"); + private static final String SDK_NAME = "name"; + private static final String SDK_VERSION = "version"; + private static final String CLIENT_NAME = PROPERTIES.getOrDefault(SDK_NAME, "UnknownName"); + private static final String CLIENT_VERSION = PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion"); + + static final Map, String> SUPPORTED_ATTRIBUTE_VIEWS; + static { + Map, String> map = new HashMap<>(); + map.put(BasicFileAttributeView.class, "basic"); + map.put(AzureBasicFileAttributeView.class, "azureBasic"); + map.put(AzureBlobFileAttributeView.class, "azureBlob"); + SUPPORTED_ATTRIBUTE_VIEWS = Collections.unmodifiableMap(map); + } + + private final AzureFileSystemProvider parentFileSystemProvider; + private final BlobServiceClient blobServiceClient; + private final Long blockSize; + private final Long putBlobThreshold; + private final Integer maxConcurrencyPerRequest; + private final Integer downloadResumeRetries; + private final Map fileStores; + private FileStore defaultFileStore; + private boolean closed; + + AzureFileSystem(AzureFileSystemProvider parentFileSystemProvider, String endpoint, Map config) + 
throws IOException { + // A FileSystem should only ever be instantiated by a provider. + if (Objects.isNull(parentFileSystemProvider)) { + throw LoggingUtility.logError(LOGGER, new IllegalArgumentException("AzureFileSystem cannot be instantiated" + + " without a parent FileSystemProvider")); + } + this.parentFileSystemProvider = parentFileSystemProvider; + + // Read configurations and build client. + try { + this.blobServiceClient = this.buildBlobServiceClient(endpoint, config); + this.blockSize = (Long) config.get(AZURE_STORAGE_UPLOAD_BLOCK_SIZE); + this.putBlobThreshold = (Long) config.get(AZURE_STORAGE_PUT_BLOB_THRESHOLD); + this.maxConcurrencyPerRequest = (Integer) config.get(AZURE_STORAGE_MAX_CONCURRENCY_PER_REQUEST); + this.downloadResumeRetries = (Integer) config.get(AZURE_STORAGE_DOWNLOAD_RESUME_RETRIES); + + // Initialize and ensure access to FileStores. + this.fileStores = this.initializeFileStores(config); + } catch (RuntimeException e) { + throw LoggingUtility.logError(LOGGER, new IllegalArgumentException("There was an error parsing the " + + "configurations map. Please ensure all fields are set to a legal value of the correct type.", e)); + } catch (IOException e) { + throw LoggingUtility.logError(LOGGER, + new IOException("Initializing FileStores failed. FileSystem could not be opened.", e)); + } + + this.closed = false; + } + + /** + * Returns the provider that created this file system. + * + * @return the provider that created this file system. + */ + @Override + public FileSystemProvider provider() { + return this.parentFileSystemProvider; + } + + /** + * Closes this file system. + *

+ * After a file system is closed then all subsequent access to the file system, either by methods defined by this + * class or on objects associated with this file system, throw ClosedFileSystemException. If the file system is + * already closed then invoking this method has no effect. + *

+ * Closing the file system will not block on outstanding operations. Any operations in progress will be allowed to + * terminate naturally after the file system is closed, though no further operations may be started after the + * parent file system is closed. + *

+ * Once closed, a file system with the same identifier as the one closed may be re-opened. + * + * @throws IOException If an I/O error occurs. + */ + @Override + public void close() throws IOException { + this.closed = true; + this.parentFileSystemProvider.closeFileSystem(this.getFileSystemUrl()); + } + + /** + * Tells whether this file system is open. + * + * @return whether this file system is open. + */ + @Override + public boolean isOpen() { + return !this.closed; + } + + /** + * Tells whether this file system allows only read-only access to its file stores. + *

+ * Always returns false. It may be the case that the authentication method provided to this file system only + * supports read operations and hence the file system is implicitly read only in this view, but that does not + * imply the underlying account/file system is inherently read only. Creating/specifying read only file + * systems is not supported. + * + * @return false + */ + @Override + public boolean isReadOnly() { + return false; + } + + /** + * Returns the name separator, represented as a string. + *

+ * The separator used in this file system is {@code "/"}. + * + * @return "/" + */ + @Override + public String getSeparator() { + return AzureFileSystem.PATH_SEPARATOR; + } + + /** + * Returns an object to iterate over the paths of the root directories. + *

+ * The list of root directories corresponds to the list of available file stores and therefore containers specified + * upon initialization. A root directory always takes the form {@code ":"}. This list will + * respect the parameters provided during initialization. + *

+ * If a finite list of containers was provided on start up, this list will not change during the lifetime of this + * object. If containers are added to the account after initialization, they will be ignored. If a container is + * deleted or otherwise becomes unavailable, its root directory will still be returned but operations to it will + * fail. + * + * @return an object to iterate over the paths of the root directories + */ + @Override + public Iterable getRootDirectories() { + /* + Should we add different initialization options later: + If the file system was set to use all containers in the account, the account will be re-queried and the + list may grow or shrink if containers were added or deleted. + */ + return fileStores.keySet().stream() + .map(name -> this.getPath(name + AzurePath.ROOT_DIR_SUFFIX)) + .collect(Collectors.toList()); + } + + /** + * Returns an object to iterate over the underlying file stores + *

+ * This list will respect the parameters provided during initialization. + *

+ * If a finite list of containers was provided on start up, this list will not change during the lifetime of this + * object. If containers are added to the account after initialization, they will be ignored. If a container is + * deleted or otherwise becomes unavailable, its root directory will still be returned but operations to it will + * fail. + */ + @Override + public Iterable getFileStores() { + /* + Should we add different initialization options later: + If the file system was set to use all containers in the account, the account will be re-queried and the + list may grow or shrink if containers were added or deleted. + */ + return this.fileStores.values(); + } + + /** + * Returns the set of the names of the file attribute views supported by this FileSystem. + *

+ * This file system supports the following views: + *

    + *
  • {@link java.nio.file.attribute.BasicFileAttributeView}
  • + *
  • {@link AzureBasicFileAttributeView}
  • + *
  • {@link AzureBlobFileAttributeView}
  • + *
+ */ + @Override + public Set supportedFileAttributeViews() { + return new HashSet<>(SUPPORTED_ATTRIBUTE_VIEWS.values()); + } + + /** + * Converts a path string, or a sequence of more that when joined form a path string, to a Path. + *

+ * If more does not specify any elements then the value of the first parameter is the path string to convert. If + * more specifies one or more elements then each non-empty string, including first, is considered to be a sequence + * of name elements (see Path) and is joined to form a path string. The more will be joined using the name + * separator. + *

+ * Each name element will be {@code String}-joined to the other elements by this file system's first path separator. + * Naming conventions and allowed characters are as + * defined + * by the Azure Blob Storage service. The root component is interpreted as the container name and all name elements + * are interpreted as a part of the blob name. The character {@code ':'} is only allowed in the root component and + * must be the last character of the root component. + * + * @param first the path string or initial part of the path string + * @param more additional strings to be joined to form the path string + * @throws InvalidPathException if the path string cannot be converted. + */ + @Override + public Path getPath(String first, String... more) { + return new AzurePath(this, first, more); + } + + /** + * Unsupported. + * + * @param s the matcher + * @throws UnsupportedOperationException unsupported. + * @throws IllegalArgumentException never + * @throws PatternSyntaxException never + */ + @Override + public PathMatcher getPathMatcher(String s) throws IllegalArgumentException, PatternSyntaxException { + throw LoggingUtility.logError(LOGGER, new UnsupportedOperationException()); + } + + /** + * Unsupported. + * + * @throws UnsupportedOperationException unsupported. + */ + @Override + public UserPrincipalLookupService getUserPrincipalLookupService() { + throw LoggingUtility.logError(LOGGER, new UnsupportedOperationException()); + } + + /** + * Unsupported. + * + * @throws UnsupportedOperationException unsupported. + * @throws IOException Never thrown. 
+ */ + @Override + public WatchService newWatchService() throws IOException { + throw LoggingUtility.logError(LOGGER, new UnsupportedOperationException()); + } + + String getFileSystemUrl() { + return this.blobServiceClient.getAccountUrl(); + } + + BlobServiceClient getBlobServiceClient() { + return this.blobServiceClient; + } + + private BlobServiceClient buildBlobServiceClient(String endpoint, Map config) { + BlobServiceClientBuilder builder = new BlobServiceClientBuilder() + .endpoint(endpoint); + + // Set the credentials. + if (config.containsKey(AZURE_STORAGE_SHARED_KEY_CREDENTIAL)) { + builder.credential((StorageSharedKeyCredential) config.get(AZURE_STORAGE_SHARED_KEY_CREDENTIAL)); + } else if (config.containsKey(AZURE_STORAGE_SAS_TOKEN_CREDENTIAL)) { + builder.credential((AzureSasCredential) config.get(AZURE_STORAGE_SAS_TOKEN_CREDENTIAL)); + } else { + throw LoggingUtility.logError(LOGGER, new IllegalArgumentException(String.format("No credentials were " + + "provided. Please specify one of the following when constructing an AzureFileSystem: %s, %s.", + AZURE_STORAGE_SHARED_KEY_CREDENTIAL, AZURE_STORAGE_SAS_TOKEN_CREDENTIAL))); + } + + // Configure options and client. 
+ builder.httpLogOptions(BlobServiceClientBuilder.getDefaultHttpLogOptions() + .setLogLevel((HttpLogDetailLevel) config.get(AZURE_STORAGE_HTTP_LOG_DETAIL_LEVEL))); + + RequestRetryOptions retryOptions = new RequestRetryOptions( + (RetryPolicyType) config.get(AZURE_STORAGE_RETRY_POLICY_TYPE), + (Integer) config.get(AZURE_STORAGE_MAX_TRIES), + (Integer) config.get(AZURE_STORAGE_TRY_TIMEOUT), + (Long) config.get(AZURE_STORAGE_RETRY_DELAY_IN_MS), + (Long) config.get(AZURE_STORAGE_MAX_RETRY_DELAY_IN_MS), + (String) config.get(AZURE_STORAGE_SECONDARY_HOST)); + builder.retryOptions(retryOptions); + + builder.httpClient((HttpClient) config.get(AZURE_STORAGE_HTTP_CLIENT)); + + // Add BlobUserAgentModificationPolicy + builder.addPolicy(new BlobUserAgentModificationPolicy(CLIENT_NAME, CLIENT_VERSION)); + + if (config.containsKey(AZURE_STORAGE_HTTP_POLICIES)) { + for (HttpPipelinePolicy policy : (HttpPipelinePolicy[]) config.get(AZURE_STORAGE_HTTP_POLICIES)) { + builder.addPolicy(policy); + } + } + + return builder.buildClient(); + } + + private Map initializeFileStores(Map config) throws IOException { + String fileStoreNames = (String) config.get(AZURE_STORAGE_FILE_STORES); + if (CoreUtils.isNullOrEmpty(fileStoreNames)) { + throw LoggingUtility.logError(LOGGER, new IllegalArgumentException("The list of FileStores cannot be " + + "null.")); + } + + Boolean skipConnectionCheck = (Boolean) config.get(AZURE_STORAGE_SKIP_INITIAL_CONTAINER_CHECK); + Map fileStores = new HashMap<>(); + for (String fileStoreName : fileStoreNames.split(",")) { + FileStore fs = new AzureFileStore(this, fileStoreName, skipConnectionCheck); + if (this.defaultFileStore == null) { + this.defaultFileStore = fs; + } + fileStores.put(fileStoreName, fs); + } + return fileStores; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + AzureFileSystem that = (AzureFileSystem) o; + return 
Objects.equals(this.getFileSystemUrl(), that.getFileSystemUrl()); + } + + @Override + public int hashCode() { + return Objects.hash(this.getFileSystemUrl()); + } + + Path getDefaultDirectory() { + return this.getPath(this.defaultFileStore.name() + AzurePath.ROOT_DIR_SUFFIX); + } + + FileStore getFileStore(String name) throws IOException { + FileStore store = this.fileStores.get(name); + if (store == null) { + throw LoggingUtility.logError(LOGGER, new IOException("Invalid file store: " + name)); + } + return store; + } + + Long getBlockSize() { + return this.blockSize; + } + + Long getPutBlobThreshold() { + return this.putBlobThreshold; + } + + Integer getMaxConcurrencyPerRequest() { + return this.maxConcurrencyPerRequest; + } +} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureFileSystemProvider.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureFileSystemProvider.java new file mode 100644 index 00000000000..6881341d218 --- /dev/null +++ b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureFileSystemProvider.java @@ -0,0 +1,1182 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package com.azure.storage.blob.nio; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.io.UncheckedIOException; +import java.net.HttpURLConnection; +import java.net.URI; +import java.nio.channels.SeekableByteChannel; +import java.nio.file.AccessDeniedException; +import java.nio.file.AccessMode; +import java.nio.file.CopyOption; +import java.nio.file.DirectoryNotEmptyException; +import java.nio.file.DirectoryStream; +import java.nio.file.FileAlreadyExistsException; +import java.nio.file.FileStore; +import java.nio.file.FileSystem; +import java.nio.file.FileSystemAlreadyExistsException; +import java.nio.file.FileSystemNotFoundException; +import java.nio.file.Files; +import java.nio.file.LinkOption; +import java.nio.file.NoSuchFileException; +import java.nio.file.NotDirectoryException; +import java.nio.file.OpenOption; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.nio.file.StandardOpenOption; +import java.nio.file.attribute.BasicFileAttributeView; +import java.nio.file.attribute.BasicFileAttributes; +import java.nio.file.attribute.FileAttribute; +import java.nio.file.attribute.FileAttributeView; +import java.nio.file.spi.FileSystemProvider; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.function.Consumer; +import java.util.function.Supplier; + +import com.azure.core.util.CoreUtils; +import com.azure.core.util.logging.ClientLogger; +import com.azure.core.util.polling.SyncPoller; +import com.azure.storage.blob.models.BlobCopyInfo; +import com.azure.storage.blob.models.BlobErrorCode; +import com.azure.storage.blob.models.BlobRequestConditions; +import 
com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.models.ParallelTransferOptions; + +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +/** + * The {@code AzureFileSystemProvider} is Azure Storage's implementation of the nio interface on top of Azure Blob + * Storage. + *

+ * Particular care should be taken when working with a remote storage service. This implementation makes no guarantees + * on behavior or state should other processes operate on the same data concurrently; file systems from this provider + * will assume they have exclusive access to their data and will behave without regard for potential of interfering + * applications. Moreover, remote file stores introduce higher latencies. Therefore, additional consideration should be + * given to managing concurrency: race conditions are more likely to manifest and network failures occur more frequently + * than disk failures. These and other such distributed application scenarios must be considered when working with this + * file system. While the {@code AzureFileSystem} will ensure it takes appropriate steps towards robustness and + * reliability, the application developer must design around these failure scenarios and have fallback and retry options + * available. + *

+ * The Azure Blob Storage service backing these APIs is not a true FileSystem, nor is it the goal of this implementation + * to force Azure Blob Storage to act like a full-fledged file system. Some APIs and scenarios will remain unsupported + * indefinitely until they may be sensibly implemented. Other APIs may experience lower performance than is expected + * because of the number of network requests needed to ensure correctness. The javadocs for each type and method should + * also be read carefully to understand what guarantees are made and how they may differ from the contract defined by + * {@link FileSystemProvider}. + *

+ * The scheme for this provider is {@code "azb"}, and the format of the URI to identify an {@code AzureFileSystem} is + * {@code "azb://?endpoint="}. The endpoint of the Storage account is used to uniquely identify the + * filesystem. + *

+ * An {@link AzureFileSystem} is backed by an account. An {@link AzureFileStore} is backed by a container. Any number of + * containers may be specified as file stores upon creation of the file system. When a file system is created, + * it will try to retrieve the properties of each container to ensure connection to the account. If any of the + * containers does not exist, it will be created. Failure to access or create containers as necessary will result in + * an exception and failure to create the file system. Any data existing in the containers will be preserved and + * accessible via the file system, though customers should be aware that it must be in a format understandable by + * the types in this package or behavior will be undefined. + *

+ * {@link #newFileSystem(URI, Map)} will check for the following keys in the configuration map and expect the named + * types. Any entries not listed here will be ignored. Note that {@link AzureFileSystem} has public constants defined + * for each of the keys for convenience. Most values are documented in the blob package. Any values which are unique to + * nio will be documented here. + *

    + *
  • {@code AzureStorageSharedKeyCredential:}{@link com.azure.storage.common.StorageSharedKeyCredential}
  • + *
  • {@code AzureStorageSasTokenCredential:}{@link com.azure.core.credential.AzureSasCredential}
  • + *
  • {@code AzureStorageHttpLogDetailLevel:}{@link com.azure.core.http.policy.HttpLogDetailLevel}
  • + *
  • {@code AzureStorageMaxTries:}{@link Integer}
  • + *
  • {@code AzureStorageTryTimeout:}{@link Integer}
  • + *
  • {@code AzureStorageRetryDelayInMs:}{@link Long}
  • + *
  • {@code AzureStorageMaxRetryDelayInMs:}{@link Long}
  • + *
  • {@code AzureStorageRetryPolicyType:}{@link com.azure.storage.common.policy.RetryPolicyType}
  • + *
  • {@code AzureStorageSecondaryHost:}{@link String}
  • + *
  • {@code AzureStorageSecondaryHost:}{@link Integer}
  • + *
  • {@code AzureStorageBlockSize:}{@link Long}
  • + *
  • {@code AzureStoragePutBlobThreshold:}{@link Long}
  • + *
  • {@code AzureStorageMaxConcurrencyPerRequest:}{@link Integer}
  • + *
  • {@code AzureStorageDownloadResumeRetries:}{@link Integer}
  • + *
  • {@code AzureStorageFileStores:}{@link String}
  • + *
  • {@code AzureStorageSkipInitialContainerCheck:}{@link Boolean}. Indicates that the initial check which + * confirms the existence of the containers meant to act as file stores should be skipped. This can be useful in + * cases where a sas token that is scoped to only one file is used to authenticate.
  • + *
+ *

+ * Either an account key or a sas token must be specified. If both are provided, the account key will be preferred. If + * a sas token is specified, the customer must take care that it has appropriate permissions to perform the actions + * demanded of the file system in a given workflow, including the initial connection check specified above. The same + * token will be applied to all operations. + *

+ * An iterable of file stores must also be provided; each entry should simply be the name of a container. The first + * container listed will be considered the default file store and the root directory of which will be the file system's + * default directory. All other values listed are used to configure the underlying + * {@link com.azure.storage.blob.BlobServiceClient}. Please refer to that type for more information on these values. + * + * @see FileSystemProvider + */ +public final class AzureFileSystemProvider extends FileSystemProvider { + /* + * A static inner class is used to hold the ClientLogger for AzureFileSystemProvider to defer creating the + * ClientLogger until logging is needed. Some implementations of SLF4J may make calls to load FileSystemProviders + * which results in a load FileSystemProviders to occur during a call to load FileSystemProviders. This results in + * the JVM to throw an exception that a circular call to load FileSystemProviders has occurred. + */ + private static final class ClientLoggerHolder { + private static final ClientLogger LOGGER = new ClientLogger(AzureFileSystemProvider.class); + } + + /** + * A helper for setting the HTTP properties when creating a directory. + */ + public static final String CONTENT_TYPE = "Content-Type"; + + /** + * A helper for setting the HTTP properties when creating a directory. + */ + public static final String CONTENT_DISPOSITION = "Content-Disposition"; + + /** + * A helper for setting the HTTP properties when creating a directory. + */ + public static final String CONTENT_LANGUAGE = "Content-Language"; + + /** + * A helper for setting the HTTP properties when creating a directory. + */ + public static final String CONTENT_ENCODING = "Content-Encoding"; + + /** + * A helper for setting the HTTP properties when creating a directory. + */ + public static final String CONTENT_MD5 = "Content-MD5"; + + /** + * A helper for setting the HTTP properties when creating a directory. 
+ */ + public static final String CACHE_CONTROL = "Cache-Control"; + + private static final String ENDPOINT_QUERY_KEY = "endpoint"; + private static final int COPY_TIMEOUT_SECONDS = 30; + private static final Set OUTPUT_STREAM_DEFAULT_OPTIONS = + Collections.unmodifiableSet(new HashSet<>(Arrays.asList(StandardOpenOption.CREATE, + StandardOpenOption.WRITE, + StandardOpenOption.TRUNCATE_EXISTING))); + private static final Set OUTPUT_STREAM_SUPPORTED_OPTIONS = + Collections.unmodifiableSet(new HashSet<>(Arrays.asList( + StandardOpenOption.CREATE_NEW, + StandardOpenOption.CREATE, + StandardOpenOption.WRITE, + // Though we don't actually truncate, the same result is achieved by overwriting the destination. + StandardOpenOption.TRUNCATE_EXISTING))); + + private final ConcurrentMap openFileSystems; + + + // Specs require a public zero argument constructor. + /** + * Creates an AzureFileSystemProvider. + */ + public AzureFileSystemProvider() { + this.openFileSystems = new ConcurrentHashMap<>(); + } + + /** + * Returns the URI scheme that identifies this provider: {@code "azb".} + * + * @return {@code "azb"} + */ + @Override + public String getScheme() { + return "azb"; + } + + /** + * Constructs a new FileSystem object identified by a URI. + *

+ * The format of a {@code URI} identifying a file system is {@code "azb://?endpoint=<endpoint>"}.
+ *

+ * Once closed, a file system with the same identifier may be reopened. + * + * @param uri URI reference + * @param config A map of provider specific properties to configure the file system + * @return a new file system. + * @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met, or the env parameter + * does not contain properties required by the provider, or a property value is invalid. + * @throws IOException If an I/O error occurs. + * @throws SecurityException never + * @throws FileSystemAlreadyExistsException If the file system has already been created. + */ + @Override + public FileSystem newFileSystem(URI uri, Map config) throws IOException { + String endpoint = extractAccountEndpoint(uri); + + if (this.openFileSystems.containsKey(endpoint)) { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, + new FileSystemAlreadyExistsException("Name: " + endpoint)); + } + + AzureFileSystem afs = new AzureFileSystem(this, endpoint, config); + this.openFileSystems.put(endpoint, afs); + + return afs; + } + + /** + * Returns an existing FileSystem created by this provider. + *

+ * The format of a {@code URI} identifying a file system is {@code "azb://?endpoint=<endpoint>"}. + *

+ * Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a
+ * file system with the same identifier may be reopened.
+ *
+ * @param uri URI reference
+ * @return the file system
+ * @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met
+ * @throws FileSystemNotFoundException If the file system does not exist
+ * @throws SecurityException never
+ */
+ @Override
+ public FileSystem getFileSystem(URI uri) {
+ String endpoint = extractAccountEndpoint(uri);
+ if (!this.openFileSystems.containsKey(endpoint)) {
+ throw LoggingUtility.logError(ClientLoggerHolder.LOGGER,
+ new FileSystemNotFoundException("Name: " + endpoint));
+ }
+ return this.openFileSystems.get(endpoint);
+ }
+
+ /**
+ * Return a Path object by converting the given URI. The resulting Path is associated with a FileSystem that already
+ * exists.
+ *
+ * @param uri The URI to convert
+ * @return The path identified by the URI.
+ * @throws IllegalArgumentException If the URI scheme does not identify this provider or other preconditions on the
+ * uri parameter do not hold
+ * @throws FileSystemNotFoundException if the file system identified by the query does not exist
+ * @throws SecurityException never
+ *
+ * @see #getFileSystem(URI) for information on the URI format
+ */
+ @Override
+ public Path getPath(URI uri) {
+ return getFileSystem(uri).getPath(uri.getPath());
+ }
+
+ /**
+ * Opens or creates a file, returning a seekable byte channel to access the file.
+ *

+ * This method is primarily offered to support some jdk convenience methods such as + * {@link Files#createFile(Path, FileAttribute[])} which requires opening a channel and closing it. A channel may + * only be opened in read mode OR write mode. It may not be opened in read/write mode. Seeking is supported for + * reads, but not for writes. Modifications to existing files is not permitted--only creating new files or + * overwriting existing files. + *

+ * This type is not threadsafe to prevent having to hold locks across network calls.
+ *
+ * @param path the path of the file to open
+ * @param set options specifying how the file should be opened
+ * @param fileAttributes an optional list of file attributes to set atomically when creating the file
+ * @return a new seekable byte channel
+ * @throws UnsupportedOperationException Operation is not supported.
+ * @throws IllegalArgumentException if the set contains an invalid combination of options
+ * @throws FileAlreadyExistsException if a file of that name already exists and the CREATE_NEW option is specified
+ * (optional specific exception)
+ * @throws IOException If an I/O error occurs.
+ * @throws SecurityException never
+ */
+ @Override
+ public SeekableByteChannel newByteChannel(Path path, Set set,
+ FileAttribute... fileAttributes) throws IOException {
+ if (Objects.isNull(set)) {
+ set = Collections.emptySet();
+ }
+
+ if (set.contains(StandardOpenOption.WRITE)) {
+ return new AzureSeekableByteChannel(
+ (NioBlobOutputStream) this.newOutputStreamInternal(path, set, fileAttributes), path);
+ } else {
+ return new AzureSeekableByteChannel(
+ (NioBlobInputStream) this.newInputStream(path, set.toArray(new OpenOption[0])), path);
+ }
+ }
+
+ /**
+ * Opens an {@link InputStream} to the given path.
+ *

+ * The stream will not attempt to read or buffer the entire file. However, when fetching data, it will always + * request the same size chunk of several MB to prevent network thrashing on small reads. Mark and reset are + * supported. + *

+ * Only {@link StandardOpenOption#READ} is supported. Any other option will throw. + * + * @param path the path to the file to open + * @param options options specifying how the file is opened + * @return a new input stream + * @throws IllegalArgumentException if an invalid combination of options is specified + * @throws UnsupportedOperationException if an unsupported option is specified + * @throws IOException If an I/O error occurs. + * @throws SecurityException never + */ + @Override + public InputStream newInputStream(Path path, OpenOption... options) throws IOException { + // Validate options. Only read is supported. + if (options.length > 1 || (options.length > 0 && !options[0].equals(StandardOpenOption.READ))) { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, + new UnsupportedOperationException("Only the read option is supported.")); + } + + AzureResource resource = new AzureResource(path); + AzurePath.ensureFileSystemOpen(resource.getPath()); + + // Ensure the path points to a file. + if (!resource.checkDirStatus().equals(DirectoryStatus.NOT_A_DIRECTORY)) { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, + new IOException("Path either does not exist or points to a directory." + + "Path must point to a file. Path: " + path.toString())); + } + + // Note that methods on BlobInputStream are already synchronized. + return new NioBlobInputStream(resource.getBlobClient().openInputStream(), resource.getPath()); + } + + /** + * Opens an {@link OutputStream} to the given path. The resulting file will be stored as a block blob. + *

+ * The only supported options are {@link StandardOpenOption#CREATE}, {@link StandardOpenOption#CREATE_NEW}, + * {@link StandardOpenOption#WRITE}, {@link StandardOpenOption#TRUNCATE_EXISTING}. Any other options will throw an + * {@link UnsupportedOperationException}. {@code WRITE} and {@code TRUNCATE_EXISTING} must be specified or an + * {@link IllegalArgumentException} will be thrown. Hence, files cannot be updated, only overwritten completely. + *

+ * This stream will not attempt to buffer the entire file, however some buffering will be done for potential + * optimizations and to avoid network thrashing. Specifically, up to + * {@link AzureFileSystem#AZURE_STORAGE_PUT_BLOB_THRESHOLD} bytes will be buffered initially. If that threshold is + * exceeded, the data will be broken into chunks and sent in blocks, and writes will be buffered into sizes of + * {@link AzureFileSystem#AZURE_STORAGE_UPLOAD_BLOCK_SIZE}. The maximum number of buffers of this size to be + * allocated is defined by {@link AzureFileSystem#AZURE_STORAGE_MAX_CONCURRENCY_PER_REQUEST}, which also configures + * the level of parallelism with which we may write and thus may affect write speeds as well. + *

+ * The data is only committed when the stream is closed. Hence, data cannot be read from the destination until the
+ * stream is closed. When the close method returns, it is guaranteed that, barring any errors, the data is finalized
+ * and available for reading.
+ *

+ * Writing happens asynchronously. Bytes passed for writing are stored until either the threshold or block size are + * met at which time they are sent to the service. When the write method returns, there is no guarantee about which + * phase of this process the data is in other than it has been accepted and will be written. Again, closing will + * guarantee that the data is written and available. + *

+ * Flush is a no-op as regards data transfers, but it can be used to check the state of the stream for errors. + * This can be a useful tool because writing happens asynchronously, and therefore an error from a previous write + * may not otherwise be thrown unless the stream is flushed, closed, or written to again. + * + * @param path the path to the file to open or create + * @param options options specifying how the file is opened + * @return a new output stream + * @throws IllegalArgumentException if an invalid combination of options is specified + * @throws UnsupportedOperationException if an unsupported option is specified + * @throws IOException If an I/O error occurs. + * @throws SecurityException never + */ + @Override + public OutputStream newOutputStream(Path path, OpenOption... options) throws IOException { + return newOutputStreamInternal(path, new HashSet<>(Arrays.asList(options))); + } + + OutputStream newOutputStreamInternal(Path path, Set optionsSet, + FileAttribute... fileAttributes) throws IOException { + // If options are empty, add Create, Write, TruncateExisting as defaults per nio docs. + if (optionsSet == null || optionsSet.size() == 0) { + optionsSet = OUTPUT_STREAM_DEFAULT_OPTIONS; + } + + // Check for unsupported options. + for (OpenOption option : optionsSet) { + if (!OUTPUT_STREAM_SUPPORTED_OPTIONS.contains(option)) { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, + new UnsupportedOperationException("Unsupported option: " + option.toString())); + } + } + + /* + Write must be specified. Either create_new or truncate must be specified. This is to ensure that no edits or + appends are allowed. 
+ */ + if (!optionsSet.contains(StandardOpenOption.WRITE) + || !(optionsSet.contains(StandardOpenOption.TRUNCATE_EXISTING) + || optionsSet.contains(StandardOpenOption.CREATE_NEW))) { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, + new IllegalArgumentException("Write and either CreateNew or TruncateExisting must be specified to open " + + "an OutputStream")); + } + + AzureResource resource = new AzureResource(path); + AzurePath.ensureFileSystemOpen(resource.getPath()); + DirectoryStatus status = resource.checkDirStatus(); + + // Cannot write to a directory. + if (DirectoryStatus.isDirectory(status)) { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, + new IOException("Cannot open an OutputStream to a directory. Path: " + path.toString())); + } + + // Writing to an empty location requires a create option. + if (status.equals(DirectoryStatus.DOES_NOT_EXIST) + && !(optionsSet.contains(StandardOpenOption.CREATE) + || optionsSet.contains(StandardOpenOption.CREATE_NEW))) { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, + new IOException("Writing to an empty location requires a create option. Path: " + path.toString())); + } + + // Cannot write to an existing file if create new was specified. + if (status.equals(DirectoryStatus.NOT_A_DIRECTORY) && optionsSet.contains(StandardOpenOption.CREATE_NEW)) { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, + new IOException("A file already exists at this location and " + + "CREATE_NEW was specified. Path: " + path.toString())); + } + + // Create options based on file system config + AzureFileSystem fs = (AzureFileSystem) (path.getFileSystem()); + Integer blockSize = fs.getBlockSize() == null ? null : fs.getBlockSize().intValue(); + Integer putBlobThreshold = fs.getPutBlobThreshold() == null ? 
null : fs.getPutBlobThreshold().intValue(); + ParallelTransferOptions pto = new ParallelTransferOptions(blockSize, fs.getMaxConcurrencyPerRequest(), null, + putBlobThreshold); + + // Add an extra etag check for create new + BlobRequestConditions rq = null; + if (optionsSet.contains(StandardOpenOption.CREATE_NEW)) { + rq = new BlobRequestConditions().setIfNoneMatch("*"); + } + + // For parsing properties and metadata + if (fileAttributes == null) { + fileAttributes = new FileAttribute[0]; + } + resource.setFileAttributes(Arrays.asList(fileAttributes)); + + return new NioBlobOutputStream(resource.getBlobOutputStream(pto, rq), resource.getPath()); + } + + /** + * Returns an {@link AzureDirectoryStream} for iterating over the contents of a directory. The elements returned by + * the directory stream's iterator are of type Path, each one representing an entry in the directory. The Path + * objects are obtained as if by resolving the name of the directory entry against dir. The entries returned by the + * iterator are filtered by the given filter. + *

+ * When not using the try-with-resources construct, then directory stream's close method should be invoked after + * iteration is completed to free any resources held for the open directory. + *

+ * Where the filter terminates due to an uncaught error or runtime exception then it is propagated to the hasNext or + * next method. Where an IOException is thrown, it results in the hasNext or next method throwing a + * DirectoryIteratorException with the IOException as the cause. + * + * @param path the path to the directory + * @param filter the directory stream filter + * @return a new and open {@code DirectoryStream} object + * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. + * @throws NotDirectoryException if the file could not otherwise be opened because it is not a directory + * @throws IOException If an I/O error occurs. + * @throws SecurityException never + */ + @Override + public DirectoryStream newDirectoryStream(Path path, DirectoryStream.Filter filter) + throws IOException { + if (!(path instanceof AzurePath)) { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, + new IllegalArgumentException("This provider cannot operate on subtypes of Path other than AzurePath")); + } + AzurePath.ensureFileSystemOpen(path); + + /* + Ensure the path is a directory. Note that roots are always directories. The case of an invalid root will be + caught in instantiating the stream below. + + Possible optimization later is to save the result of the list call to use as the first list call inside the + stream rather than a list call for checking the status and a list call for listing. + */ + if (!((AzurePath) path).isRoot() && !(new AzureResource(path).checkDirectoryExists())) { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, new NotDirectoryException(path.toString())); + } + + return new AzureDirectoryStream((AzurePath) path, filter); + } + + /** + * Creates a new directory at the specified path. + *

+ * The existence of a directory in the {@code AzureFileSystem} is defined on two levels. Weak existence is + * defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also + * known as a virtual directory and enables the file system to work with containers that were pre-loaded + * with data by another source but need to be accessed by this file system. Strong existence is defined as + * the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length + * blob whose name is the directory path with a particular metadata field indicating the blob's status as a + * directory. This is also known as a concrete directory. Directories created by this file system will + * strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target + * marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name + * prefix. + *

+ * This method fulfills the nio contract of: "The check for the existence of the file and the creation of the + * directory if it does not exist are a single operation that is atomic with respect to all other filesystem + * activities that might affect the directory." More specifically, this method will atomically check for strong + * existence of another file or directory at the given path and fail if one is present. On the other hand, we + * only check for weak existence of the parent to determine if the given path is valid. Additionally, the + * action of checking whether the parent exists, is not atomic with the creation of the directory. Note that + * while it is possible that the parent may be deleted between when the parent is determined to exist and the + * creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the + * child will never be left floating and unreachable. The different checks on parent and child is due to limitations + * in the Storage service API. + *

+ * There may be some unintuitive behavior when working with directories in this file system, particularly virtual + * directories (usually those not created by this file system). A virtual directory will disappear as soon as all + * its children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of + * calling this method, this method will still return success and create a concrete directory at the target + * location. In other words, it is possible to "double create" a directory if it first weakly exists and then is + * strongly created. This is both because it is impossible to atomically check if a virtual directory exists while + * creating a concrete directory and because such behavior will have minimal side effects--no files will be + * overwritten and the directory will still be available for writing as intended, though it may not be empty. This + * is not a complete list of such unintuitive behavior. + *

+ * This method will attempt to extract standard HTTP content headers from the list of file attributes to set them + * as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be + * converted to a {@code String} except the Content-MD5 attribute which expects a {@code byte[]}. + * When extracting the content headers, the following strings will be used for comparison (constants for these + * values can be found on this type): + *

+ * <ul>
+ * <li>{@code Content-Type}</li>
+ * <li>{@code Content-Disposition}</li>
+ * <li>{@code Content-Language}</li>
+ * <li>{@code Content-Encoding}</li>
+ * <li>{@code Content-MD5}</li>
+ * <li>{@code Cache-Control}</li>
+ * </ul>
+ * Note that these properties also have a particular semantic in that if one is specified, all are updated. In other + * words, if any of the above is set, all those that are not set will be cleared. See the + * Azure Docs for more + * information. + * + * @param path the directory to create + * @param fileAttributes an optional list of file attributes to set atomically when creating the directory + * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. + * @throws UnsupportedOperationException if the array contains an attribute that cannot be set atomically when + * creating the directory + * @throws FileAlreadyExistsException if a directory could not otherwise be created because a file of that name + * already exists + * @throws IOException If an I/O error occurs. + * @throws SecurityException never + */ + @Override + public void createDirectory(Path path, FileAttribute... fileAttributes) throws IOException { + fileAttributes = fileAttributes == null ? new FileAttribute[0] : fileAttributes; + + // Get the destination for the directory. Will throw if path is a root. + AzureResource azureResource = new AzureResource(path); + AzurePath.ensureFileSystemOpen(azureResource.getPath()); + + // Check if parent exists. If it does, atomically check if a file already exists and create a new dir if not. 
+ if (azureResource.checkParentDirectoryExists()) { + try { + azureResource.setFileAttributes(Arrays.asList(fileAttributes)) + .putDirectoryBlob(new BlobRequestConditions().setIfNoneMatch("*")); + } catch (BlobStorageException e) { + if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT + && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, + new FileAlreadyExistsException(azureResource.getPath().toString())); + } else { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, + new IOException("An error occurred when creating the directory", e)); + } + } + } else { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, + new IOException("Parent directory does not exist for path: " + azureResource.getPath())); + } + } + + /** + * Deletes the specified resource. + *

+ * This method is not atomic with respect to other file system operations. It is possible to delete a file in use by + * another process, and doing so will not immediately invalidate any channels open to that file--they will simply + * start to fail. Root directories cannot be deleted even when empty. + * + * @param path the path to the file to delete + * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. + * @throws NoSuchFileException if the file does not exist + * @throws DirectoryNotEmptyException if the file is a directory and could not otherwise be deleted because the + * directory is not empty + * @throws IOException If an I/O error occurs. + * @throws SecurityException never + */ + @Override + public void delete(Path path) throws IOException { + // Basic validation. Must be an AzurePath. Cannot be a root. + AzureResource azureResource = new AzureResource(path); + AzurePath.ensureFileSystemOpen(azureResource.getPath()); + + // Check directory status--possibly throw DirectoryNotEmpty or NoSuchFile. + DirectoryStatus dirStatus = azureResource.checkDirStatus(); + if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, new NoSuchFileException(path.toString())); + } + if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, new DirectoryNotEmptyException(path.toString())); + } + + // After all validation has completed, delete the resource. + try { + azureResource.getBlobClient().delete(); + } catch (BlobStorageException e) { + if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, new NoSuchFileException(path.toString())); + } + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, new IOException(e)); + } + } + + /** + * Copies the resource at the source location to the destination. + *

+ * This method is not atomic with respect to other file system operations. More specifically, the checks necessary + * to validate the inputs and state of the file system are not atomic with the actual copying of data. If the copy + * is triggered, the copy itself is atomic and only a complete copy will ever be left at the destination. + *

+ * In addition to those in the docs for {@link FileSystemProvider#copy(Path, Path, CopyOption...)}, this method has + * the following requirements for successful completion. {@link StandardCopyOption#COPY_ATTRIBUTES} must be passed + * as it is impossible not to copy blob properties; if this option is not passed, an + * {@link UnsupportedOperationException} will be thrown. Neither the source nor the destination can be a root + * directory; if either is a root directory, an {@link IllegalArgumentException} will be thrown. The parent + * directory of the destination must at least weakly exist; if it does not, an {@link IOException} will be thrown. + * The only supported option other than {@link StandardCopyOption#COPY_ATTRIBUTES} is + * {@link StandardCopyOption#REPLACE_EXISTING}; the presence of any other option will result in an + * {@link UnsupportedOperationException}. + *

+ * This method supports both virtual and concrete directories as both the source and destination. Unlike when + * creating a directory, the existence of a virtual directory at the destination will cause this operation to fail. + * This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as + * mentioned above, this check is not atomic with the creation of the resultant directory. + * + * @param source the path to the file to copy + * @param destination the path to the target file + * @param copyOptions specifying how the copy should be done + * @throws UnsupportedOperationException if the array contains a copy option that is not supported + * @throws FileAlreadyExistsException if the target file exists but cannot be replaced because the REPLACE_EXISTING + * option is not specified + * @throws DirectoryNotEmptyException the REPLACE_EXISTING option is specified but the file cannot be replaced + * because it is a non-empty directory + * @throws IOException If an I/O error occurs. + * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. + * @throws SecurityException never + * @see #createDirectory(Path, FileAttribute[]) for more information about directory existence. + */ + @Override + public void copy(Path source, Path destination, CopyOption... copyOptions) throws IOException { + // If paths point to the same file, operation is a no-op. + if (source.equals(destination)) { + return; + } + + // Read and validate options. + // Remove accepted options as we find them. Anything left we don't support. 
+ boolean replaceExisting = false; + List optionsList = new ArrayList<>(Arrays.asList(copyOptions)); + if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, new UnsupportedOperationException( + "StandardCopyOption.COPY_ATTRIBUTES must be specified as the service will always copy " + + "file attributes.")); + } + optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES); + if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) { + replaceExisting = true; + optionsList.remove(StandardCopyOption.REPLACE_EXISTING); + } + if (!optionsList.isEmpty()) { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, + new UnsupportedOperationException("Unsupported copy option found. Only " + + "StandardCopyOption.COPY_ATTRIBUTES and StandardCopyOption.REPLACE_EXISTING are supported.")); + } + + // Validate paths. Build resources. + // Copying a root directory or attempting to create/overwrite a root directory is illegal. + AzureResource sourceRes = new AzureResource(source); + AzurePath.ensureFileSystemOpen(sourceRes.getPath()); + AzureResource destinationRes = new AzureResource(destination); + AzurePath.ensureFileSystemOpen(destinationRes.getPath()); + + // Check destination is not a directory with children. + DirectoryStatus destinationStatus = destinationRes.checkDirStatus(); + if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, new DirectoryNotEmptyException(destination.toString())); + } + + /* + Set request conditions if we should not overwrite. We can error out here if we know something already exists, + but we will also create request conditions as a safeguard against overwriting something that was created + between our check and put. 
+ */ + BlobRequestConditions requestConditions = null; + if (!replaceExisting) { + if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, + new FileAlreadyExistsException(destinationRes.getPath().toString())); + } + requestConditions = new BlobRequestConditions().setIfNoneMatch("*"); + } + + /* + More path validation + + Check that the parent for the destination exists. We only need to perform this check if there is nothing + currently at the destination, for if the destination exists, its parent at least weakly exists and we + can skip a service call. + */ + if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !destinationRes.checkParentDirectoryExists()) { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, + new IOException("Parent directory of destination location does not exist. The destination path is " + + "therefore invalid. Destination: " + destinationRes.getPath())); + } + + /* + Try to copy the resource at the source path. + + There is an optimization here where we try to do the copy first and only check for a virtual directory if + there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual + directories, however, this requires three requests: failed copy, check status, create directory. Depending on + customer scenarios and how many virtual directories they copy, it could be better to check the directory status + first and then do a copy or createDir, which would always be two requests for all resource types. + */ + try { + SyncPoller pollResponse = + destinationRes.getBlobClient().beginCopy(sourceRes.getBlobClient().getBlobUrl(), null, null, null, + null, requestConditions, null); + pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS)); + } catch (BlobStorageException e) { + // If the source was not found, it could be because it's a virtual directory. Check the status. 
+ // If a non-dir resource existed, it would have been copied above. This check is therefore sufficient. + if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND) + && !sourceRes.checkDirStatus().equals(DirectoryStatus.DOES_NOT_EXIST)) { + /* + We already checked that the parent exists and validated the paths above, so we can put the blob + directly. + */ + destinationRes.putDirectoryBlob(requestConditions); + } else { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, new IOException(e)); + } + } catch (RuntimeException e) { // To better log possible timeout from poller. + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, new IOException(e)); + } + } + + // Used for checking the status of the root directory. To be implemented later when needed. + /*int checkRootDirStatus(BlobContainerClient rootClient) { + + }*/ + + /** + * Unsupported. + * + * @param path path + * @param path1 path + * @param copyOptions options + * @throws UnsupportedOperationException Operation is not supported. + */ + @Override + public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, new UnsupportedOperationException()); + } + + /** + * Unsupported. + * + * @param path path + * @param path1 path + * @throws UnsupportedOperationException Operation is not supported. + */ + @Override + public boolean isSameFile(Path path, Path path1) throws IOException { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, new UnsupportedOperationException()); + } + + /** + * Always returns false as hidden files are not supported. + * + * @param path the path + * @return false + * @throws IOException If an I/O error occurs. + * @throws SecurityException never + */ + @Override + public boolean isHidden(Path path) throws IOException { + return false; + } + + /** + * Unsupported. + * + * @param path path + * @return the file store where the file is stored. 
+ * @throws UnsupportedOperationException Operation is not supported. + * @throws IOException If an I/O error occurs. + * @throws SecurityException never + */ + @Override + public FileStore getFileStore(Path path) throws IOException { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, new UnsupportedOperationException()); + } + + /** + * Checks the existence, and optionally the accessibility, of a file. + *
+ * <p>
+ * This method may only be used to check the existence of a file. It is not possible to determine the permissions + * granted to a given client, so if any mode argument is specified, an {@link UnsupportedOperationException} will be + * thrown. + * + * @param path the path to the file to check + * @param accessModes The access modes to check; may have zero elements + * @throws NoSuchFileException if a file does not exist + * @throws java.nio.file.AccessDeniedException the requested access would be denied or the access cannot be + * determined because the Java virtual machine has insufficient privileges or other reasons + * @throws IOException If an I/O error occurs. + * @throws SecurityException never + */ + @Override + public void checkAccess(Path path, AccessMode... accessModes) throws IOException { + if (accessModes != null && accessModes.length != 0) { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, + new AccessDeniedException("The access cannot be determined.")); + } + AzurePath.ensureFileSystemOpen(path); + + /* + Some static utility methods in the jdk require checking access on a root. ReadAttributes is not supported on + roots as they are containers. Furthermore, we always assume that roots exist as they are verified at creation + and cannot be deleted by the file system. Thus, we prefer a short circuit for roots. + */ + if (path instanceof AzurePath && ((AzurePath) path).isRoot()) { + return; + } + + // Read attributes already wraps BlobStorageException in an IOException. 
+ try { + readAttributes(path, BasicFileAttributes.class); + } catch (IOException e) { + Throwable cause = e.getCause(); + if (cause instanceof BlobStorageException + && BlobErrorCode.BLOB_NOT_FOUND.equals(((BlobStorageException) cause).getErrorCode())) { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, new NoSuchFileException(path.toString())); + } else { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, e); + } + } + } + + /** + * Returns a file attribute view of a given type. + *
+ * <p>
+ * See {@link AzureBasicFileAttributeView} and {@link AzureBlobFileAttributeView} for more information. + *
+ * <p>
+ * Reading attributes on a virtual directory will return {@code null} for most properties other than + * {@link AzureBlobFileAttributes#isVirtualDirectory()}, which will return true. See + * {@link #createDirectory(Path, FileAttribute[])} for more information on virtual directories. + * + * @param path the path to the file + * @param type the Class object corresponding to the file attribute view + * @param linkOptions ignored + * @return a file attribute view of the specified type, or null if the attribute view type is not available + */ + @Override + @SuppressWarnings("unchecked") + public V getFileAttributeView(Path path, Class type, LinkOption... linkOptions) { + /* + No resource validation is necessary here. That can happen at the time of making a network requests internal to + the view object. + */ + if (type == BasicFileAttributeView.class || type == AzureBasicFileAttributeView.class) { + return (V) new AzureBasicFileAttributeView(path); + } else if (type == AzureBlobFileAttributeView.class) { + return (V) new AzureBlobFileAttributeView(path); + } else { + return null; + } + } + + /** + * Reads a file's attributes as a bulk operation. + *
+ * <p>
+ * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information. + *
+ * <p>
+ * Reading attributes on a virtual directory will return {@code null} for most properties other than + * {@link AzureBlobFileAttributes#isVirtualDirectory()}, which will return true. See + * {@link #createDirectory(Path, FileAttribute[])} for more information on virtual directories. + * + * @param path the path to the file + * @param type the Class of the file attributes required to read + * @param linkOptions ignored + * @return the file attributes + * @throws UnsupportedOperationException if an attributes of the given type are not supported + * @throws IOException If an I/O error occurs. + * @throws SecurityException never + */ + @Override + @SuppressWarnings("unchecked") + public A readAttributes(Path path, Class type, LinkOption... linkOptions) + throws IOException { + AzurePath.ensureFileSystemOpen(path); + + Class view; + if (type == BasicFileAttributes.class || type == AzureBasicFileAttributes.class) { + view = AzureBasicFileAttributeView.class; + } else if (type == AzureBlobFileAttributes.class) { + view = AzureBlobFileAttributeView.class; + } else { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, new UnsupportedOperationException()); + } + + /* + Resource validation will happen in readAttributes of the view. We don't want to double-check, and checking + internal to the view ensures it is always checked no matter which code path is taken. + */ + return (A) getFileAttributeView(path, view, linkOptions).readAttributes(); + } + + /** + * Reads a set of file attributes as a bulk operation. + *
+ * <p>
+ * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information. + *
+ * <p>
+ * Reading attributes on a virtual directory will return {@code null} for all properties other than + * {@link AzureBlobFileAttributes#isVirtualDirectory()}, which will return true. See + * {@link #createDirectory(Path, FileAttribute[])} for more information on virtual directories. + * + * @param path the path to the file + * @param attributes the attributes to read + * @param linkOptions ignored + * @return a map of the attributes returned; may be empty. The map's keys are the attribute names, its values are + * the attribute values + * @throws UnsupportedOperationException if an attributes of the given type are not supported + * @throws IllegalArgumentException if no attributes are specified or an unrecognized attributes is specified + * @throws IOException If an I/O error occurs. + * @throws SecurityException never + */ + @Override + public Map readAttributes(Path path, String attributes, LinkOption... linkOptions) + throws IOException { + if (attributes == null) { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, + new IllegalArgumentException("Attribute string cannot be null.")); + } + + AzurePath.ensureFileSystemOpen(path); + + Map results = new HashMap<>(); + + /* + AzureBlobFileAttributes can do everything the basic attributes can do and more. There's no need to instantiate + one of each if both are specified somewhere in the list as that will waste a network call. This can be + generified later if we need to add more attribute types, but for now we can stick to just caching the supplier + for a single attributes object. + */ + Map> attributeSuppliers = null; // Initialized later as needed. + String viewType; + String attributeList; + String[] parts = attributes.split(":"); + + if (parts.length > 2) { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, + new IllegalArgumentException("Invalid format for attribute string: " + attributes)); + } + + if (parts.length == 1) { + viewType = "basic"; // Per jdk docs. 
+ attributeList = attributes; + } else { + viewType = parts[0]; + attributeList = parts[1]; + } + + /* + For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs + state that "basic" must be supported, so we funnel to azureBasic. + */ + if ("basic".equals(viewType)) { + viewType = AzureBasicFileAttributeView.NAME; + } + if (!viewType.equals(AzureBasicFileAttributeView.NAME) && !viewType.equals(AzureBlobFileAttributeView.NAME)) { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, + new UnsupportedOperationException("Invalid attribute view: " + viewType)); + } + + for (String attributeName : attributeList.split(",")) { + /* + We rely on the azureBlobFAV to actually do the work here as mentioned above, but if basic is specified, we + should at least validate that the attribute is available on a basic view. + */ + // TODO: Put these strings in constants + if (viewType.equals(AzureBasicFileAttributeView.NAME)) { + if (!AzureBasicFileAttributes.ATTRIBUTE_STRINGS.contains(attributeName) && !"*".equals(attributeName)) { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, + new IllegalArgumentException("Invalid attribute. View: " + viewType + + ". Attribute: " + attributeName)); + } + } + + // As mentioned, azure blob can fulfill requests to both kinds of views. + // Populate the supplier if we haven't already. + if (attributeSuppliers == null) { + attributeSuppliers = AzureBlobFileAttributes.getAttributeSuppliers( + this.readAttributes(path, AzureBlobFileAttributes.class, linkOptions)); + } + + // If "*" is specified, add all the attributes from the specified set. + if ("*".equals(attributeName)) { + if (viewType.equals(AzureBasicFileAttributeView.NAME)) { + for (String attr : AzureBasicFileAttributes.ATTRIBUTE_STRINGS) { + results.put(attr, attributeSuppliers.get(attr).get()); + } + } else { + // attributeSuppliers is guaranteed to have been set by this point. 
+ for (Map.Entry> entry: attributeSuppliers.entrySet()) { + results.put(entry.getKey(), entry.getValue().get()); + } + } + + } else if (!attributeSuppliers.containsKey(attributeName)) { + // Validate that the attribute is legal and add the value returned by the supplier to the results. + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, + new IllegalArgumentException("Invalid attribute. View: " + viewType + + ". Attribute: " + attributeName)); + } else { + results.put(attributeName, attributeSuppliers.get(attributeName).get()); + + } + } + + // Throw if nothing specified per jdk docs. + if (results.isEmpty()) { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, + new IllegalArgumentException("No attributes were specified. Attributes: " + attributes)); + } + + return results; + } + + /** + * Sets the value of a file attribute. + *
+ * <p>
+ * See {@link AzureBlobFileAttributeView} for more information. + *
+ * <p>
+ * Setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See + * {@link #createDirectory(Path, FileAttribute[])} for more information on virtual directories. + * + * @param path the path to the file + * @param attributes the attribute to set + * @param value the attribute value + * @param linkOptions ignored + * @throws UnsupportedOperationException if an attribute view is not available + * @throws IllegalArgumentException if the attribute name is not specified, or is not recognized, or the attribute + * value is of the correct type but has an inappropriate value + * @throws ClassCastException If the attribute value is not of the expected type or is a collection containing + * elements that are not of the expected type + * @throws IOException If an I/O error occurs. + * @throws SecurityException never + */ + @Override + public void setAttribute(Path path, String attributes, Object value, LinkOption... linkOptions) throws IOException { + AzurePath.ensureFileSystemOpen(path); + String viewType; + String attributeName; + String[] parts = attributes.split(":"); + if (parts.length > 2) { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, + new IllegalArgumentException("Invalid format for attribute string: " + attributes)); + } + if (parts.length == 1) { + viewType = "basic"; // Per jdk docs. + attributeName = attributes; + } else { + viewType = parts[0]; + attributeName = parts[1]; + } + + /* + For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs + state that "basic" must be supported, so we funnel to azureBasic. + */ + if ("basic".equals(viewType)) { + viewType = AzureBasicFileAttributeView.NAME; + } + + // We don't actually support any setters on the basic view. + if (viewType.equals(AzureBasicFileAttributeView.NAME)) { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, + new IllegalArgumentException("Invalid attribute. View: " + viewType + + ". 
Attribute: " + attributeName)); + } else if (viewType.equals(AzureBlobFileAttributeView.NAME)) { + Map> attributeConsumers = AzureBlobFileAttributeView.setAttributeConsumers( + this.getFileAttributeView(path, AzureBlobFileAttributeView.class, linkOptions)); + if (!attributeConsumers.containsKey(attributeName)) { + // Validate that the attribute is legal and add the value returned by the supplier to the results. + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, + new IllegalArgumentException("Invalid attribute. View: " + viewType + + ". Attribute: " + attributeName)); + } + try { + attributeConsumers.get(attributeName).accept(value); + } catch (UncheckedIOException e) { + if (e.getMessage().equals(AzureBlobFileAttributeView.ATTR_CONSUMER_ERROR)) { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, e.getCause()); + } + } + } else { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, + new UnsupportedOperationException("Invalid attribute view: " + viewType)); + } + } + + void closeFileSystem(String fileSystemName) { + this.openFileSystems.remove(fileSystemName); + } + + private String extractAccountEndpoint(URI uri) { + if (!uri.getScheme().equals(this.getScheme())) { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, new IllegalArgumentException( + "URI scheme does not match this provider")); + } + if (CoreUtils.isNullOrEmpty(uri.getQuery())) { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, + new IllegalArgumentException("URI does not contain a query component. FileSystems require a URI of " + + "the format \"azb://?endpoint=\".")); + } + + String endpoint = Flux.fromArray(uri.getQuery().split("&")) + .filter(s -> s.startsWith(ENDPOINT_QUERY_KEY + "=")) + .switchIfEmpty(Mono.defer(() -> Mono.error(LoggingUtility.logError(ClientLoggerHolder.LOGGER, + new IllegalArgumentException("URI does not contain an \"" + ENDPOINT_QUERY_KEY + "=\" parameter. 
" + + "FileSystems require a URI of the format \"azb://?endpoint=\""))))) + .map(s -> s.substring(ENDPOINT_QUERY_KEY.length() + 1)) // Trim the query key and = + .blockLast(); + + if (CoreUtils.isNullOrEmpty(endpoint)) { + throw LoggingUtility.logError(ClientLoggerHolder.LOGGER, + new IllegalArgumentException("No account endpoint provided in URI query.")); + } + + return endpoint; + } +} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzurePath.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzurePath.java new file mode 100644 index 00000000000..9742af1f696 --- /dev/null +++ b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzurePath.java @@ -0,0 +1,836 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob.nio; + +import java.io.File; +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.nio.file.ClosedFileSystemException; +import java.nio.file.FileSystem; +import java.nio.file.InvalidPathException; +import java.nio.file.LinkOption; +import java.nio.file.Path; +import java.nio.file.WatchEvent; +import java.nio.file.WatchKey; +import java.nio.file.WatchService; +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Deque; +import java.util.Iterator; +import java.util.List; +import java.util.Objects; +import java.util.stream.Stream; + +import com.azure.core.util.logging.ClientLogger; +import com.azure.storage.blob.BlobClient; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.BlobUrlParts; + +/** + * An object that may be used to locate a file in a file system. + *
+ * <p>
+ * The root component, if it is present, is the first element of the path and is denoted by a {@code ':'} as the last + * character. Hence, only one instance of {@code ':'} may appear in a path string, and it may only be the last character + * of the first element in the path. The root component is used to identify which container a path belongs to. All other + * path elements, including separators, are considered as the blob name. {@link AzurePath#fromBlobUrl} may + * be used to convert a typical http url pointing to a blob into an {@code AzurePath} object pointing to the same + * resource. + *
+ * <p>
+ * Constructing a syntactically valid path does not ensure a resource exists at the given path. An error will + * not be thrown until trying to access an invalid resource, e.g. trying to access a resource that does not exist. + *
+ * <p>
+ * Path names are case-sensitive. + *
+ * <p>
+ * If a resource is accessed via a relative path, it will be resolved against the default directory of the file system. + * The default directory is as defined in the {@link AzureFileSystem} docs. + *
+ * <p>
+ * Leading and trailing separators will be stripped from each component passed to + * {@link AzureFileSystem#getPath(String, String...)}. This has the effect of treating "foo/" as though it were simply + * "foo". + */ +public final class AzurePath implements Path { + private static final ClientLogger LOGGER = new ClientLogger(AzurePath.class); + static final String ROOT_DIR_SUFFIX = ":"; + + private final AzureFileSystem parentFileSystem; + private final String pathString; + + AzurePath(AzureFileSystem parentFileSystem, String first, String... more) { + this.parentFileSystem = parentFileSystem; + + /* + Break all strings into their respective elements and remove empty elements. This has the effect of stripping + any trailing, leading, or internal delimiters so there are no duplicates/empty elements when we join. + */ + List elements = new ArrayList<>(Arrays.asList(first.split(parentFileSystem.getSeparator()))); + if (more != null) { + for (String next : more) { + elements.addAll(Arrays.asList(next.split(parentFileSystem.getSeparator()))); + } + } + elements.removeIf(String::isEmpty); + + this.pathString = String.join(this.parentFileSystem.getSeparator(), elements); + + // Validate the path string by checking usage of the reserved character ROOT_DIR_SUFFIX. + for (int i = 0; i < elements.size(); i++) { + String element = elements.get(i); + /* + If there is a root component, it must be the first element. A root component takes the format of + ":". The ':', or ROOT_DIR_SUFFIX, if present, can only appear once, and can only be the last + character of the first element. 
+ */ + if (i == 0) { + if (element.contains(ROOT_DIR_SUFFIX) && element.indexOf(ROOT_DIR_SUFFIX) < element.length() - 1) { + throw LoggingUtility.logError(LOGGER, new InvalidPathException(this.pathString, ROOT_DIR_SUFFIX + + " may only be used as the last character in the root component of a path")); + } + // No element besides the first may contain the ROOT_DIR_SUFFIX, as only the first element may be the root. + } else if (element.contains(ROOT_DIR_SUFFIX)) { + throw LoggingUtility.logError(LOGGER, new InvalidPathException(this.pathString, ROOT_DIR_SUFFIX + + " is an invalid character except to identify the root element of this path if there is one.")); + } + } + } + + /** + * Returns the file system that created this object. + * + * @return the file system that created this object + */ + @Override + public FileSystem getFileSystem() { + return this.parentFileSystem; + } + + /** + * Tells whether this path is absolute. + *
+ * <p>
+ * An absolute path is complete in that it doesn't need to be combined with other path information in order to + * locate a file. A path is considered absolute in this file system if it contains a root component. + * + * @return whether the path is absolute + */ + @Override + public boolean isAbsolute() { + return this.getRoot() != null; + } + + /** + * Returns the root component of this path as a Path object, or null if this path does not have a root component. + *
+ * <p>
+ * The root component of this path also identifies the Azure Storage Container in which the file is stored. This + * method will not validate that the root component corresponds to an actual file store/container in this + * file system. It will simply return the root component of the path if one is present and syntactically valid. + * + * @return a path representing the root component of this path, or null + */ + @Override + public Path getRoot() { + // Check if the first element of the path is formatted like a root directory. + String[] elements = this.splitToElements(); + if (elements.length > 0 && elements[0].endsWith(ROOT_DIR_SUFFIX)) { + return this.parentFileSystem.getPath(elements[0]); + } + return null; + } + + /** + * Returns the name of the file or directory denoted by this path as a Path object. The file name is the farthest + * element from the root in the directory hierarchy. + * + * @return a path representing the name of the file or directory, or null if this path has zero elements + */ + @Override + public Path getFileName() { + if (this.isRoot()) { + return null; + } else if (this.pathString.isEmpty()) { + return this; + } else { + List elements = Arrays.asList(this.splitToElements()); + return this.parentFileSystem.getPath(elements.get(elements.size() - 1)); + } + } + + /** + * Returns the parent path, or null if this path does not have a parent. + *
+ * <p>
+ * The parent of this path object consists of this path's root component, if any, and each element in the path + * except for the farthest from the root in the directory hierarchy. This method does not access the file system; + * the path or its parent may not exist. Furthermore, this method does not eliminate special names such as "." and + * ".." that may be used in some implementations. On UNIX for example, the parent of "/a/b/c" is "/a/b", and the + * parent of "x/y/." is "x/y". This method may be used with the normalize method, to eliminate redundant names, for + * cases where shell-like navigation is required. + *
+ * <p>
+ * If this path has one or more elements, and no root component, then this method is equivalent to evaluating the + * expression: + * + * {@code subpath(0, getNameCount()-1);} + * + * @return a path representing the path's parent + */ + @Override + public Path getParent() { + /* + If this path only has one element or is empty, there is no parent. Note the root is included in the parent, so + we don't use getNameCount here. + */ + String[] elements = this.splitToElements(); + if (elements.length == 1 || elements.length == 0) { + return null; + } + + return this.parentFileSystem.getPath( + this.pathString.substring(0, this.pathString.lastIndexOf(this.parentFileSystem.getSeparator()))); + } + + /** + * Returns the number of name elements in the path. + * + * @return the number of elements in the path, or 0 if this path only represents a root component + */ + @Override + public int getNameCount() { + if (this.pathString.isEmpty()) { + return 1; + } + return this.splitToElements(this.withoutRoot()).length; + } + + /** + * Returns a name element of this path as a Path object. + *
+ * <p>
+ * The index parameter is the index of the name element to return. The element that is closest to the root in the + * directory hierarchy has index 0. The element that is farthest from the root has index {@code count-1}. + * + * @param index the index of the element + * @return the name element + * @throws IllegalArgumentException if index is negative, index is greater than or equal to the number of elements, + * or this path has zero name elements + */ + @Override + public Path getName(int index) { + if (index < 0 || index >= this.getNameCount()) { + throw LoggingUtility.logError(LOGGER, new IllegalArgumentException(String.format("Index %d is out of " + + "bounds", index))); + } + // If the path is empty, the only valid option is also an empty path. + if (this.pathString.isEmpty()) { + return this; + } + + return this.parentFileSystem.getPath(this.splitToElements(this.withoutRoot())[index]); + } + + /** + * Returns a relative Path that is a subsequence of the name elements of this path. + *
+ * <p>
+ * The beginIndex and endIndex parameters specify the subsequence of name elements. The name that is closest to the + * root in the directory hierarchy has index 0. The name that is farthest from the root has index {@code count-1}. + * The returned Path object has the name elements that begin at beginIndex and extend to the element at index + * {@code endIndex-1}. + * + * @param begin the index of the first element, inclusive + * @param end the index of the last element, exclusive + * @return a new Path object that is a subsequence of the name elements in this Path + */ + @Override + public Path subpath(int begin, int end) { + if (begin < 0 || begin >= this.getNameCount() + || end <= begin || end > this.getNameCount()) { + throw LoggingUtility.logError(LOGGER, + new IllegalArgumentException(String.format("Values of begin: %d and end: %d are invalid", begin, end))); + } + + String[] subnames = Stream.of(this.splitToElements(this.withoutRoot())) + .skip(begin) + .limit(end - begin) + .toArray(String[]::new); + + return this.parentFileSystem.getPath(String.join(this.parentFileSystem.getSeparator(), subnames)); + } + + /** + * Tests if this path starts with the given path. + *
+ * <p>
+ * This path starts with the given path if this path's root component starts with the root component of the given + * path, and this path starts with the same name elements as the given path. If the given path has more name + * elements than this path then false is returned. + *
+ * <p>
+ * If this path does not have a root component and the given path has a root component then this path does not start + * with the given path. + *
+ * <p>
+ * If the given path is associated with a different FileSystem to this path then false is returned. + *
+ * <p>
+ * In this implementation, a root component starts with another root component if the two root components are + * equivalent strings. In other words, if the files are stored in the same container. + * + * @param path the given path + * @return true if this path starts with the given path; otherwise false + */ + @Override + public boolean startsWith(Path path) { + if (!path.getFileSystem().equals(this.parentFileSystem)) { + return false; + } + + // An empty path never starts with another path and is never the start of another path. + if (this.pathString.isEmpty() ^ ((AzurePath) path).pathString.isEmpty()) { + return false; + } + + String[] thisPathElements = this.splitToElements(); + String[] otherPathElements = ((AzurePath) path).splitToElements(); + if (otherPathElements.length > thisPathElements.length) { + return false; + } + for (int i = 0; i < otherPathElements.length; i++) { + if (!otherPathElements[i].equals(thisPathElements[i])) { + return false; + } + } + + return true; + } + + /** + * Tests if this path starts with a Path, constructed by converting the given path string, in exactly the manner + * specified by the startsWith(Path) method. + * + * @param path the given path string + * @return true if this path starts with the given path; otherwise false + * @throws InvalidPathException If the path string cannot be converted to a Path. + */ + @Override + public boolean startsWith(String path) { + return this.startsWith(this.parentFileSystem.getPath(path)); + } + + /** + * Tests if this path ends with the given path. + *
+ * <p>
+ * If the given path has N elements, and no root component, and this path has N or more elements, then this path + * ends with the given path if the last N elements of each path, starting at the element farthest from the root, + * are equal. + *
+ * <p>
+ * If the given path has a root component then this path ends with the given path if the root component of this path + * ends with the root component of the given path, and the corresponding elements of both paths are equal. If this + * path does not have a root component and the given path has a root component then this path does not end with the + * given path. + *
+ * <p>
+ * If the given path is associated with a different FileSystem to this path then false is returned. + *
+ * <p>
+ * In this implementation, a root component ends with another root component if the two root components are + * equivalent strings. In other words, if the files are stored in the same container. + * + * @param path the given path + * @return true if this path ends with the given path; otherwise false + */ + @Override + public boolean endsWith(Path path) { + /* + There can only be one instance of a file system with a given id, so comparing object identity is equivalent + to checking ids here. + */ + if (path.getFileSystem() != this.parentFileSystem) { + return false; + } + + // An empty path never ends with another path and is never the end of another path. + if (this.pathString.isEmpty() ^ ((AzurePath) path).pathString.isEmpty()) { + return false; + } + + String[] thisPathElements = this.splitToElements(); + String[] otherPathElements = ((AzurePath) path).splitToElements(); + if (otherPathElements.length > thisPathElements.length) { + return false; + } + // If the given path has a root component, the paths must be equal. + if (path.getRoot() != null && otherPathElements.length != thisPathElements.length) { + return false; + } + for (int i = 1; i <= otherPathElements.length; i++) { + if (!otherPathElements[otherPathElements.length - i] + .equals(thisPathElements[thisPathElements.length - i])) { + return false; + } + } + return true; + } + + /** + * Tests if this path ends with a Path, constructed by converting the given path string, in exactly the manner + * specified by the endsWith(Path) method. + * + * @param path the given path string + * @return true if this path starts with the given path; otherwise false + * @throws InvalidPathException If the path string cannot be converted to a Path. + */ + @Override + public boolean endsWith(String path) { + return this.endsWith(this.parentFileSystem.getPath(path)); + } + + /** + * Returns a path that is this path with redundant name elements eliminated. + *
+ * <p>
+ * It derives from this path, a path that does not contain redundant name elements. The "." and ".." are special + * names used to indicate the current directory and parent directory. All occurrences of "." are considered + * redundant. If a ".." is preceded by a non-".." name then both names are considered redundant (the process to + * identify such names is repeated until is it no longer applicable). + *
+ * <p>
+ * This method does not access the file system; the path may not locate a file that exists. Eliminating ".." and a + * preceding name from a path may result in the path that locates a different file than the original path + * + * @return the resulting path or this path if it does not contain redundant name elements; an empty path is returned + * if this path does have a root component and all name elements are redundant + * + */ + @Override + public Path normalize() { + Deque stack = new ArrayDeque<>(); + String[] pathElements = this.splitToElements(); + Path root = this.getRoot(); + String rootStr = root == null ? null : root.toString(); + for (String element : pathElements) { + if (".".equals(element)) { + continue; + } else if ("..".equals(element)) { + if (rootStr != null) { + // Root path. We never push "..". + if (!stack.isEmpty() && stack.peekLast().equals(rootStr)) { + // Cannot go higher than root. Ignore. + continue; + } else { + stack.removeLast(); + } + } else { + // Relative paths can have an arbitrary number of ".." at the beginning. + if (stack.isEmpty()) { + stack.addLast(element); + } else if (stack.peek().equals("..")) { + stack.addLast(element); + } else { + stack.removeLast(); + } + } + } else { + stack.addLast(element); + } + } + + return this.parentFileSystem.getPath("", stack.toArray(new String[0])); + } + + /** + * Resolve the given path against this path. + *

+ * If the other parameter is an absolute path then this method trivially returns other. If other is an empty path + * then this method trivially returns this path. Otherwise, this method considers this path to be a directory and + * resolves the given path against this path. In the simplest case, the given path does not have a root component, + * in which case this method joins the given path to this path and returns a resulting path that ends with the given + * path. Where the given path has a root component then resolution is highly implementation dependent and therefore + * unspecified. + * + * @param path the path to resolve against this path + * @return the resulting path + */ + @Override + public Path resolve(Path path) { + if (path.isAbsolute()) { + return path; + } + if (path.getNameCount() == 0) { + return this; + } + return this.parentFileSystem.getPath(this.toString(), path.toString()); + } + + /** + * Converts a given path string to a Path and resolves it against this Path in exactly the manner specified by the + * {@link #resolve(Path) resolve} method. + * + * @param path the path string to resolve against this path + * @return the resulting path + * @throws InvalidPathException if the path string cannot be converted to a Path. + */ + @Override + public Path resolve(String path) { + return this.resolve(this.parentFileSystem.getPath(path)); + } + + /** + * Resolves the given path against this path's parent path. This is useful where a file name needs to be replaced + * with another file name. For example, suppose that the name separator is "/" and a path represents + * "dir1/dir2/foo", then invoking this method with the Path "bar" will result in the Path "dir1/dir2/bar". If this + * path does not have a parent path, or other is absolute, then this method returns other. If other is an empty path + * then this method returns this path's parent, or where this path doesn't have a parent, the empty path. 
+ * + * @param path the path to resolve against this path's parent + * @return the resulting path + */ + @Override + public Path resolveSibling(Path path) { + if (path.isAbsolute()) { + return path; + } + + Path parent = this.getParent(); + return parent == null ? path : parent.resolve(path); + } + + /** + * Converts a given path string to a Path and resolves it against this path's parent path in exactly the manner + * specified by the resolveSibling method. + * + * @param path the path string to resolve against this path's parent + * @return the resulting path + * @throws InvalidPathException if the path string cannot be converted to a Path. + */ + @Override + public Path resolveSibling(String path) { + return this.resolveSibling(this.parentFileSystem.getPath(path)); + } + + /** + * Constructs a relative path between this path and a given path. + *

+ * Relativization is the inverse of resolution. This method attempts to construct a relative path that when resolved + * against this path, yields a path that locates the same file as the given path. + *

+ * A relative path cannot be constructed if only one of the paths have a root component. If both paths have a root + * component, it is still possible to relativize one against the other. If this path and the given path are equal + * then an empty path is returned. + *

+ * For any two normalized paths p and q, where q does not have a root component, + * {@code p.relativize(p.resolve(q)).equals(q)} + * + * @param path the path to relativize against this path + * @return the resulting relative path, or an empty path if both paths are equal + * @throws IllegalArgumentException if other is not a Path that can be relativized against this path + */ + @Override + public Path relativize(Path path) { + if (path.getRoot() == null ^ this.getRoot() == null) { + throw LoggingUtility.logError(LOGGER, + new IllegalArgumentException("Both paths must be absolute or neither can be")); + } + + AzurePath thisNormalized = (AzurePath) this.normalize(); + Path otherNormalized = path.normalize(); + + Deque deque = new ArrayDeque<>( + Arrays.asList(otherNormalized.toString().split(this.parentFileSystem.getSeparator()))); + + int i = 0; + String[] thisElements = thisNormalized.splitToElements(); + while (i < thisElements.length && !deque.isEmpty() && thisElements[i].equals(deque.peekFirst())) { + deque.removeFirst(); + i++; + } + while (i < thisElements.length) { + deque.addFirst(".."); + i++; + } + + return this.parentFileSystem.getPath("", deque.toArray(new String[0])); + } + + /** + * Returns a URI to represent this path. + *

+ * This method constructs an absolute URI with a scheme equal to the URI scheme that identifies the provider. + *

+ * No authority component is defined for the {@code URI} returned by this method. This implementation offers the + * same equivalence guarantee as the default provider. + * + * @return the URI representing this path + * @throws SecurityException never + */ + @Override + public URI toUri() { + try { + return new URI(this.parentFileSystem.provider().getScheme(), null, "/" + this.toAbsolutePath(), + null, null); + } catch (URISyntaxException e) { + throw LoggingUtility.logError(LOGGER, new IllegalStateException("Unable to create valid URI from path", e)); + } + } + + /** + * Returns a Path object representing the absolute path of this path. + *

+ * If this path is already absolute then this method simply returns this path. Otherwise, this method resolves the + * path against the default directory. + * + * @return a Path object representing the absolute path + * @throws SecurityException never + */ + @Override + public Path toAbsolutePath() { + if (this.isAbsolute()) { + return this; + } + return this.parentFileSystem.getDefaultDirectory().resolve(this); + } + + /** + * Unsupported. + * + * @param linkOptions options + * @return the real path + * @throws UnsupportedOperationException operation not supported. + */ + @Override + public Path toRealPath(LinkOption... linkOptions) throws IOException { + throw new UnsupportedOperationException("Symbolic links are not supported."); + } + + /** + * Unsupported. + * + * @return the file + * @throws UnsupportedOperationException operation not supported. + */ + @Override + public File toFile() { + throw new UnsupportedOperationException(); + } + + /** + * Unsupported. + * + * @param watchService watchService + * @param kinds kinds + * @param modifiers modifiers + * @return the watch key + * @throws UnsupportedOperationException operation not supported. + */ + @Override + public WatchKey register(WatchService watchService, WatchEvent.Kind[] kinds, WatchEvent.Modifier... modifiers) + throws IOException { + throw new UnsupportedOperationException("WatchEvents are not supported."); + } + + /** + * Unsupported. + * + * @param watchService watchService + * @param kinds kinds + * @return the watch key + * @throws UnsupportedOperationException operation not supported. + */ + @Override + public WatchKey register(WatchService watchService, WatchEvent.Kind... kinds) throws IOException { + throw new UnsupportedOperationException("WatchEvents are not supported."); + } + + /** + * Returns an iterator over the name elements of this path. + *

+ * The first element returned by the iterator represents the name element that is closest to the root in the + * directory hierarchy, the second element is the next closest, and so on. The last element returned is the name of + * the file or directory denoted by this path. The root component, if present, is not returned by the iterator. + * + * @return an iterator over the name elements of this path. + */ + @Override + public Iterator iterator() { + if (this.pathString.isEmpty()) { + return Collections.singletonList((Path) this).iterator(); + } + return Arrays.asList(Stream.of(this.splitToElements(this.withoutRoot())) + .map(s -> this.parentFileSystem.getPath(s)) + .toArray(Path[]::new)) + .iterator(); + } + + /** + * Compares two abstract paths lexicographically. This method does not access the file system and neither file is + * required to exist. + *

+ * This method may not be used to compare paths that are associated with different file system providers. + *

+ * This result of this method is identical to a string comparison on the underlying path strings. + * + * @return zero if the argument is equal to this path, a value less than zero if this path is lexicographically less + * than the argument, or a value greater than zero if this path is lexicographically greater than the argument + * @throws ClassCastException if the paths are associated with different providers + */ + @Override + public int compareTo(Path path) { + if (!(path instanceof AzurePath)) { + throw LoggingUtility.logError(LOGGER, new ClassCastException("Other path is not an instance of " + + "AzurePath.")); + } + + return this.pathString.compareTo(((AzurePath) path).pathString); + } + + /** + * Returns the string representation of this path. + *

+ * If this path was created by converting a path string using the getPath method then the path string returned by + * this method may differ from the original String used to create the path. + *

+ * The returned path string uses the default name separator to separate names in the path. + * + * @return the string representation of this path + */ + @Override + public String toString() { + return this.pathString; + } + + /** + * A path is considered equal to another path if it is associated with the same file system instance and if the + * path strings are equivalent. + * + * @return true if, and only if, the given object is a Path that is identical to this Path + */ + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + AzurePath paths = (AzurePath) o; + return Objects.equals(parentFileSystem, paths.parentFileSystem) + && Objects.equals(pathString, paths.pathString); + } + + @Override + public int hashCode() { + return Objects.hash(parentFileSystem, pathString); + } + + /** + * Returns a {@link BlobClient} which references a blob pointed to by this path. Note that this does not guarantee + * the existence of the blob at this location. + * + * @return a {@link BlobClient}. + * @throws IOException If the path only contains a root component or is empty + */ + public BlobClient toBlobClient() throws IOException { + /* + We don't store the blob client because unlike other types in this package, a Path does not actually indicate the + existence or even validity of any remote resource. It is purely a representation of a path. Therefore, we do not + construct the client or perform any validation until it is requested. + */ + // Converting to an absolute path ensures there is a container to operate on even if it is the default. + // Normalizing ensures the path is clean. 
+ Path root = this.normalize().toAbsolutePath().getRoot(); + if (root == null) { + throw LoggingUtility.logError(LOGGER, + new IllegalStateException("Root should never be null after calling toAbsolutePath.")); + } + String fileStoreName = this.rootToFileStore(root.toString()); + + BlobContainerClient containerClient = + ((AzureFileStore) this.parentFileSystem.getFileStore(fileStoreName)).getContainerClient(); + + String blobName = this.withoutRoot(); + if (blobName.isEmpty()) { + throw LoggingUtility.logError(LOGGER, new IOException("Cannot get a blob client to a path that only " + + "contains the root or is an empty path")); + } + + return containerClient.getBlobClient(blobName); + } + + /** + * A utility method to conveniently convert from a URL to a storage resource to an {@code AzurePath} pointing to the + * same resource. + * + * The url must be well formatted. There must be an open filesystem corresponding to the account which contains the + * blob. Otherwise, a {@link java.nio.file.FileSystemNotFoundException} will be thrown. + * + * The url may point to either an account, container, or blob. If it points to an account, the path will be empty, + * but it will have an internal reference to the file system containing it, meaning instance methods may be + * performed on the path to construct a reference to another object. If it points to a container, there will be one + * element, which is the root element. Everything after the container, that is the blob name, will then be appended + * after the root element. + * + * IP style urls are not currently supported. + * + * The {@link AzureFileSystemProvider} can typically be obtained via {@link AzureFileSystem#provider()}. + * + * @param provider The installed {@link AzureFileSystemProvider} that manages open file systems for this jvm. + * @param url The url to the desired resource. + * @return An {@link AzurePath} which points to the resource identified by the url. 
+ * @throws URISyntaxException If the url contains elements which are not well formatted. + */ + public static AzurePath fromBlobUrl(AzureFileSystemProvider provider, String url) throws URISyntaxException { + BlobUrlParts parts = BlobUrlParts.parse(url); + URI fileSystemUri = hostToFileSystemUri(provider, parts.getScheme(), parts.getHost()); + FileSystem parentFileSystem = provider.getFileSystem(fileSystemUri); + return new AzurePath((AzureFileSystem) parentFileSystem, fileStoreToRoot(parts.getBlobContainerName()), + parts.getBlobName() == null ? "" : parts.getBlobName()); + } + + /** + * @return Whether this path consists of only a root component. + */ + boolean isRoot() { + return this.equals(this.getRoot()); + } + + private String withoutRoot() { + Path root = this.getRoot(); + String str = this.pathString; + if (root != null) { + str = this.pathString.substring(root.toString().length()); + } + if (str.startsWith(this.parentFileSystem.getSeparator())) { + str = str.substring(1); + } + + return str; + } + + private String[] splitToElements() { + return this.splitToElements(this.pathString); + } + + private String[] splitToElements(String str) { + String[] arr = str.split(this.parentFileSystem.getSeparator()); + /* + This is a special case where we split after removing the root from a path that is just the root. Or otherwise + have an empty path. 
+ */ + if (arr.length == 1 && arr[0].isEmpty()) { + return new String[0]; + } + return arr; + } + + private String rootToFileStore(String root) { + return root.substring(0, root.length() - 1); // Remove the ROOT_DIR_SUFFIX + } + + private static String fileStoreToRoot(String fileStore) { + if (fileStore == null || "".equals(fileStore)) { + return ""; + } + return fileStore + ROOT_DIR_SUFFIX; + } + + private static URI hostToFileSystemUri(AzureFileSystemProvider provider, String scheme, String host) + throws URISyntaxException { + return new URI(provider.getScheme() + "://?endpoint=" + scheme + "://" + host); + } + + static void ensureFileSystemOpen(Path p) { + if (!p.getFileSystem().isOpen()) { + throw LoggingUtility.logError(LOGGER, new ClosedFileSystemException()); + } + } +} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureResource.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureResource.java new file mode 100644 index 00000000000..92fb14a62cc --- /dev/null +++ b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureResource.java @@ -0,0 +1,284 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package com.azure.storage.blob.nio; + +import com.azure.core.util.logging.ClientLogger; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.BlobClient; +import com.azure.storage.blob.BlobContainerClientBuilder; +import com.azure.storage.blob.models.BlobHttpHeaders; +import com.azure.storage.blob.models.BlobItem; +import com.azure.storage.blob.models.BlobListDetails; +import com.azure.storage.blob.models.BlobProperties; +import com.azure.storage.blob.models.BlobRequestConditions; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.models.ListBlobsOptions; +import com.azure.storage.blob.models.ParallelTransferOptions; +import com.azure.storage.blob.options.BlockBlobOutputStreamOptions; +import com.azure.storage.blob.specialized.BlobOutputStream; +import com.azure.storage.common.implementation.Constants; + +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.attribute.FileAttribute; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * This type is meant to be a logical grouping of operations and data associated with an azure resource. It is NOT + * intended to serve as a local cache for any data related to remote resources. It is agnostic to whether the resource + * is a directory or a file and will not perform any validation of the resource type, though root directories are not + * supported as they are backed by containers and do not support many file system apis. + * + * It also serves as the interface to Storage clients. Any operation that needs to use a client should first build an + * AzureResource using a path and then use the getter to access the client. 
+ */ +final class AzureResource { + private static final ClientLogger LOGGER = new ClientLogger(AzureResource.class); + + static final String DIR_METADATA_MARKER = Constants.HeaderConstants.DIRECTORY_METADATA_KEY; + + private final AzurePath path; + private final BlobClient blobClient; + + // The following are not kept consistent with the service. They are only held here between parsing and putting. + private BlobHttpHeaders blobHeaders; + private Map blobMetadata; + + AzureResource(Path path) throws IOException { + Objects.requireNonNull(path, "path"); + this.path = validatePathInstanceType(path); + this.validateNotRoot(); + this.blobClient = this.path.toBlobClient(); + } + + /** + * Checks for the existence of the parent of the given path. We do not check for the actual marker blob as parents + * need only weakly exist. + * + * If the parent is a root (container), it will be assumed to exist, so it must be validated elsewhere that the + * container is a legitimate root within this file system. + */ + boolean checkParentDirectoryExists() throws IOException { + /* + If the parent is just the root (or null, which means the parent is implicitly the default directory which is a + root), that means we are checking a container, which is always considered to exist. Otherwise, perform normal + existence check. + */ + Path parent = this.path.getParent(); + return (parent == null || parent.equals(path.getRoot())) + || new AzureResource(this.path.getParent()).checkDirectoryExists(); + } + + /** + * Checks whether a directory exists by either being empty or having children. + */ + boolean checkDirectoryExists() throws IOException { + DirectoryStatus dirStatus = this.checkDirStatus(); + return dirStatus.equals(DirectoryStatus.EMPTY) || dirStatus.equals(DirectoryStatus.NOT_EMPTY); + } + + /* + This method will check specifically whether there is a virtual directory at this location. It must be known before + that there is no file present at the destination. 
+ */ + boolean checkVirtualDirectoryExists() throws IOException { + DirectoryStatus dirStatus = this.checkDirStatus(false); + return dirStatus.equals(DirectoryStatus.NOT_EMPTY); // Virtual directories cannot be empty + } + + /** + * This method will check if a directory is extant and/or empty and accommodates virtual directories. This method + * will not check the status of root directories. + */ + DirectoryStatus checkDirStatus() throws IOException { + if (this.blobClient == null) { + throw LoggingUtility.logError(LOGGER, new IllegalArgumentException("The blob client was null.")); + } + + /* + * Do a get properties first on the directory name. This will determine if it is concrete&&exists or is either + * virtual or doesn't exist. + */ + BlobProperties props = null; + boolean exists = false; + try { + props = this.getBlobClient().getProperties(); + exists = true; + } catch (BlobStorageException e) { + if (e.getStatusCode() != 404) { + throw LoggingUtility.logError(LOGGER, new IOException(e)); + } + } + + // Check if the resource is a file or directory before listing + if (exists && !props.getMetadata().containsKey(AzureResource.DIR_METADATA_MARKER)) { + return DirectoryStatus.NOT_A_DIRECTORY; + } + + return checkDirStatus(exists); + } + + /* + This method will determine the status of the directory given it is already known whether or not there is an object + at the target. + */ + DirectoryStatus checkDirStatus(boolean exists) throws IOException { + BlobContainerClient containerClient = this.getContainerClient(); + + // List on the directory name + '/' so that we only get things under the directory if any + ListBlobsOptions listOptions = new ListBlobsOptions().setMaxResultsPerPage(2) + .setPrefix(this.blobClient.getBlobName() + AzureFileSystem.PATH_SEPARATOR) + .setDetails(new BlobListDetails().setRetrieveMetadata(true)); + + /* + * If listing returns anything, then it is not empty. 
If listing returns nothing and exists() was true, then it's + * empty Else it does not exist + */ + try { + Iterator blobIterator = containerClient.listBlobsByHierarchy(AzureFileSystem.PATH_SEPARATOR, + listOptions, null).iterator(); + if (blobIterator.hasNext()) { + return DirectoryStatus.NOT_EMPTY; + } else if (exists) { + return DirectoryStatus.EMPTY; + } else { + return DirectoryStatus.DOES_NOT_EXIST; + } + } catch (BlobStorageException e) { + throw LoggingUtility.logError(LOGGER, new IOException(e)); + } + } + + /** + * Creates the actual directory marker. This method should only be used when any necessary checks for proper + * conditions of directory creation (e.g. parent existence) have already been performed. Otherwise, + * {@link AzureFileSystemProvider#createDirectory(Path, FileAttribute[])} should be preferred. + * + * @param requestConditions Any necessary request conditions to pass when creating the directory blob. + */ + void putDirectoryBlob(BlobRequestConditions requestConditions) { + this.blobClient.getBlockBlobClient().commitBlockListWithResponse(Collections.emptyList(), this.blobHeaders, + this.prepareMetadataForDirectory(), null, requestConditions, null, null); + } + + /* + Note that this will remove the properties from the list of attributes as it finds them. 
+ */ + private void extractHttpHeaders(List> fileAttributes) { + BlobHttpHeaders headers = new BlobHttpHeaders(); + for (Iterator> it = fileAttributes.iterator(); it.hasNext();) { + FileAttribute attr = it.next(); + boolean propertyFound = true; + switch (attr.name()) { + case AzureFileSystemProvider.CONTENT_TYPE: + headers.setContentType(attr.value().toString()); + break; + case AzureFileSystemProvider.CONTENT_LANGUAGE: + headers.setContentLanguage(attr.value().toString()); + break; + case AzureFileSystemProvider.CONTENT_DISPOSITION: + headers.setContentDisposition(attr.value().toString()); + break; + case AzureFileSystemProvider.CONTENT_ENCODING: + headers.setContentEncoding(attr.value().toString()); + break; + case AzureFileSystemProvider.CONTENT_MD5: + if ((attr.value() instanceof byte[])) { + headers.setContentMd5((byte[]) attr.value()); + } else { + throw LoggingUtility.logError(LOGGER, + new UnsupportedOperationException("Content-MD5 attribute must be a byte[]")); + } + break; + case AzureFileSystemProvider.CACHE_CONTROL: + headers.setCacheControl(attr.value().toString()); + break; + default: + propertyFound = false; + break; + } + + if (propertyFound) { + it.remove(); + } + } + + this.blobHeaders = headers; + } + + /** + * Note this should only be used after the headers have been extracted. + * + * @param fileAttributes The attributes to convert to metadata. + */ + private void convertAttributesToMetadata(List> fileAttributes) { + Map metadata = new HashMap<>(); + for (FileAttribute attr : fileAttributes) { + metadata.put(attr.name(), attr.value().toString()); + } + + // If no attributes are set, return null so existing metadata is not cleared. + this.blobMetadata = metadata.isEmpty() ? null : metadata; + } + + private void validateNotRoot() { + if (this.path.isRoot()) { + throw LoggingUtility.logError(LOGGER, new IllegalArgumentException( + "Root directory not supported. 
Path: " + this.path)); + } + } + + private AzurePath validatePathInstanceType(Path path) { + if (!(path instanceof AzurePath)) { + throw LoggingUtility.logError(LOGGER, new IllegalArgumentException("This provider cannot operate on " + + "subtypes of Path other than AzurePath")); + } + return (AzurePath) path; + } + + BlobContainerClient getContainerClient() { + return new BlobContainerClientBuilder().endpoint(this.blobClient.getBlobUrl()) + .pipeline(this.blobClient.getHttpPipeline()) + .buildClient(); + } + + AzureResource setFileAttributes(List> attributes) { + attributes = new ArrayList<>(attributes); // To ensure removing header values from the list is supported. + extractHttpHeaders(attributes); + convertAttributesToMetadata(attributes); + + return this; + } + + AzurePath getPath() { + return this.path; + } + + BlobClient getBlobClient() { + return this.blobClient; + } + + BlobOutputStream getBlobOutputStream(ParallelTransferOptions pto, BlobRequestConditions rq) { + BlockBlobOutputStreamOptions options = new BlockBlobOutputStreamOptions() + .setHeaders(this.blobHeaders) + .setMetadata(this.blobMetadata) + .setParallelTransferOptions(pto) + .setRequestConditions(rq); + return this.blobClient.getBlockBlobClient().getBlobOutputStream(options); + } + + private Map prepareMetadataForDirectory() { + if (this.blobMetadata == null) { + this.blobMetadata = new HashMap<>(); + } + this.blobMetadata.put(DIR_METADATA_MARKER, "true"); + return this.blobMetadata; + } +} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureSeekableByteChannel.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureSeekableByteChannel.java new file mode 100644 index 00000000000..e51e727450b --- /dev/null +++ b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureSeekableByteChannel.java @@ -0,0 +1,245 @@ +// Copyright (c) 
Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob.nio; + +import com.azure.core.util.logging.ClientLogger; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.NonReadableChannelException; +import java.nio.channels.NonWritableChannelException; +import java.nio.channels.SeekableByteChannel; +import java.nio.file.Path; + +/** + * A byte channel that maintains a current position. + *

+ * A channel may only be opened in read mode OR write mode. It may not be opened in read/write mode. Seeking is + * supported for reads, but not for writes. Modifications to existing files is not permitted--only creating new files or + * overwriting existing files. + *

+ * This type is not threadsafe to prevent having to hold locks across network calls. + */ +public final class AzureSeekableByteChannel implements SeekableByteChannel { + private static final ClientLogger LOGGER = new ClientLogger(AzureSeekableByteChannel.class); + + private final NioBlobInputStream reader; + private final NioBlobOutputStream writer; + private long position; + private boolean closed = false; + private final Path path; + /* + If this type needs to be made threadsafe, closed should be volatile. We need to add a lock to guard updates to + position or make it an atomicLong. If we have a lock, we have to be careful about holding while doing io ops and at + least ensure timeouts are set. We probably have to duplicate or copy the buffers for at least writing to ensure they + don't get overwritten. + */ + + AzureSeekableByteChannel(NioBlobInputStream inputStream, Path path) { + this.reader = inputStream; + /* + We mark at the beginning (we always construct a stream to the beginning of the blob) to support seeking. We can + effectively seek anywhere by always marking at the beginning of the blob and then a seek is resetting to that + mark and skipping. + */ + inputStream.mark(Integer.MAX_VALUE); + this.writer = null; + this.position = 0; + this.path = path; + } + + AzureSeekableByteChannel(NioBlobOutputStream outputStream, Path path) { + this.writer = outputStream; + this.reader = null; + this.position = 0; + this.path = path; + } + + @Override + public int read(ByteBuffer dst) throws IOException { + AzurePath.ensureFileSystemOpen(this.path); + validateOpen(); + validateReadMode(); + + // See comments in position(), remember that position is 0-based and size() is exclusive + if (this.position >= this.size()) { + return -1; // at or past EOF + } + + // If the buffer is backed by an array, we can write directly to that instead of allocating new memory. 
+ int pos; + final int limit; + final byte[] buf; + if (dst.hasArray()) { + // ByteBuffer has a position and limit that define the bounds of the writeable area, and that + // area can be both smaller than the backing array and might not begin at array index 0. + pos = dst.position(); + limit = pos + dst.remaining(); + buf = dst.array(); + } else { + pos = 0; + limit = dst.remaining(); + buf = new byte[limit]; + } + + while (pos < limit) { + int byteCount = this.reader.read(buf, pos, limit - pos); + if (byteCount == -1) { + break; + } + pos += byteCount; + } + + /* + Either write to the destination if we had to buffer separately or just set the position correctly if we wrote + underneath the buffer + */ + int count; + if (dst.hasArray()) { + count = pos - dst.position(); + dst.position(pos); + } else { + count = pos; // original position was 0 + dst.put(buf, 0, count); + } + + this.position += count; + return count; + } + + @Override + public int write(ByteBuffer src) throws IOException { + AzurePath.ensureFileSystemOpen(this.path); + validateOpen(); + validateWriteMode(); + + final int length = src.remaining(); + this.position += length; + + /* + If the buffer is backed by an array, we can read directly from that instead of allocating new memory. + Set the position correctly if we read from underneath the buffer + */ + int pos; + byte[] buf; + if (src.hasArray()) { + // ByteBuffer has a position and limit that define the bounds of the readable area, and that + // area can be both smaller than the backing array and might not begin at array index 0. + pos = src.position(); + buf = src.array(); + src.position(pos + length); + } else { + pos = 0; + buf = new byte[length]; + src.get(buf); // advances src.position() + } + // Either way, the src.position() and this.position have been updated before we know if this write + // will succeed. (Original behavior.) 
It may be better to update position(s) only *after* success, + // but then on IOException would we know if there was a partial write, and if so how much? + this.writer.write(buf, pos, length); + return length; + } + + @Override + public long position() throws IOException { + AzurePath.ensureFileSystemOpen(this.path); + validateOpen(); + + return this.position; + } + + @Override + public AzureSeekableByteChannel position(long newPosition) throws IOException { + AzurePath.ensureFileSystemOpen(this.path); + validateOpen(); + validateReadMode(); + + if (newPosition < 0) { + throw LoggingUtility.logError(LOGGER, new IllegalArgumentException("Seek position cannot be negative")); + } + + /* + The javadoc says seeking past the end for reading is legal and that it should indicate the end of the file on + the next read. StorageInputStream doesn't allow this, but we can get around that by modifying the + position variable and skipping the actual read (when read is called next); we'll check in read if we've seeked + past the end and short circuit there as well. + + Because we are in read mode this will always give us the size from properties. + */ + if (newPosition > this.size()) { + this.position = newPosition; + return this; + } + this.reader.reset(); // Because we always mark at the beginning, this will reset us back to the beginning. + this.reader.mark(Integer.MAX_VALUE); + long skipAmount = this.reader.skip(newPosition); + if (skipAmount < newPosition) { + throw new IOException("Could not set desired position"); + } + this.position = newPosition; + + return this; + } + + @Override + public long size() throws IOException { + AzurePath.ensureFileSystemOpen(this.path); + validateOpen(); + + /* + If we are in read mode, the size is the size of the file. + If we are in write mode, the size is the amount of data written so far. 
+ */ + if (reader != null) { + return reader.getBlobInputStream().getProperties().getBlobSize(); + } else { + return position; + } + } + + @Override + public AzureSeekableByteChannel truncate(long size) throws IOException { + throw LoggingUtility.logError(LOGGER, new UnsupportedOperationException()); + } + + @Override + public boolean isOpen() { + AzurePath.ensureFileSystemOpen(this.path); + return !this.closed; + } + + @Override + public void close() throws IOException { + AzurePath.ensureFileSystemOpen(this.path); + if (this.reader != null) { + this.reader.close(); + } else { + this.writer.close(); + } + this.closed = true; + } + + Path getPath() { + return this.path; + } + + private void validateOpen() throws ClosedChannelException { + if (this.closed) { + throw LoggingUtility.logError(LOGGER, new ClosedChannelException()); + } + } + + private void validateReadMode() { + if (this.reader == null) { + throw LoggingUtility.logError(LOGGER, new NonReadableChannelException()); + } + } + + private void validateWriteMode() { + if (this.writer == null) { + throw LoggingUtility.logError(LOGGER, new NonWritableChannelException()); + } + } +} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/DirectoryStatus.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/DirectoryStatus.java new file mode 100644 index 00000000000..8356a7ebeb1 --- /dev/null +++ b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/DirectoryStatus.java @@ -0,0 +1,23 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob.nio; + +/** + * RESERVED FOR INTERNAL USE. + * + * An enum to indicate the status of a directory. + */ +enum DirectoryStatus { + EMPTY, // The directory at least weakly exists and is empty. 
+
+    NOT_EMPTY, // The directory at least weakly exists and has one or more children.
+
+    DOES_NOT_EXIST, // There is no resource at this path.
+
+    NOT_A_DIRECTORY; // A resource exists at this path, but it is not a directory.
+
+    static boolean isDirectory(DirectoryStatus status) {
+        return EMPTY.equals(status) || NOT_EMPTY.equals(status);
+    }
+}
diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/LoggingUtility.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/LoggingUtility.java
new file mode 100644
index 00000000000..3cd503f98c2
--- /dev/null
+++ b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/LoggingUtility.java
@@ -0,0 +1,16 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package com.azure.storage.blob.nio;
+
+import com.azure.core.util.logging.ClientLogger;
+
+/**
+ * Only a minimal Utility class to get around a shortcoming in Core's logging.
+ */
+final class LoggingUtility {
+    public static <T extends Throwable> T logError(ClientLogger logger, T e) {
+        logger.error(e.getMessage());
+        return e;
+    }
+}
diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/NioBlobInputStream.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/NioBlobInputStream.java
new file mode 100644
index 00000000000..676972dc93a
--- /dev/null
+++ b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/NioBlobInputStream.java
@@ -0,0 +1,211 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+ +package com.azure.storage.blob.nio; + +import com.azure.core.util.logging.ClientLogger; +import com.azure.storage.blob.specialized.BlobInputStream; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Path; + +/** + * Provides an InputStream to read a file stored as an Azure Blob. + */ +public final class NioBlobInputStream extends InputStream { + private static final ClientLogger LOGGER = new ClientLogger(NioBlobInputStream.class); + + private final BlobInputStream blobInputStream; + private final Path path; + + NioBlobInputStream(BlobInputStream blobInputStream, Path path) { + this.blobInputStream = blobInputStream; + this.path = path; + } + + /** + * Returns an estimate of the number of bytes that can be read (or skipped over) from this input stream without + * blocking by the next invocation of a method for this input stream. The next invocation might be the same thread + * or another thread. A single read or skip of this many bytes will not block, but may read or skip fewer bytes. + * + * @return An int which represents an estimate of the number of bytes that can be read (or skipped + * over) from this input stream without blocking, or 0 when it reaches the end of the input stream. + */ + @Override + public synchronized int available() throws IOException { + AzurePath.ensureFileSystemOpen(path); + return this.blobInputStream.available(); + } + + /** + * Closes this input stream and releases any system resources associated with the stream. + */ + @Override + public synchronized void close() throws IOException { + AzurePath.ensureFileSystemOpen(path); + this.blobInputStream.close(); + } + + /** + * Marks the current position in this input stream. A subsequent call to the reset method repositions this stream at + * the last marked position so that subsequent reads re-read the same bytes. + * + * @param readlimit An int which represents the maximum limit of bytes that can be read before the mark + * position becomes invalid. 
+ */ + @Override + public synchronized void mark(final int readlimit) { + this.blobInputStream.mark(readlimit); + } + + /** + * Tests if this input stream supports the mark and reset methods. + * + * @return Returns {@code true} + */ + @Override + public boolean markSupported() { + return this.blobInputStream.markSupported(); + } + + /** + * Reads the next byte of data from the input stream. The value byte is returned as an int in the range 0 to 255. If + * no byte is available because the end of the stream has been reached, the value -1 is returned. This method blocks + * until input data is available, the end of the stream is detected, or an exception is thrown. + * + * @return An int which represents the total number of bytes read into the buffer, or -1 if there is no + * more data because the end of the stream has been reached. + * @throws IOException If an I/O error occurs. + */ + @Override + public int read() throws IOException { + AzurePath.ensureFileSystemOpen(path); + try { + return this.blobInputStream.read(); + /* + BlobInputStream only throws RuntimeException, and it doesn't preserve the cause, it only takes the message, + so we can't do any better than re-wrapping it in an IOException. + */ + } catch (RuntimeException e) { + throw LoggingUtility.logError(LOGGER, new IOException(e)); + } + } + + /** + * Reads some number of bytes from the input stream and stores them into the buffer array b. The number + * of bytes actually read is returned as an integer. This method blocks until input data is available, end of file + * is detected, or an exception is thrown. If the length of b is zero, then no bytes are read and 0 is + * returned; otherwise, there is an attempt to read at least one byte. If no byte is available because the stream is + * at the end of the file, the value -1 is returned; otherwise, at least one byte is read and stored into + * b. + * + * The first byte read is stored into element b[0], the next one into b[1], and so on. 
The + * number of bytes read is, at most, equal to the length of b. Let k be the number of + * bytes actually read; these bytes will be stored in elements b[0] through b[k-1], + * leaving elements b[k] through + * b[b.length-1] unaffected. + * + * The read(b) method for class {@link InputStream} has the same effect as: + * + * read(b, 0, b.length) + * + * @param b A byte array which represents the buffer into which the data is read. + * @throws IOException If the first byte cannot be read for any reason other than the end of the file, if the input + * stream has been closed, or if some other I/O error occurs. + * @throws NullPointerException If the byte array b is null. + */ + @Override + public int read(final byte[] b) throws IOException { + AzurePath.ensureFileSystemOpen(path); + try { + return this.blobInputStream.read(b); + } catch (RuntimeException e) { + throw LoggingUtility.logError(LOGGER, new IOException(e)); + } + } + + /** + * Reads up to len bytes of data from the input stream into an array of bytes. An attempt is made to + * read as many as len bytes, but a smaller number may be read. The number of bytes actually read is + * returned as an integer. This method blocks until input data is available, end of file is detected, or an + * exception is thrown. + * + * If len is zero, then no bytes are read and 0 is returned; otherwise, there is an attempt to read at + * least one byte. If no byte is available because the stream is at end of file, the value -1 is returned; + * otherwise, at least one byte is read and stored into b. + * + * The first byte read is stored into element b[off], the next one into b[off+1], and so + * on. The number of bytes read is, at most, equal to len. Let k be the number of bytes + * actually read; these bytes will be stored in elements b[off] through b[off+k-1], + * leaving elements b[off+k] through + * b[off+len-1] unaffected. 
+ * + * In every case, elements b[0] through b[off] and elements b[off+len] + * through b[b.length-1] are unaffected. + * + * @param b A byte array which represents the buffer into which the data is read. + * @param off An int which represents the start offset in the byte array at which the data + * is written. + * @param len An int which represents the maximum number of bytes to read. + * @return An int which represents the total number of bytes read into the buffer, or -1 if there is no + * more data because the end of the stream has been reached. + * @throws IOException If the first byte cannot be read for any reason other than end of file, or if the input + * stream has been closed, or if some other I/O error occurs. + * @throws NullPointerException If the byte array b is null. + * @throws IndexOutOfBoundsException If off is negative, len is negative, or + * len is greater than + * b.length - off. + */ + @Override + public int read(final byte[] b, final int off, final int len) throws IOException { + AzurePath.ensureFileSystemOpen(path); + if (off < 0 || len < 0 || len > b.length - off) { + throw LOGGER.logExceptionAsError(new IndexOutOfBoundsException()); + } + try { + return this.blobInputStream.read(b, off, len); + } catch (RuntimeException e) { + throw LoggingUtility.logError(LOGGER, new IOException(e)); + } + } + + /** + * Repositions this stream to the position at the time the mark method was last called on this input stream. Note + * repositioning the blob read stream will disable blob MD5 checking. + * + * @throws IOException If this stream has not been marked or if the mark has been invalidated. 
+ */ + @Override + public synchronized void reset() throws IOException { + AzurePath.ensureFileSystemOpen(path); + try { + this.blobInputStream.reset(); + } catch (RuntimeException e) { + if (e.getMessage().equals("Stream mark expired.")) { + throw LoggingUtility.logError(LOGGER, new IOException(e)); + } + throw LoggingUtility.logError(LOGGER, e); + } + } + + /** + * Skips over and discards n bytes of data from this input stream. The skip method may, for a variety of reasons, + * end up skipping over some smaller number of bytes, possibly 0. This may result from any of a number of + * conditions; reaching end of file before n bytes have been skipped is only one possibility. The actual number of + * bytes skipped is returned. If n is negative, no bytes are skipped. + * + * Note repositioning the blob read stream will disable blob MD5 checking. + * + * @param n A long which represents the number of bytes to skip. + */ + @Override + public synchronized long skip(final long n) throws IOException { + AzurePath.ensureFileSystemOpen(path); + return this.blobInputStream.skip(n); + } + + BlobInputStream getBlobInputStream() { + return blobInputStream; + } +} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/NioBlobOutputStream.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/NioBlobOutputStream.java new file mode 100644 index 00000000000..ae5c0fa02b1 --- /dev/null +++ b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/NioBlobOutputStream.java @@ -0,0 +1,99 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package com.azure.storage.blob.nio; + +import com.azure.core.util.logging.ClientLogger; +import com.azure.storage.blob.specialized.BlobOutputStream; + +import java.io.IOException; +import java.io.OutputStream; +import java.nio.file.Path; + +/** + * Provides an OutputStream to write to a file stored as an Azure Blob. + */ +public final class NioBlobOutputStream extends OutputStream { + private static final ClientLogger LOGGER = new ClientLogger(NioBlobOutputStream.class); + + private final BlobOutputStream blobOutputStream; + private final Path path; + + NioBlobOutputStream(BlobOutputStream blobOutputStream, Path path) { + this.blobOutputStream = blobOutputStream; + this.path = path; + } + + @Override + public synchronized void write(int i) throws IOException { + AzurePath.ensureFileSystemOpen(path); + try { + this.blobOutputStream.write(i); + /* + BlobOutputStream only throws RuntimeException, and it doesn't preserve the cause, it only takes the message, + so we can't do any better than re-wrapping it in an IOException. + */ + } catch (RuntimeException e) { + throw LoggingUtility.logError(LOGGER, new IOException(e)); + } + } + + @Override + public synchronized void write(byte[] b) throws IOException { + AzurePath.ensureFileSystemOpen(path); + try { + this.blobOutputStream.write(b); + /* + BlobOutputStream only throws RuntimeException, and it doesn't preserve the cause, it only takes the message, + so we can't do any better than re-wrapping it in an IOException. + */ + } catch (RuntimeException e) { + throw LoggingUtility.logError(LOGGER, new IOException(e)); + } + } + + @Override + public synchronized void write(byte[] b, int off, int len) throws IOException { + AzurePath.ensureFileSystemOpen(path); + try { + this.blobOutputStream.write(b, off, len); + /* + BlobOutputStream only throws RuntimeException, and it doesn't preserve the cause, it only takes the message, + so we can't do any better than re-wrapping it in an IOException. 
+ */ + } catch (RuntimeException e) { + if (e instanceof IndexOutOfBoundsException) { + throw LoggingUtility.logError(LOGGER, e); + } + throw LoggingUtility.logError(LOGGER, new IOException(e)); + } + } + + @Override + public synchronized void flush() throws IOException { + AzurePath.ensureFileSystemOpen(path); + try { + this.blobOutputStream.flush(); + /* + BlobOutputStream only throws RuntimeException, and it doesn't preserve the cause, it only takes the message, + so we can't do any better than re-wrapping it in an IOException. + */ + } catch (RuntimeException e) { + throw LoggingUtility.logError(LOGGER, new IOException(e)); + } + } + + @Override + public synchronized void close() throws IOException { + AzurePath.ensureFileSystemOpen(path); + try { + this.blobOutputStream.close(); + /* + BlobOutputStream only throws RuntimeException, and it doesn't preserve the cause, it only takes the message, + so we can't do any better than re-wrapping it in an IOException. + */ + } catch (RuntimeException e) { + throw LoggingUtility.logError(LOGGER, new IOException(e)); + } + } +} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/package-info.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/package-info.java new file mode 100644 index 00000000000..96cd1fbd627 --- /dev/null +++ b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/package-info.java @@ -0,0 +1,7 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +/** + * Package containing the classes for loading the AzureFileSystemProvider based on Azure Storage Blobs. 
+ */ +package com.azure.storage.blob.nio; diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/resources/META-INF/services/java.nio.file.spi.FileSystemProvider b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/resources/META-INF/services/java.nio.file.spi.FileSystemProvider new file mode 100644 index 00000000000..5cc2b4ead14 --- /dev/null +++ b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/resources/META-INF/services/java.nio.file.spi.FileSystemProvider @@ -0,0 +1 @@ +com.azure.storage.blob.nio.AzureFileSystemProvider diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/resources/azure-storage-blob-nio.properties b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/resources/azure-storage-blob-nio.properties new file mode 100644 index 00000000000..ca812989b4f --- /dev/null +++ b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/resources/azure-storage-blob-nio.properties @@ -0,0 +1,2 @@ +name=${project.artifactId} +version=${project.version} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/samples/java/com/azure/storage/blob/nio/ReadmeSamples.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/samples/java/com/azure/storage/blob/nio/ReadmeSamples.java new file mode 100644 index 00000000000..6c8c5e06e0b --- /dev/null +++ b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/samples/java/com/azure/storage/blob/nio/ReadmeSamples.java @@ -0,0 +1,129 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+package com.azure.storage.blob.nio; + +import com.azure.storage.blob.models.BlobHttpHeaders; +import com.azure.storage.common.StorageSharedKeyCredential; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.URI; +import java.net.URISyntaxException; +import java.nio.file.FileSystem; +import java.nio.file.FileSystems; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +/** + * WARNING: MODIFYING THIS FILE WILL REQUIRE CORRESPONDING UPDATES TO README.md FILE. LINE NUMBERS + * ARE USED TO EXTRACT APPROPRIATE CODE SEGMENTS FROM THIS FILE. ADD NEW CODE AT THE BOTTOM TO AVOID CHANGING + * LINE NUMBERS OF EXISTING CODE SAMPLES. + * + * Code samples for the README.md + */ +public class ReadmeSamples { + + private static final String CONTAINER_STORES = "container1,container2"; // A comma separated list of container names + private static final StorageSharedKeyCredential SHARE_KEY_CREDENTIAL + = new StorageSharedKeyCredential("", ""); + private static final Map CONFIG = new HashMap() { + { + put(AzureFileSystem.AZURE_STORAGE_SHARED_KEY_CREDENTIAL, SHARE_KEY_CREDENTIAL); + put(AzureFileSystem.AZURE_STORAGE_FILE_STORES, CONTAINER_STORES); + } + }; + private FileSystem myFs = FileSystems.newFileSystem(new URI("azb://?endpoint= config = new HashMap<>(); + String stores = ","; // A comma separated list of container names + StorageSharedKeyCredential credential = new StorageSharedKeyCredential(" attributes = Files.readAttributes(filePath, "azureBlob:metadata,headers"); + // END: readme-sample-readAttributesOnAFileString + } + + public void writeAttributesToAFile() throws IOException { + // BEGIN: readme-sample-writeAttributesToAFile + AzureBlobFileAttributeView view = Files.getFileAttributeView(filePath, AzureBlobFileAttributeView.class); + view.setMetadata(Collections.emptyMap()); + // END: 
readme-sample-writeAttributesToAFile + } + + public void writeAttributesToAFileString() throws IOException { + // BEGIN: readme-sample-writeAttributesToAFileString + Files.setAttribute(filePath, "azureBlob:blobHttpHeaders", new BlobHttpHeaders()); + // END: readme-sample-writeAttributesToAFileString + } +} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AttributeViewTests.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AttributeViewTests.java new file mode 100644 index 00000000000..1c820073a31 --- /dev/null +++ b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AttributeViewTests.java @@ -0,0 +1,290 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob.nio; + +import com.azure.storage.blob.BlobClient; +import com.azure.storage.blob.models.AccessTier; +import com.azure.storage.blob.models.BlobHttpHeaders; +import com.azure.storage.blob.models.BlobProperties; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.MethodSource; + +import java.io.IOException; +import java.nio.file.ClosedFileSystemException; +import java.nio.file.Path; +import java.nio.file.attribute.FileTime; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.Base64; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Supplier; +import java.util.stream.Stream; + +import static com.azure.core.test.utils.TestUtils.assertArraysEqual; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static 
org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class AttributeViewTests extends BlobNioTestBase { + // Get attributes--All properties set; + private BlobClient bc; + private AzureFileSystem fs; + + @Override + protected void beforeTest() { + super.beforeTest(); + fs = createFS(initializeConfigMap()); + cc = rootNameToContainerClient(getDefaultDir(fs)); + bc = cc.getBlobClient(generateBlobName()); + bc.upload(DATA.getDefaultBinaryData()); + } + + @Test + public void azureBasicFileAttributeViewReadAttributes() throws IOException { + AzureBasicFileAttributes attr = new AzureBasicFileAttributeView(fs.getPath(bc.getBlobName())).readAttributes(); + BlobProperties props = bc.getProperties(); + + assertEquals(attr.size(), props.getBlobSize()); + assertEquals(attr.lastModifiedTime(), FileTime.from(props.getLastModified().toInstant())); + assertEquals(attr.creationTime(), FileTime.from(props.getCreationTime().toInstant())); + assertTrue(attr.isRegularFile()); + assertEquals(attr.fileKey(), bc.getBlobUrl()); + assertFalse(attr.isDirectory()); + assertFalse(attr.isVirtualDirectory()); + assertFalse(attr.isSymbolicLink()); + assertFalse(attr.isOther()); + } + + @Test + public void azureBasicFileAttributeViewDirectory() throws IOException { + Path path = fs.getPath(generateBlobName()); + putDirectoryBlob(new AzureResource(path).getBlobClient().getBlockBlobClient()); + AzureBasicFileAttributes attr = new AzureBasicFileAttributeView(path).readAttributes(); + + assertTrue(attr.isDirectory()); + assertFalse(attr.isVirtualDirectory()); + assertFalse(attr.isRegularFile()); + assertFalse(attr.isOther()); + assertFalse(attr.isSymbolicLink()); + } + + @Test + public void azureBasicFileAttributeViewDirectoryVirtual() throws IOException { + String dirName = generateBlobName(); + BlobClient bc = cc.getBlobClient(dirName + '/' + generateContainerName()); + bc.upload(DATA.getDefaultBinaryData()); + AzureBasicFileAttributes 
attr = new AzureBasicFileAttributeView(fs.getPath(dirName)).readAttributes(); + + assertTrue(attr.isDirectory()); + assertTrue(attr.isVirtualDirectory()); + assertFalse(attr.isRegularFile()); + assertFalse(attr.isOther()); + assertFalse(attr.isSymbolicLink()); + } + + @Test + public void azureBasicFileAttributeViewNoExist() { + assertThrows(IOException.class, + () -> new AzureBasicFileAttributeView(fs.getPath(generateBlobName())).readAttributes()); + } + + @Test + public void azureBasicFileAttributeViewFSClosed() throws IOException { + Path path = fs.getPath(generateBlobName()); + fs.close(); + + assertThrows(ClosedFileSystemException.class, () -> new AzureBasicFileAttributeView(path).readAttributes()); + } + + @Test + public void azureBlobFileAttributeViewReadAttributes() throws IOException { + AzureBlobFileAttributes attr = new AzureBlobFileAttributeView(fs.getPath(bc.getBlobName())).readAttributes(); + Map> suppliers = AzureBlobFileAttributes.getAttributeSuppliers(attr); + BlobProperties props = bc.getProperties(); + + // getters + assertEquals(attr.size(), props.getBlobSize()); + assertEquals(attr.lastModifiedTime(), FileTime.from(props.getLastModified().toInstant())); + assertEquals(attr.creationTime(), FileTime.from(props.getCreationTime().toInstant())); + assertTrue(attr.isRegularFile()); + assertEquals(attr.fileKey(), bc.getBlobUrl()); + assertFalse(attr.isDirectory()); + assertFalse(attr.isVirtualDirectory()); + assertFalse(attr.isSymbolicLink()); + assertFalse(attr.isOther()); + assertEquals(attr.eTag(), props.getETag()); + assertEquals(attr.blobHttpHeaders().getContentType(), props.getContentType()); + assertArraysEqual(attr.blobHttpHeaders().getContentMd5(), props.getContentMd5()); + assertEquals(attr.blobHttpHeaders().getContentLanguage(), props.getContentLanguage()); + assertEquals(attr.blobHttpHeaders().getContentEncoding(), props.getContentEncoding()); + assertEquals(attr.blobHttpHeaders().getContentDisposition(), props.getContentDisposition()); + 
assertEquals(attr.blobHttpHeaders().getCacheControl(), props.getCacheControl()); + assertEquals(attr.blobType(), props.getBlobType()); + assertEquals(attr.copyId(), props.getCopyId()); + assertEquals(attr.copyStatus(), props.getCopyStatus()); + assertEquals(attr.copySource(), props.getCopySource()); + assertEquals(attr.copyProgress(), props.getCopyProgress()); + assertEquals(attr.copyCompletionTime(), props.getCopyCompletionTime()); + assertEquals(attr.copyStatusDescription(), props.getCopyStatusDescription()); + assertEquals(attr.isServerEncrypted(), props.isServerEncrypted()); + assertEquals(attr.accessTier(), props.getAccessTier()); + assertEquals(attr.isAccessTierInferred(), props.isAccessTierInferred()); + assertEquals(attr.archiveStatus(), props.getArchiveStatus()); + assertEquals(attr.accessTierChangeTime(), props.getAccessTierChangeTime()); + assertEquals(attr.metadata(), props.getMetadata()); + + // Suppliers, used in FileSystemProvider.readAttributes(String). Unlike the consumers used for setting + // properties, we test these here rather than on the FileSystemProvider because there are so many of them and + // it's more feasible this way rather than having a test for each method like the consumers. 
+ assertEquals(suppliers.get("size").get(), props.getBlobSize()); + assertEquals(suppliers.get("lastModifiedTime").get(), FileTime.from(props.getLastModified().toInstant())); + assertEquals(suppliers.get("creationTime").get(), FileTime.from(props.getCreationTime().toInstant())); + assertEquals(suppliers.get("eTag").get(), props.getETag()); + BlobHttpHeaders supplierHeaders = (BlobHttpHeaders) suppliers.get("blobHttpHeaders").get(); + assertEquals(supplierHeaders.getContentType(), props.getContentType()); + assertArraysEqual(supplierHeaders.getContentMd5(), props.getContentMd5()); + assertEquals(supplierHeaders.getContentLanguage(), props.getContentLanguage()); + assertEquals(supplierHeaders.getContentEncoding(), props.getContentEncoding()); + assertEquals(supplierHeaders.getContentDisposition(), props.getContentDisposition()); + assertEquals(supplierHeaders.getCacheControl(), props.getCacheControl()); + assertEquals(suppliers.get("blobType").get(), props.getBlobType()); + assertEquals(suppliers.get("copyId").get(), props.getCopyId()); + assertEquals(suppliers.get("copyStatus").get(), props.getCopyStatus()); + assertEquals(suppliers.get("copySource").get(), props.getCopySource()); + assertEquals(suppliers.get("copyProgress").get(), props.getCopyProgress()); + assertEquals(suppliers.get("copyCompletionTime").get(), props.getCopyCompletionTime()); + assertEquals(suppliers.get("copyStatusDescription").get(), props.getCopyStatusDescription()); + assertEquals(suppliers.get("isServerEncrypted").get(), props.isServerEncrypted()); + assertEquals(suppliers.get("accessTier").get(), props.getAccessTier()); + assertEquals(suppliers.get("isAccessTierInferred").get(), props.isAccessTierInferred()); + assertEquals(suppliers.get("archiveStatus").get(), props.getArchiveStatus()); + assertEquals(suppliers.get("accessTierChangeTime").get(), props.getAccessTierChangeTime()); + assertEquals(suppliers.get("metadata").get(), props.getMetadata()); + } + + @Test + public void 
azureBlobFileAttributeViewReadFSClosed() throws IOException { + Path path = fs.getPath(generateBlobName()); + fs.close(); + + assertThrows(ClosedFileSystemException.class, () -> new AzureBlobFileAttributeView(path).readAttributes()); + } + + @ParameterizedTest + @MethodSource("azureBlobFileAttributeViewSetBlobHttpHeadersSupplier") + public void azureBlobFileAttributeViewSetBlobHttpHeaders(String cacheControl, String contentDisposition, + String contentEncoding, String contentLanguage, byte[] contentMD5, String contentType) throws IOException { + AzureBlobFileAttributeView view = new AzureBlobFileAttributeView(fs.getPath(bc.getBlobName())); + BlobHttpHeaders headers = new BlobHttpHeaders().setCacheControl(cacheControl) + .setContentDisposition(contentDisposition) + .setContentEncoding(contentEncoding) + .setContentLanguage(contentLanguage) + .setContentMd5(contentMD5) + .setContentType(contentType); + + view.setBlobHttpHeaders(headers); + BlobProperties response = bc.getProperties(); + + assertEquals(cacheControl, response.getCacheControl()); + assertEquals(contentDisposition, response.getContentDisposition()); + assertEquals(contentEncoding, response.getContentEncoding()); + assertEquals(contentLanguage, response.getContentLanguage()); + assertArraysEqual(contentMD5, response.getContentMd5()); + assertEquals(contentType, response.getContentType()); + } + + private static Stream azureBlobFileAttributeViewSetBlobHttpHeadersSupplier() + throws NoSuchAlgorithmException { + return Stream.of( + Arguments.of(null, null, null, null, null, null), + Arguments.of("control", "disposition", "encoding", "language", + Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())), "typr") + ); + } + + @Test + public void azureBlobFileAttributeViewSetHeadersFSClosed() throws IOException { + Path path = fs.getPath(generateBlobName()); + fs.close(); + + assertThrows(ClosedFileSystemException.class, + () -> new 
AzureBlobFileAttributeView(path).setBlobHttpHeaders(new BlobHttpHeaders()));
+    }
+
+    @ParameterizedTest
+    @CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz",
+        "i0,a,i_,a" /* Test culture sensitive word sort */}, nullValues = "null")
+    public void azureBlobFileAttributeViewSetMetadata(String key1, String value1, String key2, String value2)
+        throws IOException {
+        AzureBlobFileAttributeView view = new AzureBlobFileAttributeView(fs.getPath(bc.getBlobName()));
+        Map<String, String> metadata = new HashMap<>();
+        if (key1 != null && value1 != null) {
+            metadata.put(key1, value1);
+        }
+        if (key2 != null && value2 != null) {
+            metadata.put(key2, value2);
+        }
+
+        view.setMetadata(metadata);
+
+        assertEquals(metadata, bc.getProperties().getMetadata());
+    }
+
+    @Test
+    public void azureBlobFileAttributeViewSetMetadataFSClosed() throws IOException {
+        Path path = fs.getPath(generateBlobName());
+        fs.close();
+
+        assertThrows(ClosedFileSystemException.class,
+            () -> new AzureBlobFileAttributeView(path).setMetadata(Collections.emptyMap()));
+    }
+
+    @ParameterizedTest
+    @MethodSource("azureBlobFileAttributeViewSetTierSupplier")
+    public void azureBlobFileAttributeViewSetTier(AccessTier tier) throws IOException {
+        new AzureBlobFileAttributeView(fs.getPath(bc.getBlobName())).setTier(tier);
+
+        assertEquals(tier, bc.getProperties().getAccessTier());
+    }
+
+    private static Stream<AccessTier> azureBlobFileAttributeViewSetTierSupplier() {
+        // We don't test archive because it takes a while to take effect and testing HOT and COOL demonstrates that the
+        // tier is successfully being passed to the underlying client.
+ return Stream.of(AccessTier.HOT, AccessTier.COOL); + } + + @Test + public void azureBlobFileAttributeViewSetTierFSClosed() throws IOException { + Path path = fs.getPath(generateBlobName()); + fs.close(); + + assertThrows(ClosedFileSystemException.class, + () -> new AzureBlobFileAttributeView(path).setTier(AccessTier.HOT)); + } + + @ParameterizedTest + @MethodSource("attributeViewSetTimesUnsupportedSupplier") + public void attributeViewSetTimesUnsupported(FileTime t1, FileTime t2, FileTime t3) { + Path path = fs.getPath(bc.getBlobName()); + AzureBlobFileAttributeView blobView = new AzureBlobFileAttributeView(path); + AzureBasicFileAttributeView basicView = new AzureBasicFileAttributeView(path); + + assertThrows(UnsupportedOperationException.class, () -> blobView.setTimes(t1, t2, t3)); + assertThrows(UnsupportedOperationException.class, () -> basicView.setTimes(t1, t2, t3)); + } + + private static Stream attributeViewSetTimesUnsupportedSupplier() { + return Stream.of( + Arguments.of(FileTime.fromMillis(System.currentTimeMillis()), null, null), + Arguments.of(null, FileTime.fromMillis(System.currentTimeMillis()), null), + Arguments.of(null, null, FileTime.fromMillis(System.currentTimeMillis())) + ); + } +} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureDirectoryStreamTests.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureDirectoryStreamTests.java new file mode 100644 index 00000000000..0c75f780567 --- /dev/null +++ b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureDirectoryStreamTests.java @@ -0,0 +1,215 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package com.azure.storage.blob.nio; + +import com.azure.core.test.TestMode; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.ValueSource; + +import java.io.IOException; +import java.nio.file.ClosedFileSystemException; +import java.nio.file.DirectoryIteratorException; +import java.nio.file.DirectoryStream; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.concurrent.ConcurrentHashMap; +import java.util.stream.IntStream; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class AzureDirectoryStreamTests extends BlobNioTestBase { + private AzureFileSystem fs; + + @Override + protected void beforeTest() { + super.beforeTest(); + fs = createFS(initializeConfigMap()); + } + + @ParameterizedTest + @CsvSource(value = {"0,true", "5,true", "6000,true", "5,false"}) + public void listFiles(int numFiles, boolean absolute) throws IOException { + if (numFiles > 50 && getTestMode() != TestMode.LIVE) { + return; // Skip large data set in record and playback + } + String rootName = absolute ? 
getNonDefaultRootDir(fs) : ""; + String dirName = generateBlobName(); + Map resources = new ConcurrentHashMap<>(); + IntStream.range(0, numFiles).parallel().forEach(i -> { + AzureResource resource = null; + try { + resource = new AzureResource(fs.getPath(rootName, dirName, generateBlobName())); + } catch (IOException e) { + throw new RuntimeException(e); + } + resources.put(resource.getPath(), resource); + resource.getBlobClient().getBlockBlobClient().commitBlockList(Collections.emptyList()); + }); + + Iterator iterator = new AzureDirectoryStream((AzurePath) fs.getPath(rootName, dirName), entry -> true) + .iterator(); + + if (numFiles > 0) { + // Check that repeated hasNext calls returns true and doesn't affect the results of next() + assertTrue(iterator.hasNext()); + assertTrue(iterator.hasNext()); + } + + for (int i = 0; i < numFiles; i++) { + assertTrue(iterator.hasNext()); + assertNotNull(resources.remove(iterator.next())); + } + + assertFalse(iterator.hasNext()); + assertThrows(NoSuchElementException.class, iterator::next); + } + + // If listing results include directories, they should not be recursively listed. Only immediate children are + // returned. + @ParameterizedTest + @CsvSource(value = {"true,false", "false,false", "false,true"}) + public void listDirectories(boolean virtual, boolean isEmpty) throws IOException { + // The path to list against + AzureResource listResource = new AzureResource(fs.getPath(getNonDefaultRootDir(fs), generateBlobName())); + // The only expected result of the listing + AzureResource listResultResource = new AzureResource(listResource.getPath().resolve(generateBlobName())); + if (!virtual) { + listResource.putDirectoryBlob(null); + listResultResource.putDirectoryBlob(null); + } + + // Put some children under listResultResource. These should not be returned. 
+ if (!isEmpty) { + for (int i = 0; i < 3; i++) { + ((AzurePath) listResultResource.getPath().resolve(generateBlobName())).toBlobClient() + .getBlockBlobClient().commitBlockList(Collections.emptyList()); + } + } + + Iterator iterator = new AzureDirectoryStream(listResource.getPath(), path -> true).iterator(); + + assertTrue(iterator.hasNext()); + assertEquals(listResultResource.getPath().toString(), iterator.next().toString()); + assertFalse(iterator.hasNext()); + } + + @ParameterizedTest + @ValueSource(ints = {0, 1, 3}) + public void listFilesDepth(int depth) throws IOException { + AzurePath listingPath = (AzurePath) fs.getPath(getNonDefaultRootDir(fs), getPathWithDepth(depth)); + + AzureResource filePath = new AzureResource(listingPath.resolve(generateBlobName())); + filePath.getBlobClient().getBlockBlobClient().commitBlockList(Collections.emptyList()); + + AzureResource concreteDirEmptyPath = new AzureResource(listingPath.resolve(generateBlobName())); + concreteDirEmptyPath.putDirectoryBlob(null); + + AzureResource concreteDirNonEmptyPath = new AzureResource(listingPath.resolve(generateBlobName())); + concreteDirNonEmptyPath.putDirectoryBlob(null); + + AzureResource concreteDirChildPath = new AzureResource(concreteDirNonEmptyPath.getPath() + .resolve(generateBlobName())); + concreteDirChildPath.getBlobClient().getBlockBlobClient().commitBlockList(Collections.emptyList()); + + AzureResource virtualDirPath = new AzureResource(listingPath.resolve(generateBlobName())); + AzureResource virtualDirChildPath = new AzureResource(virtualDirPath.getPath().resolve(generateBlobName())); + virtualDirChildPath.getBlobClient().getBlockBlobClient().commitBlockList(Collections.emptyList()); + + List expectedListResults = new ArrayList<>(Arrays.asList(filePath.getPath().toString(), + concreteDirEmptyPath.getPath().toString(), concreteDirNonEmptyPath.getPath().toString(), + virtualDirPath.getPath().toString())); + + for (Path path : new AzureDirectoryStream(listingPath, path -> 
true)) { + assertTrue(expectedListResults.remove(path.toString())); + } + assertEquals(0, expectedListResults.size()); + } + + @Test + public void iteratorDuplicateCallsFail() throws IOException { + AzureDirectoryStream stream = new AzureDirectoryStream((AzurePath) fs.getPath(generateBlobName()), + path -> true); + stream.iterator(); + + assertThrows(IllegalStateException.class, stream::iterator); + } + + @Test + public void nextHasNextFailAfterClose() throws IOException { + String rootName = getNonDefaultRootDir(fs); + String dirName = generateBlobName(); + for (int i = 0; i < 3; i++) { + new AzureResource(fs.getPath(rootName, dirName, generateBlobName())) + .getBlobClient().getBlockBlobClient().commitBlockList(Collections.emptyList()); + } + + DirectoryStream stream = new AzureDirectoryStream((AzurePath) fs.getPath(rootName, dirName), + path -> true); + Iterator iterator = stream.iterator(); + + // There are definitely items we haven't returned from the iterator, but they are inaccessible after closing. 
+ stream.close(); + + assertFalse(iterator.hasNext()); + assertThrows(NoSuchElementException.class, iterator::next); + } + + @Test + public void hasNextFailAfterFSClose() throws IOException { + Path path = fs.getPath(generateBlobName()); + putDirectoryBlob(rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(path.getFileName().toString()) + .getBlockBlobClient()); + DirectoryStream stream = fs.provider().newDirectoryStream(path, null); + fs.close(); + + assertThrows(ClosedFileSystemException.class, () -> stream.iterator().hasNext()); + } + + @Test + public void filter() throws IOException { + String rootName = getNonDefaultRootDir(fs); + String dirName = generateBlobName(); + for (int i = 0; i < 3; i++) { + new AzureResource(fs.getPath(rootName, dirName, i + generateBlobName())) + .getBlobClient().getBlockBlobClient().commitBlockList(Collections.emptyList()); + } + + Iterator iterator = new AzureDirectoryStream((AzurePath) fs.getPath(rootName, dirName), + path -> path.getFileName().toString().startsWith("0")).iterator(); + + assertTrue(iterator.hasNext()); + assertTrue(iterator.next().getFileName().toString().startsWith("0")); + assertFalse(iterator.hasNext()); + } + + @Test + public void filterException() throws IOException { + String rootName = getNonDefaultRootDir(fs); + String dirName = generateBlobName(); + for (int i = 0; i < 3; i++) { + new AzureResource(fs.getPath(rootName, dirName, i + generateBlobName())) + .getBlobClient().getBlockBlobClient().commitBlockList(Collections.emptyList()); + } + AzureDirectoryStream stream = new AzureDirectoryStream((AzurePath) fs.getPath(rootName, dirName), + entry -> { + throw new IOException("Test exception"); + }); + + DirectoryIteratorException e = assertThrows(DirectoryIteratorException.class, + () -> stream.iterator().hasNext()); + assertEquals("Test exception", e.getCause().getMessage()); + } +} diff --git 
a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureFileStoreTests.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureFileStoreTests.java new file mode 100644 index 00000000000..8cc4d86d6f9 --- /dev/null +++ b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureFileStoreTests.java @@ -0,0 +1,96 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob.nio; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.io.IOException; +import java.nio.file.FileStore; +import java.nio.file.attribute.BasicFileAttributeView; +import java.nio.file.attribute.FileAttributeView; +import java.nio.file.attribute.FileStoreAttributeView; +import java.nio.file.attribute.PosixFileAttributeView; +import java.util.Map; +import java.util.stream.Stream; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class AzureFileStoreTests extends BlobNioTestBase { + private AzureFileSystem fs; + + // Just need one fs instance for creating the stores. 
+ @Override + public void beforeTest() { + super.beforeTest(); + Map config = initializeConfigMap(); + config.put(AzureFileSystem.AZURE_STORAGE_SHARED_KEY_CREDENTIAL, + ENV.getPrimaryAccount().getCredential()); + config.put(AzureFileSystem.AZURE_STORAGE_FILE_STORES, generateContainerName() + "," + generateContainerName()); + try { + fs = new AzureFileSystem(new AzureFileSystemProvider(), ENV.getPrimaryAccount().getBlobEndpoint(), + config); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + // The constructor is implicitly tested by creating a file system. + @Test + public void name() throws IOException { + String name = generateContainerName(); + + assertEquals(name, new AzureFileStore(fs, name, false).name()); + } + + @Test + public void type() { + assertEquals("AzureBlobContainer", fs.getFileStores().iterator().next().type()); + } + + @Test + public void isReadOnly() { + assertFalse(fs.getFileStores().iterator().next().isReadOnly()); + } + + @Test + public void space() throws IOException { + FileStore store = fs.getFileStores().iterator().next(); + + assertEquals(Long.MAX_VALUE, store.getTotalSpace()); + assertEquals(Long.MAX_VALUE, store.getUsableSpace()); + assertEquals(Long.MAX_VALUE, store.getUnallocatedSpace()); + } + + @ParameterizedTest + @MethodSource("supportsFileAttributeViewSupplier") + public void supportsFileAttributeView(Class view, String viewName, boolean supports) { + FileStore store = fs.getFileStores().iterator().next(); + + assertEquals(supports, store.supportsFileAttributeView(view)); + assertEquals(supports, store.supportsFileAttributeView(viewName)); + } + + private static Stream supportsFileAttributeViewSupplier() { + return Stream.of( + Arguments.of(BasicFileAttributeView.class, "basic", true), + Arguments.of(AzureBlobFileAttributeView.class, "azureBlob", true), + Arguments.of(AzureBasicFileAttributeView.class, "azureBasic", true), + Arguments.of(PosixFileAttributeView.class, "posix", false) + ); + } + + @Test + 
public void getFileStoreAttributeView() { + FileStore store = fs.getFileStores().iterator().next(); + + assertNull(store.getFileStoreAttributeView(FileStoreAttributeView.class)); + assertThrows(UnsupportedOperationException.class, () -> store.getAttribute("basic:size")); + } +} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureFileSystemProviderTests.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureFileSystemProviderTests.java new file mode 100644 index 00000000000..4055382fed3 --- /dev/null +++ b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureFileSystemProviderTests.java @@ -0,0 +1,1437 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob.nio; + +import com.azure.core.http.HttpHeaders; +import com.azure.core.http.HttpMethod; +import com.azure.core.http.HttpPipelineCallContext; +import com.azure.core.http.HttpPipelineNextPolicy; +import com.azure.core.http.HttpRequest; +import com.azure.core.http.HttpResponse; +import com.azure.core.http.policy.HttpPipelinePolicy; +import com.azure.core.test.http.MockHttpResponse; +import com.azure.storage.blob.BlobClient; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.models.AccessTier; +import com.azure.storage.blob.models.BlobErrorCode; +import com.azure.storage.blob.models.BlobHttpHeaders; +import com.azure.storage.blob.models.BlobProperties; +import com.azure.storage.blob.models.BlockListType; +import com.azure.storage.blob.specialized.AppendBlobClient; +import com.azure.storage.blob.specialized.BlockBlobClient; +import com.azure.storage.common.StorageSharedKeyCredential; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.condition.EnabledIf; +import org.junit.jupiter.params.ParameterizedTest; +import 
org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.EnumSource; +import org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.params.provider.ValueSource; +import reactor.core.publisher.Mono; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.io.UncheckedIOException; +import java.net.URI; +import java.nio.ByteBuffer; +import java.nio.channels.SeekableByteChannel; +import java.nio.charset.StandardCharsets; +import java.nio.file.AccessDeniedException; +import java.nio.file.AccessMode; +import java.nio.file.ClosedFileSystemException; +import java.nio.file.DirectoryNotEmptyException; +import java.nio.file.FileAlreadyExistsException; +import java.nio.file.FileSystem; +import java.nio.file.FileSystemAlreadyExistsException; +import java.nio.file.FileSystemNotFoundException; +import java.nio.file.Files; +import java.nio.file.NoSuchFileException; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.nio.file.StandardOpenOption; +import java.nio.file.attribute.BasicFileAttributeView; +import java.nio.file.attribute.BasicFileAttributes; +import java.nio.file.attribute.DosFileAttributeView; +import java.nio.file.attribute.DosFileAttributes; +import java.nio.file.attribute.FileAttribute; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.Arrays; +import java.util.Base64; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.stream.Stream; + +import static com.azure.core.test.utils.TestUtils.assertArraysEqual; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static 
org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +@SuppressWarnings("resource") +public class AzureFileSystemProviderTests extends BlobNioTestBase { + Map config; + private AzureFileSystemProvider provider; + + // The following are common among a large number of copy tests + private AzurePath sourcePath; + private AzurePath destPath; + private BlobClient sourceClient; + private BlobClient destinationClient; + + @Override + protected void beforeTest() { + super.beforeTest(); + config = initializeConfigMap(); + provider = new AzureFileSystemProvider(); + } + + @Test + public void createFileSystem() throws IOException { + config.put(AzureFileSystem.AZURE_STORAGE_SHARED_KEY_CREDENTIAL, ENV.getPrimaryAccount().getCredential()); + config.put(AzureFileSystem.AZURE_STORAGE_FILE_STORES, generateContainerName()); + URI uri = getFileSystemUri(); + provider.newFileSystem(uri, config); + + assertTrue(provider.getFileSystem(uri).isOpen()); + assertEquals(primaryBlobServiceClient.getAccountUrl(), + ((AzureFileSystem) provider.getFileSystem(uri)).getFileSystemUrl()); + } + + @ParameterizedTest + @ValueSource(strings = {"azc://path", "azb://path", "azb://?foo=bar", "azb://?account="}) + public void createFileSystemInvalidUri(String uri) { + assertThrows(IllegalArgumentException.class, () -> provider.newFileSystem(new URI(uri), config)); + } + + @Test + public void createFileSystemDuplicate() throws IOException { + config.put(AzureFileSystem.AZURE_STORAGE_FILE_STORES, generateContainerName()); + config.put(AzureFileSystem.AZURE_STORAGE_SHARED_KEY_CREDENTIAL, ENV.getPrimaryAccount().getCredential()); + provider.newFileSystem(getFileSystemUri(), config); + + assertThrows(FileSystemAlreadyExistsException.class, () -> 
provider.newFileSystem(getFileSystemUri(), config)); + } + + @Test + public void createFileSystemInitialCheckFail() { + config.put(AzureFileSystem.AZURE_STORAGE_FILE_STORES, generateContainerName()); + byte[] badKey = ENV.getPrimaryAccount().getKey().getBytes(StandardCharsets.UTF_8); + badKey[0]++; + config.put(AzureFileSystem.AZURE_STORAGE_SHARED_KEY_CREDENTIAL, + new StorageSharedKeyCredential(ENV.getPrimaryAccount().getName(), new String(badKey))); + + assertThrows(IOException.class, () -> provider.newFileSystem(getFileSystemUri(), config)); + assertThrows(FileSystemNotFoundException.class, () -> provider.getFileSystem(getFileSystemUri())); + } + + @Test + public void getFileSystemNotFound() { + assertThrows(FileSystemNotFoundException.class, () -> provider.getFileSystem(getFileSystemUri())); + } + + @ParameterizedTest + @ValueSource(strings = {"azc://path", "azb://path", "azb://?foo=bar", "azb://?account="}) + public void getFileSystemIa(String uri) { + assertThrows(IllegalArgumentException.class, () -> provider.getFileSystem(new URI(uri))); + } + + // TODO: Be sure to test operating on containers that already have data + // all apis should have a test that tries them after the FileSystem is closed to ensure they throw. + @Test + public void getScheme() { + assertEquals("azb", provider.getScheme()); + } + + @ParameterizedTest + @ValueSource(ints = {0, 1, 2}) + public void createDirParentExists(int depth) throws IOException { + AzureFileSystem fs = createFS(config); + + // Generate resource names. + // Don't use default directory to ensure we honor the root. + String rootName = getNonDefaultRootDir(fs); + String parent = getPathWithDepth(depth); + String dirPathStr = parent + generateBlobName(); + + Path dirPath = fs.getPath(rootName, dirPathStr); + + // Generate clients to resources. Create resources as necessary + BlobContainerClient containerClient = rootNameToContainerClient(rootName); + /* + In this case, we are putting the blob in the root directory, i.e. 
directly in the container, so no need to + create a blob. + */ + if (!"".equals(parent)) { + containerClient.getBlobClient(parent).getAppendBlobClient().create(); + } + BlobClient dirClient = containerClient.getBlobClient(dirPathStr); + fs.provider().createDirectory(dirPath); + + checkBlobIsDir(dirClient); + } + + @Test + public void createDirRelativePath() throws IOException { + AzureFileSystem fs = createFS(config); + String fileName = generateBlobName(); + BlobClient blobClient = rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(fileName); + + // Relative paths are resolved against the default directory + fs.provider().createDirectory(fs.getPath(fileName)); + + checkBlobIsDir(blobClient); + } + + @Test + public void createDirFileAlreadyExists() { + AzureFileSystem fs = createFS(config); + String fileName = generateBlobName(); + BlockBlobClient blobClient = rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(fileName) + .getBlockBlobClient(); + blobClient.commitBlockList(Collections.emptyList(), false); + + // Will go to default directory + assertThrows(FileAlreadyExistsException.class, () -> fs.provider().createDirectory(fs.getPath(fileName))); + } + + @Test + public void createDirConcreteDirAlreadyExists() { + AzureFileSystem fs = createFS(config); + String fileName = generateBlobName(); + BlockBlobClient blobClient = rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(fileName) + .getBlockBlobClient(); + putDirectoryBlob(blobClient); + + assertThrows(FileAlreadyExistsException.class, () -> fs.provider().createDirectory(fs.getPath(fileName))); + } + + @Test + public void createDirVirtualDirAlreadyExists() throws IOException { + AzureFileSystem fs = createFS(config); + String fileName = generateBlobName(); + BlobContainerClient containerClient = rootNameToContainerClient(getDefaultDir(fs)); + BlobClient blobClient = containerClient.getBlobClient(fileName); + AppendBlobClient blobClient2 = containerClient.getBlobClient(fileName + 
fs.getSeparator() + generateBlobName()) + .getAppendBlobClient(); + blobClient2.create(); + fs.provider().createDirectory(fs.getPath(fileName)); + + assertTrue(blobClient.exists()); // We will turn the directory from virtual to concrete + checkBlobIsDir(blobClient); + } + + @Test + public void createDirRoot() { + AzureFileSystem fs = createFS(config); + + assertThrows(IllegalArgumentException.class, () -> fs.provider().createDirectory(fs.getDefaultDirectory())); + } + + @Test + public void createDirNoParent() { + AzureFileSystem fs = createFS(config); + + // Parent doesn't exists. + assertThrows(IOException.class, () -> fs.provider() + .createDirectory(fs.getPath(generateBlobName() + fs.getSeparator() + generateBlobName()))); + } + + @Test + public void createDirInvalidRoot() { + AzureFileSystem fs = createFS(config); + + assertThrows(IOException.class, + () -> fs.provider().createDirectory(fs.getPath("fakeRoot:" + fs.getSeparator() + generateBlobName()))); + } + + @Test + public void createDirAttributes() throws NoSuchAlgorithmException, IOException { + AzureFileSystem fs = createFS(config); + String fileName = generateBlobName(); + AppendBlobClient blobClient = rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(fileName) + .getAppendBlobClient(); + byte[] contentMd5 = MessageDigest.getInstance("MD5").digest(new byte[0]); + FileAttribute[] attributes = new FileAttribute[]{ + new TestFileAttribute<>("fizz", "buzz"), + new TestFileAttribute<>("foo", "bar"), + new TestFileAttribute<>("Content-Type", "myType"), + new TestFileAttribute<>("Content-Disposition", "myDisposition"), + new TestFileAttribute<>("Content-Language", "myLanguage"), + new TestFileAttribute<>("Content-Encoding", "myEncoding"), + new TestFileAttribute<>("Cache-Control", "myControl"), + new TestFileAttribute<>("Content-MD5", contentMd5) + }; + + fs.provider().createDirectory(fs.getPath(fileName), attributes); + BlobProperties props = blobClient.getProperties(); + + assertEquals("buzz", 
props.getMetadata().get("fizz")); + assertEquals("bar", props.getMetadata().get("foo")); + assertFalse(props.getMetadata().containsKey("Content-Type")); + assertFalse(props.getMetadata().containsKey("Content-Disposition")); + assertFalse(props.getMetadata().containsKey("Content-Language")); + assertFalse(props.getMetadata().containsKey("Content-Encoding")); + assertFalse(props.getMetadata().containsKey("Content-MD5")); + assertFalse(props.getMetadata().containsKey("Cache-Control")); + assertEquals("myType", props.getContentType()); + assertEquals("myDisposition", props.getContentDisposition()); + assertEquals("myLanguage", props.getContentLanguage()); + assertEquals("myEncoding", props.getContentEncoding()); + assertArraysEqual(contentMd5, props.getContentMd5()); + assertEquals("myControl", props.getCacheControl()); + } + + @Test + public void createDirFSClosed() throws IOException { + AzureFileSystem fs = createFS(config); + Path path = fs.getPath(generateBlobName()); + fs.close(); + + assertThrows(ClosedFileSystemException.class, () -> fs.provider().createDirectory(path)); + } + + @ParameterizedTest + @CsvSource(value = {"false,false,false", "true,true,false", "true,false,true", "true,false,false"}) + public void copySource(boolean sourceIsDir, boolean sourceIsVirtual, boolean sourceEmpty) throws IOException { + AzureFileSystem fs = createFS(config); + basicSetupForCopyTest(fs); + + // Generate resource names. + // Don't use default directory to ensure we honor the root. 
+ AppendBlobClient sourceChildClient = null; + AppendBlobClient destChildClient = null; + + // Create resources as necessary + if (sourceIsDir) { + if (!sourceIsVirtual) { + fs.provider().createDirectory(sourcePath); + } + if (!sourceEmpty) { + String sourceChildName = generateBlobName(); + sourceChildClient = ((AzurePath) sourcePath.resolve(sourceChildName)).toBlobClient() + .getAppendBlobClient(); + sourceChildClient.create(); + destChildClient = ((AzurePath) destPath.resolve(sourceChildName)).toBlobClient() + .getAppendBlobClient(); + } + } else { // source is file + sourceClient.upload(DATA.getDefaultBinaryData()); + } + + fs.provider().copy(sourcePath, destPath, StandardCopyOption.COPY_ATTRIBUTES); + + // Check the source still exists. + if (!sourceIsVirtual) { + assertTrue(sourceClient.exists()); + } else { + assertTrue(new AzureResource(sourcePath).checkDirectoryExists()); + } + + // If the source was a file, check that the destination data matches the source. + if (!sourceIsDir) { + ByteArrayOutputStream outStream = new ByteArrayOutputStream(); + destinationClient.download(outStream); + assertArraysEqual(DATA.getDefaultBytes(), outStream.toByteArray()); + } else { + // Check that the destination directory is concrete. + assertTrue(destinationClient.exists()); + checkBlobIsDir(destinationClient); + if (!sourceEmpty) { + // Check that source child still exists and was not copied to the destination. 
+ assertTrue(sourceChildClient.exists()); + assertFalse(destChildClient.exists()); + } + } + } + + @ParameterizedTest + @CsvSource(value = {"false,false", "true,false", "true,true"}) + public void copyDestination(boolean destinationExists, boolean destinationIsDir) throws IOException { + AzureFileSystem fs = createFS(config); + basicSetupForCopyTest(fs); + + // Create resources as necessary + sourceClient.upload(DATA.getDefaultBinaryData()); + if (destinationExists) { + if (destinationIsDir) { + fs.provider().createDirectory(destPath); + } else { // source is file + destinationClient.upload(new ByteArrayInputStream(getRandomByteArray(20)), 20); + } + } + fs.provider().copy(sourcePath, destPath, StandardCopyOption.COPY_ATTRIBUTES, + StandardCopyOption.REPLACE_EXISTING); + + assertTrue(sourceClient.exists()); + ByteArrayOutputStream outStream = new ByteArrayOutputStream(); + destinationClient.download(outStream); + assertArraysEqual(DATA.getDefaultBytes(), outStream.toByteArray()); + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void copyNonEmptyDest(boolean destinationIsVirtual) throws IOException { + AzureFileSystem fs = createFS(config); + basicSetupForCopyTest(fs); + + // Create resources as necessary + sourceClient.upload(new ByteArrayInputStream(getRandomByteArray(20)), 20); + if (!destinationIsVirtual) { + fs.provider().createDirectory(destPath); + } + BlobClient destChildClient = ((AzurePath) destPath.resolve(generateBlobName())).toBlobClient(); + destChildClient.upload(DATA.getDefaultBinaryData()); + + // Ensure that even when trying to replace_existing, we still fail. 
+ assertThrows(DirectoryNotEmptyException.class, () -> fs.provider().copy(sourcePath, destPath, + StandardCopyOption.COPY_ATTRIBUTES, StandardCopyOption.REPLACE_EXISTING)); + assertTrue(new AzureResource(destPath).checkDirectoryExists()); + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void copyReplaceExistingFail(boolean destinationIsDir) throws IOException { + // The success case is tested by the "copy destination" test. + // Testing replacing a virtual directory is in the "non empty dest" test as there can be no empty virtual dir. + AzureFileSystem fs = createFS(config); + basicSetupForCopyTest(fs); + + // Create resources as necessary + sourceClient.upload(new ByteArrayInputStream(getRandomByteArray(20)), 20); + if (destinationIsDir) { + fs.provider().createDirectory(destPath); + } else { + destinationClient.upload(DATA.getDefaultBinaryData()); + } + + assertThrows(FileAlreadyExistsException.class, + () -> fs.provider().copy(sourcePath, destPath, StandardCopyOption.COPY_ATTRIBUTES)); + if (destinationIsDir) { + assertTrue(new AzureResource(destPath).checkDirectoryExists()); + } else { + ByteArrayOutputStream outStream = new ByteArrayOutputStream(); + destinationClient.download(outStream); + assertArraysEqual(DATA.getDefaultBytes(), outStream.toByteArray()); + } + } + + @Test + public void copyOptionsFail() { + AzureFileSystem fs = createFS(config); + basicSetupForCopyTest(fs); + + assertThrows(UnsupportedOperationException.class, () -> fs.provider().copy(sourcePath, destPath)); + assertThrows(UnsupportedOperationException.class, () -> fs.provider().copy(sourcePath, destPath, + StandardCopyOption.COPY_ATTRIBUTES, StandardCopyOption.ATOMIC_MOVE)); + } + + @ParameterizedTest + @CsvSource(value = {"1,1", "1,2", "1,3", "2,1", "2,2", "2,3", "3,1", "3,2", "3,3"}) + public void copyDepth(int sourceDepth, int destDepth) throws IOException { + AzureFileSystem fs = createFS(config); + + // Generate resource names. 
        // Don't use default directory to ensure we honor the root.
        String rootName = getNonDefaultRootDir(fs);
        AzurePath sourcePath = (AzurePath) fs.getPath(rootName, getPathWithDepth(sourceDepth), generateBlobName());

        String destParent = getPathWithDepth(destDepth);
        AzurePath destPath = (AzurePath) fs.getPath(rootName, destParent, generateBlobName());

        // Generate clients to resources.
        BlobClient sourceClient = sourcePath.toBlobClient();
        BlobClient destinationClient = destPath.toBlobClient();
        BlobClient destParentClient = ((AzurePath) destPath.getParent()).toBlobClient();

        // Create resources as necessary
        sourceClient.upload(DATA.getDefaultBinaryData());
        putDirectoryBlob(destParentClient.getBlockBlobClient());

        fs.provider().copy(sourcePath, destPath, StandardCopyOption.COPY_ATTRIBUTES);

        ByteArrayOutputStream outStream = new ByteArrayOutputStream();
        destinationClient.download(outStream);
        assertArraysEqual(DATA.getDefaultBytes(), outStream.toByteArray());
    }

    // Copying to a destination whose parent directory does not exist must fail and create nothing.
    @Test
    public void copyNoParentForDest() throws IOException {
        AzureFileSystem fs = createFS(config);
        // Generate resource names.
        // Don't use default directory to ensure we honor the root.
        String rootName = getNonDefaultRootDir(fs);
        AzurePath sourcePath = (AzurePath) fs.getPath(rootName, generateBlobName());
        AzurePath destPath = (AzurePath) fs.getPath(rootName, generateBlobName(), generateBlobName());

        // Generate clients to resources.
        BlobClient sourceClient = sourcePath.toBlobClient();
        BlobClient destinationClient = destPath.toBlobClient();

        // Create resources as necessary
        sourceClient.upload(new ByteArrayInputStream(getRandomByteArray(20)), 20);

        assertThrows(IOException.class, () -> fs.provider().copy(sourcePath, destPath,
            StandardCopyOption.COPY_ATTRIBUTES));
        assertFalse(destinationClient.exists());
    }

    // Copying a source that was never created must fail with an IOException.
    @Test
    public void copySourceDoesNotExist() {
        AzureFileSystem fs = createFS(config);
        basicSetupForCopyTest(fs);

        assertThrows(IOException.class, () -> fs.provider().copy(sourcePath, destPath,
            StandardCopyOption.COPY_ATTRIBUTES));
    }

    // Neither end of a copy may be a root (container) directory.
    @Test
    public void copyNoRootDir() {
        AzureFileSystem fs = createFS(config);
        basicSetupForCopyTest(fs);

        // Source root
        assertThrows(IllegalArgumentException.class, () -> fs.provider().copy(sourcePath.getRoot(), destPath,
            StandardCopyOption.COPY_ATTRIBUTES));

        // Dest root
        assertThrows(IllegalArgumentException.class, () -> fs.provider().copy(sourcePath, destPath.getRoot(),
            StandardCopyOption.COPY_ATTRIBUTES));
    }

    // Per the Files.copy contract, copying a path onto itself is a no-op.
    @Test
    public void copySameFileNoop() {
        AzureFileSystem fs = createFS(config);
        basicSetupForCopyTest(fs);

        // Even when the source does not exist or COPY_ATTRIBUTES is not specified, this will succeed as no-op
        assertDoesNotThrow(() -> fs.provider().copy(sourcePath, sourcePath));
    }

    // Copies may span containers (distinct file system roots).
    @Test
    public void copyAcrossContainers() throws IOException {
        AzureFileSystem fs = createFS(config);

        // Generate resource names.
        AzurePath sourcePath = (AzurePath) fs.getPath(getNonDefaultRootDir(fs), generateBlobName());
        AzurePath destPath = (AzurePath) fs.getPath(getDefaultDir(fs), generateBlobName());

        // Generate clients to resources.
        BlobClient sourceClient = sourcePath.toBlobClient();
        BlobClient destinationClient = destPath.toBlobClient();

        // Create resources as necessary
        sourceClient.upload(DATA.getDefaultBinaryData());
        fs.provider().copy(sourcePath, destPath, StandardCopyOption.COPY_ATTRIBUTES);

        assertTrue(sourceClient.exists());
        assertTrue(destinationClient.exists());
    }

    // Copy must fail with ClosedFileSystemException when either file system involved is closed.
    @ParameterizedTest
    @ValueSource(booleans = {true, false})
    public void copyClosedFS(boolean sourceClosed) throws IOException {
        AzureFileSystem fs = createFS(config);
        basicSetupForCopyTest(fs);
        AzureFileSystem fsDest = createFS(config);
        Path destPath = fsDest.getPath(getDefaultDir(fsDest), generateBlobName());
        sourceClient.upload(DATA.getDefaultBinaryData());

        if (sourceClosed) {
            fs.close();
        } else {
            fsDest.close();
        }

        assertThrows(ClosedFileSystemException.class,
            () -> fs.provider().copy(sourcePath, destPath, StandardCopyOption.COPY_ATTRIBUTES));
    }

    // Delete works for both regular files and concrete directory blobs.
    @ParameterizedTest
    @ValueSource(booleans = {true, false})
    public void delete(boolean isDir) throws IOException {
        AzureFileSystem fs = createFS(config);
        AzurePath path = ((AzurePath) fs.getPath(getNonDefaultRootDir(fs), generateBlobName()));
        BlockBlobClient blobClient = path.toBlobClient().getBlockBlobClient();

        if (isDir) {
            putDirectoryBlob(blobClient);
        } else {
            blobClient.upload(DATA.getDefaultBinaryData());
        }

        fs.provider().delete(path);

        assertFalse(blobClient.exists());
    }

    // Deleting a non-empty directory (virtual or concrete) must fail and leave it in place.
    @ParameterizedTest
    @ValueSource(booleans = {true, false})
    public void deleteNonEmptyDir(boolean virtual) throws IOException {
        AzureFileSystem fs = createFS(config);
        AzurePath path = ((AzurePath) fs.getPath(getNonDefaultRootDir(fs), generateBlobName()));
        BlockBlobClient blobClient = path.toBlobClient().getBlockBlobClient();
        BlobClient childClient = ((AzurePath) path.resolve(generateBlobName())).toBlobClient();

        childClient.upload(DATA.getDefaultBinaryData());
        if (!virtual) {
putDirectoryBlob(blobClient); + } + + assertThrows(DirectoryNotEmptyException.class, () -> fs.provider().delete(path)); + assertTrue(new AzureResource(path).checkDirectoryExists()); + } + + @Test + public void deleteNoTarget() { + AzureFileSystem fs = createFS(config); + AzurePath path = ((AzurePath) fs.getPath(getNonDefaultRootDir(fs), generateBlobName())); + + assertThrows(NoSuchFileException.class, () -> fs.provider().delete(path)); + } + + @Test + public void deleteDefaultDir() throws IOException { + AzureFileSystem fs = createFS(config); + AzurePath path = ((AzurePath) fs.getPath(generateBlobName())); + BlobClient client = path.toBlobClient(); + + client.upload(DATA.getDefaultBinaryData()); + fs.provider().delete(path); + + assertFalse(client.exists()); + } + + @Test + public void deleteClosedFS() throws IOException { + AzureFileSystem fs = createFS(config); + AzurePath path = ((AzurePath) fs.getPath(getNonDefaultRootDir(fs), generateBlobName())); + BlockBlobClient blobClient = path.toBlobClient().getBlockBlobClient(); + putDirectoryBlob(blobClient); + + fs.close(); + + assertThrows(ClosedFileSystemException.class, () -> fs.provider().delete(path)); + } + + @Test + public void directoryStream() throws IOException { + AzureFileSystem fs = createFS(config); + AzureResource resource = new AzureResource(fs.getPath("a" + generateBlobName())); + resource.getBlobClient().getBlockBlobClient().commitBlockList(Collections.emptyList()); + resource = new AzureResource(fs.getPath("b" + generateBlobName())); + resource.getBlobClient().getBlockBlobClient().commitBlockList(Collections.emptyList()); + + Iterator iterator = fs.provider().newDirectoryStream(fs.getPath(getDefaultDir(fs)), + path -> path.getFileName().toString().startsWith("a")).iterator(); + + assertTrue(iterator.hasNext()); + assertTrue(iterator.next().getFileName().toString().startsWith("a")); + assertFalse(iterator.hasNext()); + } + + @Test + public void directoryStreamInvalidRoot() { + AzureFileSystem fs = 
createFS(config); + + assertThrows(IOException.class, () -> fs.provider().newDirectoryStream(fs.getPath("fakeRoot:"), path -> true)); + } + + @Test + public void directoryStreamClosedFS() throws IOException { + AzureFileSystem fs = createFS(config); + Path path = fs.getPath(getDefaultDir(fs)); + fs.close(); + + assertThrows(ClosedFileSystemException.class, () -> fs.provider().newDirectoryStream(path, null)); + } + + @Test + public void inputStreamDefault() throws IOException { + AzureFileSystem fs = createFS(config); + sourcePath = (AzurePath) fs.getPath(generateBlobName()); + sourceClient = sourcePath.toBlobClient(); + sourceClient.upload(DATA.getDefaultBinaryData()); + + compareInputStreams(fs.provider().newInputStream(sourcePath), DATA.getDefaultInputStream(), + DATA.getDefaultDataSize()); + } + + @ParameterizedTest + @EnumSource(value = StandardOpenOption.class, names = {"APPEND", "CREATE", "CREATE_NEW", "DELETE_ON_CLOSE", + "DSYNC", "SPARSE", "SYNC", "TRUNCATE_EXISTING", "WRITE"}) + public void inputStreamOptionsFail(StandardOpenOption option) { + AzureFileSystem fs = createFS(config); + + // Options are validated before path is validated. 
        assertThrows(UnsupportedOperationException.class,
            () -> fs.provider().newInputStream(fs.getPath("foo"), option));
    }

    // A root (container) is not a file and cannot be read.
    @Test
    public void inputStreamNonFileFailRoot() {
        AzureFileSystem fs = createFS(config);

        assertThrows(IllegalArgumentException.class, () -> fs.provider().newInputStream(fs.getPath(getDefaultDir(fs))));
    }

    // A directory blob is not a file and cannot be read.
    @Test
    public void inputStreamNonFileFailDir() {
        AzureFileSystem fs = createFS(config);
        BlockBlobClient bc = rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(generateBlobName())
            .getBlockBlobClient();
        putDirectoryBlob(bc);

        assertThrows(IOException.class, () -> fs.provider().newInputStream(fs.getPath(bc.getBlobName())));
    }

    // Reading a path that was never created must fail.
    @Test
    public void inputStreamNonFileFailNoFile() {
        AzureFileSystem fs = createFS(config);

        assertThrows(IOException.class, () -> fs.provider().newInputStream(fs.getPath("foo")));
    }

    @Test
    public void inputStreamFSClosed() throws IOException {
        AzureFileSystem fs = createFS(config);
        AzurePath path = ((AzurePath) fs.getPath(getNonDefaultRootDir(fs), generateBlobName()));
        BlockBlobClient blobClient = path.toBlobClient().getBlockBlobClient();
        blobClient.upload(DATA.getDefaultBinaryData());

        fs.close();

        assertThrows(ClosedFileSystemException.class, () -> fs.provider().newInputStream(path));
    }

    // Default output-stream options permit both creating a new file and overwriting an existing one.
    @Test
    public void outputStreamOptionsDefault() throws IOException {
        AzureFileSystem fs = createFS(config);
        BlockBlobClient bc = rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(generateBlobName())
            .getBlockBlobClient();
        OutputStream nioStream = fs.provider().newOutputStream(fs.getPath(bc.getBlobName()));

        // Defaults should allow us to create a new file.
        nioStream.write(DATA.getDefaultBytes());
        nioStream.close();

        compareInputStreams(bc.openInputStream(), DATA.getDefaultInputStream(), DATA.getDefaultDataSize());

        // Defaults should allow us to open to an existing file and overwrite the destination.
+ byte[] randomData = getRandomByteArray(100); + nioStream = fs.provider().newOutputStream(fs.getPath(bc.getBlobName())); + nioStream.write(randomData); + nioStream.close(); + + compareInputStreams(bc.openInputStream(), new ByteArrayInputStream(randomData), 100); + } + + @Test + public void outputStreamOptionsCreate() { + // Create works both on creating new and opening existing. We test these scenarios above. + // Here we assert that we cannot create without this option (i.e. you are only allowed to overwrite, not create) + AzureFileSystem fs = createFS(config); + + // Explicitly exclude a create option. + assertThrows(IOException.class, () -> fs.provider().newOutputStream(fs.getPath(generateBlobName()), + StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING)); + } + + @Test + public void outputStreamOptionsCreateNew() throws IOException { + AzureFileSystem fs = createFS(config); + BlockBlobClient bc = rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(generateBlobName()) + .getBlockBlobClient(); + + // Succeed in creating a new file + OutputStream nioStream = fs.provider().newOutputStream(fs.getPath(bc.getBlobName()), + StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING); + nioStream.write(DATA.getDefaultBytes()); + nioStream.close(); + + compareInputStreams(bc.openInputStream(), DATA.getDefaultInputStream(), DATA.getDefaultDataSize()); + + // Fail in overwriting an existing + assertThrows(IOException.class, () -> fs.provider().newOutputStream(fs.getPath(bc.getBlobName()), + StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING)); + } + + @Test + public void outputStreamOptionsMissingRequired() { + AzureFileSystem fs = createFS(config); + + // Missing WRITE + assertThrows(IllegalArgumentException.class, () -> fs.provider().newOutputStream(fs.getPath(generateBlobName()), + StandardOpenOption.TRUNCATE_EXISTING)); + + // Missing TRUNCATE_EXISTING and CREATE_NEW + 
assertThrows(IllegalArgumentException.class, () -> fs.provider().newOutputStream(fs.getPath(generateBlobName()), + StandardOpenOption.WRITE)); + + // Missing only TRUNCATE_EXISTING + assertDoesNotThrow(() -> fs.provider().newOutputStream(fs.getPath(generateBlobName()), StandardOpenOption.WRITE, + StandardOpenOption.CREATE_NEW)); + + // Missing only CREATE_NEW + assertDoesNotThrow(() -> fs.provider().newOutputStream(fs.getPath(generateBlobName()), StandardOpenOption.WRITE, + StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.CREATE)); + } + + @ParameterizedTest + @EnumSource(value = StandardOpenOption.class, names = {"APPEND", "DELETE_ON_CLOSE", "DSYNC", "READ", "SPARSE", + "SYNC"}) + public void outputStreamOptionsInvalid(StandardOpenOption option) { + AzureFileSystem fs = createFS(config); + + assertThrows(UnsupportedOperationException.class, () -> fs.provider().newOutputStream( + fs.getPath(generateBlobName()), option, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING)); + } + + @EnabledIf("com.azure.storage.blob.nio.BlobNioTestBase#liveOnly") + @ParameterizedTest + @CsvSource(value = {"60,0", "150,3"}) + public void outputStreamFileSystemConfig(int dataSize, int blockCount) throws IOException { + config.put(AzureFileSystem.AZURE_STORAGE_UPLOAD_BLOCK_SIZE, 50L); + config.put(AzureFileSystem.AZURE_STORAGE_PUT_BLOB_THRESHOLD, 100L); + AzureFileSystem fs = createFS(config); + BlockBlobClient bc = rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(generateBlobName()) + .getBlockBlobClient(); + OutputStream nioStream = fs.provider().newOutputStream(fs.getPath(bc.getBlobName())); + byte[] data = getRandomByteArray(dataSize); + + nioStream.write(data); + nioStream.close(); + + assertEquals(blockCount, bc.listBlocks(BlockListType.COMMITTED).getCommittedBlocks().size()); + } + + @Test + public void outputSteamOpenDirectoryFail() { + AzureFileSystem fs = createFS(config); + BlockBlobClient bc = 
rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(generateBlobName()) + .getBlockBlobClient(); + putDirectoryBlob(bc); + + assertThrows(IOException.class, () -> fs.provider().newOutputStream(fs.getPath(bc.getBlobName()))); + } + + @Test + public void outputStreamClosedFS() throws IOException { + AzureFileSystem fs = createFS(config); + Path path = fs.getPath(generateBlobName()); + + fs.close(); + + assertThrows(ClosedFileSystemException.class, () -> fs.provider().newOutputStream(path)); + } + + @Test + public void byteChannelDefault() throws IOException { + AzureFileSystem fs = createFS(config); + Path path = fs.getPath(generateBlobName()); + Files.createFile(path); + + SeekableByteChannel channel = fs.provider().newByteChannel(path, null); + + // This indicates the channel is open in read mode, which is the default + assertDoesNotThrow(() -> channel.read(ByteBuffer.allocate(1))); + } + + @ParameterizedTest + @EnumSource(value = StandardOpenOption.class, names = {"APPEND", "DELETE_ON_CLOSE", "DSYNC", "SPARSE", "SYNC"}) + public void byteChannelOptionsFail(StandardOpenOption option) { + AzureFileSystem fs = createFS(config); + + // Options are validated before path is validated. 
        assertThrows(UnsupportedOperationException.class,
            () -> fs.provider().newByteChannel(fs.getPath("foo"), new HashSet<>(Arrays.asList(option))));
    }

    // A root (container) cannot be opened as a channel.
    @Test
    public void byteChannelReadNonFileFailRoot() {
        AzureFileSystem fs = createFS(config);

        assertThrows(IllegalArgumentException.class,
            () -> fs.provider().newByteChannel(fs.getPath(getDefaultDir(fs)), null));
    }

    // A directory blob cannot be opened as a channel.
    @Test
    public void byteChannelReadFileFailDir() {
        AzureFileSystem fs = createFS(config);
        BlockBlobClient bc = rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(generateBlobName())
            .getBlockBlobClient();
        putDirectoryBlob(bc);

        assertThrows(IOException.class, () -> fs.provider().newByteChannel(fs.getPath(bc.getBlobName()), null));
    }

    // Opening a path that was never created for read must fail.
    @Test
    public void byteChannelReadNonFileFailNoFile() {
        AzureFileSystem fs = createFS(config);

        assertThrows(IOException.class, () -> fs.provider().newByteChannel(fs.getPath("foo"), null));
    }

    @Test
    public void byteChannelFSClosed() throws IOException {
        AzureFileSystem fs = createFS(config);
        AzurePath path = ((AzurePath) fs.getPath(getNonDefaultRootDir(fs), generateBlobName()));
        path.toBlobClient().getBlockBlobClient().upload(DATA.getDefaultBinaryData());

        fs.close();

        assertThrows(ClosedFileSystemException.class, () -> fs.provider().newByteChannel(path, null));
    }

    @Test
    public void byteChannelOptionsCreate() throws IOException {
        AzureFileSystem fs = createFS(config);
        BlockBlobClient bc = rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(generateBlobName())
            .getBlockBlobClient();

        // There are no default options for write as read is the default for channel. We must specify all required.
        SeekableByteChannel nioChannel = fs.provider().newByteChannel(fs.getPath(bc.getBlobName()),
            new HashSet<>(Arrays.asList(StandardOpenOption.WRITE, StandardOpenOption.CREATE,
                StandardOpenOption.TRUNCATE_EXISTING)));

        // Create should allow us to create a new file.
+ nioChannel.write(DATA.getDefaultData().duplicate()); + nioChannel.close(); + + compareInputStreams(bc.openInputStream(), DATA.getDefaultInputStream(), DATA.getDefaultDataSize()); + + // Explicitly exclude a create option. + assertThrows(IOException.class, () -> fs.provider().newOutputStream(fs.getPath(generateBlobName()), + StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING)); + } + + @Test + public void byteChannelOptionsCreateNew() throws IOException { + AzureFileSystem fs = createFS(config); + BlockBlobClient bc = rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(generateBlobName()) + .getBlockBlobClient(); + + // Succeed in creating a new file + SeekableByteChannel nioChannel = fs.provider().newByteChannel(fs.getPath(bc.getBlobName()), + new HashSet<>(Arrays.asList(StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE, + StandardOpenOption.TRUNCATE_EXISTING))); + nioChannel.write(DATA.getDefaultData().duplicate()); + nioChannel.close(); + + compareInputStreams(bc.openInputStream(), DATA.getDefaultInputStream(), DATA.getDefaultDataSize()); + + // Fail in overwriting an existing file + assertThrows(IOException.class, () -> fs.provider().newByteChannel(fs.getPath(bc.getBlobName()), + new HashSet<>(Arrays.asList(StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE, + StandardOpenOption.TRUNCATE_EXISTING)))); + } + + @Test + public void byteChannelFileAttributes() throws NoSuchAlgorithmException, IOException { + AzureFileSystem fs = createFS(config); + BlockBlobClient bc = rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(generateBlobName()) + .getBlockBlobClient(); + byte[] contentMd5 = MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes()); + FileAttribute[] attributes = new FileAttribute[]{ + new TestFileAttribute<>("fizz", "buzz"), + new TestFileAttribute<>("foo", "bar"), + new TestFileAttribute<>("Content-Type", "myType"), + new TestFileAttribute<>("Content-Disposition", "myDisposition"), + new 
TestFileAttribute<>("Content-Language", "myLanguage"), + new TestFileAttribute<>("Content-Encoding", "myEncoding"), + new TestFileAttribute<>("Cache-Control", "myControl"), + new TestFileAttribute<>("Content-MD5", contentMd5) + }; + + SeekableByteChannel nioChannel = fs.provider().newByteChannel(fs.getPath(bc.getBlobName()), + new HashSet<>(Arrays.asList(StandardOpenOption.CREATE_NEW, + StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING)), attributes); + nioChannel.write(DATA.getDefaultData().duplicate()); + nioChannel.close(); + BlobProperties props = bc.getProperties(); + + compareInputStreams(bc.openInputStream(), DATA.getDefaultInputStream(), DATA.getDefaultDataSize()); + assertEquals("buzz", props.getMetadata().get("fizz")); + assertEquals("bar", props.getMetadata().get("foo")); + assertFalse(props.getMetadata().containsKey("Content-Type")); + assertFalse(props.getMetadata().containsKey("Content-Disposition")); + assertFalse(props.getMetadata().containsKey("Content-Language")); + assertFalse(props.getMetadata().containsKey("Content-Encoding")); + assertFalse(props.getMetadata().containsKey("Content-MD5")); + assertFalse(props.getMetadata().containsKey("Cache-Control")); + assertEquals("myType", props.getContentType()); + assertEquals("myDisposition", props.getContentDisposition()); + assertEquals("myLanguage", props.getContentLanguage()); + assertEquals("myEncoding", props.getContentEncoding()); + assertArraysEqual(contentMd5, props.getContentMd5()); + assertEquals("myControl", props.getCacheControl()); + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void byteChannelFileAttrNullEmpty(boolean isNull) throws IOException { + AzureFileSystem fs = createFS(config); + BlockBlobClient bc = rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(generateBlobName()) + .getBlockBlobClient(); + ByteBuffer data = DATA.getDefaultData().duplicate(); + + SeekableByteChannel nioChannel = 
fs.provider().newByteChannel(fs.getPath(bc.getBlobName()), + new HashSet<>(Arrays.asList(StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE, + StandardOpenOption.TRUNCATE_EXISTING)), isNull ? null : new FileAttribute[0]); + assertDoesNotThrow(() -> nioChannel.write(data)); + assertDoesNotThrow(nioChannel::close); + } + + @Test + public void byteChannelWriteOptionsMissingRequired() { + AzureFileSystem fs = createFS(config); + + // Missing WRITE + assertThrows(UnsupportedOperationException.class, () -> fs.provider().newByteChannel( + fs.getPath(generateBlobName()), new HashSet<>(Arrays.asList(StandardOpenOption.CREATE_NEW, + StandardOpenOption.TRUNCATE_EXISTING)))); + + // Missing TRUNCATE_EXISTING and CREATE_NEW + assertThrows(IllegalArgumentException.class, () -> fs.provider().newByteChannel(fs.getPath(generateBlobName()), + new HashSet<>(Arrays.asList(StandardOpenOption.WRITE, StandardOpenOption.CREATE)))); + + // Missing TRUNCATE_EXISTING + assertDoesNotThrow(() -> fs.provider().newByteChannel(fs.getPath(generateBlobName()), + new HashSet<>(Arrays.asList(StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW)))); + + assertDoesNotThrow(() -> fs.provider().newByteChannel(fs.getPath(generateBlobName()), new HashSet<>( + Arrays.asList(StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.CREATE)))); + } + + @ParameterizedTest + @EnumSource(value = StandardOpenOption.class, names = {"APPEND", "DELETE_ON_CLOSE", "DSYNC", "READ", "SPARSE", + "SYNC"}) + public void byteChannelOptionsInvalid(StandardOpenOption option) { + AzureFileSystem fs = createFS(config); + + assertThrows(UnsupportedOperationException.class, () -> fs.provider().newOutputStream( + fs.getPath(generateBlobName()), option, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING)); + } + + @EnabledIf("com.azure.storage.blob.nio.BlobNioTestBase#liveOnly") + @ParameterizedTest + @CsvSource(value = {"60,0", "150,3"}) + public void byteChannelFileSystemConfig(int 
dataSize, int blockCount) throws IOException { + config.put(AzureFileSystem.AZURE_STORAGE_UPLOAD_BLOCK_SIZE, 50L); + config.put(AzureFileSystem.AZURE_STORAGE_PUT_BLOB_THRESHOLD, 100L); + AzureFileSystem fs = createFS(config); + BlockBlobClient bc = rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(generateBlobName()) + .getBlockBlobClient(); + SeekableByteChannel nioChannel = fs.provider().newByteChannel(fs.getPath(bc.getBlobName()), + new HashSet<>(Arrays.asList(StandardOpenOption.WRITE, StandardOpenOption.CREATE, + StandardOpenOption.TRUNCATE_EXISTING))); + + nioChannel.write(getRandomData(dataSize)); + nioChannel.close(); + + assertEquals(blockCount, bc.listBlocks(BlockListType.COMMITTED).getCommittedBlocks().size()); + } + + @Test + public void byteChannelOpenDirectoryFail() { + AzureFileSystem fs = createFS(config); + BlockBlobClient bc = rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(generateBlobName()) + .getBlockBlobClient(); + putDirectoryBlob(bc); + + assertThrows(IOException.class, () -> fs.provider().newByteChannel(fs.getPath(bc.getBlobName()), + new HashSet<>(Arrays.asList(StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW)))); + } + + @Test + public void byteChannelClosedFS() throws IOException { + AzureFileSystem fs = createFS(config); + Path path = fs.getPath(generateBlobName()); + + fs.close(); + + assertThrows(ClosedFileSystemException.class, () -> fs.provider().newByteChannel(path, null)); + } + + @Test + public void checkAccess() throws IOException { + AzureFileSystem fs = createFS(config); + Path path = fs.getPath(generateBlobName()); + fs.provider().newOutputStream(path).close(); + + assertDoesNotThrow(() -> fs.provider().checkAccess(path)); + } + + @Test + public void checkAccessRoot() { + AzureFileSystem fs = createFS(config); + Path path = fs.getPath(getDefaultDir(fs)); + + assertDoesNotThrow(() -> fs.provider().checkAccess(path)); + } + + @ParameterizedTest + @EnumSource(value = AccessMode.class, names = {"READ", 
"WRITE", "EXECUTE"}) + public void checkAccessAccessDenied(AccessMode mode) throws IOException { + AzureFileSystem fs = createFS(config); + Path path = fs.getPath(generateBlobName()); + fs.provider().newOutputStream(path).close(); + + assertThrows(AccessDeniedException.class, () -> fs.provider().checkAccess(path, mode)); + } + + @Test + public void checkAccessIOException() throws IOException { + AzureFileSystem fs = createFS(config); + Path path = fs.getPath(generateBlobName()); + fs.provider().newOutputStream(path).close(); + fs.close(); + + config = initializeConfigMap(new CheckAccessIoExceptionPolicy()); + fs = createFS(config); + path = fs.getPath(path.toString()); + + AzureFileSystem finalFs = fs; + Path finalPath = path; + IOException e = assertThrows(IOException.class, () -> finalFs.provider().checkAccess(finalPath)); + assertFalse(e instanceof NoSuchFileException); + } + + class CheckAccessIoExceptionPolicy implements HttpPipelinePolicy { + @Override + public Mono process(HttpPipelineCallContext httpPipelineCallContext, + HttpPipelineNextPolicy httpPipelineNextPolicy) { + HttpRequest request = httpPipelineCallContext.getHttpRequest(); + // GetProperties call to blob + if (request.getUrl().getPath().split("/").length == 3 && request.getHttpMethod() == (HttpMethod.HEAD)) { + return Mono.just(new MockHttpResponse(request, 403, new HttpHeaders() + .set("x-ms-error-code", BlobErrorCode.AUTHORIZATION_FAILURE.toString()))); + } else { + return httpPipelineNextPolicy.process(); + } + } + } + + @Test + public void checkAccessNoFile() { + AzureFileSystem fs = createFS(config); + Path path = fs.getPath(generateBlobName()); + + assertThrows(NoSuchFileException.class, () -> fs.provider().checkAccess(path)); + } + + @Test + public void checkAccessFSClosed() throws IOException { + AzureFileSystem fs = createFS(config); + Path path = fs.getPath(generateBlobName()); + fs.provider().newOutputStream(path).close(); + fs.close(); + + 
assertThrows(ClosedFileSystemException.class, () -> fs.provider().checkAccess(path)); + } + + @ParameterizedTest + @ValueSource(classes = {BasicFileAttributeView.class, AzureBasicFileAttributeView.class, + AzureBlobFileAttributeView.class}) + public void getAttributeView(Class type) { + Class expected = type == AzureBlobFileAttributeView.class + ? AzureBlobFileAttributeView.class : BasicFileAttributeView.class; + AzureFileSystem fs = createFS(config); + + // No path validation is expected for getting the view + assertInstanceOf(expected, fs.provider().getFileAttributeView(fs.getPath("path"), type)); + } + + @Test + public void getAttributeViewFail() { + AzureFileSystem fs = createFS(config); + + // No path validation is expected for getting the view + assertNull(fs.provider().getFileAttributeView(fs.getPath("path"), DosFileAttributeView.class)); + } + + @ParameterizedTest + @ValueSource(classes = {BasicFileAttributes.class, AzureBasicFileAttributes.class, AzureBlobFileAttributes.class}) + public void readAttributes(Class type) throws IOException { + AzureFileSystem fs = createFS(config); + Path path = fs.getPath(generateBlobName()); + fs.provider().newOutputStream(path).close(); + + Class expected = type.equals(AzureBlobFileAttributes.class) + ? 
AzureBlobFileAttributes.class : AzureBasicFileAttributes.class; + + assertInstanceOf(expected, fs.provider().readAttributes(path, type)); + } + + @Test + public void readAttributesDirectory() throws IOException { + AzureFileSystem fs = createFS(config); + Path path = fs.getPath(generateBlobName()); + putDirectoryBlob(new AzureResource(path).getBlobClient().getBlockBlobClient()); + + assertDoesNotThrow(() -> fs.provider().readAttributes(path, BasicFileAttributes.class)); + } + + @Test + public void readAttributesUnsupported() { + AzureFileSystem fs = createFS(config); + + assertThrows(UnsupportedOperationException.class, + () -> fs.provider().readAttributes(fs.getPath("path"), DosFileAttributes.class)); + } + + @Test + public void readAttributesIOException() { + AzureFileSystem fs = createFS(config); + + // Path doesn't exist. + assertThrows(IOException.class, + () -> fs.provider().readAttributes(fs.getPath("path"), BasicFileAttributes.class)); + } + + @Test + public void readAttributesFSClosed() throws IOException { + AzureFileSystem fs = createFS(config); + AzurePath path = ((AzurePath) fs.getPath(getNonDefaultRootDir(fs), generateBlobName())); + path.toBlobClient().getBlockBlobClient().upload(DATA.getDefaultBinaryData()); + + fs.close(); + + assertThrows(ClosedFileSystemException.class, + () -> fs.provider().readAttributes(path, AzureBasicFileAttributes.class)); + } + + @ParameterizedTest + @MethodSource("readAttributesStrParsingSupplier") + public void readAttributesStrParsing(String attrStr, List attrList) throws IOException { + // This test checks that we correctly parse the attribute string and that all the requested attributes are + // represented in the return value. We can also just test a subset of attributes for parsing logic. 
+ AzureFileSystem fs = createFS(config); + Path path = fs.getPath(generateBlobName()); + fs.provider().newOutputStream(path).close(); + + Map result = fs.provider().readAttributes(path, attrStr); + for (String attr : attrList) { + assertTrue(result.containsKey(attr)); + } + assertEquals(attrList.size(), result.keySet().size()); + } + + private static Stream readAttributesStrParsingSupplier() { + List basic = Arrays.asList("lastModifiedTime", "creationTime", "isRegularFile", "isDirectory", + "isVirtualDirectory", "isSymbolicLink", "isOther", "size"); + return Stream.of( + Arguments.of("*", basic), + Arguments.of("basic:*", basic), + Arguments.of("azureBasic:*", basic), + Arguments.of("azureBlob:*", Arrays.asList("lastModifiedTime", "creationTime", "eTag", "blobHttpHeaders", + "blobType", "copyId", "copyStatus", "copySource", "copyProgress", "copyCompletionTime", + "copyStatusDescription", "isServerEncrypted", "accessTier", "isAccessTierInferred", "archiveStatus", + "accessTierChangeTime", "metadata", "isRegularFile", "isDirectory", "isVirtualDirectory", + "isSymbolicLink", "isOther", "size")), + Arguments.of("lastModifiedTime,creationTime", Arrays.asList("lastModifiedTime", "creationTime")), + Arguments.of("basic:isRegularFile,isDirectory,isVirtualDirectory", + Arrays.asList("isRegularFile", "isDirectory", "isVirtualDirectory")), + Arguments.of("azureBasic:size", Collections.singletonList("size")), + Arguments.of("azureBlob:eTag,blobHttpHeaders,blobType,copyId", + Arrays.asList("eTag", "blobHttpHeaders", "blobType", "copyId")) + ); + } + + @Test + public void readAttributesStrDirectory() throws IOException { + AzureFileSystem fs = createFS(config); + Path path = fs.getPath(generateBlobName()); + putDirectoryBlob(new AzureResource(path).getBlobClient().getBlockBlobClient()); + + assertDoesNotThrow(() -> fs.provider().readAttributes(path, "creationTime")); + } + + @ParameterizedTest + @ValueSource(strings = {"azureBlob:size:foo", "", "azureBasic:foo"}) + public void 
readAttributesStrIA(String attrStr) { + AzureFileSystem fs = createFS(config); + Path path = fs.getPath(generateBlobName()); + + assertThrows(IllegalArgumentException.class, () -> fs.provider().readAttributes(path, attrStr)); + } + + @Test + public void readAttributesStrInvalidView() { + AzureFileSystem fs = createFS(config); + Path path = fs.getPath(generateBlobName()); + + assertThrows(UnsupportedOperationException.class, () -> fs.provider().readAttributes(path, "foo:size")); + } + + @Test + public void readAttributesStrIOException() { + AzureFileSystem fs = createFS(config); + + // Path doesn't exist + assertThrows(IOException.class, () -> fs.provider().readAttributes(fs.getPath("path"), "basic:creationTime")); + } + + @Test + public void readAttributesStrClosedFS() throws IOException { + AzureFileSystem fs = createFS(config); + AzurePath path = ((AzurePath) fs.getPath(getNonDefaultRootDir(fs), generateBlobName())); + BlockBlobClient blobClient = path.toBlobClient().getBlockBlobClient(); + blobClient.upload(DATA.getDefaultBinaryData()); + + fs.close(); + + assertThrows(ClosedFileSystemException.class, () -> fs.provider().readAttributes(path, "basic:*")); + } + + @ParameterizedTest + @MethodSource("setAttributesHeadersSupplier") + public void setAttributesHeaders(String cacheControl, String contentDisposition, String contentEncoding, + String contentLanguage, byte[] contentMD5, String contentType) throws IOException { + AzureFileSystem fs = createFS(config); + Path path = fs.getPath(generateBlobName()); + fs.provider().newOutputStream(path).close(); + BlobHttpHeaders headers = new BlobHttpHeaders().setCacheControl(cacheControl) + .setContentDisposition(contentDisposition) + .setContentEncoding(contentEncoding) + .setContentLanguage(contentLanguage) + .setContentMd5(contentMD5) + .setContentType(contentType); + + fs.provider().setAttribute(path, "azureBlob:blobHttpHeaders", headers); + headers = fs.provider().readAttributes(path, 
AzureBlobFileAttributes.class).blobHttpHeaders(); + + assertEquals(cacheControl, headers.getCacheControl()); + assertEquals(contentDisposition, headers.getContentDisposition()); + assertEquals(contentEncoding, headers.getContentEncoding()); + assertEquals(contentLanguage, headers.getContentLanguage()); + assertArraysEqual(contentMD5, headers.getContentMd5()); + assertEquals(contentType, headers.getContentType()); + } + + private static Stream setAttributesHeadersSupplier() throws NoSuchAlgorithmException { + return Stream.of( + Arguments.of(null, null, null, null, null, null), + Arguments.of("control", "disposition", "encoding", "language", + Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())), "type") + ); + } + + @ParameterizedTest + @CsvSource(value = {"null,null,null,null,200", "foo,bar,fizz,buzz,200", "i0,a,i_,a,200"}, nullValues = "null") + public void setAttributesMetadata(String key1, String value1, String key2, String value2) throws IOException { + AzureFileSystem fs = createFS(config); + Path path = fs.getPath(generateBlobName()); + OutputStream os = fs.provider().newOutputStream(path); + os.close(); + + Map metadata = new HashMap<>(); + if (key1 != null && value1 != null) { + metadata.put(key1, value1); + } + if (key2 != null && value2 != null) { + metadata.put(key2, value2); + } + + fs.provider().setAttribute(path, "azureBlob:metadata", metadata); + + assertEquals(metadata, fs.provider().readAttributes(path, AzureBlobFileAttributes.class).metadata()); + } + + @ParameterizedTest + @MethodSource("setAttributesTierSupplier") + public void setAttributesTier(AccessTier tier) throws IOException { + AzureFileSystem fs = createFS(config); + Path path = fs.getPath(generateBlobName()); + OutputStream os = fs.provider().newOutputStream(path); + os.close(); + + fs.provider().setAttribute(path, "azureBlob:tier", tier); + + assertEquals(tier, fs.provider().readAttributes(path, AzureBlobFileAttributes.class).accessTier()); + } + 
+ private static Stream setAttributesTierSupplier() { + return Stream.of(AccessTier.HOT, AccessTier.COOL); + } + + @Test + public void setAttributesDirectory() throws IOException { + AzureFileSystem fs = createFS(config); + Path path = fs.getPath(generateBlobName()); + putDirectoryBlob(new AzureResource(path).getBlobClient().getBlockBlobClient()); + + assertDoesNotThrow(() -> fs.provider().setAttribute(path, "azureBlob:tier", AccessTier.COOL)); + } + + @ParameterizedTest + @ValueSource(strings = { + "azureBlob:metadata:foo", // Invalid format + "", // empty + "azureBasic:foo" // Invalid property + }) + public void setAttributesIA(String attrStr) { + AzureFileSystem fs = createFS(config); + Path path = fs.getPath(generateBlobName()); + + assertThrows(IllegalArgumentException.class, () -> fs.provider().setAttribute(path, attrStr, "Foo")); + } + + @Test + public void setAttributesInvalidView() { + AzureFileSystem fs = createFS(config); + Path path = fs.getPath(generateBlobName()); + + assertThrows(UnsupportedOperationException.class, () -> fs.provider().setAttribute(path, "foo:size", "foo")); + } + + @Test + public void setAttributesIOException() { + AzureFileSystem fs = createFS(config); + + // Path does not exist + // Covers virtual directory, too + assertThrows(IOException.class, + () -> fs.provider().setAttribute(fs.getPath("path"), "azureBlob:metadata", Collections.emptyMap())); + } + + @Test + public void setAttributesFSClosed() throws IOException { + AzureFileSystem fs = createFS(config); + AzurePath path = ((AzurePath) fs.getPath(getNonDefaultRootDir(fs), generateBlobName())); + BlockBlobClient blobClient = path.toBlobClient().getBlockBlobClient(); + blobClient.upload(DATA.getDefaultBinaryData()); + + fs.close(); + + assertThrows(ClosedFileSystemException.class, + () -> fs.provider().setAttribute(path, "azureBlob:blobHttpHeaders", new BlobHttpHeaders())); + } + + private void basicSetupForCopyTest(FileSystem fs) { + // Generate resource names. 
+ // Don't use default directory to ensure we honor the root. + String rootName = getNonDefaultRootDir(fs); + sourcePath = (AzurePath) fs.getPath(rootName, generateBlobName()); + destPath = (AzurePath) fs.getPath(rootName, generateBlobName()); + + // Generate clients to resources. + try { + sourceClient = sourcePath.toBlobClient(); + destinationClient = destPath.toBlobClient(); + } catch (IOException ex) { + throw new UncheckedIOException(ex); + } + } +} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureFileSystemTests.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureFileSystemTests.java new file mode 100644 index 00000000000..0a1048a7247 --- /dev/null +++ b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureFileSystemTests.java @@ -0,0 +1,216 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package com.azure.storage.blob.nio; + +import com.azure.core.credential.AzureSasCredential; +import com.azure.core.util.CoreUtils; +import com.azure.storage.common.sas.AccountSasPermission; +import com.azure.storage.common.sas.AccountSasResourceType; +import com.azure.storage.common.sas.AccountSasService; +import com.azure.storage.common.sas.AccountSasSignatureValues; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.params.provider.ValueSource; + +import java.io.IOException; +import java.net.URI; +import java.nio.file.FileSystem; +import java.nio.file.FileSystemNotFoundException; +import java.nio.file.InvalidPathException; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class AzureFileSystemTests extends BlobNioTestBase { + private Map config; + + @Override + protected void beforeTest() { + super.beforeTest(); + config = initializeConfigMap(); + } + + // We do not have a meaningful way of testing the configurations for the ServiceClient. 
+ @ParameterizedTest + @CsvSource(value = {"1,false,false", "3,false,true", "3,true,false", "3,true,true"}) + public void create(int numContainers, boolean createContainers, boolean sasToken) throws IOException { + List containerNames = IntStream.range(0, numContainers) + .mapToObj(i -> generateContainerName()) + .collect(Collectors.toList()); + config.put(AzureFileSystem.AZURE_STORAGE_FILE_STORES, CoreUtils.stringJoin(",", containerNames)); + if (!sasToken) { + config.put(AzureFileSystem.AZURE_STORAGE_SHARED_KEY_CREDENTIAL, ENV.getPrimaryAccount().getCredential()); + } else { + config.put(AzureFileSystem.AZURE_STORAGE_SAS_TOKEN_CREDENTIAL, new AzureSasCredential( + primaryBlobServiceClient.generateAccountSas( + new AccountSasSignatureValues(testResourceNamer.now().plusDays(2), + AccountSasPermission.parse("rwcdl"), new AccountSasService().setBlobAccess(true), + new AccountSasResourceType().setContainer(true))))); + } + + AzureFileSystem fileSystem = new AzureFileSystem(new AzureFileSystemProvider(), + ENV.getPrimaryAccount().getBlobEndpoint(), config); + + + List actualContainerNames = new ArrayList<>(); + fileSystem.getFileStores().forEach(fs -> actualContainerNames.add(fs.name())); + + assertEquals(containerNames.size(), actualContainerNames.size()); + for (String containerName : containerNames) { + assertTrue(actualContainerNames.contains(containerName)); + assertTrue(primaryBlobServiceClient.getBlobContainerClient(containerName).exists()); + } + assertEquals(primaryBlobServiceAsyncClient.getAccountUrl(), fileSystem.getFileSystemUrl()); + } + + @ParameterizedTest + @CsvSource(value = {"true,false", "false,true"}) + public void createFailIa(boolean credential, boolean containers) { + if (containers) { + config.put(AzureFileSystem.AZURE_STORAGE_FILE_STORES, generateContainerName()); + } + if (credential) { + config.put(AzureFileSystem.AZURE_STORAGE_SHARED_KEY_CREDENTIAL, ENV.getPrimaryAccount().getKey()); + } + + assertThrows(IllegalArgumentException.class, + () 
-> new AzureFileSystem(new AzureFileSystemProvider(), ENV.getPrimaryAccount().getName(), config)); + } + + @Test + public void createFailContainerCheck() { + config.put(AzureFileSystem.AZURE_STORAGE_SAS_TOKEN_CREDENTIAL, new AzureSasCredential( + primaryBlobServiceClient.generateAccountSas( + new AccountSasSignatureValues(testResourceNamer.now().plusDays(2), + AccountSasPermission.parse("d"), new AccountSasService().setBlobAccess(true), + new AccountSasResourceType().setContainer(true))))); + config.put(AzureFileSystem.AZURE_STORAGE_FILE_STORES, generateContainerName()); + + assertThrows(IOException.class, () -> new AzureFileSystem(new AzureFileSystemProvider(), + ENV.getPrimaryAccount().getBlobEndpoint(), config)); + } + + @Test + public void createSkipContainerCheck() { + config.put(AzureFileSystem.AZURE_STORAGE_SAS_TOKEN_CREDENTIAL, new AzureSasCredential( + primaryBlobServiceClient.generateAccountSas( + new AccountSasSignatureValues(testResourceNamer.now().plusDays(2), + AccountSasPermission.parse("d"), new AccountSasService().setBlobAccess(true), + new AccountSasResourceType().setContainer(true))))); + config.put(AzureFileSystem.AZURE_STORAGE_FILE_STORES, generateContainerName()); + config.put(AzureFileSystem.AZURE_STORAGE_SKIP_INITIAL_CONTAINER_CHECK, true); + + // This would fail, but we skipped the check + assertDoesNotThrow(() -> + new AzureFileSystem(new AzureFileSystemProvider(), ENV.getPrimaryAccount().getBlobEndpoint(), config)); + } + + @Test + public void close() throws IOException { + AzureFileSystemProvider provider = new AzureFileSystemProvider(); + URI uri = getFileSystemUri(); + config.put(AzureFileSystem.AZURE_STORAGE_FILE_STORES, generateContainerName()); + config.put(AzureFileSystem.AZURE_STORAGE_SHARED_KEY_CREDENTIAL, ENV.getPrimaryAccount().getCredential()); + FileSystem fileSystem = provider.newFileSystem(uri, config); + fileSystem.close(); + + assertFalse(fileSystem.isOpen()); + assertThrows(FileSystemNotFoundException.class, () -> 
provider.getFileSystem(uri)); + assertDoesNotThrow(fileSystem::close); // Closing twice should have no effect + + // Creating a file system with the same ID after the old one is closed should work. + assertDoesNotThrow(() -> provider.newFileSystem(uri, config)); + assertNotNull(provider.getFileSystem(uri)); + } + + @ParameterizedTest + @MethodSource("getPathSupplier") + public void getPath(String path0, List pathArr, String resultStr) { + String[] arr = pathArr == null ? null : Arrays.copyOf(pathArr.toArray(), pathArr.size(), String[].class); + + assertEquals(resultStr, createFS(config).getPath(path0, arr).toString()); + } + + private static Stream getPathSupplier() { + return Stream.of( + Arguments.of("foo", null, "foo"), + Arguments.of("foo/bar", null, "foo/bar"), + Arguments.of("/foo/", null, "foo"), + Arguments.of("/foo/bar/", null, "foo/bar"), + Arguments.of("foo", Collections.singletonList("bar"), "foo/bar"), + Arguments.of("foo/bar/fizz/buzz", null, "foo/bar/fizz/buzz"), + Arguments.of("foo", Arrays.asList("bar", "fizz", "buzz"), "foo/bar/fizz/buzz"), + Arguments.of("foo", Arrays.asList("bar/fizz", "buzz"), "foo/bar/fizz/buzz"), + Arguments.of("foo", Arrays.asList("bar", "fizz/buzz"), "foo/bar/fizz/buzz"), + Arguments.of("root:/foo", null, "root:/foo"), + Arguments.of("root:/foo", Collections.singletonList("bar"), "root:/foo/bar"), + Arguments.of("///root:////foo", Arrays.asList("//bar///fizz//", "buzz"), "root:/foo/bar/fizz/buzz"), + Arguments.of("root:/", null, "root:"), + Arguments.of("", null, "") + ); + } + + @ParameterizedTest + @ValueSource(strings = {"root1:/dir1:", "root1:/d:ir", ":root1:/dir", "root1::/dir", "root:1/dir", "root1/dir:", + "root1:/foo/bar/dir:"}) + public void getPathFail(String path) { + assertThrows(InvalidPathException.class, () -> createFS(config).getPath(path)); + } + + @Test + public void isReadOnlyGetSeparator() { + AzureFileSystem fs = createFS(config); + + assertFalse(fs.isReadOnly()); + assertEquals("/", fs.getSeparator()); 
+ } + + @Test + public void getRootDirsGetFileStores() { + AzureFileSystem fs = createFS(config); + String[] containers = ((String) config.get(AzureFileSystem.AZURE_STORAGE_FILE_STORES)).split(","); + List fileStoreNames = new ArrayList<>(); + fs.getFileStores().forEach(store -> fileStoreNames.add(store.name())); + List rootDirectories = new ArrayList<>(); + fs.getRootDirectories().forEach(rootDirectories::add); + + assertEquals(containers.length, rootDirectories.size()); + assertEquals(containers.length, fileStoreNames.size()); + for (String container : containers) { + assertTrue(rootDirectories.contains(fs.getPath(container + ":"))); + assertTrue(fileStoreNames.contains(container)); + } + } + + @ParameterizedTest + @CsvSource(value = {"basic,true", "azureBasic,true", "azureBlob,true", "posix,false"}) + public void supportsFileAttributeView(String view, boolean supports) { + assertEquals(supports, createFS(config).supportedFileAttributeViews().contains(view)); + } + + @Test + public void getDefaultDirectory() { + AzureFileSystem fs = createFS(config); + + assertEquals( + ((String) config.get(AzureFileSystem.AZURE_STORAGE_FILE_STORES)).split(",")[0] + AzurePath.ROOT_DIR_SUFFIX, + fs.getDefaultDirectory().toString()); + } +} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzurePathTests.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzurePathTests.java new file mode 100644 index 00000000000..d88f45ee36c --- /dev/null +++ b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzurePathTests.java @@ -0,0 +1,285 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package com.azure.storage.blob.nio; + +import com.azure.storage.blob.BlobClient; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.parallel.ResourceLock; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.ValueSource; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.nio.file.FileSystemNotFoundException; +import java.nio.file.FileSystems; +import java.nio.file.Path; +import java.util.Iterator; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +@ResourceLock("AzurePathTest") +public class AzurePathTests extends BlobNioTestBase { + private AzureFileSystem fs; + + // Just need one fs instance for creating the paths. 
+ @Override + public void beforeTest() { + super.beforeTest(); + Map config = initializeConfigMap(); + config.put(AzureFileSystem.AZURE_STORAGE_SHARED_KEY_CREDENTIAL, ENV.getPrimaryAccount().getCredential()); + config.put(AzureFileSystem.AZURE_STORAGE_FILE_STORES, "jtcazurepath1,jtcazurepath2"); + try { + fs = (AzureFileSystem) new AzureFileSystemProvider().newFileSystem( + new URI("azb://?endpoint=" + ENV.getPrimaryAccount().getBlobEndpoint()), config); + } catch (IOException | URISyntaxException e) { + throw new RuntimeException(e); + } + } + + @Test + public void getFileSystem() { + Path path = fs.getPath("Foo"); + + assertEquals(fs, path.getFileSystem()); + } + + @ParameterizedTest + @CsvSource(value = {"foo,false,null", "foo/bar,false,null", "jtcazurepath1/foo,false,null", + "jtcazurepath1:/foo,true,jtcazurepath1:/", "fakeroot:/foo,true,fakeroot:/", + "jtcazurepath2:/,true,jtcazurepath2:/", "jtcazurepath2:,true,jtcazurepath2:/", "'',false,null"}, + nullValues = "null") + public void isAbsoluteGetRoot(String path, boolean absolute, String root) { + assertEquals(absolute, fs.getPath(path).isAbsolute()); + assertEquals((root == null ? null : fs.getPath(root)), fs.getPath(path).getRoot()); + } + + @ParameterizedTest + @CsvSource(value = {"root:/,null,null,0", "root:/foo,foo,root:,1", "root:/foo/bar,bar,root:/foo,2", + "foo,foo,null,1", "foo/,foo,null,1", "/foo,foo,null,1", "foo/bar,bar,foo,2", "foo/bar/baz,baz,foo/bar,3", + "foo/../bar/baz,baz,foo/../bar/,4", "foo/..,..,foo/,2", "foo/./bar,bar,foo/./,3", "foo/bar/.,.,foo/bar/,3", + "'','',null,1"}, nullValues = "null") + public void getFileNameGetParentGetNameCount(String path, String fileName, String parent, int nameCount) { + assertEquals((fileName == null ? null : fs.getPath(fileName)), fs.getPath(path).getFileName()); + assertEquals((parent == null ? 
null : fs.getPath(parent)), fs.getPath(path).getParent()); + assertEquals(nameCount, fs.getPath(path).getNameCount()); + } + + @ParameterizedTest + @CsvSource(value = {"0,foo", "1,bar", "2,baz"}) + public void getName(int index, String name) { + assertEquals(fs.getPath("root:/foo/bar/baz").getName(index), fs.getPath(name)); + assertEquals(fs.getPath("foo/bar/baz").getName(index), fs.getPath(name)); + } + + @ParameterizedTest + @ValueSource(ints = {-1, 2}) + public void getNameFail(int index) { + assertThrows(IllegalArgumentException.class, () -> fs.getPath("foo/bar").getName(index)); + + // Special case with no name elements + assertThrows(IllegalArgumentException.class, () -> fs.getPath("root:/").getName(0)); + } + + @ParameterizedTest + @CsvSource(value = {"0,1,foo", "0,3,foo/bar/fizz", "0,5,foo/bar/fizz/buzz/dir", "1,2,bar", "1,4,bar/fizz/buzz", + "1,5,bar/fizz/buzz/dir", "4,5,dir"}) + public void subPath(int begin, int end, String resultPath) { + assertEquals(fs.getPath(resultPath), fs.getPath("root:/foo/bar/fizz/buzz/dir").subpath(begin, end)); + assertEquals(fs.getPath(resultPath), fs.getPath("foo/bar/fizz/buzz/dir").subpath(begin, end)); + } + + // The javadocs define an equivalence between these two methods in special cases. 
+ @Test + public void subPathGetParent() { + Path path = fs.getPath("foo/bar/fizz/buzz"); + + assertEquals(path.getParent(), path.subpath(0, path.getNameCount() - 1)); + } + + @ParameterizedTest + @CsvSource(value = {"-1,1", "5,5", "3,3", "3,1", "3,6"}) + public void subPathFail(int begin, int end) { + assertThrows(IllegalArgumentException.class, () -> fs.getPath("foo/bar/fizz/buzz/dir").subpath(begin, end)); + } + + @ParameterizedTest + @CsvSource(value = {"root:/foo,foo,false", "foo,root:/foo,false", "foo,foo,true", "root:/foo,root:/foo,true", + "root2:/foo,root:/foo,false", "root:/foo,root2:/foo,false", "foo/bar,foo,true", "foo/bar,foo/bar,true", + "foo/bar/fizz,foo,true", "foo/bar/fizz,f,false", "foo/bar/fizz,foo/bar/f,false", "foo,foo/bar,false", + "'',foo,false", "foo,'',false"}) + public void startsWith(String path, String otherPath, boolean startsWith) { + assertEquals(startsWith, fs.getPath(path).startsWith(fs.getPath(otherPath))); + assertEquals(startsWith, fs.getPath(path).startsWith(otherPath)); + + // If the paths are not from the same file system, false is always returned + assertFalse(fs.getPath("foo/bar").startsWith(FileSystems.getDefault().getPath("foo/bar"))); + } + + @ParameterizedTest + @CsvSource(value = {"root:/foo,foo,true", "foo,root:/foo,false", "foo,foo,true", "root:/foo,root:/foo,true", + "root2:/foo,root:/foo,false", "root:/foo,root2:/foo,false", "foo/bar,bar,true", "foo/bar,foo/bar,true", + "foo/bar/fizz,fizz,true", "foo/bar/fizz,z,false", "foo/bar/fizz,r/fizz,false", "foo,foo/bar,false", + "'',foo,false", "foo,'',false"}) + public void endsWith(String path, String otherPath, boolean endsWith) { + assertEquals(endsWith, fs.getPath(path).endsWith(fs.getPath(otherPath))); + assertEquals(endsWith, fs.getPath(path).endsWith(otherPath)); + + // If the paths are not from the same file system, false is always returned + assertFalse(fs.getPath("foo/bar").endsWith(FileSystems.getDefault().getPath("foo/bar"))); + } + + @ParameterizedTest + 
@CsvSource(value = {"foo/bar,foo/bar", ".,''", "..,..", "foo/..,''", "foo/bar/..,foo", "foo/../bar,bar", + "foo/./bar,foo/bar", "foo/bar/.,foo/bar", "foo/bar/fizz/../..,foo", "foo/bar/../fizz/.,foo/fizz", + "foo/../..,..", "foo/../../bar,../bar", "root:/foo/bar,root:/foo/bar", "root:/.,root:/", "root:/..,root:/", + "root:/../../..,root:/", "root:/foo/..,root:", "'',''"}) + public void normalize(String path, String resultPath) { + assertEquals(fs.getPath(resultPath), fs.getPath(path).normalize()); + } + + @ParameterizedTest + @CsvSource(value = {"foo/bar,root:/fizz/buzz,root:/fizz/buzz", "root:/foo/bar,root:/fizz/buzz,root:/fizz/buzz", + "foo/bar,'',foo/bar", "foo/bar,fizz/buzz,foo/bar/fizz/buzz", + "foo/bar/..,../../fizz/buzz,foo/bar/../../../fizz/buzz", + "root:/../foo/./,fizz/../buzz,root:/../foo/./fizz/../buzz", "'',foo/bar,foo/bar"}) + public void resolve(String path, String other, String resultPath) { + assertEquals(fs.getPath(resultPath), fs.getPath(path).resolve(fs.getPath(other))); + assertEquals(fs.getPath(resultPath), fs.getPath(path).resolve(other)); + } + + @ParameterizedTest + @CsvSource(value = {"foo,fizz,fizz", "foo/bar,root:/fizz,root:/fizz", "foo/bar,'',foo", "foo,'',''", "'',foo,foo", + "foo/bar,fizz,foo/fizz", "foo/bar/fizz,buzz/dir,foo/bar/buzz/dir", "root:/foo/bar,fizz,root:/foo/fizz", + "root:/foo,fizz,root:/fizz", "root:/,fizz,fizz"}) + public void resolveSibling(String path, String other, String resultPath) { + assertEquals(fs.getPath(resultPath), fs.getPath(path).resolveSibling(fs.getPath(other))); + assertEquals(fs.getPath(resultPath), fs.getPath(path).resolveSibling(other)); + } + + @ParameterizedTest + @CsvSource(value = {"foo/bar,foo/bar/fizz/buzz/,fizz/buzz,true", "foo/bar,foo/bar,'',true", + "root:/foo/bar,root:/foo/bar/fizz,fizz,false", "foo/dir,foo/fizz/buzz,../fizz/buzz,true", + "foo/bar/a/b/c,foo/bar/fizz,../../../fizz,true", "a/b/c,foo/bar/fizz,../../../foo/bar/fizz,true", + "foo/../bar,bar/./fizz,fizz,false", 
"root:,root:/foo/bar,foo/bar,false", "'',foo,foo,true", "foo,'',..,true"}) + public void relativize(String path, String other, String result, boolean equivalence) { + Path p = fs.getPath(path); + Path otherP = fs.getPath(other); + + assertEquals(fs.getPath(result), p.relativize(otherP)); + if (equivalence) { // Only applies when neither path has a root and both are normalized. + assertEquals(otherP, p.relativize(p.resolve(otherP))); + } + } + + @ParameterizedTest + @CsvSource(value = {"root:/foo/bar,foo/bar/fizz/buzz", "foo/bar,root:/foo/bar/fizz"}) + public void relativizeFail(String path, String other) { + assertThrows(IllegalArgumentException.class, () -> fs.getPath(path).relativize(fs.getPath(other))); + } + + @ParameterizedTest + @CsvSource(value = {"root:/foo/bar,root:/foo/bar", "foo/bar,jtcazurepath1:/foo/bar", "'',jtcazurepath1:"}) + public void toUriToAbsolute(String path, String expected) { + assertEquals(expected, fs.getPath(path).toAbsolutePath().toString()); + assertEquals(fs.provider().getScheme() + ":/" + expected, fs.getPath(path).toUri().toString()); + } + + @ParameterizedTest + @ValueSource(strings = {"root:/foo/bar", "foo/bar/fizz/buzz", "foo", "root:/"}) + public void iterator(String path) { + Path p = fs.getPath(path); + Iterator it = p.iterator(); + int i = 0; + + Iterator emptyIt = fs.getPath("").iterator(); + + while (it.hasNext()) { + assertEquals(p.getName(i), it.next()); + i++; + } + + assertEquals("", emptyIt.next().toString()); + assertFalse(emptyIt.hasNext()); + } + + @ParameterizedTest + @CsvSource(value = {"a/b/c,a/b,false", "a/b/c,foo/bar,false", "foo/bar,foo/bar,true", "'',foo,false"}) + public void compareToEquals(String path1, String path2, boolean equals) { + assertEquals(path1.compareTo(path2), fs.getPath(path1).compareTo(fs.getPath(path2))); + assertEquals(equals, fs.getPath(path1).equals(fs.getPath(path2))); + } + + @Test + public void compareToEqualsFails() { + Path path1 = fs.getPath("a/b"); + Path path2 = 
FileSystems.getDefault().getPath("a/b"); + + assertNotEquals(path1, path2); + assertThrows(ClassCastException.class, () -> path1.compareTo(path2)); + } + + @Test + public void getBlobClientRelative() throws IOException { + BlobClient client = ((AzurePath) fs.getPath("foo/bar")).toBlobClient(); + + assertEquals("foo/bar", client.getBlobName()); + assertEquals(rootNameToContainerName(getDefaultDir(fs)), client.getContainerName()); + } + + @Test + public void getBlobClientEmpty() { + assertThrows(IOException.class, () -> ((AzurePath) fs.getPath(getNonDefaultRootDir(fs))).toBlobClient()); + assertThrows(IOException.class, () -> ((AzurePath) fs.getPath("")).toBlobClient()); + } + + @Test + public void getBlobClientAbsolute() throws IOException { + Path path = fs.getPath(getNonDefaultRootDir(fs), "foo/bar"); + BlobClient client = ((AzurePath) path).toBlobClient(); + + assertEquals("foo/bar", client.getBlobName()); + assertEquals(rootNameToContainerName(getNonDefaultRootDir(fs)), client.getContainerName()); + } + + @Test + public void getBlobClientFail() { + // Can't get a client to a nonexistent root/container. + assertThrows(IOException.class, () -> ((AzurePath) fs.getPath("fakeRoot:", "foo/bar")).toBlobClient()); + } + + @ParameterizedTest + @CsvSource(value = { + "://myaccount.blob.core.windows.net/containername/blobname,containername:/blobname", + "://myaccount.blob.core.windows.net/containername/dirname/blobname,containername:/dirname/blobname", + "://myaccount.blob.core.windows.net/containername,containername:", + "://myaccount.blob.core.windows.net/,''", + }) + public void fromBlobUrl(String url, String path) throws URISyntaxException { + // Adjust the parameterized urls to point at real resources + String scheme = ENV.getPrimaryAccount().getBlobEndpoint().startsWith("https") ? 
"https" : "http"; + url = scheme + url; + url = url.replace("myaccount", ENV.getPrimaryAccount().getName()); + url = url.replace("containername", "jtcazurepath1"); + + path = path.replace("myaccount", ENV.getPrimaryAccount().getName()); + path = path.replace("containername", "jtcazurepath1"); + + AzurePath resultPath = AzurePath.fromBlobUrl((AzureFileSystemProvider) fs.provider(), url); + + assertEquals(fs, resultPath.getFileSystem()); + assertEquals(path, resultPath.toString()); + } + + @Test + public void fromBlobUrlNoOpenFileSystem() { + assertThrows(FileSystemNotFoundException.class, () -> AzurePath.fromBlobUrl(new AzureFileSystemProvider(), + "http://myaccount.blob.core.windows.net/container/blob")); + } +} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureResourceTests.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureResourceTests.java new file mode 100644 index 00000000000..e1f3eb74708 --- /dev/null +++ b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureResourceTests.java @@ -0,0 +1,291 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package com.azure.storage.blob.nio; + +import com.azure.storage.blob.BlobClient; +import com.azure.storage.blob.models.BlobErrorCode; +import com.azure.storage.blob.models.BlobProperties; +import com.azure.storage.blob.models.BlobRequestConditions; +import com.azure.storage.blob.models.BlobStorageException; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.MethodSource; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.attribute.FileAttribute; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.time.OffsetDateTime; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.stream.Stream; + +import static com.azure.core.test.utils.TestUtils.assertArraysEqual; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.mock; + +public class AzureResourceTests extends BlobNioTestBase { + private Map config; + + @Override + protected void beforeTest() { + super.beforeTest(); + config = initializeConfigMap(); + } + + @Test + public void constructor() throws IOException { + AzureFileSystem fs = createFS(config); + AzureResource resource = new AzureResource(fs.getPath(getNonDefaultRootDir(fs), "foo/bar")); + + assertEquals(getNonDefaultRootDir(fs) + "/foo/bar", resource.getPath().toString()); + assertEquals(resource.getPath().toBlobClient().getBlobUrl(), resource.getBlobClient().getBlobUrl()); + } + + @Test + public void noRoot() { + assertThrows(IllegalArgumentException.class, () -> new 
AzureResource(createFS(config).getPath("root:"))); + } + + @Test + public void instanceType() { + assertThrows(IllegalArgumentException.class, () -> new AzureResource(mock(Path.class))); + } + + @ParameterizedTest + @MethodSource("directoryStatusAndExistsSupplier") + public void directoryStatusAndExists(DirectoryStatus status, boolean isVirtual) throws IOException { + AzureFileSystem fs = createFS(config); + + // Generate resource names. + // In root1, the resource will be in the root. In root2, the resource will be several levels deep. Also + // root1 will be non-default directory and root2 is default directory. + AzurePath parentPath1 = (AzurePath) fs.getPath(rootNameToContainerName(getNonDefaultRootDir(fs)), + generateBlobName()); + AzurePath parentPath2 = (AzurePath) fs.getPath(getPathWithDepth(3), generateBlobName()); + + // Generate clients to resources. + BlobClient blobClient1 = parentPath1.toBlobClient(); + BlobClient blobClient2 = parentPath2.toBlobClient(); + BlobClient childClient1 = ((AzurePath) parentPath1.resolve(generateBlobName())).toBlobClient(); + BlobClient childClient2 = ((AzurePath) parentPath2.resolve(generateBlobName())).toBlobClient(); + + // Create resources as necessary + if (status == DirectoryStatus.NOT_A_DIRECTORY) { + blobClient1.upload(DATA.getDefaultBinaryData()); + blobClient2.upload(DATA.getDefaultBinaryData()); + } else if (status == DirectoryStatus.EMPTY) { + putDirectoryBlob(blobClient1.getBlockBlobClient()); + putDirectoryBlob(blobClient2.getBlockBlobClient()); + } else if (status == DirectoryStatus.NOT_EMPTY) { + if (!isVirtual) { + putDirectoryBlob(blobClient1.getBlockBlobClient()); + putDirectoryBlob(blobClient2.getBlockBlobClient()); + } + childClient1.upload(DATA.getDefaultBinaryData()); + childClient2.upload(DATA.getDefaultBinaryData()); + } + + boolean directoryExists = status == DirectoryStatus.EMPTY || status == DirectoryStatus.NOT_EMPTY; + assertEquals(status, new AzureResource(parentPath1).checkDirStatus()); + 
assertEquals(status, new AzureResource(parentPath2).checkDirStatus()); + assertEquals(directoryExists, new AzureResource(parentPath1).checkDirectoryExists()); + assertEquals(directoryExists, new AzureResource(parentPath2).checkDirectoryExists()); + } + + private static Stream directoryStatusAndExistsSupplier() { + return Stream.of(Arguments.of(DirectoryStatus.DOES_NOT_EXIST, false), + Arguments.of(DirectoryStatus.NOT_A_DIRECTORY, false), Arguments.of(DirectoryStatus.EMPTY, false), + Arguments.of(DirectoryStatus.NOT_EMPTY, true), Arguments.of(DirectoryStatus.NOT_EMPTY, false)); + } + + @Test + public void directoryStatusFilesWithSamePrefix() throws IOException { + AzureFileSystem fs = createFS(config); + // Create two files with same prefix. Both paths should have DirectoryStatus.NOT_A_DIRECTORY + String pathName = generateBlobName(); + Path path1 = fs.getPath("/foo/bar/" + pathName + ".txt"); + Path path2 = fs.getPath("/foo/bar/" + pathName + ".txt.backup"); + Files.createFile(path1); + Files.createFile(path2); + + assertEquals(DirectoryStatus.NOT_A_DIRECTORY, new AzureResource(path1).checkDirStatus()); + assertEquals(DirectoryStatus.NOT_A_DIRECTORY, new AzureResource(path2).checkDirStatus()); + } + + @Test + public void directoryStatusDirectoriesWithSamePrefix() throws IOException { + // Create two folders where one is a prefix of the other + AzureFileSystem fs = createFS(config); + String pathName = generateBlobName(); + String pathName2 = pathName + '2'; + Files.createDirectory(fs.getPath(pathName)); + Files.createDirectory(fs.getPath(pathName2)); + + // Both should be empty + assertEquals(DirectoryStatus.EMPTY, new AzureResource(fs.getPath(pathName)).checkDirStatus()); + assertEquals(DirectoryStatus.EMPTY, new AzureResource(fs.getPath(pathName2)).checkDirStatus()); + } + + @Test + public void directoryStatusFilesBetweenPrefixAndChild() throws IOException { + AzureFileSystem fs = createFS(config); + Path dirPath = fs.getPath(generateBlobName()); + Path 
childPath = fs.getPath(dirPath.toString(), generateBlobName()); + // Under an old listing scheme, it was possible for a file with the same name as a directory but with a trailing + // '+' to cut in between the parent and child in the listing as we did it and the listing may not register the + // child and erroneously return that the directory is empty. This ensures that listing is done in such a way as + // to account for this and return correctly that the directory is not empty. + Path middlePath = fs.getPath(dirPath + "+"); + + Files.createDirectory(dirPath); + Files.createFile(childPath); + Files.createFile(middlePath); + + assertEquals(DirectoryStatus.NOT_EMPTY, new AzureResource(dirPath).checkDirStatus()); + } + + @Test + public void parentDirExistsFalse() throws IOException { + assertFalse(new AzureResource(createFS(config).getPath(generateBlobName(), "bar")) + .checkParentDirectoryExists()); + } + + @Test + public void parentDirExistsVirtual() throws IOException { + AzureFileSystem fs = createFS(config); + String fileName = generateBlobName(); + String childName = generateBlobName(); + rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(fileName + fs.getSeparator() + childName) + .getAppendBlobClient() + .create(); + + assertTrue(new AzureResource(fs.getPath(fileName, childName)).checkParentDirectoryExists()); + } + + @Test + public void parentDirExistsConcrete() throws IOException { + AzureFileSystem fs = createFS(config); + String fileName = generateBlobName(); + putDirectoryBlob(rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(fileName).getBlockBlobClient()); + + assertTrue(new AzureResource(fs.getPath(fileName, "bar")).checkParentDirectoryExists()); + } + + @Test + public void parentDirExistsRoot() throws IOException { + // No parent means the parent is implicitly the default root, which always exists + assertTrue(new AzureResource(createFS(config).getPath("foo")).checkParentDirectoryExists()); + + } + + @Test + public void 
parentDirExistsNonDefaultRoot() throws IOException { + // Checks for a bug where we would check the wrong root container for existence on a path with depth > 1 + AzureFileSystem fs = createFS(config); + String rootName = getNonDefaultRootDir(fs); + rootNameToContainerClient(rootName).getBlobClient("fizz/buzz/bazz").getAppendBlobClient().create(); + + assertTrue(new AzureResource(fs.getPath(rootName, "fizz/buzz")).checkParentDirectoryExists()); + } + + @ParameterizedTest + @CsvSource(value = {"false,false", "true,false", "false,true", "true,true"}) + public void putDirectoryBlob(boolean metadata, boolean properties) throws IOException, NoSuchAlgorithmException { + AzureResource resource = new AzureResource(createFS(config).getPath(generateBlobName())); + byte[] contentMd5 = MessageDigest.getInstance("MD5").digest(new byte[0]); + List> attributes = new ArrayList<>(); + if (metadata) { + attributes.add(new TestFileAttribute<>("fizz", "buzz")); + attributes.add(new TestFileAttribute<>("foo", "bar")); + } + if (properties) { + attributes.add(new TestFileAttribute<>("Content-Type", "myType")); + attributes.add(new TestFileAttribute<>("Content-Disposition", "myDisposition")); + attributes.add(new TestFileAttribute<>("Content-Language", "myLanguage")); + attributes.add(new TestFileAttribute<>("Content-Encoding", "myEncoding")); + attributes.add(new TestFileAttribute<>("Cache-Control", "myControl")); + attributes.add(new TestFileAttribute<>("Content-MD5", contentMd5)); + } + + if (metadata || properties) { + resource.setFileAttributes(attributes); + } + resource.putDirectoryBlob(null); + checkBlobIsDir(resource.getBlobClient()); + BlobProperties props = resource.getBlobClient().getProperties(); + + if (metadata) { + assertEquals("buzz", props.getMetadata().get("fizz")); + assertEquals("bar", props.getMetadata().get("foo")); + assertFalse(props.getMetadata().containsKey("Content-Type")); + assertFalse(props.getMetadata().containsKey("Content-Disposition")); + 
assertFalse(props.getMetadata().containsKey("Content-Language")); + assertFalse(props.getMetadata().containsKey("Content-Encoding")); + assertFalse(props.getMetadata().containsKey("Content-MD5")); + assertFalse(props.getMetadata().containsKey("Cache-Control")); + } + if (properties) { + assertEquals("myType", props.getContentType()); + assertEquals("myDisposition", props.getContentDisposition()); + assertEquals("myLanguage", props.getContentLanguage()); + assertEquals("myEncoding", props.getContentEncoding()); + assertArraysEqual(contentMd5, props.getContentMd5()); + assertEquals("myControl", props.getCacheControl()); + } + } + + @ParameterizedTest + @MethodSource("putDirectoryBlobACSupplier") + public void putDirectoryBlobAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch) + throws IOException { + AzureResource resource = new AzureResource(createFS(config).getPath(generateBlobName())); + resource.getBlobClient().upload(DATA.getDefaultBinaryData()); + match = setupBlobMatchCondition(resource.getBlobClient(), match); + resource.putDirectoryBlob(new BlobRequestConditions() + .setIfMatch(match) + .setIfNoneMatch(noneMatch) + .setIfModifiedSince(modified) + .setIfUnmodifiedSince(unmodified)); + + checkBlobIsDir(resource.getBlobClient()); + } + + private static Stream putDirectoryBlobACSupplier() { + return Stream.of(Arguments.of(null, null, null, null), Arguments.of(OLD_DATE, null, null, null), + Arguments.of(null, NEW_DATE, null, null), Arguments.of(null, null, RECEIVED_ETAG, null), + Arguments.of(null, null, null, GARBAGE_ETAG)); + } + + @ParameterizedTest + @MethodSource("putDirectoryBlobACFailSupplier") + public void putDirectoryBlobACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, + String noneMatch) throws IOException { + AzureResource resource = new AzureResource(createFS(config).getPath(generateBlobName())); + resource.getBlobClient().upload(DATA.getDefaultBinaryData()); + noneMatch = 
setupBlobMatchCondition(resource.getBlobClient(), noneMatch); + BlobRequestConditions bac = new BlobRequestConditions() + .setIfMatch(match) + .setIfNoneMatch(noneMatch) + .setIfModifiedSince(modified) + .setIfUnmodifiedSince(unmodified); + + BlobStorageException e = assertThrows(BlobStorageException.class, () -> resource.putDirectoryBlob(bac)); + assertTrue(e.getErrorCode() == BlobErrorCode.CONDITION_NOT_MET + || e.getErrorCode() == BlobErrorCode.LEASE_ID_MISMATCH_WITH_BLOB_OPERATION); + } + + private static Stream putDirectoryBlobACFailSupplier() { + return Stream.of(Arguments.of(NEW_DATE, null, null, null), Arguments.of(null, OLD_DATE, null, null), + Arguments.of(null, null, GARBAGE_ETAG, null), Arguments.of(null, null, null, RECEIVED_ETAG)); + } +} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureSeekableByteChannelTests.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureSeekableByteChannelTests.java new file mode 100644 index 00000000000..49f57d643b7 --- /dev/null +++ b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureSeekableByteChannelTests.java @@ -0,0 +1,412 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package com.azure.storage.blob.nio; + +import com.azure.core.test.TestMode; +import com.azure.core.util.BinaryData; +import com.azure.storage.blob.BlobClient; +import com.azure.storage.blob.specialized.BlobOutputStream; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.mockito.Mockito; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.NonReadableChannelException; +import java.nio.channels.NonWritableChannelException; +import java.nio.channels.SeekableByteChannel; +import java.nio.file.ClosedFileSystemException; +import java.nio.file.Path; +import java.util.Random; +import java.util.concurrent.TimeUnit; +import java.util.stream.Stream; + +import static com.azure.core.test.utils.TestUtils.assertArraysEqual; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class AzureSeekableByteChannelTests extends BlobNioTestBase { + private int sourceFileSize; + private byte[] fileBytes; + private File sourceFile; + private BlobClient bc; + private BlobClient writeBc; + private AzureSeekableByteChannel readByteChannel; + private AzureSeekableByteChannel writeByteChannel; + private FileInputStream fileStream; + private AzureFileSystem fs; + + @Override + protected void beforeTest() { + super.beforeTest(); + sourceFileSize = 5 * 1024 * 1024; + fileBytes = 
getRandomByteArray(sourceFileSize); + sourceFile = getRandomFile(fileBytes); + + cc.create(); + bc = cc.getBlobClient(generateBlobName()); + writeBc = cc.getBlobClient(generateBlobName()); + bc.upload(DATA.getDefaultBinaryData()); + fs = createFS(initializeConfigMap()); + AzurePath path = ((AzurePath) fs.getPath(getNonDefaultRootDir(fs), bc.getBlobName())); + AzurePath writePath = ((AzurePath) fs.getPath(writeBc.getContainerName() + ":", writeBc.getBlobName())); + + readByteChannel = new AzureSeekableByteChannel(new NioBlobInputStream(bc.openInputStream(), path), path); + // For writing, we don't want a blob to exist there yet + writeByteChannel = new AzureSeekableByteChannel( + new NioBlobOutputStream(writeBc.getBlockBlobClient().getBlobOutputStream(true), writePath), writePath); + try { + fileStream = new FileInputStream(sourceFile); + } catch (FileNotFoundException e) { + throw new RuntimeException(e); + } + } + + private void resetForLargeSource() { + if (getTestMode() != TestMode.PLAYBACK) { + // Base setup only uploads a small source to reduce size of session record. 
+ BlobClient blobClient = getNonRecordingServiceClient() + .getBlobContainerClient(bc.getContainerName()) + .getBlobClient(bc.getBlobName()); + blobClient.upload(BinaryData.fromBytes(fileBytes), true); + } + + AzurePath path = ((AzurePath) fs.getPath(getNonDefaultRootDir(fs), bc.getBlobName())); + AzurePath writePath = ((AzurePath) fs.getPath(writeBc.getContainerName() + ":", writeBc.getBlobName())); + + readByteChannel = new AzureSeekableByteChannel(new NioBlobInputStream(bc.openInputStream(), path), path); + // For writing, we don't want a blob to exist there yet + writeByteChannel = new AzureSeekableByteChannel( + new NioBlobOutputStream(writeBc.getBlockBlobClient().getBlobOutputStream(true), writePath), writePath); + } + + @Test + public void read() throws IOException { + resetForLargeSource(); + ByteArrayOutputStream os = new ByteArrayOutputStream(); + int count = 0; + Random rand = new Random(); + + while (count < sourceFileSize) { + ByteBuffer buffer = ByteBuffer.allocate(rand.nextInt(1024 * 1024)); + int readAmount = readByteChannel.read(buffer); + os.write(buffer.array(), 0, readAmount); // limit the write in case we allocated more than we needed + count += readAmount; + } + + assertArrayEquals(fileBytes, os.toByteArray()); + } + + @Test + @Timeout(value = 60, unit = TimeUnit.SECONDS) // fail if test runs >= 1 minute + public void readLoopUntilEof() throws IOException { + resetForLargeSource(); + ByteArrayOutputStream os = new ByteArrayOutputStream(sourceFileSize); + Random rand = new Random(); + + while (true) { // ensures test duration is bounded + ByteBuffer buffer = ByteBuffer.allocate(rand.nextInt(1024 * 1024)); + int readAmount = readByteChannel.read(buffer); + if (readAmount == -1) { + break; // reached EOF + } + os.write(buffer.array(), 0, readAmount); // limit the write in case we allocated more than we needed + } + + assertArrayEquals(fileBytes, os.toByteArray()); + } + + @Test + public void readRespectDestBufferPos() throws IOException { + 
resetForLargeSource(); + Random rand = new Random(); + int initialOffset = rand.nextInt(512) + 1; // always > 0 + byte[] randArray = new byte[2 * initialOffset + sourceFileSize]; + rand.nextBytes(randArray); // fill with random bytes + + // copy same random bytes, but in this copy some will eventually be overwritten by read() + byte[] destArray = new byte[randArray.length]; + System.arraycopy(randArray, 0, destArray, 0, randArray.length); + ByteBuffer dest = ByteBuffer.wrap(destArray); + dest.position(initialOffset); // will have capacity on either side that should not be touched + + int readAmount = 0; + while (readAmount != -1) { + assert dest.position() != 0; + readAmount = readByteChannel.read(dest); // backed by an array, but position != 0 + } + + assertEquals(initialOffset + sourceFileSize, dest.position()); + // destination content should match file content at initial read position + assertArraysEqual(fileBytes, 0, destArray, initialOffset, sourceFileSize); + // destination content should be untouched prior to initial position + assertArraysEqual(randArray, 0, destArray, 0, initialOffset); + // destination content should be untouched past end of read + assertArraysEqual(randArray, initialOffset + sourceFileSize, destArray, initialOffset + sourceFileSize, + initialOffset); + } + + @Test + public void readFSClosed() throws IOException { + fs.close(); + + assertThrows(ClosedFileSystemException.class, () -> readByteChannel.read(ByteBuffer.allocate(1))); + } + + @Test + public void write() throws IOException { + resetForLargeSource(); + int count = 0; + Random rand = new Random(); + writeByteChannel.write(ByteBuffer.wrap(fileBytes)); + + while (count < sourceFileSize) { + int writeAmount = Math.min(rand.nextInt(1024 * 1024), sourceFileSize - count); + byte[] buffer = new byte[writeAmount]; + fileStream.read(buffer); + writeByteChannel.write(ByteBuffer.wrap(buffer)); + count += writeAmount; + } + + writeByteChannel.close(); + 
compareInputStreams(writeBc.openInputStream(), new ByteArrayInputStream(fileBytes), sourceFileSize); + } + + @Test + public void writeRespectSrcBufferPos() throws IOException { + resetForLargeSource(); + Random rand = new Random(); + int initialOffset = rand.nextInt(512) + 1; // always > 0 + byte[] srcBufferContent = new byte[2 * initialOffset + sourceFileSize]; + rand.nextBytes(srcBufferContent); // fill with random bytes + + // place expected file content into source buffer at random location, retain other random bytes + System.arraycopy(fileBytes, 0, srcBufferContent, initialOffset, sourceFileSize); + ByteBuffer srcBuffer = ByteBuffer.wrap(srcBufferContent); + srcBuffer.position(initialOffset); + srcBuffer.limit(initialOffset + sourceFileSize); + + // This test aims to observe the actual bytes written by the ByteChannel to the underlying OutputStream, + // not just the number of bytes allegedly written as reported by its position. It would prefer to examine + // the OutputStream directly, but the channel requires the specific NioBlobOutputStream implementation + // and does not accept something generic like a ByteArrayOutputStream. NioBlobOutputStream is final, so + // it cannot be subclassed or mocked and has little state of its own -- writes go to a BlobOutputStream. + // That class is abstract, but its constructor is not accessible outside its package and cannot normally + // be subclassed to provide custom behavior, but a runtime mocking framework like Mockito can. This is + // the nearest accessible observation point, so the test mocks a BlobOutputStream such that all write + // methods store data in ByteArrayOutputStream which it can later examine for its size and content. 
+ ByteArrayOutputStream actualOutput = new ByteArrayOutputStream(sourceFileSize); + BlobOutputStream blobOutputStream = Mockito.mock( + BlobOutputStream.class, Mockito.withSettings().useConstructor(4096 /* block size */)); + Mockito.doAnswer(invocation -> { + actualOutput.write(invocation.getArgument(0)); + return null; + }).when(blobOutputStream).write(Mockito.anyInt()); + Mockito.doAnswer(invoked -> { + actualOutput.write(invoked.getArgument(0)); + return null; + }).when(blobOutputStream).write(Mockito.any(byte[].class)); + Mockito.doAnswer(invoked -> { + actualOutput.write(invoked.getArgument(0), invoked.getArgument(1), invoked.getArgument(2)); + return null; + }).when(blobOutputStream).write(Mockito.any(byte[].class), Mockito.anyInt(), Mockito.anyInt()); + Path path = writeByteChannel.getPath(); + writeByteChannel = new AzureSeekableByteChannel(new NioBlobOutputStream(blobOutputStream, path), path); + + int written = 0; + while (written < sourceFileSize) { + written += writeByteChannel.write(srcBuffer); + } + writeByteChannel.close(); + + assertEquals(initialOffset + sourceFileSize, srcBuffer.position()); // src buffer position SHOULD be updated + assertEquals(srcBuffer.position(), srcBuffer.limit()); // limit SHOULD be unchanged (still at end of content) + // the above report back to the caller, but this verifies the correct bytes are going to the blob: + assertArraysEqual(fileBytes, 0, actualOutput.toByteArray(), 0, sourceFileSize); + } + + @Test + public void writeFSClosed() throws IOException { + fs.close(); + + assertThrows(ClosedFileSystemException.class, () -> writeByteChannel.write(ByteBuffer.allocate(1))); + } + + @Test + public void positionRead() throws IOException { + resetForLargeSource(); + int bufferSize = sourceFileSize / 10; + ByteBuffer dest = ByteBuffer.allocate(bufferSize); + + assertEquals(0, readByteChannel.position()); + + for (int i = 0; i < 10; i++) { + readByteChannel.read(dest); + assertEquals((i + 1) * bufferSize, 
readByteChannel.position()); + dest.flip(); + } + } + + @Test + public void positionSizeWrite() throws IOException { + resetForLargeSource(); + int bufferSize = sourceFileSize / 10; + ByteBuffer src = getRandomData(bufferSize); + + assertEquals(0, writeByteChannel.position()); + assertEquals(0, writeByteChannel.size()); + + for (int i = 0; i < 10; i++) { + writeByteChannel.write(src); + assertEquals((i + 1) * bufferSize, writeByteChannel.position()); + assertEquals(writeByteChannel.position(), writeByteChannel.size()); + src.flip(); + } + } + + @Test + public void positionFSClosed() throws IOException { + fs.close(); + + assertThrows(ClosedFileSystemException.class, readByteChannel::position); + assertThrows(ClosedFileSystemException.class, writeByteChannel::position); + } + + @ParameterizedTest + @MethodSource("seekSupplier") + public void seek(int readCount0, int seekPos1, int readCount1, int seekPos2, int readCount2) throws IOException { + resetForLargeSource(); + ByteBuffer streamContent = ByteBuffer.allocate(readCount0); + readByteChannel(readByteChannel, streamContent); + compareInputStreams(fileStream, new ByteArrayInputStream(streamContent.array()), readCount0); + + readByteChannel.position(seekPos1); + assertEquals(seekPos1, readByteChannel.position()); + + fileStream = new FileInputStream(sourceFile); + fileStream.skip(seekPos1); + streamContent = ByteBuffer.allocate(readCount1); + readByteChannel(readByteChannel, streamContent); + compareInputStreams(fileStream, new ByteArrayInputStream(streamContent.array()), readCount1); + + readByteChannel.position(seekPos2); + assertEquals(seekPos2, readByteChannel.position()); + + fileStream = new FileInputStream(sourceFile); + fileStream.skip(seekPos2); + streamContent = ByteBuffer.allocate(readCount2); + readByteChannel(readByteChannel, streamContent); + compareInputStreams(fileStream, new ByteArrayInputStream(streamContent.array()), readCount2); + } + + private static Stream seekSupplier() { + return Stream.of( + 
Arguments.of(1024, 1024, (2 * 1024 * 1024) - 1024, 3 * 1024 * 1024, 2 * 1024 * 1024), // Only ever seek in place. Read whole blob + Arguments.of(1024, (5 * 1024 * 1024) - 1024, 1024, 2048, 1024), // Seek forward then seek backward + Arguments.of(2 * 1024 * 1024, 1024, 1024, (5 * 1024 * 1024) - 1024, 1024) // Seek backward then seek forward + ); + } + + private static void readByteChannel(SeekableByteChannel channel, ByteBuffer dst) throws IOException { + while (dst.remaining() > 0) { + if (channel.read(dst) == -1) { // Prevent infinite read + break; + } + } + } + + @Test + public void seekOutOfBounds() throws IOException { + assertThrows(IllegalArgumentException.class, () -> readByteChannel.position(-1)); + + readByteChannel.position(sourceFileSize); // position is 0-based, so seeking to size --> EOF + assertEquals(-1, readByteChannel.read(ByteBuffer.allocate(1))); // Seeking to the end and then reading should indicate EOF + } + + @Test + public void seekFSClosed() throws IOException { + fs.close(); + + assertThrows(ClosedFileSystemException.class, () -> readByteChannel.position(0)); + } + + @Test + public void sizeRead() throws IOException { + bc.upload(DATA.getDefaultBinaryData(), true); + AzurePath path = ((AzurePath) fs.getPath(getNonDefaultRootDir(fs), bc.getBlobName())); + readByteChannel = new AzureSeekableByteChannel(new NioBlobInputStream(bc.openInputStream(), path), path); + + assertEquals(DATA.getDefaultDataSize(), readByteChannel.size()); + } + + @Test + public void sizeFSClosed() throws IOException { + fs.close(); + + assertThrows(ClosedFileSystemException.class, readByteChannel::size); + assertThrows(ClosedFileSystemException.class, writeByteChannel::size); + } + + @Test + public void close() throws IOException { + readByteChannel.close(); + writeByteChannel.close(); + + assertThrows(ClosedChannelException.class, () -> readByteChannel.read(ByteBuffer.allocate(1))); + assertThrows(ClosedChannelException.class, readByteChannel::size); + 
assertThrows(ClosedChannelException.class, readByteChannel::position); + assertThrows(ClosedChannelException.class, () -> writeByteChannel.write(ByteBuffer.allocate(1))); + assertThrows(ClosedChannelException.class, writeByteChannel::size); + assertThrows(ClosedChannelException.class, writeByteChannel::position); + } + + @Test + public void closeFSClose() throws IOException { + fs.close(); + + assertThrows(ClosedFileSystemException.class, readByteChannel::close); + assertThrows(ClosedFileSystemException.class, writeByteChannel::close); + } + + @Test + public void isOpen() throws IOException { + assertTrue(readByteChannel.isOpen()); + assertTrue(writeByteChannel.isOpen()); + + readByteChannel.close(); + writeByteChannel.close(); + + assertFalse(readByteChannel.isOpen()); + assertFalse(writeByteChannel.isOpen()); + } + + @Test + public void isOpenFSClosed() throws IOException { + fs.close(); + + assertThrows(ClosedFileSystemException.class, readByteChannel::isOpen); + assertThrows(ClosedFileSystemException.class, writeByteChannel::isOpen); + } + + @Test + public void unsupportedOperations() { + assertThrows(NonWritableChannelException.class, () -> readByteChannel.write(ByteBuffer.allocate(1))); + assertThrows(NonReadableChannelException.class, () -> writeByteChannel.read(ByteBuffer.allocate(1))); + assertThrows(NonReadableChannelException.class, () -> writeByteChannel.position(5)); + assertThrows(UnsupportedOperationException.class, () -> readByteChannel.truncate(0)); + assertThrows(UnsupportedOperationException.class, () -> writeByteChannel.truncate(0)); + } +} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/BlobNioTestBase.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/BlobNioTestBase.java new file mode 100644 index 00000000000..3bc1dd68d35 --- /dev/null +++ 
b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/BlobNioTestBase.java @@ -0,0 +1,416 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob.nio; + +import static com.azure.core.test.utils.TestUtils.assertArraysEqual; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.io.UncheckedIOException; +import java.lang.reflect.Method; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.http.HttpClient; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.nio.file.FileSystem; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.attribute.FileAttribute; +import java.time.Duration; +import java.time.OffsetDateTime; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Random; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.zip.CRC32; + +import com.azure.core.client.traits.HttpTrait; +import com.azure.core.http.netty.NettyAsyncHttpClientBuilder; +import com.azure.core.http.okhttp.OkHttpAsyncHttpClientBuilder; +import com.azure.core.http.policy.HttpPipelinePolicy; +import com.azure.core.http.rest.Response; +import com.azure.core.test.TestMode; +import com.azure.core.test.TestProxyTestBase; +import com.azure.core.test.models.CustomMatcher; +import com.azure.core.test.models.TestProxySanitizer; +import com.azure.core.test.models.TestProxySanitizerType; +import com.azure.core.util.ServiceVersion; +import com.azure.storage.blob.BlobClient; +import com.azure.storage.blob.BlobContainerAsyncClient; +import 
com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.BlobServiceAsyncClient; +import com.azure.storage.blob.BlobServiceClient; +import com.azure.storage.blob.BlobServiceClientBuilder; +import com.azure.storage.blob.models.BlobContainerItem; +import com.azure.storage.blob.models.BlockBlobItem; +import com.azure.storage.blob.models.ListBlobContainersOptions; +import com.azure.storage.blob.specialized.BlobClientBase; +import com.azure.storage.blob.specialized.BlockBlobClient; +import com.azure.storage.common.StorageSharedKeyCredential; +import com.azure.storage.common.implementation.Constants; +import com.azure.storage.common.test.shared.ServiceVersionValidationPolicy; +import com.azure.storage.common.test.shared.TestAccount; +import com.azure.storage.common.test.shared.TestDataFactory; +import com.azure.storage.common.test.shared.TestEnvironment; + +import okhttp3.ConnectionPool; + +public class BlobNioTestBase extends TestProxyTestBase { + protected static final TestEnvironment ENV = TestEnvironment.getInstance(); + protected static final TestDataFactory DATA = TestDataFactory.getInstance(); + private static final HttpClient NETTY_HTTP_CLIENT = new NettyAsyncHttpClientBuilder().build(); + private static final HttpClient OK_HTTP_CLIENT = new OkHttpAsyncHttpClientBuilder() + .connectionPool(new ConnectionPool(50, 5, TimeUnit.MINUTES)) + .build(); + + // Used to generate stable container names for recording tests requiring multiple containers. + private int entityNo = 0; + + // both sync and async clients point to same container + protected BlobContainerClient cc; + protected BlobContainerAsyncClient ccAsync; + protected BlobServiceClient primaryBlobServiceClient; + protected BlobServiceAsyncClient primaryBlobServiceAsyncClient; + protected BlobServiceClient alternateBlobServiceClient; + protected String containerName; + protected String prefix; + + + // The values below are used to create data-driven tests for access conditions. 
+ protected static final OffsetDateTime OLD_DATE = OffsetDateTime.now().minusDays(1); + protected static final OffsetDateTime NEW_DATE = OffsetDateTime.now().plusDays(1); + protected static final String GARBAGE_ETAG = "garbage"; + // Note that this value is only used to check if we depend on the received ETag. This value will not actually be + // used. + protected static final String RECEIVED_ETAG = "received"; + + @Override + protected void beforeTest() { + super.beforeTest(); + prefix = getCrc32(testContextManager.getTestPlaybackRecordingName()); + + primaryBlobServiceClient = getServiceClient(ENV.getPrimaryAccount()); + primaryBlobServiceAsyncClient = getServiceAsyncClient(ENV.getPrimaryAccount()); + alternateBlobServiceClient = getServiceClient(ENV.getPrimaryAccount()); + + containerName = generateContainerName(); + cc = primaryBlobServiceClient.getBlobContainerClient(containerName); + ccAsync = primaryBlobServiceAsyncClient.getBlobContainerAsyncClient(containerName); + + if (getTestMode() != TestMode.LIVE) { + interceptorManager.addSanitizers( + Collections.singletonList(new TestProxySanitizer("sig=(.*)", "REDACTED", TestProxySanitizerType.URL))); + // Ignore changes to the order of query parameters and wholly ignore the 'sv' (service version) query parameter + // in SAS tokens. 
+ interceptorManager.addMatchers(Collections.singletonList(new CustomMatcher() + .setComparingBodies(false) + .setExcludedHeaders(Arrays.asList("x-ms-copy-source", "If-Match", "x-ms-range", "If-Modified-Since", + "If-Unmodified-Since")) + .setQueryOrderingIgnored(true) + .setIgnoredQueryParameters(Arrays.asList("sv")))); + } + } + + @Override + protected void afterTest() { + super.afterTest(); + + if (getTestMode() == TestMode.PLAYBACK) { + return; + } + + BlobServiceClient cleanupClient = getNonRecordingServiceClient(); + ListBlobContainersOptions options = new ListBlobContainersOptions().setPrefix(prefix); + for (BlobContainerItem container : cleanupClient.listBlobContainers(options, Duration.ofSeconds(120))) { + BlobContainerClient containerClient = cleanupClient.getBlobContainerClient(container.getName()); + + containerClient.delete(); + } + } + + protected BlobServiceClient getNonRecordingServiceClient() { + return new BlobServiceClientBuilder() + .httpClient(getHttpClient()) + .credential(ENV.getPrimaryAccount().getCredential()) + .endpoint(ENV.getPrimaryAccount().getBlobEndpoint()) + .buildClient(); + } + + protected BlobServiceClient getServiceClient(TestAccount account) { + return getServiceClient(account.getCredential(), account.getBlobEndpoint()); + } + + protected BlobServiceClient getServiceClient(StorageSharedKeyCredential credential, String endpoint, + HttpPipelinePolicy... policies) { + return getServiceClientBuilder(credential, endpoint, policies).buildClient(); + } + + protected BlobServiceAsyncClient getServiceAsyncClient(TestAccount account) { + return getServiceClientBuilder(account.getCredential(), account.getBlobEndpoint()) + .buildAsyncClient(); + } + + protected BlobServiceClientBuilder getServiceClientBuilder(StorageSharedKeyCredential credential, String endpoint, + HttpPipelinePolicy... 
policies) { + BlobServiceClientBuilder builder = new BlobServiceClientBuilder() + .endpoint(endpoint); + + for (HttpPipelinePolicy policy : policies) { + builder.addPolicy(policy); + } + + instrument(builder); + + if (credential != null) { + builder.credential(credential); + } + + return builder; + } + + protected Map initializeConfigMap(HttpPipelinePolicy... policies) { + Map config = new HashMap<>(); + config.put(AzureFileSystem.AZURE_STORAGE_HTTP_CLIENT, getHttpClient()); + List policyList = new ArrayList<>(Arrays.asList(policies)); + if (getTestMode() == TestMode.RECORD) { + policyList.add(interceptorManager.getRecordPolicy()); + } + config.put(AzureFileSystem.AZURE_STORAGE_HTTP_POLICIES, policyList.toArray(new HttpPipelinePolicy[0])); + + return config; + } + + protected URI getFileSystemUri() { + try { + return new URI("azb://?endpoint=" + ENV.getPrimaryAccount().getBlobEndpoint()); + } catch (URISyntaxException ex) { + throw new RuntimeException(ex); + } + } + + protected String generateContainerName() { + return generateResourceName(entityNo++); + } + + protected String generateBlobName() { + return generateResourceName(entityNo++); + } + + private String generateResourceName(int entityNo) { + return testResourceNamer.randomName(prefix + entityNo, 63); + } + + protected AzureFileSystem createFS(Map config) { + config.put(AzureFileSystem.AZURE_STORAGE_FILE_STORES, generateContainerName() + "," + generateContainerName()); + config.put(AzureFileSystem.AZURE_STORAGE_SHARED_KEY_CREDENTIAL, + ENV.getPrimaryAccount().getCredential()); + + try { + return new AzureFileSystem(new AzureFileSystemProvider(), ENV.getPrimaryAccount().getBlobEndpoint(), + config); + } catch (IOException ex) { + throw new UncheckedIOException(ex); + } + } + + protected byte[] getRandomByteArray(int size) { + long seed = UUID.fromString(testResourceNamer.randomUuid()).getMostSignificantBits() & Long.MAX_VALUE; + Random rand = new Random(seed); + byte[] data = new byte[size]; + 
rand.nextBytes(data); + return data; + } + + /* + Size must be an int because ByteBuffer sizes can only be an int. Long is not supported. + */ + protected ByteBuffer getRandomData(int size) { + return ByteBuffer.wrap(getRandomByteArray(size)); + } + + /* + We only allow int because anything larger than 2GB (which would require a long) is left to stress/perf. + */ + protected File getRandomFile(byte[] bytes) { + try { + File file = File.createTempFile(UUID.randomUUID().toString(), ".txt"); + file.deleteOnExit(); + Files.write(file.toPath(), bytes); + + return file; + } catch (IOException ex) { + throw new UncheckedIOException(ex); + } + } + + protected static void compareInputStreams(InputStream stream1, InputStream stream2, long count) { + long pos = 0L; + int defaultReadBuffer = 128 * Constants.KB; + try (InputStream s1 = stream1; InputStream s2 = stream2) { + // If the amount we are going to read is smaller than the default buffer size use that instead. + int bufferSize = (int) Math.min(defaultReadBuffer, count); + + while (pos < count) { + // Number of bytes we expect to read. + int expectedReadCount = (int) Math.min(bufferSize, count - pos); + byte[] buffer1 = new byte[expectedReadCount]; + byte[] buffer2 = new byte[expectedReadCount]; + + int readCount1 = s1.read(buffer1); + int readCount2 = s2.read(buffer2); + + // Use Arrays.equals as it is more optimized than Groovy/Spock's '==' for arrays. 
+ assertEquals(readCount1, readCount2); + assertArraysEqual(buffer1, buffer2); + + pos += expectedReadCount; + } + + int verificationRead = s2.read(); + assertEquals(count, pos); + assertEquals(-1, verificationRead); + } catch (IOException ex) { + throw new UncheckedIOException(ex); + } + } + + protected String rootNameToContainerName(String root) { + return root.substring(0, root.length() - 1); + } + + protected BlobContainerClient rootNameToContainerClient(String root) { + return primaryBlobServiceClient.getBlobContainerClient(rootNameToContainerName(root)); + } + + protected String getNonDefaultRootDir(FileSystem fs) { + for (Path dir : fs.getRootDirectories()) { + if (!dir.equals(((AzureFileSystem) fs).getDefaultDirectory())) { + return dir.toString(); + } + } + throw new RuntimeException("File system only contains the default directory"); + } + + protected String getDefaultDir(FileSystem fs) { + return ((AzureFileSystem) fs).getDefaultDirectory().toString(); + } + + protected String getPathWithDepth(int depth) { + StringBuilder pathStr = new StringBuilder(); + for (int i = 0; i < depth; i++) { + pathStr.append(generateBlobName()).append(AzureFileSystem.PATH_SEPARATOR); + } + return pathStr.toString(); + } + + protected Response putDirectoryBlob(BlockBlobClient blobClient) { + return blobClient.commitBlockListWithResponse(Collections.emptyList(), null, + Collections.singletonMap(AzureResource.DIR_METADATA_MARKER, "true"), null, null, null, null); + } + + /** + * This will retrieve the etag to be used in testing match conditions. The result will typically be assigned to the + * ifMatch condition when testing success and the ifNoneMatch condition when testing failure. + * + * @param bc The client pointing to the blob to get the etag on. + * @param match The ETag value for this test. If {@code receivedEtag} is passed, that will signal that the test is + * expecting the blob's actual etag for this test, so it is retrieved. 
+ * @return The appropriate etag value to run the current test. + */ + protected String setupBlobMatchCondition(BlobClientBase bc, String match) { + return RECEIVED_ETAG.equals(match) ? bc.getProperties().getETag() : match; + } + + protected void checkBlobIsDir(BlobClient blobClient) { + assertTrue(Boolean.parseBoolean(blobClient.getPropertiesWithResponse(null, null, null) + .getValue().getMetadata().get(AzureResource.DIR_METADATA_MARKER))); + } + + static class TestFileAttribute implements FileAttribute { + private final String name; + private final T value; + + TestFileAttribute(String name, T value) { + this.name = name; + this.value = value; + } + + @Override + public String name() { + return this.name; + } + + @Override + public T value() { + return this.value; + } + } + + private static String getCrc32(String input) { + CRC32 crc32 = new CRC32(); + crc32.update(input.getBytes(StandardCharsets.UTF_8)); + return String.format(Locale.US, "%08X", crc32.getValue()).toLowerCase(); + } + + @SuppressWarnings("unchecked") + protected , E extends Enum> T instrument(T builder) { + builder.httpClient(getHttpClient()); + if (getTestMode() == TestMode.RECORD) { + builder.addPolicy(interceptorManager.getRecordPolicy()); + } + + + if (ENV.getServiceVersion() != null) { + try { + Method serviceVersionMethod = Arrays.stream(builder.getClass().getDeclaredMethods()) + .filter(method -> "serviceVersion".equals(method.getName()) + && method.getParameterCount() == 1 + && ServiceVersion.class.isAssignableFrom(method.getParameterTypes()[0])) + .findFirst() + .orElseThrow(() -> new RuntimeException("Unable to find serviceVersion method for builder: " + + builder.getClass())); + Class serviceVersionClass = (Class) serviceVersionMethod.getParameterTypes()[0]; + ServiceVersion serviceVersion = (ServiceVersion) Enum.valueOf(serviceVersionClass, + ENV.getServiceVersion()); + serviceVersionMethod.invoke(builder, serviceVersion); + builder.addPolicy(new 
ServiceVersionValidationPolicy(serviceVersion.getVersion())); + } catch (ReflectiveOperationException ex) { + throw new RuntimeException(ex); + } + } + + builder.httpLogOptions(BlobServiceClientBuilder.getDefaultHttpLogOptions()); + + return builder; + } + + protected HttpClient getHttpClient() { + if (getTestMode() != TestMode.PLAYBACK) { + switch (ENV.getHttpClientType()) { + case NETTY: + return NETTY_HTTP_CLIENT; + case OK_HTTP: + return OK_HTTP_CLIENT; + default: + throw new IllegalArgumentException("Unknown http client type: " + ENV.getHttpClientType()); + } + } else { + return interceptorManager.getPlaybackClient(); + } + } + + public static boolean liveOnly() { + return ENV.getTestMode() == TestMode.LIVE; + } +} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/CompositeTests.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/CompositeTests.java new file mode 100644 index 00000000000..a750af92d55 --- /dev/null +++ b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/CompositeTests.java @@ -0,0 +1,198 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package com.azure.storage.blob.nio; + +import com.azure.storage.blob.BlobClient; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.io.IOException; +import java.io.OutputStream; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.StandardCopyOption; +import java.nio.file.attribute.BasicFileAttributes; +import java.util.Map; +import java.util.stream.Stream; + +import static com.azure.core.test.utils.TestUtils.assertArraysEqual; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class CompositeTests extends BlobNioTestBase { + private Map config; + + @Override + protected void beforeTest() { + super.beforeTest(); + config = initializeConfigMap(); + } + + @Test + public void filesCreateDirs() throws IOException { + AzureFileSystem fs = createFS(config); + Path dirs = fs.getPath("mydir1/mydir2/mydir3"); + Files.createDirectories(dirs); + + assertTrue(Files.isDirectory(fs.getPath("mydir1"))); + assertTrue(Files.isDirectory(fs.getPath("mydir1/mydir2"))); + assertTrue(Files.isDirectory(fs.getPath("mydir1/mydir2/mydir3"))); + } + + @Test + public void filesCreate() throws IOException { + AzureFileSystem fs = createFS(config); + Path path = Files.createFile(fs.getPath(generateBlobName())); + + assertDoesNotThrow(() -> fs.provider().checkAccess(path)); + } + + @Test + public void filesCopy() throws IOException { + AzureFileSystem fs = createFS(config); + Path dest = fs.getPath("dest"); + byte[] resultArr = new byte[DATA.getDefaultDataSize()]; + Files.copy(DATA.getDefaultInputStream(), dest); + fs.provider().newInputStream(dest).read(resultArr); + + 
assertArraysEqual(DATA.getDefaultBytes(), resultArr); + + Path dest2 = fs.getPath("dest2"); + OutputStream outStream = fs.provider().newOutputStream(dest2); + Files.copy(dest, outStream); + outStream.close(); + resultArr = new byte[DATA.getDefaultDataSize()]; + fs.provider().newInputStream(dest2).read(resultArr); + + assertArraysEqual(DATA.getDefaultBytes(), resultArr); + + Path dest3 = fs.getPath("dest3"); + Files.copy(dest, dest3, StandardCopyOption.COPY_ATTRIBUTES); + resultArr = new byte[DATA.getDefaultDataSize()]; + fs.provider().newInputStream(dest3).read(resultArr); + + assertArraysEqual(DATA.getDefaultBytes(), resultArr); + } + + // Bug: https://github.com/Azure/azure-sdk-for-java/issues/20325 + @Test + public void filesReadAllBytes() throws IOException { + AzureFileSystem fs = createFS(config); + String pathName = generateBlobName(); + Path path1 = fs.getPath("/foo/bar/" + pathName); + Path path2 = fs.getPath("/foo/bar/" + pathName + ".backup"); + Files.createFile(path1); + Files.createFile(path2); + + assertDoesNotThrow(() -> Files.readAllBytes(path1)); + } + + @Test + public void filesDeleteEmptyDirectory() throws IOException { + // Create two folders where one is a prefix of the others + AzureFileSystem fs = createFS(config); + String pathName = generateBlobName(); + String pathName2 = pathName + '2'; + Files.createDirectory(fs.getPath(pathName)); + Files.createDirectory(fs.getPath(pathName2)); + + // Delete the one that is a prefix to ensure the other one does not interfere + assertDoesNotThrow(() -> Files.delete(fs.getPath(pathName))); + } + + @ParameterizedTest + @MethodSource("filesExistsSupplier") + public void filesExists(DirectoryStatus status, boolean isVirtual) throws IOException { + AzureFileSystem fs = createFS(config); + + // Generate resource names. + AzurePath path = (AzurePath) fs.getPath(rootNameToContainerName(getNonDefaultRootDir(fs)), generateBlobName()); + + // Generate clients to resources. 
+ BlobClient blobClient = path.toBlobClient(); + BlobClient childClient1 = ((AzurePath) path.resolve(generateBlobName())).toBlobClient(); + + // Create resources as necessary + if (status == DirectoryStatus.NOT_A_DIRECTORY) { + blobClient.upload(DATA.getDefaultBinaryData()); + } else if (status == DirectoryStatus.NOT_EMPTY) { + if (!isVirtual) { + putDirectoryBlob(blobClient.getBlockBlobClient()); + } + childClient1.upload(DATA.getDefaultBinaryData()); + } + + assertEquals(status != DirectoryStatus.DOES_NOT_EXIST, Files.exists(path)); + } + + private static Stream filesExistsSupplier() { + return Stream.of(Arguments.of(DirectoryStatus.DOES_NOT_EXIST, false), + Arguments.of(DirectoryStatus.NOT_A_DIRECTORY, false), Arguments.of(DirectoryStatus.NOT_EMPTY, true), + Arguments.of(DirectoryStatus.NOT_EMPTY, false)); + } + + @Test + public void filesWalkFileTree() throws IOException { + AzureFileSystem fs = createFS(config); + /* + file1 + cDir1 + cDir2 + |__file2 + |__cDir3 + |__vDir1 + |__file3 + vDir2 + |__file4 + |__cDir4 + |__vDir3 + |__file5 + */ + String baseDir = "a"; + + // Create files and directories + ((AzurePath) fs.getPath("a/file1")).toBlobClient().upload(DATA.getDefaultBinaryData()); + ((AzurePath) fs.getPath("a/cDir2/file2")).toBlobClient().upload(DATA.getDefaultBinaryData()); + ((AzurePath) fs.getPath("a/cDir2/vDir1/file3")).toBlobClient().upload(DATA.getDefaultBinaryData()); + ((AzurePath) fs.getPath("a/vDir2/file4")).toBlobClient().upload(DATA.getDefaultBinaryData()); + ((AzurePath) fs.getPath("a/vDir2/vDir3/file5")).toBlobClient().upload(DATA.getDefaultBinaryData()); + + putDirectoryBlob(((AzurePath) fs.getPath(baseDir)).toBlobClient().getBlockBlobClient()); + putDirectoryBlob(((AzurePath) fs.getPath("a/cDir1")).toBlobClient().getBlockBlobClient()); + putDirectoryBlob(((AzurePath) fs.getPath("a/cDir2")).toBlobClient().getBlockBlobClient()); + putDirectoryBlob(((AzurePath) fs.getPath("a/cDir2/cDir3")).toBlobClient().getBlockBlobClient()); + 
putDirectoryBlob(((AzurePath) fs.getPath("a/vDir2/cDir4")).toBlobClient().getBlockBlobClient()); + + TestFileVisitor visitor = new TestFileVisitor<>(); + // System.out.println(Files.readAttributes(fs.getPath(baseDir), AzureBasicFileAttributes.class).isDirectory()); + Files.walkFileTree(fs.getPath(baseDir), visitor); + + // might need to make this work on root directories as well, which would probably mean inspecting the path and + // adding an isRoot method + assertEquals(5, visitor.fileCount); + assertEquals(8, visitor.directoryCount); // includes baseDir + } + + static class TestFileVisitor extends SimpleFileVisitor { + private int fileCount = 0; + private int directoryCount = 0; + + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) { + fileCount++; + return FileVisitResult.CONTINUE; + } + + @Override + public FileVisitResult postVisitDirectory(Path dir, IOException exc) { + directoryCount++; + return FileVisitResult.CONTINUE; + } + } +} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/NioBlobInputStreamTests.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/NioBlobInputStreamTests.java new file mode 100644 index 00000000000..2bf4026f80d --- /dev/null +++ b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/NioBlobInputStreamTests.java @@ -0,0 +1,243 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package com.azure.storage.blob.nio; + +import com.azure.core.test.TestMode; +import com.azure.core.util.BinaryData; +import com.azure.storage.blob.BlobClient; +import com.azure.storage.blob.models.BlobStorageException; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.function.Executable; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.params.provider.ValueSource; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.nio.file.ClosedFileSystemException; +import java.nio.file.Files; +import java.util.function.Function; +import java.util.stream.Stream; + +import static com.azure.core.test.utils.TestUtils.assertArraysEqual; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class NioBlobInputStreamTests extends BlobNioTestBase { + private byte[] fileBytes; + private File sourceFile; + private BlobClient bc; + private NioBlobInputStream nioStream; + private FileInputStream fileStream; + private AzureFileSystem fs; + + @Override + protected void beforeTest() { + super.beforeTest(); + fileBytes = getRandomByteArray(5 * 1024 * 1024); + sourceFile = getRandomFile(fileBytes); + + cc.create(); + bc = cc.getBlobClient(generateBlobName()); + bc.upload(DATA.getDefaultBinaryData()); + fs = createFS(initializeConfigMap()); + AzurePath path = ((AzurePath) fs.getPath(getNonDefaultRootDir(fs), bc.getBlobName())); + + nioStream = new NioBlobInputStream(bc.openInputStream(), path); + try { + fileStream = new FileInputStream(sourceFile); + } catch (FileNotFoundException e) { + throw new RuntimeException(e); + } + } + + private void 
resetForLargeSource() { + if (getTestMode() != TestMode.PLAYBACK) { + // Base setup only uploads a small source to reduce size of session record. + BlobClient blobClient = getNonRecordingServiceClient() + .getBlobContainerClient(bc.getContainerName()) + .getBlobClient(bc.getBlobName()); + blobClient.upload(BinaryData.fromBytes(fileBytes), true); + } + + // Base setup only uploads a small source to reduce size of session record. + AzurePath path = ((AzurePath) fs.getPath(getNonDefaultRootDir(fs), bc.getBlobName())); + nioStream = new NioBlobInputStream(bc.openInputStream(), path); + } + + @Override + protected void afterTest() { + super.afterTest(); + sourceFile.delete(); + } + + @Test + public void readWholeFile() throws IOException { + resetForLargeSource(); + compareInputStreams(nioStream, fileStream, Files.size(sourceFile.toPath())); + } + + @Test + public void readMin() throws IOException { + resetForLargeSource(); + for (int i = 0; i < 100; i++) { + assertEquals(fileStream.read(), nioStream.read()); + } + } + + @ParameterizedTest + @ValueSource(ints = {0, 100, 4 * 1024 * 1024}) + public void readBuff(int size) throws IOException { + resetForLargeSource(); + byte[] nioBytes = new byte[size]; + nioStream.read(nioBytes); + + assertArraysEqual(fileBytes, 0, nioBytes, 0, size); + } + + @Test + public void readBuffOffsetLen() throws IOException { + resetForLargeSource(); + byte[] nioBytes = new byte[100]; + nioStream.read(nioBytes, 5, 50); + + assertArraysEqual(fileBytes, 0, nioBytes, 5, 50); + } + + @ParameterizedTest + @CsvSource(value = {"-1,5", "3,-1", "0,11", "3,8"}) + public void readBuffOffsetLenFail(int off, int len) { + byte[] b = new byte[10]; + + assertThrows(IndexOutOfBoundsException.class, () -> nioStream.read(b, off, len)); + } + + @ParameterizedTest + @MethodSource("readFailSupplier") + public void readFail(Function methodCall) throws IOException { + resetForLargeSource(); + bc.delete(); + nioStream.read(new byte[4 * 1024 * 1024]); // Must read 
through the initial download to trigger failed response + + IOException e = assertThrows(IOException.class, methodCall.apply(nioStream)); + assertInstanceOf(BlobStorageException.class, e.getCause()); + } + + private static Stream> readFailSupplier() { + return Stream.of(nioStream -> nioStream::read, nioStream -> () -> nioStream.read(new byte[5]), + nioStream -> () -> nioStream.read(new byte[5], 0, 4)); + } + + @Test + public void readFSClosed() throws IOException { + fs.close(); + + assertThrows(ClosedFileSystemException.class, nioStream::read); + assertThrows(ClosedFileSystemException.class, () -> nioStream.read(new byte[1])); + assertThrows(ClosedFileSystemException.class, () -> nioStream.read(new byte[10], 2, 5)); + } + + + @ParameterizedTest + @CsvSource(value = {"0,0", "0,50", "50,0", "50,50", "50,5242780", "5242780,50"}) + public void markAndReset(int markAfter, int resetAfter) throws IOException { + resetForLargeSource(); + byte[] b = new byte[markAfter]; + nioStream.read(b); + fileStream.skip(markAfter); // Position the file stream where we expect to be after resetting. 
+ + // Read some bytes past the mark + nioStream.mark(Integer.MAX_VALUE); + + nioStream.read(new byte[resetAfter]); + + // Reset to the mark + nioStream.reset(); + + compareInputStreams(nioStream, fileStream, sourceFile.length() - markAfter); + } + + @Test + public void markReadLimit() throws IOException { + nioStream.mark(5); + nioStream.read(new byte[6]); + + assertThrows(IOException.class, nioStream::reset); + } + + @Test + public void resetFail() throws IOException { + // Mark never set + nioStream.read(); + + assertThrows(IOException.class, nioStream::reset); + } + + @Test + public void resetFSClosed() throws IOException { + nioStream.mark(5); + fs.close(); + + assertThrows(ClosedFileSystemException.class, nioStream::reset); + } + + @Test + public void markSupported() { + assertTrue(nioStream.markSupported()); + } + + @ParameterizedTest + @ValueSource(ints = {0, 10, 4 * 1024 * 1024, (5 * 1024 * 1024) - 1}) + public void skip(int skip) throws IOException { + resetForLargeSource(); + nioStream.skip(skip); + fileStream.skip(skip); + + compareInputStreams(nioStream, fileStream, Files.size(sourceFile.toPath()) - skip); + } + + @Test + public void skipFSClosed() throws IOException { + fs.close(); + + assertThrows(ClosedFileSystemException.class, () -> nioStream.skip(5)); + } + + @Test + public void close() throws IOException { + nioStream.close(); + + assertThrows(IOException.class, nioStream::read); + assertThrows(IOException.class, () -> nioStream.read(new byte[5])); + assertThrows(IOException.class, () -> nioStream.read(new byte[5], 0, 4)); + } + + @Test + public void closeFSClosed() throws IOException { + fs.close(); + + assertThrows(ClosedFileSystemException.class, nioStream::close); + } + + @ParameterizedTest + @CsvSource(value = {"0,4194304", "5,4194299", "5242880,0"}) + public void available(int readAmount, int available) throws IOException { + resetForLargeSource(); + nioStream.read(new byte[readAmount]); + + assertEquals(available, nioStream.available()); 
+ } + + @Test + public void availableFSClosed() throws IOException { + fs.close(); + + assertThrows(ClosedFileSystemException.class, nioStream::available); + } +} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/NioBlobOutputStreamTests.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/NioBlobOutputStreamTests.java new file mode 100644 index 00000000000..753b6674733 --- /dev/null +++ b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/NioBlobOutputStreamTests.java @@ -0,0 +1,219 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.storage.blob.nio; + +import com.azure.storage.blob.models.BlobErrorCode; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.models.BlockListType; +import com.azure.storage.blob.models.ParallelTransferOptions; +import com.azure.storage.blob.options.BlockBlobOutputStreamOptions; +import com.azure.storage.blob.specialized.BlockBlobClient; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.condition.EnabledIf; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.file.ClosedFileSystemException; + +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class NioBlobOutputStreamTests extends BlobNioTestBase { + private static final int BLOCK_SIZE = 50; + private static final int MAX_SINGLE_UPLOAD_SIZE = 200; + + private BlockBlobClient bc; + private NioBlobOutputStream nioStream; + 
private AzureFileSystem fs; + + @Override + protected void beforeTest() { + super.beforeTest(); + cc.create(); + bc = cc.getBlobClient(generateBlobName()).getBlockBlobClient(); + + fs = createFS(initializeConfigMap()); + AzurePath path = ((AzurePath) fs.getPath(getNonDefaultRootDir(fs), bc.getBlobName())); + + nioStream = new NioBlobOutputStream(bc.getBlobOutputStream(new ParallelTransferOptions(BLOCK_SIZE, null, null, + MAX_SINGLE_UPLOAD_SIZE), null, null, null, null), path); + } + + @Test + public void writeMin() throws IOException { + nioStream.write(1); + nioStream.close(); + + assertEquals(1, bc.getProperties().getBlobSize()); + + InputStream inputStream = bc.openInputStream(); + + assertEquals(1, inputStream.read()); + assertEquals(-1, inputStream.read()); + } + + @EnabledIf("com.azure.storage.blob.nio.BlobNioTestBase#liveOnly") // Because we upload in blocks + @Disabled("failing in ci") + public void writeMinError() throws IOException { + // Create an append blob at the destination to ensure writes fail. Customers should eventually be notified via + // writing that there was an error + cc.getBlobClient(bc.getBlobName()).getAppendBlobClient().create(); + + // Write enough data to force making network requests. + nioStream.write(getRandomByteArray(MAX_SINGLE_UPLOAD_SIZE + 1)); + // Issue a spurious request: A more reliable way than sleeping to ensure the previous stage block has enough + // time to round trip. 
+ bc.getProperties(); + + assertThrows(IOException.class, () -> nioStream.write(1)); + } + + @Test + public void writeArray() throws IOException { + int dataSize = 100; + byte[] data = getRandomByteArray(dataSize); + nioStream.write(data); + nioStream.close(); + + assertEquals(dataSize, bc.getProperties().getBlobSize()); + compareInputStreams(bc.openInputStream(), new ByteArrayInputStream(data), dataSize); + } + + @EnabledIf("com.azure.storage.blob.nio.BlobNioTestBase#liveOnly") // Because we upload in blocks + @Disabled("failing in ci") + public void writeArrayError() throws IOException { + // Create an append blob at the destination to ensure writes fail. Customers should eventually be notified via + // writing that there was an error + cc.getBlobClient(bc.getBlobName()).getAppendBlobClient().create(); + + /* + Write enough data to force making network requests. The error will not be thrown until the next time a method + on the stream is called. + */ + nioStream.write(getRandomByteArray(MAX_SINGLE_UPLOAD_SIZE + 1)); + // Issue a spurious request: A more reliable way than sleeping to ensure the previous stage block has enough + // time to round trip. 
+ bc.getProperties(); + + assertThrows(IOException.class, () -> nioStream.write(new byte[1])); + } + + @ParameterizedTest + @CsvSource(value = {"0,100", "20,80", "20,40"}) + public void writeOffsetLen(int offset, int len) throws IOException { + int dataSize = 100; + byte[] data = getRandomByteArray(dataSize); + + nioStream.write(data, offset, len); + nioStream.close(); + + assertEquals(len, bc.getProperties().getBlobSize()); + compareInputStreams(bc.openInputStream(), new ByteArrayInputStream(data, offset, len), dataSize); + } + + // To ensure the error isn't being wrapped unnecessarily + @Test + public void writeOffsetLenIOB() { + assertThrows(IndexOutOfBoundsException.class, () -> nioStream.write(new byte[5], -1, 6)); + } + + @EnabledIf("com.azure.storage.blob.nio.BlobNioTestBase#liveOnly") // Because we upload in blocks + @Disabled("failing in ci") + public void writeOffsetLenNetworkError() throws IOException { + // Create an append blob at the destination to ensure writes fail. Customers should eventually be notified via + // writing that there was an error + cc.getBlobClient(bc.getBlobName()).getAppendBlobClient().create(); + + // Write enough data to force making network requests. + nioStream.write(getRandomByteArray(MAX_SINGLE_UPLOAD_SIZE + 1)); + // Issue a spurious request: A more reliable way than sleeping to ensure the previous stage block has enough + // time to round trip. 
+ bc.getProperties(); + + assertThrows(IOException.class, () -> nioStream.write(new byte[1], 0, 1)); + } + + @Test + public void writeFSClosed() throws IOException { + fs.close(); + + assertThrows(ClosedFileSystemException.class, () -> nioStream.write(5)); + assertThrows(ClosedFileSystemException.class, () -> nioStream.write(new byte[5])); + assertThrows(ClosedFileSystemException.class, () -> nioStream.write(new byte[5], 2, 1)); + } + + // Flush does not actually flush data right now + @Test + public void flush() throws IOException { + nioStream.write(1); + nioStream.flush(); + + BlobStorageException e = assertThrows(BlobStorageException.class, () -> bc.listBlocks(BlockListType.ALL)); + assertEquals(BlobErrorCode.BLOB_NOT_FOUND, e.getErrorCode()); + } + + // Flush should at least check the stream state + @EnabledIf("com.azure.storage.blob.nio.BlobNioTestBase#liveOnly") // Because we upload in blocks + @Disabled("failing in ci") + public void flushError() throws IOException { + // Create an append blob at the destination to ensure writes fail. Customers should eventually be notified via + // writing that there was an error + cc.getBlobClient(bc.getBlobName()).getAppendBlobClient().create(); + + // Write enough data to force making network requests. + nioStream.write(getRandomByteArray(MAX_SINGLE_UPLOAD_SIZE + 1)); + // Issue a spurious request: A more reliable way than sleeping to ensure the previous stage block has enough + // time to round trip. 
+ bc.getProperties(); + + assertThrows(IOException.class, nioStream::flush); + } + + @Test + public void flushClosedFS() throws IOException { + nioStream.write(1); + fs.close(); + + assertThrows(ClosedFileSystemException.class, nioStream::flush); + } + + @Test + public void close() throws IOException { + nioStream.close(); + + assertThrows(IOException.class, () -> nioStream.write(1)); + } + + @Test + public void closeError() throws IOException { + // now calling close multiple times does not cause any error + nioStream.close(); + assertDoesNotThrow(nioStream::close); + } + + @Test + public void closeDoesNotThrowError() throws IOException { + bc = cc.getBlobClient(generateBlobName()).getBlockBlobClient(); + OutputStream nioStream = new NioBlobOutputStream(bc.getBlobOutputStream(new BlockBlobOutputStreamOptions()), + fs.getPath(getNonDefaultRootDir(fs), bc.getBlobName())); + + nioStream.write(1); + nioStream.close(); + // assert no error is thrown since close handles multiple close requests now + assertDoesNotThrow(nioStream::close); + } + + @Test + public void closeFSClosed() throws IOException { + fs.close(); + + assertThrows(ClosedFileSystemException.class, nioStream::close); + } +} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/NioClientBuilderTests.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/NioClientBuilderTests.java new file mode 100644 index 00000000000..413a8062352 --- /dev/null +++ b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/NioClientBuilderTests.java @@ -0,0 +1,108 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package com.azure.storage.blob.nio; + +import com.azure.core.http.HttpClient; +import com.azure.core.http.HttpHeaderName; +import com.azure.core.http.HttpMethod; +import com.azure.core.http.HttpPipeline; +import com.azure.core.http.HttpRequest; +import com.azure.core.http.HttpResponse; +import com.azure.core.test.http.MockHttpResponse; +import com.azure.core.util.CoreUtils; +import com.azure.storage.blob.implementation.util.BlobUserAgentModificationPolicy; +import com.azure.storage.common.StorageSharedKeyCredential; +import org.junit.jupiter.api.Test; +import reactor.core.publisher.Mono; +import reactor.test.StepVerifier; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class NioClientBuilderTests { + private static final Map PROPERTIES = + CoreUtils.getProperties("azure-storage-blob-nio.properties"); + private static final String CLIENT_NAME = PROPERTIES.getOrDefault("name", "UnknownName"); + private static final String CLIENT_VERSION = PROPERTIES.getOrDefault("version", "UnknownVersion"); + + static HttpRequest request(String url) { + return new HttpRequest(HttpMethod.HEAD, url); + } + + @Test + public void azureFileSystemServiceClient() throws IOException { + Map config = new HashMap<>(); + config.put(AzureFileSystem.AZURE_STORAGE_FILE_STORES, "containerName"); + config.put(AzureFileSystem.AZURE_STORAGE_HTTP_CLIENT, new UAStringTestClient("azsdk-java-azure-storage-blob/\\d+\\.\\d+\\.\\d+[-beta\\.\\d+]* azsdk-java-" + CLIENT_NAME + "/" + CLIENT_VERSION + " " + "(.)*")); + config.put(AzureFileSystem.AZURE_STORAGE_SHARED_KEY_CREDENTIAL, new StorageSharedKeyCredential("accountName", "accountKey")); + + AzureFileSystem fileSystem = new AzureFileSystem(new AzureFileSystemProvider(), "https://accountName.blob.core.windows.net", config); + 
HttpPipeline pipeline = fileSystem.getBlobServiceClient().getHttpPipeline(); + + verifyPipelineAndResponse(pipeline, fileSystem.getBlobServiceClient().getAccountUrl()); + } + + @Test + public void azureFileStoreContainerClient() throws IOException { + Map config = new HashMap<>(); + config.put(AzureFileSystem.AZURE_STORAGE_FILE_STORES, "containerName"); + config.put(AzureFileSystem.AZURE_STORAGE_HTTP_CLIENT, new UAStringTestClient("azsdk-java-azure-storage-blob/\\d+\\.\\d+\\.\\d+[-beta\\.\\d+]* azsdk-java-" + CLIENT_NAME + "/" + CLIENT_VERSION + " " + "(.)*")); + config.put(AzureFileSystem.AZURE_STORAGE_SHARED_KEY_CREDENTIAL, new StorageSharedKeyCredential("accountName", "accountKey")); + AzureFileSystem fileSystem = new AzureFileSystem(new AzureFileSystemProvider(), "https://accountName.blob.core.windows.net", config); + AzureFileStore fileStore = (AzureFileStore) fileSystem.getFileStore("containerName"); + HttpPipeline pipeline = fileStore.getContainerClient().getHttpPipeline(); + + verifyPipelineAndResponse(pipeline, fileStore.getContainerClient().getBlobContainerUrl()); + } + + @Test + public void azResourceBlobClient() throws IOException { + Map config = new HashMap<>(); + config.put(AzureFileSystem.AZURE_STORAGE_FILE_STORES, "containerName"); + config.put(AzureFileSystem.AZURE_STORAGE_HTTP_CLIENT, new UAStringTestClient("azsdk-java-azure-storage-blob/\\d+\\.\\d+\\.\\d+[-beta\\.\\d+]* azsdk-java-" + CLIENT_NAME + "/" + CLIENT_VERSION + " " + "(.)*")); + config.put(AzureFileSystem.AZURE_STORAGE_SHARED_KEY_CREDENTIAL, new StorageSharedKeyCredential("accountName", "accountKey")); + AzureFileSystem fileSystem = new AzureFileSystem(new AzureFileSystemProvider(), "https://accountName.blob.core.windows.net", config); + AzurePath path = (AzurePath) fileSystem.getPath("blobName"); + AzureResource resource = new AzureResource(path); + HttpPipeline pipeline = resource.getBlobClient().getHttpPipeline(); + + verifyPipelineAndResponse(pipeline, 
resource.getBlobClient().getBlobUrl()); + } + + private static void verifyPipelineAndResponse(HttpPipeline pipeline, String url) { + boolean foundPolicy = false; + for (int i = 0; i < pipeline.getPolicyCount(); i++) { + foundPolicy |= (pipeline.getPolicy(i) instanceof BlobUserAgentModificationPolicy); + } + + assertTrue(foundPolicy); + StepVerifier.create(pipeline.send(request(url))) + .assertNext(response -> assertEquals(200, response.getStatusCode())) + .verifyComplete(); + } + + private static final class UAStringTestClient implements HttpClient { + private final Pattern pattern; + + UAStringTestClient(String regex) { + this.pattern = Pattern.compile(regex); + } + + @Override + public Mono send(HttpRequest request) { + if (CoreUtils.isNullOrEmpty(request.getHeaders().getValue(HttpHeaderName.USER_AGENT))) { + throw new RuntimeException("Failed to set 'User-Agent' header."); + } + Matcher matcher = pattern.matcher(request.getHeaders().getValue(HttpHeaderName.USER_AGENT)); + assertTrue(matcher.matches()); + return Mono.just(new MockHttpResponse(request, 200)); + } + } +} diff --git a/project/Dependencies.scala b/project/Dependencies.scala index d90ab1956c0..c941a157df8 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -10,7 +10,6 @@ object Dependencies { // We would like to use the BOM to manage Azure SDK versions, but SBT doesn't support it. // https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/boms/azure-sdk-bom // https://github.com/sbt/sbt/issues/4531 - private val azureStorageBlobNioV = "12.0.0-beta.19" private val azureIdentitySdkV = "1.9.0-beta.2" // We are using the older AppInsights 2 because we want to use the // logback appender to send logs. 
AppInsights 3 does not have a standalone @@ -206,10 +205,27 @@ object Dependencies { "org.typelevel" %% "cats-effect" % catsEffectV, ) + /* + Due to complications with the public preview Nio filesystem for azure, + we include this FS implementation locally and include its dependencies + */ + val azureBlobNioDependencies = List( + "com.azure" % "azure-core" % "1.40.0", + "com.azure" % "azure-storage-blob" % "12.23.0-beta.1", + "com.azure" % "azure-storage-common" % "12.22.0-beta.1", + "com.azure" % "azure-core-test" % "1.18.0", + "org.junit.jupiter" % "junit-jupiter-params" % "5.9.3", + "org.junit.jupiter" % "junit-jupiter-engine" % "5.9.3", + "org.junit.jupiter" % "junit-jupiter-api" % "5.9.3", + "io.projectreactor" % "reactor-test" % "3.4.29", + "cglib" % "cglib-nodep" % "3.2.7", + "org.slf4j" % "slf4j-simple" % slf4jV, + "com.azure" % "azure-core-http-okhttp" % "1.11.10", + "org.mockito" % "mockito-core" % "4.11.0", + "com.github.sbt" % "junit-interface" % "0.13.2" % Test // For running junit tests associated with this library + ) + val azureDependencies: List[ModuleID] = List( - "com.azure" % "azure-storage-blob-nio" % azureStorageBlobNioV - exclude("jakarta.xml.bind", "jakarta.xml.bind-api") - exclude("jakarta.activation", "jakarta.activation-api"), "com.azure" % "azure-identity" % azureIdentitySdkV exclude("jakarta.xml.bind", "jakarta.xml.bind-api") exclude("jakarta.activation", "jakarta.activation-api") @@ -219,7 +235,7 @@ object Dependencies { "com.azure.resourcemanager" % "azure-resourcemanager" % "2.18.0", "net.minidev" % "json-smart" % jsonSmartV, "com.microsoft.azure" % "applicationinsights-logging-logback" % azureAppInsightsLogbackV, - ) + ) ++ azureBlobNioDependencies val wsmDependencies: List[ModuleID] = List( "bio.terra" % "workspace-manager-client" % "0.254.452-SNAPSHOT" From 23e706f2d851594568c9cd3014df7807321b8c11 Mon Sep 17 00:00:00 2001 From: Christian Freitas Date: Thu, 29 Jun 2023 14:38:30 -0400 Subject: [PATCH 2/7] Remove tests due to 
artifact compile issue. --- .../storage/blob/nio/AttributeViewTests.java | 290 ---- .../blob/nio/AzureDirectoryStreamTests.java | 215 --- .../storage/blob/nio/AzureFileStoreTests.java | 96 -- .../nio/AzureFileSystemProviderTests.java | 1437 ----------------- .../blob/nio/AzureFileSystemTests.java | 216 --- .../storage/blob/nio/AzurePathTests.java | 285 ---- .../storage/blob/nio/AzureResourceTests.java | 291 ---- .../nio/AzureSeekableByteChannelTests.java | 412 ----- .../storage/blob/nio/BlobNioTestBase.java | 416 ----- .../storage/blob/nio/CompositeTests.java | 198 --- .../blob/nio/NioBlobInputStreamTests.java | 243 --- .../blob/nio/NioBlobOutputStreamTests.java | 219 --- .../blob/nio/NioClientBuilderTests.java | 108 -- 13 files changed, 4426 deletions(-) delete mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AttributeViewTests.java delete mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureDirectoryStreamTests.java delete mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureFileStoreTests.java delete mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureFileSystemProviderTests.java delete mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureFileSystemTests.java delete mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzurePathTests.java delete mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureResourceTests.java delete mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureSeekableByteChannelTests.java delete mode 100644 
filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/BlobNioTestBase.java delete mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/CompositeTests.java delete mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/NioBlobInputStreamTests.java delete mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/NioBlobOutputStreamTests.java delete mode 100644 filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/NioClientBuilderTests.java diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AttributeViewTests.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AttributeViewTests.java deleted file mode 100644 index 1c820073a31..00000000000 --- a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AttributeViewTests.java +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package com.azure.storage.blob.nio; - -import com.azure.storage.blob.BlobClient; -import com.azure.storage.blob.models.AccessTier; -import com.azure.storage.blob.models.BlobHttpHeaders; -import com.azure.storage.blob.models.BlobProperties; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.Arguments; -import org.junit.jupiter.params.provider.CsvSource; -import org.junit.jupiter.params.provider.MethodSource; - -import java.io.IOException; -import java.nio.file.ClosedFileSystemException; -import java.nio.file.Path; -import java.nio.file.attribute.FileTime; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.util.Base64; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import java.util.function.Supplier; -import java.util.stream.Stream; - -import static com.azure.core.test.utils.TestUtils.assertArraysEqual; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; - -public class AttributeViewTests extends BlobNioTestBase { - // Get attributes--All properties set; - private BlobClient bc; - private AzureFileSystem fs; - - @Override - protected void beforeTest() { - super.beforeTest(); - fs = createFS(initializeConfigMap()); - cc = rootNameToContainerClient(getDefaultDir(fs)); - bc = cc.getBlobClient(generateBlobName()); - bc.upload(DATA.getDefaultBinaryData()); - } - - @Test - public void azureBasicFileAttributeViewReadAttributes() throws IOException { - AzureBasicFileAttributes attr = new AzureBasicFileAttributeView(fs.getPath(bc.getBlobName())).readAttributes(); - BlobProperties props = bc.getProperties(); - - assertEquals(attr.size(), props.getBlobSize()); - assertEquals(attr.lastModifiedTime(), 
FileTime.from(props.getLastModified().toInstant())); - assertEquals(attr.creationTime(), FileTime.from(props.getCreationTime().toInstant())); - assertTrue(attr.isRegularFile()); - assertEquals(attr.fileKey(), bc.getBlobUrl()); - assertFalse(attr.isDirectory()); - assertFalse(attr.isVirtualDirectory()); - assertFalse(attr.isSymbolicLink()); - assertFalse(attr.isOther()); - } - - @Test - public void azureBasicFileAttributeViewDirectory() throws IOException { - Path path = fs.getPath(generateBlobName()); - putDirectoryBlob(new AzureResource(path).getBlobClient().getBlockBlobClient()); - AzureBasicFileAttributes attr = new AzureBasicFileAttributeView(path).readAttributes(); - - assertTrue(attr.isDirectory()); - assertFalse(attr.isVirtualDirectory()); - assertFalse(attr.isRegularFile()); - assertFalse(attr.isOther()); - assertFalse(attr.isSymbolicLink()); - } - - @Test - public void azureBasicFileAttributeViewDirectoryVirtual() throws IOException { - String dirName = generateBlobName(); - BlobClient bc = cc.getBlobClient(dirName + '/' + generateContainerName()); - bc.upload(DATA.getDefaultBinaryData()); - AzureBasicFileAttributes attr = new AzureBasicFileAttributeView(fs.getPath(dirName)).readAttributes(); - - assertTrue(attr.isDirectory()); - assertTrue(attr.isVirtualDirectory()); - assertFalse(attr.isRegularFile()); - assertFalse(attr.isOther()); - assertFalse(attr.isSymbolicLink()); - } - - @Test - public void azureBasicFileAttributeViewNoExist() { - assertThrows(IOException.class, - () -> new AzureBasicFileAttributeView(fs.getPath(generateBlobName())).readAttributes()); - } - - @Test - public void azureBasicFileAttributeViewFSClosed() throws IOException { - Path path = fs.getPath(generateBlobName()); - fs.close(); - - assertThrows(ClosedFileSystemException.class, () -> new AzureBasicFileAttributeView(path).readAttributes()); - } - - @Test - public void azureBlobFileAttributeViewReadAttributes() throws IOException { - AzureBlobFileAttributes attr = new 
AzureBlobFileAttributeView(fs.getPath(bc.getBlobName())).readAttributes(); - Map> suppliers = AzureBlobFileAttributes.getAttributeSuppliers(attr); - BlobProperties props = bc.getProperties(); - - // getters - assertEquals(attr.size(), props.getBlobSize()); - assertEquals(attr.lastModifiedTime(), FileTime.from(props.getLastModified().toInstant())); - assertEquals(attr.creationTime(), FileTime.from(props.getCreationTime().toInstant())); - assertTrue(attr.isRegularFile()); - assertEquals(attr.fileKey(), bc.getBlobUrl()); - assertFalse(attr.isDirectory()); - assertFalse(attr.isVirtualDirectory()); - assertFalse(attr.isSymbolicLink()); - assertFalse(attr.isOther()); - assertEquals(attr.eTag(), props.getETag()); - assertEquals(attr.blobHttpHeaders().getContentType(), props.getContentType()); - assertArraysEqual(attr.blobHttpHeaders().getContentMd5(), props.getContentMd5()); - assertEquals(attr.blobHttpHeaders().getContentLanguage(), props.getContentLanguage()); - assertEquals(attr.blobHttpHeaders().getContentEncoding(), props.getContentEncoding()); - assertEquals(attr.blobHttpHeaders().getContentDisposition(), props.getContentDisposition()); - assertEquals(attr.blobHttpHeaders().getCacheControl(), props.getCacheControl()); - assertEquals(attr.blobType(), props.getBlobType()); - assertEquals(attr.copyId(), props.getCopyId()); - assertEquals(attr.copyStatus(), props.getCopyStatus()); - assertEquals(attr.copySource(), props.getCopySource()); - assertEquals(attr.copyProgress(), props.getCopyProgress()); - assertEquals(attr.copyCompletionTime(), props.getCopyCompletionTime()); - assertEquals(attr.copyStatusDescription(), props.getCopyStatusDescription()); - assertEquals(attr.isServerEncrypted(), props.isServerEncrypted()); - assertEquals(attr.accessTier(), props.getAccessTier()); - assertEquals(attr.isAccessTierInferred(), props.isAccessTierInferred()); - assertEquals(attr.archiveStatus(), props.getArchiveStatus()); - assertEquals(attr.accessTierChangeTime(), 
props.getAccessTierChangeTime()); - assertEquals(attr.metadata(), props.getMetadata()); - - // Suppliers, used in FileSystemProvider.readAttributes(String). Unlike the consumers used for setting - // properties, we test these here rather than on the FileSystemProvider because there are so many of them and - // it's more feasible this way rather than having a test for each method like the consumers. - assertEquals(suppliers.get("size").get(), props.getBlobSize()); - assertEquals(suppliers.get("lastModifiedTime").get(), FileTime.from(props.getLastModified().toInstant())); - assertEquals(suppliers.get("creationTime").get(), FileTime.from(props.getCreationTime().toInstant())); - assertEquals(suppliers.get("eTag").get(), props.getETag()); - BlobHttpHeaders supplierHeaders = (BlobHttpHeaders) suppliers.get("blobHttpHeaders").get(); - assertEquals(supplierHeaders.getContentType(), props.getContentType()); - assertArraysEqual(supplierHeaders.getContentMd5(), props.getContentMd5()); - assertEquals(supplierHeaders.getContentLanguage(), props.getContentLanguage()); - assertEquals(supplierHeaders.getContentEncoding(), props.getContentEncoding()); - assertEquals(supplierHeaders.getContentDisposition(), props.getContentDisposition()); - assertEquals(supplierHeaders.getCacheControl(), props.getCacheControl()); - assertEquals(suppliers.get("blobType").get(), props.getBlobType()); - assertEquals(suppliers.get("copyId").get(), props.getCopyId()); - assertEquals(suppliers.get("copyStatus").get(), props.getCopyStatus()); - assertEquals(suppliers.get("copySource").get(), props.getCopySource()); - assertEquals(suppliers.get("copyProgress").get(), props.getCopyProgress()); - assertEquals(suppliers.get("copyCompletionTime").get(), props.getCopyCompletionTime()); - assertEquals(suppliers.get("copyStatusDescription").get(), props.getCopyStatusDescription()); - assertEquals(suppliers.get("isServerEncrypted").get(), props.isServerEncrypted()); - assertEquals(suppliers.get("accessTier").get(), 
props.getAccessTier()); - assertEquals(suppliers.get("isAccessTierInferred").get(), props.isAccessTierInferred()); - assertEquals(suppliers.get("archiveStatus").get(), props.getArchiveStatus()); - assertEquals(suppliers.get("accessTierChangeTime").get(), props.getAccessTierChangeTime()); - assertEquals(suppliers.get("metadata").get(), props.getMetadata()); - } - - @Test - public void azureBlobFileAttributeViewReadFSClosed() throws IOException { - Path path = fs.getPath(generateBlobName()); - fs.close(); - - assertThrows(ClosedFileSystemException.class, () -> new AzureBlobFileAttributeView(path).readAttributes()); - } - - @ParameterizedTest - @MethodSource("azureBlobFileAttributeViewSetBlobHttpHeadersSupplier") - public void azureBlobFileAttributeViewSetBlobHttpHeaders(String cacheControl, String contentDisposition, - String contentEncoding, String contentLanguage, byte[] contentMD5, String contentType) throws IOException { - AzureBlobFileAttributeView view = new AzureBlobFileAttributeView(fs.getPath(bc.getBlobName())); - BlobHttpHeaders headers = new BlobHttpHeaders().setCacheControl(cacheControl) - .setContentDisposition(contentDisposition) - .setContentEncoding(contentEncoding) - .setContentLanguage(contentLanguage) - .setContentMd5(contentMD5) - .setContentType(contentType); - - view.setBlobHttpHeaders(headers); - BlobProperties response = bc.getProperties(); - - assertEquals(cacheControl, response.getCacheControl()); - assertEquals(contentDisposition, response.getContentDisposition()); - assertEquals(contentEncoding, response.getContentEncoding()); - assertEquals(contentLanguage, response.getContentLanguage()); - assertArraysEqual(contentMD5, response.getContentMd5()); - assertEquals(contentType, response.getContentType()); - } - - private static Stream azureBlobFileAttributeViewSetBlobHttpHeadersSupplier() - throws NoSuchAlgorithmException { - return Stream.of( - Arguments.of(null, null, null, null, null, null), - Arguments.of("control", "disposition", 
"encoding", "language", - Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())), "typr") - ); - } - - @Test - public void azureBlobFileAttributeViewSetHeadersFSClosed() throws IOException { - Path path = fs.getPath(generateBlobName()); - fs.close(); - - assertThrows(ClosedFileSystemException.class, - () -> new AzureBlobFileAttributeView(path).setBlobHttpHeaders(new BlobHttpHeaders())); - } - - @ParameterizedTest - @CsvSource(value = {"null,null,null,null", "foo,bar,fizz,buzz", - "i0,a,i_,a" /* Test culture sensitive word sort */}, nullValues = "null") - public void azureBlobFileAttributeViewSetMetadata(String key1, String value1, String key2, String value2) - throws IOException { - AzureBlobFileAttributeView view = new AzureBlobFileAttributeView(fs.getPath(bc.getBlobName())); - Map metadata = new HashMap(); - if (key1 != null && value1 != null) { - metadata.put(key1, value1); - } - if (key2 != null && value2 != null) { - metadata.put(key2, value2); - } - - view.setMetadata(metadata); - - assertEquals(metadata, bc.getProperties().getMetadata()); - } - - @Test - public void azureBlobFileAttributeViewSetMetadataFSClosed() throws IOException { - Path path = fs.getPath(generateBlobName()); - fs.close(); - - assertThrows(ClosedFileSystemException.class, - () -> new AzureBlobFileAttributeView(path).setMetadata(Collections.emptyMap())); - } - - @ParameterizedTest - @MethodSource("azureBlobFileAttributeViewSetTierSupplier") - public void azureBlobFileAttributeViewSetTier(AccessTier tier) throws IOException { - new AzureBlobFileAttributeView(fs.getPath(bc.getBlobName())).setTier(tier); - - assertEquals(tier, bc.getProperties().getAccessTier()); - } - - private static Stream azureBlobFileAttributeViewSetTierSupplier() { - // We don't test archive because it takes a while to take effect and testing HOT and COOL demonstrates that the - // tier is successfully being passed to the underlying client. 
- return Stream.of(AccessTier.HOT, AccessTier.COOL); - } - - @Test - public void azureBlobFileAttributeViewSetTierFSClosed() throws IOException { - Path path = fs.getPath(generateBlobName()); - fs.close(); - - assertThrows(ClosedFileSystemException.class, - () -> new AzureBlobFileAttributeView(path).setTier(AccessTier.HOT)); - } - - @ParameterizedTest - @MethodSource("attributeViewSetTimesUnsupportedSupplier") - public void attributeViewSetTimesUnsupported(FileTime t1, FileTime t2, FileTime t3) { - Path path = fs.getPath(bc.getBlobName()); - AzureBlobFileAttributeView blobView = new AzureBlobFileAttributeView(path); - AzureBasicFileAttributeView basicView = new AzureBasicFileAttributeView(path); - - assertThrows(UnsupportedOperationException.class, () -> blobView.setTimes(t1, t2, t3)); - assertThrows(UnsupportedOperationException.class, () -> basicView.setTimes(t1, t2, t3)); - } - - private static Stream attributeViewSetTimesUnsupportedSupplier() { - return Stream.of( - Arguments.of(FileTime.fromMillis(System.currentTimeMillis()), null, null), - Arguments.of(null, FileTime.fromMillis(System.currentTimeMillis()), null), - Arguments.of(null, null, FileTime.fromMillis(System.currentTimeMillis())) - ); - } -} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureDirectoryStreamTests.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureDirectoryStreamTests.java deleted file mode 100644 index 0c75f780567..00000000000 --- a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureDirectoryStreamTests.java +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package com.azure.storage.blob.nio; - -import com.azure.core.test.TestMode; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.CsvSource; -import org.junit.jupiter.params.provider.ValueSource; - -import java.io.IOException; -import java.nio.file.ClosedFileSystemException; -import java.nio.file.DirectoryIteratorException; -import java.nio.file.DirectoryStream; -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.NoSuchElementException; -import java.util.concurrent.ConcurrentHashMap; -import java.util.stream.IntStream; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; - -public class AzureDirectoryStreamTests extends BlobNioTestBase { - private AzureFileSystem fs; - - @Override - protected void beforeTest() { - super.beforeTest(); - fs = createFS(initializeConfigMap()); - } - - @ParameterizedTest - @CsvSource(value = {"0,true", "5,true", "6000,true", "5,false"}) - public void listFiles(int numFiles, boolean absolute) throws IOException { - if (numFiles > 50 && getTestMode() != TestMode.LIVE) { - return; // Skip large data set in record and playback - } - String rootName = absolute ? 
getNonDefaultRootDir(fs) : ""; - String dirName = generateBlobName(); - Map resources = new ConcurrentHashMap<>(); - IntStream.range(0, numFiles).parallel().forEach(i -> { - AzureResource resource = null; - try { - resource = new AzureResource(fs.getPath(rootName, dirName, generateBlobName())); - } catch (IOException e) { - throw new RuntimeException(e); - } - resources.put(resource.getPath(), resource); - resource.getBlobClient().getBlockBlobClient().commitBlockList(Collections.emptyList()); - }); - - Iterator iterator = new AzureDirectoryStream((AzurePath) fs.getPath(rootName, dirName), entry -> true) - .iterator(); - - if (numFiles > 0) { - // Check that repeated hasNext calls returns true and doesn't affect the results of next() - assertTrue(iterator.hasNext()); - assertTrue(iterator.hasNext()); - } - - for (int i = 0; i < numFiles; i++) { - assertTrue(iterator.hasNext()); - assertNotNull(resources.remove(iterator.next())); - } - - assertFalse(iterator.hasNext()); - assertThrows(NoSuchElementException.class, iterator::next); - } - - // If listing results include directories, they should not be recursively listed. Only immediate children are - // returned. - @ParameterizedTest - @CsvSource(value = {"true,false", "false,false", "false,true"}) - public void listDirectories(boolean virtual, boolean isEmpty) throws IOException { - // The path to list against - AzureResource listResource = new AzureResource(fs.getPath(getNonDefaultRootDir(fs), generateBlobName())); - // The only expected result of the listing - AzureResource listResultResource = new AzureResource(listResource.getPath().resolve(generateBlobName())); - if (!virtual) { - listResource.putDirectoryBlob(null); - listResultResource.putDirectoryBlob(null); - } - - // Put some children under listResultResource. These should not be returned. 
- if (!isEmpty) { - for (int i = 0; i < 3; i++) { - ((AzurePath) listResultResource.getPath().resolve(generateBlobName())).toBlobClient() - .getBlockBlobClient().commitBlockList(Collections.emptyList()); - } - } - - Iterator iterator = new AzureDirectoryStream(listResource.getPath(), path -> true).iterator(); - - assertTrue(iterator.hasNext()); - assertEquals(listResultResource.getPath().toString(), iterator.next().toString()); - assertFalse(iterator.hasNext()); - } - - @ParameterizedTest - @ValueSource(ints = {0, 1, 3}) - public void listFilesDepth(int depth) throws IOException { - AzurePath listingPath = (AzurePath) fs.getPath(getNonDefaultRootDir(fs), getPathWithDepth(depth)); - - AzureResource filePath = new AzureResource(listingPath.resolve(generateBlobName())); - filePath.getBlobClient().getBlockBlobClient().commitBlockList(Collections.emptyList()); - - AzureResource concreteDirEmptyPath = new AzureResource(listingPath.resolve(generateBlobName())); - concreteDirEmptyPath.putDirectoryBlob(null); - - AzureResource concreteDirNonEmptyPath = new AzureResource(listingPath.resolve(generateBlobName())); - concreteDirNonEmptyPath.putDirectoryBlob(null); - - AzureResource concreteDirChildPath = new AzureResource(concreteDirNonEmptyPath.getPath() - .resolve(generateBlobName())); - concreteDirChildPath.getBlobClient().getBlockBlobClient().commitBlockList(Collections.emptyList()); - - AzureResource virtualDirPath = new AzureResource(listingPath.resolve(generateBlobName())); - AzureResource virtualDirChildPath = new AzureResource(virtualDirPath.getPath().resolve(generateBlobName())); - virtualDirChildPath.getBlobClient().getBlockBlobClient().commitBlockList(Collections.emptyList()); - - List expectedListResults = new ArrayList<>(Arrays.asList(filePath.getPath().toString(), - concreteDirEmptyPath.getPath().toString(), concreteDirNonEmptyPath.getPath().toString(), - virtualDirPath.getPath().toString())); - - for (Path path : new AzureDirectoryStream(listingPath, path -> 
true)) { - assertTrue(expectedListResults.remove(path.toString())); - } - assertEquals(0, expectedListResults.size()); - } - - @Test - public void iteratorDuplicateCallsFail() throws IOException { - AzureDirectoryStream stream = new AzureDirectoryStream((AzurePath) fs.getPath(generateBlobName()), - path -> true); - stream.iterator(); - - assertThrows(IllegalStateException.class, stream::iterator); - } - - @Test - public void nextHasNextFailAfterClose() throws IOException { - String rootName = getNonDefaultRootDir(fs); - String dirName = generateBlobName(); - for (int i = 0; i < 3; i++) { - new AzureResource(fs.getPath(rootName, dirName, generateBlobName())) - .getBlobClient().getBlockBlobClient().commitBlockList(Collections.emptyList()); - } - - DirectoryStream stream = new AzureDirectoryStream((AzurePath) fs.getPath(rootName, dirName), - path -> true); - Iterator iterator = stream.iterator(); - - // There are definitely items we haven't returned from the iterator, but they are inaccessible after closing. 
- stream.close(); - - assertFalse(iterator.hasNext()); - assertThrows(NoSuchElementException.class, iterator::next); - } - - @Test - public void hasNextFailAfterFSClose() throws IOException { - Path path = fs.getPath(generateBlobName()); - putDirectoryBlob(rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(path.getFileName().toString()) - .getBlockBlobClient()); - DirectoryStream stream = fs.provider().newDirectoryStream(path, null); - fs.close(); - - assertThrows(ClosedFileSystemException.class, () -> stream.iterator().hasNext()); - } - - @Test - public void filter() throws IOException { - String rootName = getNonDefaultRootDir(fs); - String dirName = generateBlobName(); - for (int i = 0; i < 3; i++) { - new AzureResource(fs.getPath(rootName, dirName, i + generateBlobName())) - .getBlobClient().getBlockBlobClient().commitBlockList(Collections.emptyList()); - } - - Iterator iterator = new AzureDirectoryStream((AzurePath) fs.getPath(rootName, dirName), - path -> path.getFileName().toString().startsWith("0")).iterator(); - - assertTrue(iterator.hasNext()); - assertTrue(iterator.next().getFileName().toString().startsWith("0")); - assertFalse(iterator.hasNext()); - } - - @Test - public void filterException() throws IOException { - String rootName = getNonDefaultRootDir(fs); - String dirName = generateBlobName(); - for (int i = 0; i < 3; i++) { - new AzureResource(fs.getPath(rootName, dirName, i + generateBlobName())) - .getBlobClient().getBlockBlobClient().commitBlockList(Collections.emptyList()); - } - AzureDirectoryStream stream = new AzureDirectoryStream((AzurePath) fs.getPath(rootName, dirName), - entry -> { - throw new IOException("Test exception"); - }); - - DirectoryIteratorException e = assertThrows(DirectoryIteratorException.class, - () -> stream.iterator().hasNext()); - assertEquals("Test exception", e.getCause().getMessage()); - } -} diff --git 
a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureFileStoreTests.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureFileStoreTests.java deleted file mode 100644 index 8cc4d86d6f9..00000000000 --- a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureFileStoreTests.java +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.storage.blob.nio; - -import org.junit.jupiter.api.Test; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.Arguments; -import org.junit.jupiter.params.provider.MethodSource; - -import java.io.IOException; -import java.nio.file.FileStore; -import java.nio.file.attribute.BasicFileAttributeView; -import java.nio.file.attribute.FileAttributeView; -import java.nio.file.attribute.FileStoreAttributeView; -import java.nio.file.attribute.PosixFileAttributeView; -import java.util.Map; -import java.util.stream.Stream; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertNull; -import static org.junit.jupiter.api.Assertions.assertThrows; - -public class AzureFileStoreTests extends BlobNioTestBase { - private AzureFileSystem fs; - - // Just need one fs instance for creating the stores. 
- @Override - public void beforeTest() { - super.beforeTest(); - Map config = initializeConfigMap(); - config.put(AzureFileSystem.AZURE_STORAGE_SHARED_KEY_CREDENTIAL, - ENV.getPrimaryAccount().getCredential()); - config.put(AzureFileSystem.AZURE_STORAGE_FILE_STORES, generateContainerName() + "," + generateContainerName()); - try { - fs = new AzureFileSystem(new AzureFileSystemProvider(), ENV.getPrimaryAccount().getBlobEndpoint(), - config); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - // The constructor is implicitly tested by creating a file system. - @Test - public void name() throws IOException { - String name = generateContainerName(); - - assertEquals(name, new AzureFileStore(fs, name, false).name()); - } - - @Test - public void type() { - assertEquals("AzureBlobContainer", fs.getFileStores().iterator().next().type()); - } - - @Test - public void isReadOnly() { - assertFalse(fs.getFileStores().iterator().next().isReadOnly()); - } - - @Test - public void space() throws IOException { - FileStore store = fs.getFileStores().iterator().next(); - - assertEquals(Long.MAX_VALUE, store.getTotalSpace()); - assertEquals(Long.MAX_VALUE, store.getUsableSpace()); - assertEquals(Long.MAX_VALUE, store.getUnallocatedSpace()); - } - - @ParameterizedTest - @MethodSource("supportsFileAttributeViewSupplier") - public void supportsFileAttributeView(Class view, String viewName, boolean supports) { - FileStore store = fs.getFileStores().iterator().next(); - - assertEquals(supports, store.supportsFileAttributeView(view)); - assertEquals(supports, store.supportsFileAttributeView(viewName)); - } - - private static Stream supportsFileAttributeViewSupplier() { - return Stream.of( - Arguments.of(BasicFileAttributeView.class, "basic", true), - Arguments.of(AzureBlobFileAttributeView.class, "azureBlob", true), - Arguments.of(AzureBasicFileAttributeView.class, "azureBasic", true), - Arguments.of(PosixFileAttributeView.class, "posix", false) - ); - } - - @Test - 
public void getFileStoreAttributeView() { - FileStore store = fs.getFileStores().iterator().next(); - - assertNull(store.getFileStoreAttributeView(FileStoreAttributeView.class)); - assertThrows(UnsupportedOperationException.class, () -> store.getAttribute("basic:size")); - } -} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureFileSystemProviderTests.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureFileSystemProviderTests.java deleted file mode 100644 index 4055382fed3..00000000000 --- a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureFileSystemProviderTests.java +++ /dev/null @@ -1,1437 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.storage.blob.nio; - -import com.azure.core.http.HttpHeaders; -import com.azure.core.http.HttpMethod; -import com.azure.core.http.HttpPipelineCallContext; -import com.azure.core.http.HttpPipelineNextPolicy; -import com.azure.core.http.HttpRequest; -import com.azure.core.http.HttpResponse; -import com.azure.core.http.policy.HttpPipelinePolicy; -import com.azure.core.test.http.MockHttpResponse; -import com.azure.storage.blob.BlobClient; -import com.azure.storage.blob.BlobContainerClient; -import com.azure.storage.blob.models.AccessTier; -import com.azure.storage.blob.models.BlobErrorCode; -import com.azure.storage.blob.models.BlobHttpHeaders; -import com.azure.storage.blob.models.BlobProperties; -import com.azure.storage.blob.models.BlockListType; -import com.azure.storage.blob.specialized.AppendBlobClient; -import com.azure.storage.blob.specialized.BlockBlobClient; -import com.azure.storage.common.StorageSharedKeyCredential; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.condition.EnabledIf; -import org.junit.jupiter.params.ParameterizedTest; -import 
org.junit.jupiter.params.provider.Arguments; -import org.junit.jupiter.params.provider.CsvSource; -import org.junit.jupiter.params.provider.EnumSource; -import org.junit.jupiter.params.provider.MethodSource; -import org.junit.jupiter.params.provider.ValueSource; -import reactor.core.publisher.Mono; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.OutputStream; -import java.io.UncheckedIOException; -import java.net.URI; -import java.nio.ByteBuffer; -import java.nio.channels.SeekableByteChannel; -import java.nio.charset.StandardCharsets; -import java.nio.file.AccessDeniedException; -import java.nio.file.AccessMode; -import java.nio.file.ClosedFileSystemException; -import java.nio.file.DirectoryNotEmptyException; -import java.nio.file.FileAlreadyExistsException; -import java.nio.file.FileSystem; -import java.nio.file.FileSystemAlreadyExistsException; -import java.nio.file.FileSystemNotFoundException; -import java.nio.file.Files; -import java.nio.file.NoSuchFileException; -import java.nio.file.Path; -import java.nio.file.StandardCopyOption; -import java.nio.file.StandardOpenOption; -import java.nio.file.attribute.BasicFileAttributeView; -import java.nio.file.attribute.BasicFileAttributes; -import java.nio.file.attribute.DosFileAttributeView; -import java.nio.file.attribute.DosFileAttributes; -import java.nio.file.attribute.FileAttribute; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.util.Arrays; -import java.util.Base64; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.stream.Stream; - -import static com.azure.core.test.utils.TestUtils.assertArraysEqual; -import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static 
org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertInstanceOf; -import static org.junit.jupiter.api.Assertions.assertNull; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; - -@SuppressWarnings("resource") -public class AzureFileSystemProviderTests extends BlobNioTestBase { - Map config; - private AzureFileSystemProvider provider; - - // The following are common among a large number of copy tests - private AzurePath sourcePath; - private AzurePath destPath; - private BlobClient sourceClient; - private BlobClient destinationClient; - - @Override - protected void beforeTest() { - super.beforeTest(); - config = initializeConfigMap(); - provider = new AzureFileSystemProvider(); - } - - @Test - public void createFileSystem() throws IOException { - config.put(AzureFileSystem.AZURE_STORAGE_SHARED_KEY_CREDENTIAL, ENV.getPrimaryAccount().getCredential()); - config.put(AzureFileSystem.AZURE_STORAGE_FILE_STORES, generateContainerName()); - URI uri = getFileSystemUri(); - provider.newFileSystem(uri, config); - - assertTrue(provider.getFileSystem(uri).isOpen()); - assertEquals(primaryBlobServiceClient.getAccountUrl(), - ((AzureFileSystem) provider.getFileSystem(uri)).getFileSystemUrl()); - } - - @ParameterizedTest - @ValueSource(strings = {"azc://path", "azb://path", "azb://?foo=bar", "azb://?account="}) - public void createFileSystemInvalidUri(String uri) { - assertThrows(IllegalArgumentException.class, () -> provider.newFileSystem(new URI(uri), config)); - } - - @Test - public void createFileSystemDuplicate() throws IOException { - config.put(AzureFileSystem.AZURE_STORAGE_FILE_STORES, generateContainerName()); - config.put(AzureFileSystem.AZURE_STORAGE_SHARED_KEY_CREDENTIAL, ENV.getPrimaryAccount().getCredential()); - provider.newFileSystem(getFileSystemUri(), config); - - assertThrows(FileSystemAlreadyExistsException.class, () -> 
provider.newFileSystem(getFileSystemUri(), config)); - } - - @Test - public void createFileSystemInitialCheckFail() { - config.put(AzureFileSystem.AZURE_STORAGE_FILE_STORES, generateContainerName()); - byte[] badKey = ENV.getPrimaryAccount().getKey().getBytes(StandardCharsets.UTF_8); - badKey[0]++; - config.put(AzureFileSystem.AZURE_STORAGE_SHARED_KEY_CREDENTIAL, - new StorageSharedKeyCredential(ENV.getPrimaryAccount().getName(), new String(badKey))); - - assertThrows(IOException.class, () -> provider.newFileSystem(getFileSystemUri(), config)); - assertThrows(FileSystemNotFoundException.class, () -> provider.getFileSystem(getFileSystemUri())); - } - - @Test - public void getFileSystemNotFound() { - assertThrows(FileSystemNotFoundException.class, () -> provider.getFileSystem(getFileSystemUri())); - } - - @ParameterizedTest - @ValueSource(strings = {"azc://path", "azb://path", "azb://?foo=bar", "azb://?account="}) - public void getFileSystemIa(String uri) { - assertThrows(IllegalArgumentException.class, () -> provider.getFileSystem(new URI(uri))); - } - - // TODO: Be sure to test operating on containers that already have data - // all apis should have a test that tries them after the FileSystem is closed to ensure they throw. - @Test - public void getScheme() { - assertEquals("azb", provider.getScheme()); - } - - @ParameterizedTest - @ValueSource(ints = {0, 1, 2}) - public void createDirParentExists(int depth) throws IOException { - AzureFileSystem fs = createFS(config); - - // Generate resource names. - // Don't use default directory to ensure we honor the root. - String rootName = getNonDefaultRootDir(fs); - String parent = getPathWithDepth(depth); - String dirPathStr = parent + generateBlobName(); - - Path dirPath = fs.getPath(rootName, dirPathStr); - - // Generate clients to resources. Create resources as necessary - BlobContainerClient containerClient = rootNameToContainerClient(rootName); - /* - In this case, we are putting the blob in the root directory, i.e. 
directly in the container, so no need to - create a blob. - */ - if (!"".equals(parent)) { - containerClient.getBlobClient(parent).getAppendBlobClient().create(); - } - BlobClient dirClient = containerClient.getBlobClient(dirPathStr); - fs.provider().createDirectory(dirPath); - - checkBlobIsDir(dirClient); - } - - @Test - public void createDirRelativePath() throws IOException { - AzureFileSystem fs = createFS(config); - String fileName = generateBlobName(); - BlobClient blobClient = rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(fileName); - - // Relative paths are resolved against the default directory - fs.provider().createDirectory(fs.getPath(fileName)); - - checkBlobIsDir(blobClient); - } - - @Test - public void createDirFileAlreadyExists() { - AzureFileSystem fs = createFS(config); - String fileName = generateBlobName(); - BlockBlobClient blobClient = rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(fileName) - .getBlockBlobClient(); - blobClient.commitBlockList(Collections.emptyList(), false); - - // Will go to default directory - assertThrows(FileAlreadyExistsException.class, () -> fs.provider().createDirectory(fs.getPath(fileName))); - } - - @Test - public void createDirConcreteDirAlreadyExists() { - AzureFileSystem fs = createFS(config); - String fileName = generateBlobName(); - BlockBlobClient blobClient = rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(fileName) - .getBlockBlobClient(); - putDirectoryBlob(blobClient); - - assertThrows(FileAlreadyExistsException.class, () -> fs.provider().createDirectory(fs.getPath(fileName))); - } - - @Test - public void createDirVirtualDirAlreadyExists() throws IOException { - AzureFileSystem fs = createFS(config); - String fileName = generateBlobName(); - BlobContainerClient containerClient = rootNameToContainerClient(getDefaultDir(fs)); - BlobClient blobClient = containerClient.getBlobClient(fileName); - AppendBlobClient blobClient2 = containerClient.getBlobClient(fileName + 
fs.getSeparator() + generateBlobName()) - .getAppendBlobClient(); - blobClient2.create(); - fs.provider().createDirectory(fs.getPath(fileName)); - - assertTrue(blobClient.exists()); // We will turn the directory from virtual to concrete - checkBlobIsDir(blobClient); - } - - @Test - public void createDirRoot() { - AzureFileSystem fs = createFS(config); - - assertThrows(IllegalArgumentException.class, () -> fs.provider().createDirectory(fs.getDefaultDirectory())); - } - - @Test - public void createDirNoParent() { - AzureFileSystem fs = createFS(config); - - // Parent doesn't exists. - assertThrows(IOException.class, () -> fs.provider() - .createDirectory(fs.getPath(generateBlobName() + fs.getSeparator() + generateBlobName()))); - } - - @Test - public void createDirInvalidRoot() { - AzureFileSystem fs = createFS(config); - - assertThrows(IOException.class, - () -> fs.provider().createDirectory(fs.getPath("fakeRoot:" + fs.getSeparator() + generateBlobName()))); - } - - @Test - public void createDirAttributes() throws NoSuchAlgorithmException, IOException { - AzureFileSystem fs = createFS(config); - String fileName = generateBlobName(); - AppendBlobClient blobClient = rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(fileName) - .getAppendBlobClient(); - byte[] contentMd5 = MessageDigest.getInstance("MD5").digest(new byte[0]); - FileAttribute[] attributes = new FileAttribute[]{ - new TestFileAttribute<>("fizz", "buzz"), - new TestFileAttribute<>("foo", "bar"), - new TestFileAttribute<>("Content-Type", "myType"), - new TestFileAttribute<>("Content-Disposition", "myDisposition"), - new TestFileAttribute<>("Content-Language", "myLanguage"), - new TestFileAttribute<>("Content-Encoding", "myEncoding"), - new TestFileAttribute<>("Cache-Control", "myControl"), - new TestFileAttribute<>("Content-MD5", contentMd5) - }; - - fs.provider().createDirectory(fs.getPath(fileName), attributes); - BlobProperties props = blobClient.getProperties(); - - assertEquals("buzz", 
props.getMetadata().get("fizz")); - assertEquals("bar", props.getMetadata().get("foo")); - assertFalse(props.getMetadata().containsKey("Content-Type")); - assertFalse(props.getMetadata().containsKey("Content-Disposition")); - assertFalse(props.getMetadata().containsKey("Content-Language")); - assertFalse(props.getMetadata().containsKey("Content-Encoding")); - assertFalse(props.getMetadata().containsKey("Content-MD5")); - assertFalse(props.getMetadata().containsKey("Cache-Control")); - assertEquals("myType", props.getContentType()); - assertEquals("myDisposition", props.getContentDisposition()); - assertEquals("myLanguage", props.getContentLanguage()); - assertEquals("myEncoding", props.getContentEncoding()); - assertArraysEqual(contentMd5, props.getContentMd5()); - assertEquals("myControl", props.getCacheControl()); - } - - @Test - public void createDirFSClosed() throws IOException { - AzureFileSystem fs = createFS(config); - Path path = fs.getPath(generateBlobName()); - fs.close(); - - assertThrows(ClosedFileSystemException.class, () -> fs.provider().createDirectory(path)); - } - - @ParameterizedTest - @CsvSource(value = {"false,false,false", "true,true,false", "true,false,true", "true,false,false"}) - public void copySource(boolean sourceIsDir, boolean sourceIsVirtual, boolean sourceEmpty) throws IOException { - AzureFileSystem fs = createFS(config); - basicSetupForCopyTest(fs); - - // Generate resource names. - // Don't use default directory to ensure we honor the root. 
- AppendBlobClient sourceChildClient = null; - AppendBlobClient destChildClient = null; - - // Create resources as necessary - if (sourceIsDir) { - if (!sourceIsVirtual) { - fs.provider().createDirectory(sourcePath); - } - if (!sourceEmpty) { - String sourceChildName = generateBlobName(); - sourceChildClient = ((AzurePath) sourcePath.resolve(sourceChildName)).toBlobClient() - .getAppendBlobClient(); - sourceChildClient.create(); - destChildClient = ((AzurePath) destPath.resolve(sourceChildName)).toBlobClient() - .getAppendBlobClient(); - } - } else { // source is file - sourceClient.upload(DATA.getDefaultBinaryData()); - } - - fs.provider().copy(sourcePath, destPath, StandardCopyOption.COPY_ATTRIBUTES); - - // Check the source still exists. - if (!sourceIsVirtual) { - assertTrue(sourceClient.exists()); - } else { - assertTrue(new AzureResource(sourcePath).checkDirectoryExists()); - } - - // If the source was a file, check that the destination data matches the source. - if (!sourceIsDir) { - ByteArrayOutputStream outStream = new ByteArrayOutputStream(); - destinationClient.download(outStream); - assertArraysEqual(DATA.getDefaultBytes(), outStream.toByteArray()); - } else { - // Check that the destination directory is concrete. - assertTrue(destinationClient.exists()); - checkBlobIsDir(destinationClient); - if (!sourceEmpty) { - // Check that source child still exists and was not copied to the destination. 
- assertTrue(sourceChildClient.exists()); - assertFalse(destChildClient.exists()); - } - } - } - - @ParameterizedTest - @CsvSource(value = {"false,false", "true,false", "true,true"}) - public void copyDestination(boolean destinationExists, boolean destinationIsDir) throws IOException { - AzureFileSystem fs = createFS(config); - basicSetupForCopyTest(fs); - - // Create resources as necessary - sourceClient.upload(DATA.getDefaultBinaryData()); - if (destinationExists) { - if (destinationIsDir) { - fs.provider().createDirectory(destPath); - } else { // source is file - destinationClient.upload(new ByteArrayInputStream(getRandomByteArray(20)), 20); - } - } - fs.provider().copy(sourcePath, destPath, StandardCopyOption.COPY_ATTRIBUTES, - StandardCopyOption.REPLACE_EXISTING); - - assertTrue(sourceClient.exists()); - ByteArrayOutputStream outStream = new ByteArrayOutputStream(); - destinationClient.download(outStream); - assertArraysEqual(DATA.getDefaultBytes(), outStream.toByteArray()); - } - - @ParameterizedTest - @ValueSource(booleans = {true, false}) - public void copyNonEmptyDest(boolean destinationIsVirtual) throws IOException { - AzureFileSystem fs = createFS(config); - basicSetupForCopyTest(fs); - - // Create resources as necessary - sourceClient.upload(new ByteArrayInputStream(getRandomByteArray(20)), 20); - if (!destinationIsVirtual) { - fs.provider().createDirectory(destPath); - } - BlobClient destChildClient = ((AzurePath) destPath.resolve(generateBlobName())).toBlobClient(); - destChildClient.upload(DATA.getDefaultBinaryData()); - - // Ensure that even when trying to replace_existing, we still fail. 
- assertThrows(DirectoryNotEmptyException.class, () -> fs.provider().copy(sourcePath, destPath, - StandardCopyOption.COPY_ATTRIBUTES, StandardCopyOption.REPLACE_EXISTING)); - assertTrue(new AzureResource(destPath).checkDirectoryExists()); - } - - @ParameterizedTest - @ValueSource(booleans = {true, false}) - public void copyReplaceExistingFail(boolean destinationIsDir) throws IOException { - // The success case is tested by the "copy destination" test. - // Testing replacing a virtual directory is in the "non empty dest" test as there can be no empty virtual dir. - AzureFileSystem fs = createFS(config); - basicSetupForCopyTest(fs); - - // Create resources as necessary - sourceClient.upload(new ByteArrayInputStream(getRandomByteArray(20)), 20); - if (destinationIsDir) { - fs.provider().createDirectory(destPath); - } else { - destinationClient.upload(DATA.getDefaultBinaryData()); - } - - assertThrows(FileAlreadyExistsException.class, - () -> fs.provider().copy(sourcePath, destPath, StandardCopyOption.COPY_ATTRIBUTES)); - if (destinationIsDir) { - assertTrue(new AzureResource(destPath).checkDirectoryExists()); - } else { - ByteArrayOutputStream outStream = new ByteArrayOutputStream(); - destinationClient.download(outStream); - assertArraysEqual(DATA.getDefaultBytes(), outStream.toByteArray()); - } - } - - @Test - public void copyOptionsFail() { - AzureFileSystem fs = createFS(config); - basicSetupForCopyTest(fs); - - assertThrows(UnsupportedOperationException.class, () -> fs.provider().copy(sourcePath, destPath)); - assertThrows(UnsupportedOperationException.class, () -> fs.provider().copy(sourcePath, destPath, - StandardCopyOption.COPY_ATTRIBUTES, StandardCopyOption.ATOMIC_MOVE)); - } - - @ParameterizedTest - @CsvSource(value = {"1,1", "1,2", "1,3", "2,1", "2,2", "2,3", "3,1", "3,2", "3,3"}) - public void copyDepth(int sourceDepth, int destDepth) throws IOException { - AzureFileSystem fs = createFS(config); - - // Generate resource names. 
- // Don't use default directory to ensure we honor the root. - String rootName = getNonDefaultRootDir(fs); - AzurePath sourcePath = (AzurePath) fs.getPath(rootName, getPathWithDepth(sourceDepth), generateBlobName()); - - String destParent = getPathWithDepth(destDepth); - AzurePath destPath = (AzurePath) fs.getPath(rootName, destParent, generateBlobName()); - - // Generate clients to resources. - BlobClient sourceClient = sourcePath.toBlobClient(); - BlobClient destinationClient = destPath.toBlobClient(); - BlobClient destParentClient = ((AzurePath) destPath.getParent()).toBlobClient(); - - // Create resources as necessary - sourceClient.upload(DATA.getDefaultBinaryData()); - putDirectoryBlob(destParentClient.getBlockBlobClient()); - - fs.provider().copy(sourcePath, destPath, StandardCopyOption.COPY_ATTRIBUTES); - - ByteArrayOutputStream outStream = new ByteArrayOutputStream(); - destinationClient.download(outStream); - assertArraysEqual(DATA.getDefaultBytes(), outStream.toByteArray()); - } - - @Test - public void copyNoParentForDest() throws IOException { - AzureFileSystem fs = createFS(config); - // Generate resource names. - // Don't use default directory to ensure we honor the root. - String rootName = getNonDefaultRootDir(fs); - AzurePath sourcePath = (AzurePath) fs.getPath(rootName, generateBlobName()); - AzurePath destPath = (AzurePath) fs.getPath(rootName, generateBlobName(), generateBlobName()); - - // Generate clients to resources. 
- BlobClient sourceClient = sourcePath.toBlobClient(); - BlobClient destinationClient = destPath.toBlobClient(); - - // Create resources as necessary - sourceClient.upload(new ByteArrayInputStream(getRandomByteArray(20)), 20); - - assertThrows(IOException.class, () -> fs.provider().copy(sourcePath, destPath, - StandardCopyOption.COPY_ATTRIBUTES)); - assertFalse(destinationClient.exists()); - } - - @Test - public void copySourceDoesNotExist() { - AzureFileSystem fs = createFS(config); - basicSetupForCopyTest(fs); - - assertThrows(IOException.class, () -> fs.provider().copy(sourcePath, destPath, - StandardCopyOption.COPY_ATTRIBUTES)); - } - - @Test - public void copyNoRootDir() { - AzureFileSystem fs = createFS(config); - basicSetupForCopyTest(fs); - - // Source root - assertThrows(IllegalArgumentException.class, () -> fs.provider().copy(sourcePath.getRoot(), destPath, - StandardCopyOption.COPY_ATTRIBUTES)); - - // Dest root - assertThrows(IllegalArgumentException.class, () -> fs.provider().copy(sourcePath, destPath.getRoot(), - StandardCopyOption.COPY_ATTRIBUTES)); - } - - @Test - public void copySameFileNoop() { - AzureFileSystem fs = createFS(config); - basicSetupForCopyTest(fs); - - // Even when the source does not exist or COPY_ATTRIBUTES is not specified, this will succeed as no-op - assertDoesNotThrow(() -> fs.provider().copy(sourcePath, sourcePath)); - } - - @Test - public void copyAcrossContainers() throws IOException { - AzureFileSystem fs = createFS(config); - - // Generate resource names. - AzurePath sourcePath = (AzurePath) fs.getPath(getNonDefaultRootDir(fs), generateBlobName()); - AzurePath destPath = (AzurePath) fs.getPath(getDefaultDir(fs), generateBlobName()); - - // Generate clients to resources. 
- BlobClient sourceClient = sourcePath.toBlobClient(); - BlobClient destinationClient = destPath.toBlobClient(); - - // Create resources as necessary - sourceClient.upload(DATA.getDefaultBinaryData()); - fs.provider().copy(sourcePath, destPath, StandardCopyOption.COPY_ATTRIBUTES); - - assertTrue(sourceClient.exists()); - assertTrue(destinationClient.exists()); - } - - @ParameterizedTest - @ValueSource(booleans = {true, false}) - public void copyClosedFS(boolean sourceClosed) throws IOException { - AzureFileSystem fs = createFS(config); - basicSetupForCopyTest(fs); - AzureFileSystem fsDest = createFS(config); - Path destPath = fsDest.getPath(getDefaultDir(fsDest), generateBlobName()); - sourceClient.upload(DATA.getDefaultBinaryData()); - - if (sourceClosed) { - fs.close(); - } else { - fsDest.close(); - } - - assertThrows(ClosedFileSystemException.class, - () -> fs.provider().copy(sourcePath, destPath, StandardCopyOption.COPY_ATTRIBUTES)); - } - - @ParameterizedTest - @ValueSource(booleans = {true, false}) - public void delete(boolean isDir) throws IOException { - AzureFileSystem fs = createFS(config); - AzurePath path = ((AzurePath) fs.getPath(getNonDefaultRootDir(fs), generateBlobName())); - BlockBlobClient blobClient = path.toBlobClient().getBlockBlobClient(); - - if (isDir) { - putDirectoryBlob(blobClient); - } else { - blobClient.upload(DATA.getDefaultBinaryData()); - } - - fs.provider().delete(path); - - assertFalse(blobClient.exists()); - } - - @ParameterizedTest - @ValueSource(booleans = {true, false}) - public void deleteNonEmptyDir(boolean virtual) throws IOException { - AzureFileSystem fs = createFS(config); - AzurePath path = ((AzurePath) fs.getPath(getNonDefaultRootDir(fs), generateBlobName())); - BlockBlobClient blobClient = path.toBlobClient().getBlockBlobClient(); - BlobClient childClient = ((AzurePath) path.resolve(generateBlobName())).toBlobClient(); - - childClient.upload(DATA.getDefaultBinaryData()); - if (!virtual) { - 
putDirectoryBlob(blobClient); - } - - assertThrows(DirectoryNotEmptyException.class, () -> fs.provider().delete(path)); - assertTrue(new AzureResource(path).checkDirectoryExists()); - } - - @Test - public void deleteNoTarget() { - AzureFileSystem fs = createFS(config); - AzurePath path = ((AzurePath) fs.getPath(getNonDefaultRootDir(fs), generateBlobName())); - - assertThrows(NoSuchFileException.class, () -> fs.provider().delete(path)); - } - - @Test - public void deleteDefaultDir() throws IOException { - AzureFileSystem fs = createFS(config); - AzurePath path = ((AzurePath) fs.getPath(generateBlobName())); - BlobClient client = path.toBlobClient(); - - client.upload(DATA.getDefaultBinaryData()); - fs.provider().delete(path); - - assertFalse(client.exists()); - } - - @Test - public void deleteClosedFS() throws IOException { - AzureFileSystem fs = createFS(config); - AzurePath path = ((AzurePath) fs.getPath(getNonDefaultRootDir(fs), generateBlobName())); - BlockBlobClient blobClient = path.toBlobClient().getBlockBlobClient(); - putDirectoryBlob(blobClient); - - fs.close(); - - assertThrows(ClosedFileSystemException.class, () -> fs.provider().delete(path)); - } - - @Test - public void directoryStream() throws IOException { - AzureFileSystem fs = createFS(config); - AzureResource resource = new AzureResource(fs.getPath("a" + generateBlobName())); - resource.getBlobClient().getBlockBlobClient().commitBlockList(Collections.emptyList()); - resource = new AzureResource(fs.getPath("b" + generateBlobName())); - resource.getBlobClient().getBlockBlobClient().commitBlockList(Collections.emptyList()); - - Iterator iterator = fs.provider().newDirectoryStream(fs.getPath(getDefaultDir(fs)), - path -> path.getFileName().toString().startsWith("a")).iterator(); - - assertTrue(iterator.hasNext()); - assertTrue(iterator.next().getFileName().toString().startsWith("a")); - assertFalse(iterator.hasNext()); - } - - @Test - public void directoryStreamInvalidRoot() { - AzureFileSystem fs = 
createFS(config); - - assertThrows(IOException.class, () -> fs.provider().newDirectoryStream(fs.getPath("fakeRoot:"), path -> true)); - } - - @Test - public void directoryStreamClosedFS() throws IOException { - AzureFileSystem fs = createFS(config); - Path path = fs.getPath(getDefaultDir(fs)); - fs.close(); - - assertThrows(ClosedFileSystemException.class, () -> fs.provider().newDirectoryStream(path, null)); - } - - @Test - public void inputStreamDefault() throws IOException { - AzureFileSystem fs = createFS(config); - sourcePath = (AzurePath) fs.getPath(generateBlobName()); - sourceClient = sourcePath.toBlobClient(); - sourceClient.upload(DATA.getDefaultBinaryData()); - - compareInputStreams(fs.provider().newInputStream(sourcePath), DATA.getDefaultInputStream(), - DATA.getDefaultDataSize()); - } - - @ParameterizedTest - @EnumSource(value = StandardOpenOption.class, names = {"APPEND", "CREATE", "CREATE_NEW", "DELETE_ON_CLOSE", - "DSYNC", "SPARSE", "SYNC", "TRUNCATE_EXISTING", "WRITE"}) - public void inputStreamOptionsFail(StandardOpenOption option) { - AzureFileSystem fs = createFS(config); - - // Options are validated before path is validated. 
- assertThrows(UnsupportedOperationException.class, - () -> fs.provider().newInputStream(fs.getPath("foo"), option)); - } - - @Test - public void inputStreamNonFileFailRoot() { - AzureFileSystem fs = createFS(config); - - assertThrows(IllegalArgumentException.class, () -> fs.provider().newInputStream(fs.getPath(getDefaultDir(fs)))); - } - - @Test - public void inputStreamNonFileFailDir() { - AzureFileSystem fs = createFS(config); - BlockBlobClient bc = rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(generateBlobName()) - .getBlockBlobClient(); - putDirectoryBlob(bc); - - assertThrows(IOException.class, () -> fs.provider().newInputStream(fs.getPath(bc.getBlobName()))); - } - - @Test - public void inputStreamNonFileFailNoFile() { - AzureFileSystem fs = createFS(config); - - assertThrows(IOException.class, () -> fs.provider().newInputStream(fs.getPath("foo"))); - } - - @Test - public void inputStreamFSClosed() throws IOException { - AzureFileSystem fs = createFS(config); - AzurePath path = ((AzurePath) fs.getPath(getNonDefaultRootDir(fs), generateBlobName())); - BlockBlobClient blobClient = path.toBlobClient().getBlockBlobClient(); - blobClient.upload(DATA.getDefaultBinaryData()); - - fs.close(); - - assertThrows(ClosedFileSystemException.class, () -> fs.provider().newInputStream(path)); - } - - @Test - public void outputStreamOptionsDefault() throws IOException { - AzureFileSystem fs = createFS(config); - BlockBlobClient bc = rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(generateBlobName()) - .getBlockBlobClient(); - OutputStream nioStream = fs.provider().newOutputStream(fs.getPath(bc.getBlobName())); - - // Defaults should allow us to create a new file. - nioStream.write(DATA.getDefaultBytes()); - nioStream.close(); - - compareInputStreams(bc.openInputStream(), DATA.getDefaultInputStream(), DATA.getDefaultDataSize()); - - // Defaults should allow us to open to an existing file and overwrite the destination. 
- byte[] randomData = getRandomByteArray(100); - nioStream = fs.provider().newOutputStream(fs.getPath(bc.getBlobName())); - nioStream.write(randomData); - nioStream.close(); - - compareInputStreams(bc.openInputStream(), new ByteArrayInputStream(randomData), 100); - } - - @Test - public void outputStreamOptionsCreate() { - // Create works both on creating new and opening existing. We test these scenarios above. - // Here we assert that we cannot create without this option (i.e. you are only allowed to overwrite, not create) - AzureFileSystem fs = createFS(config); - - // Explicitly exclude a create option. - assertThrows(IOException.class, () -> fs.provider().newOutputStream(fs.getPath(generateBlobName()), - StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING)); - } - - @Test - public void outputStreamOptionsCreateNew() throws IOException { - AzureFileSystem fs = createFS(config); - BlockBlobClient bc = rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(generateBlobName()) - .getBlockBlobClient(); - - // Succeed in creating a new file - OutputStream nioStream = fs.provider().newOutputStream(fs.getPath(bc.getBlobName()), - StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING); - nioStream.write(DATA.getDefaultBytes()); - nioStream.close(); - - compareInputStreams(bc.openInputStream(), DATA.getDefaultInputStream(), DATA.getDefaultDataSize()); - - // Fail in overwriting an existing - assertThrows(IOException.class, () -> fs.provider().newOutputStream(fs.getPath(bc.getBlobName()), - StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING)); - } - - @Test - public void outputStreamOptionsMissingRequired() { - AzureFileSystem fs = createFS(config); - - // Missing WRITE - assertThrows(IllegalArgumentException.class, () -> fs.provider().newOutputStream(fs.getPath(generateBlobName()), - StandardOpenOption.TRUNCATE_EXISTING)); - - // Missing TRUNCATE_EXISTING and CREATE_NEW - 
assertThrows(IllegalArgumentException.class, () -> fs.provider().newOutputStream(fs.getPath(generateBlobName()), - StandardOpenOption.WRITE)); - - // Missing only TRUNCATE_EXISTING - assertDoesNotThrow(() -> fs.provider().newOutputStream(fs.getPath(generateBlobName()), StandardOpenOption.WRITE, - StandardOpenOption.CREATE_NEW)); - - // Missing only CREATE_NEW - assertDoesNotThrow(() -> fs.provider().newOutputStream(fs.getPath(generateBlobName()), StandardOpenOption.WRITE, - StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.CREATE)); - } - - @ParameterizedTest - @EnumSource(value = StandardOpenOption.class, names = {"APPEND", "DELETE_ON_CLOSE", "DSYNC", "READ", "SPARSE", - "SYNC"}) - public void outputStreamOptionsInvalid(StandardOpenOption option) { - AzureFileSystem fs = createFS(config); - - assertThrows(UnsupportedOperationException.class, () -> fs.provider().newOutputStream( - fs.getPath(generateBlobName()), option, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING)); - } - - @EnabledIf("com.azure.storage.blob.nio.BlobNioTestBase#liveOnly") - @ParameterizedTest - @CsvSource(value = {"60,0", "150,3"}) - public void outputStreamFileSystemConfig(int dataSize, int blockCount) throws IOException { - config.put(AzureFileSystem.AZURE_STORAGE_UPLOAD_BLOCK_SIZE, 50L); - config.put(AzureFileSystem.AZURE_STORAGE_PUT_BLOB_THRESHOLD, 100L); - AzureFileSystem fs = createFS(config); - BlockBlobClient bc = rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(generateBlobName()) - .getBlockBlobClient(); - OutputStream nioStream = fs.provider().newOutputStream(fs.getPath(bc.getBlobName())); - byte[] data = getRandomByteArray(dataSize); - - nioStream.write(data); - nioStream.close(); - - assertEquals(blockCount, bc.listBlocks(BlockListType.COMMITTED).getCommittedBlocks().size()); - } - - @Test - public void outputStreamOpenDirectoryFail() { - AzureFileSystem fs = createFS(config); - BlockBlobClient bc = 
rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(generateBlobName()) - .getBlockBlobClient(); - putDirectoryBlob(bc); - - assertThrows(IOException.class, () -> fs.provider().newOutputStream(fs.getPath(bc.getBlobName()))); - } - - @Test - public void outputStreamClosedFS() throws IOException { - AzureFileSystem fs = createFS(config); - Path path = fs.getPath(generateBlobName()); - - fs.close(); - - assertThrows(ClosedFileSystemException.class, () -> fs.provider().newOutputStream(path)); - } - - @Test - public void byteChannelDefault() throws IOException { - AzureFileSystem fs = createFS(config); - Path path = fs.getPath(generateBlobName()); - Files.createFile(path); - - SeekableByteChannel channel = fs.provider().newByteChannel(path, null); - - // This indicates the channel is open in read mode, which is the default - assertDoesNotThrow(() -> channel.read(ByteBuffer.allocate(1))); - } - - @ParameterizedTest - @EnumSource(value = StandardOpenOption.class, names = {"APPEND", "DELETE_ON_CLOSE", "DSYNC", "SPARSE", "SYNC"}) - public void byteChannelOptionsFail(StandardOpenOption option) { - AzureFileSystem fs = createFS(config); - - // Options are validated before path is validated. 
- assertThrows(UnsupportedOperationException.class, - () -> fs.provider().newByteChannel(fs.getPath("foo"), new HashSet<>(Arrays.asList(option)))); - } - - @Test - public void byteChannelReadNonFileFailRoot() { - AzureFileSystem fs = createFS(config); - - assertThrows(IllegalArgumentException.class, - () -> fs.provider().newByteChannel(fs.getPath(getDefaultDir(fs)), null)); - } - - @Test - public void byteChannelReadFileFailDir() { - AzureFileSystem fs = createFS(config); - BlockBlobClient bc = rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(generateBlobName()) - .getBlockBlobClient(); - putDirectoryBlob(bc); - - assertThrows(IOException.class, () -> fs.provider().newByteChannel(fs.getPath(bc.getBlobName()), null)); - } - - @Test - public void byteChannelReadNonFileFailNoFile() { - AzureFileSystem fs = createFS(config); - - assertThrows(IOException.class, () -> fs.provider().newByteChannel(fs.getPath("foo"), null)); - } - - @Test - public void byteChannelFSClosed() throws IOException { - AzureFileSystem fs = createFS(config); - AzurePath path = ((AzurePath) fs.getPath(getNonDefaultRootDir(fs), generateBlobName())); - path.toBlobClient().getBlockBlobClient().upload(DATA.getDefaultBinaryData()); - - fs.close(); - - assertThrows(ClosedFileSystemException.class, () -> fs.provider().newByteChannel(path, null)); - } - - @Test - public void byteChannelOptionsCreate() throws IOException { - AzureFileSystem fs = createFS(config); - BlockBlobClient bc = rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(generateBlobName()) - .getBlockBlobClient(); - - // There are no default options for write as read is the default for channel. We must specify all required. - SeekableByteChannel nioChannel = fs.provider().newByteChannel(fs.getPath(bc.getBlobName()), - new HashSet<>(Arrays.asList(StandardOpenOption.WRITE, StandardOpenOption.CREATE, - StandardOpenOption.TRUNCATE_EXISTING))); - - // Create should allow us to create a new file. 
- nioChannel.write(DATA.getDefaultData().duplicate()); - nioChannel.close(); - - compareInputStreams(bc.openInputStream(), DATA.getDefaultInputStream(), DATA.getDefaultDataSize()); - - // Explicitly exclude a create option. - assertThrows(IOException.class, () -> fs.provider().newOutputStream(fs.getPath(generateBlobName()), - StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING)); - } - - @Test - public void byteChannelOptionsCreateNew() throws IOException { - AzureFileSystem fs = createFS(config); - BlockBlobClient bc = rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(generateBlobName()) - .getBlockBlobClient(); - - // Succeed in creating a new file - SeekableByteChannel nioChannel = fs.provider().newByteChannel(fs.getPath(bc.getBlobName()), - new HashSet<>(Arrays.asList(StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE, - StandardOpenOption.TRUNCATE_EXISTING))); - nioChannel.write(DATA.getDefaultData().duplicate()); - nioChannel.close(); - - compareInputStreams(bc.openInputStream(), DATA.getDefaultInputStream(), DATA.getDefaultDataSize()); - - // Fail in overwriting an existing file - assertThrows(IOException.class, () -> fs.provider().newByteChannel(fs.getPath(bc.getBlobName()), - new HashSet<>(Arrays.asList(StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE, - StandardOpenOption.TRUNCATE_EXISTING)))); - } - - @Test - public void byteChannelFileAttributes() throws NoSuchAlgorithmException, IOException { - AzureFileSystem fs = createFS(config); - BlockBlobClient bc = rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(generateBlobName()) - .getBlockBlobClient(); - byte[] contentMd5 = MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes()); - FileAttribute[] attributes = new FileAttribute[]{ - new TestFileAttribute<>("fizz", "buzz"), - new TestFileAttribute<>("foo", "bar"), - new TestFileAttribute<>("Content-Type", "myType"), - new TestFileAttribute<>("Content-Disposition", "myDisposition"), - new 
TestFileAttribute<>("Content-Language", "myLanguage"), - new TestFileAttribute<>("Content-Encoding", "myEncoding"), - new TestFileAttribute<>("Cache-Control", "myControl"), - new TestFileAttribute<>("Content-MD5", contentMd5) - }; - - SeekableByteChannel nioChannel = fs.provider().newByteChannel(fs.getPath(bc.getBlobName()), - new HashSet<>(Arrays.asList(StandardOpenOption.CREATE_NEW, - StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING)), attributes); - nioChannel.write(DATA.getDefaultData().duplicate()); - nioChannel.close(); - BlobProperties props = bc.getProperties(); - - compareInputStreams(bc.openInputStream(), DATA.getDefaultInputStream(), DATA.getDefaultDataSize()); - assertEquals("buzz", props.getMetadata().get("fizz")); - assertEquals("bar", props.getMetadata().get("foo")); - assertFalse(props.getMetadata().containsKey("Content-Type")); - assertFalse(props.getMetadata().containsKey("Content-Disposition")); - assertFalse(props.getMetadata().containsKey("Content-Language")); - assertFalse(props.getMetadata().containsKey("Content-Encoding")); - assertFalse(props.getMetadata().containsKey("Content-MD5")); - assertFalse(props.getMetadata().containsKey("Cache-Control")); - assertEquals("myType", props.getContentType()); - assertEquals("myDisposition", props.getContentDisposition()); - assertEquals("myLanguage", props.getContentLanguage()); - assertEquals("myEncoding", props.getContentEncoding()); - assertArraysEqual(contentMd5, props.getContentMd5()); - assertEquals("myControl", props.getCacheControl()); - } - - @ParameterizedTest - @ValueSource(booleans = {true, false}) - public void byteChannelFileAttrNullEmpty(boolean isNull) throws IOException { - AzureFileSystem fs = createFS(config); - BlockBlobClient bc = rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(generateBlobName()) - .getBlockBlobClient(); - ByteBuffer data = DATA.getDefaultData().duplicate(); - - SeekableByteChannel nioChannel = 
fs.provider().newByteChannel(fs.getPath(bc.getBlobName()), - new HashSet<>(Arrays.asList(StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE, - StandardOpenOption.TRUNCATE_EXISTING)), isNull ? null : new FileAttribute[0]); - assertDoesNotThrow(() -> nioChannel.write(data)); - assertDoesNotThrow(nioChannel::close); - } - - @Test - public void byteChannelWriteOptionsMissingRequired() { - AzureFileSystem fs = createFS(config); - - // Missing WRITE - assertThrows(UnsupportedOperationException.class, () -> fs.provider().newByteChannel( - fs.getPath(generateBlobName()), new HashSet<>(Arrays.asList(StandardOpenOption.CREATE_NEW, - StandardOpenOption.TRUNCATE_EXISTING)))); - - // Missing TRUNCATE_EXISTING and CREATE_NEW - assertThrows(IllegalArgumentException.class, () -> fs.provider().newByteChannel(fs.getPath(generateBlobName()), - new HashSet<>(Arrays.asList(StandardOpenOption.WRITE, StandardOpenOption.CREATE)))); - - // Missing TRUNCATE_EXISTING - assertDoesNotThrow(() -> fs.provider().newByteChannel(fs.getPath(generateBlobName()), - new HashSet<>(Arrays.asList(StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW)))); - - assertDoesNotThrow(() -> fs.provider().newByteChannel(fs.getPath(generateBlobName()), new HashSet<>( - Arrays.asList(StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.CREATE)))); - } - - @ParameterizedTest - @EnumSource(value = StandardOpenOption.class, names = {"APPEND", "DELETE_ON_CLOSE", "DSYNC", "READ", "SPARSE", - "SYNC"}) - public void byteChannelOptionsInvalid(StandardOpenOption option) { - AzureFileSystem fs = createFS(config); - - assertThrows(UnsupportedOperationException.class, () -> fs.provider().newOutputStream( - fs.getPath(generateBlobName()), option, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING)); - } - - @EnabledIf("com.azure.storage.blob.nio.BlobNioTestBase#liveOnly") - @ParameterizedTest - @CsvSource(value = {"60,0", "150,3"}) - public void byteChannelFileSystemConfig(int 
dataSize, int blockCount) throws IOException { - config.put(AzureFileSystem.AZURE_STORAGE_UPLOAD_BLOCK_SIZE, 50L); - config.put(AzureFileSystem.AZURE_STORAGE_PUT_BLOB_THRESHOLD, 100L); - AzureFileSystem fs = createFS(config); - BlockBlobClient bc = rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(generateBlobName()) - .getBlockBlobClient(); - SeekableByteChannel nioChannel = fs.provider().newByteChannel(fs.getPath(bc.getBlobName()), - new HashSet<>(Arrays.asList(StandardOpenOption.WRITE, StandardOpenOption.CREATE, - StandardOpenOption.TRUNCATE_EXISTING))); - - nioChannel.write(getRandomData(dataSize)); - nioChannel.close(); - - assertEquals(blockCount, bc.listBlocks(BlockListType.COMMITTED).getCommittedBlocks().size()); - } - - @Test - public void byteChannelOpenDirectoryFail() { - AzureFileSystem fs = createFS(config); - BlockBlobClient bc = rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(generateBlobName()) - .getBlockBlobClient(); - putDirectoryBlob(bc); - - assertThrows(IOException.class, () -> fs.provider().newByteChannel(fs.getPath(bc.getBlobName()), - new HashSet<>(Arrays.asList(StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW)))); - } - - @Test - public void byteChannelClosedFS() throws IOException { - AzureFileSystem fs = createFS(config); - Path path = fs.getPath(generateBlobName()); - - fs.close(); - - assertThrows(ClosedFileSystemException.class, () -> fs.provider().newByteChannel(path, null)); - } - - @Test - public void checkAccess() throws IOException { - AzureFileSystem fs = createFS(config); - Path path = fs.getPath(generateBlobName()); - fs.provider().newOutputStream(path).close(); - - assertDoesNotThrow(() -> fs.provider().checkAccess(path)); - } - - @Test - public void checkAccessRoot() { - AzureFileSystem fs = createFS(config); - Path path = fs.getPath(getDefaultDir(fs)); - - assertDoesNotThrow(() -> fs.provider().checkAccess(path)); - } - - @ParameterizedTest - @EnumSource(value = AccessMode.class, names = {"READ", 
"WRITE", "EXECUTE"}) - public void checkAccessAccessDenied(AccessMode mode) throws IOException { - AzureFileSystem fs = createFS(config); - Path path = fs.getPath(generateBlobName()); - fs.provider().newOutputStream(path).close(); - - assertThrows(AccessDeniedException.class, () -> fs.provider().checkAccess(path, mode)); - } - - @Test - public void checkAccessIOException() throws IOException { - AzureFileSystem fs = createFS(config); - Path path = fs.getPath(generateBlobName()); - fs.provider().newOutputStream(path).close(); - fs.close(); - - config = initializeConfigMap(new CheckAccessIoExceptionPolicy()); - fs = createFS(config); - path = fs.getPath(path.toString()); - - AzureFileSystem finalFs = fs; - Path finalPath = path; - IOException e = assertThrows(IOException.class, () -> finalFs.provider().checkAccess(finalPath)); - assertFalse(e instanceof NoSuchFileException); - } - - class CheckAccessIoExceptionPolicy implements HttpPipelinePolicy { - @Override - public Mono process(HttpPipelineCallContext httpPipelineCallContext, - HttpPipelineNextPolicy httpPipelineNextPolicy) { - HttpRequest request = httpPipelineCallContext.getHttpRequest(); - // GetProperties call to blob - if (request.getUrl().getPath().split("/").length == 3 && request.getHttpMethod() == (HttpMethod.HEAD)) { - return Mono.just(new MockHttpResponse(request, 403, new HttpHeaders() - .set("x-ms-error-code", BlobErrorCode.AUTHORIZATION_FAILURE.toString()))); - } else { - return httpPipelineNextPolicy.process(); - } - } - } - - @Test - public void checkAccessNoFile() { - AzureFileSystem fs = createFS(config); - Path path = fs.getPath(generateBlobName()); - - assertThrows(NoSuchFileException.class, () -> fs.provider().checkAccess(path)); - } - - @Test - public void checkAccessFSClosed() throws IOException { - AzureFileSystem fs = createFS(config); - Path path = fs.getPath(generateBlobName()); - fs.provider().newOutputStream(path).close(); - fs.close(); - - 
assertThrows(ClosedFileSystemException.class, () -> fs.provider().checkAccess(path)); - } - - @ParameterizedTest - @ValueSource(classes = {BasicFileAttributeView.class, AzureBasicFileAttributeView.class, - AzureBlobFileAttributeView.class}) - public void getAttributeView(Class type) { - Class expected = type == AzureBlobFileAttributeView.class - ? AzureBlobFileAttributeView.class : BasicFileAttributeView.class; - AzureFileSystem fs = createFS(config); - - // No path validation is expected for getting the view - assertInstanceOf(expected, fs.provider().getFileAttributeView(fs.getPath("path"), type)); - } - - @Test - public void getAttributeViewFail() { - AzureFileSystem fs = createFS(config); - - // No path validation is expected for getting the view - assertNull(fs.provider().getFileAttributeView(fs.getPath("path"), DosFileAttributeView.class)); - } - - @ParameterizedTest - @ValueSource(classes = {BasicFileAttributes.class, AzureBasicFileAttributes.class, AzureBlobFileAttributes.class}) - public void readAttributes(Class type) throws IOException { - AzureFileSystem fs = createFS(config); - Path path = fs.getPath(generateBlobName()); - fs.provider().newOutputStream(path).close(); - - Class expected = type.equals(AzureBlobFileAttributes.class) - ? 
AzureBlobFileAttributes.class : AzureBasicFileAttributes.class; - - assertInstanceOf(expected, fs.provider().readAttributes(path, type)); - } - - @Test - public void readAttributesDirectory() throws IOException { - AzureFileSystem fs = createFS(config); - Path path = fs.getPath(generateBlobName()); - putDirectoryBlob(new AzureResource(path).getBlobClient().getBlockBlobClient()); - - assertDoesNotThrow(() -> fs.provider().readAttributes(path, BasicFileAttributes.class)); - } - - @Test - public void readAttributesUnsupported() { - AzureFileSystem fs = createFS(config); - - assertThrows(UnsupportedOperationException.class, - () -> fs.provider().readAttributes(fs.getPath("path"), DosFileAttributes.class)); - } - - @Test - public void readAttributesIOException() { - AzureFileSystem fs = createFS(config); - - // Path doesn't exist. - assertThrows(IOException.class, - () -> fs.provider().readAttributes(fs.getPath("path"), BasicFileAttributes.class)); - } - - @Test - public void readAttributesFSClosed() throws IOException { - AzureFileSystem fs = createFS(config); - AzurePath path = ((AzurePath) fs.getPath(getNonDefaultRootDir(fs), generateBlobName())); - path.toBlobClient().getBlockBlobClient().upload(DATA.getDefaultBinaryData()); - - fs.close(); - - assertThrows(ClosedFileSystemException.class, - () -> fs.provider().readAttributes(path, AzureBasicFileAttributes.class)); - } - - @ParameterizedTest - @MethodSource("readAttributesStrParsingSupplier") - public void readAttributesStrParsing(String attrStr, List attrList) throws IOException { - // This test checks that we correctly parse the attribute string and that all the requested attributes are - // represented in the return value. We can also just test a subset of attributes for parsing logic. 
- AzureFileSystem fs = createFS(config); - Path path = fs.getPath(generateBlobName()); - fs.provider().newOutputStream(path).close(); - - Map result = fs.provider().readAttributes(path, attrStr); - for (String attr : attrList) { - assertTrue(result.containsKey(attr)); - } - assertEquals(attrList.size(), result.keySet().size()); - } - - private static Stream readAttributesStrParsingSupplier() { - List basic = Arrays.asList("lastModifiedTime", "creationTime", "isRegularFile", "isDirectory", - "isVirtualDirectory", "isSymbolicLink", "isOther", "size"); - return Stream.of( - Arguments.of("*", basic), - Arguments.of("basic:*", basic), - Arguments.of("azureBasic:*", basic), - Arguments.of("azureBlob:*", Arrays.asList("lastModifiedTime", "creationTime", "eTag", "blobHttpHeaders", - "blobType", "copyId", "copyStatus", "copySource", "copyProgress", "copyCompletionTime", - "copyStatusDescription", "isServerEncrypted", "accessTier", "isAccessTierInferred", "archiveStatus", - "accessTierChangeTime", "metadata", "isRegularFile", "isDirectory", "isVirtualDirectory", - "isSymbolicLink", "isOther", "size")), - Arguments.of("lastModifiedTime,creationTime", Arrays.asList("lastModifiedTime", "creationTime")), - Arguments.of("basic:isRegularFile,isDirectory,isVirtualDirectory", - Arrays.asList("isRegularFile", "isDirectory", "isVirtualDirectory")), - Arguments.of("azureBasic:size", Collections.singletonList("size")), - Arguments.of("azureBlob:eTag,blobHttpHeaders,blobType,copyId", - Arrays.asList("eTag", "blobHttpHeaders", "blobType", "copyId")) - ); - } - - @Test - public void readAttributesStrDirectory() throws IOException { - AzureFileSystem fs = createFS(config); - Path path = fs.getPath(generateBlobName()); - putDirectoryBlob(new AzureResource(path).getBlobClient().getBlockBlobClient()); - - assertDoesNotThrow(() -> fs.provider().readAttributes(path, "creationTime")); - } - - @ParameterizedTest - @ValueSource(strings = {"azureBlob:size:foo", "", "azureBasic:foo"}) - public void 
readAttributesStrIA(String attrStr) { - AzureFileSystem fs = createFS(config); - Path path = fs.getPath(generateBlobName()); - - assertThrows(IllegalArgumentException.class, () -> fs.provider().readAttributes(path, attrStr)); - } - - @Test - public void readAttributesStrInvalidView() { - AzureFileSystem fs = createFS(config); - Path path = fs.getPath(generateBlobName()); - - assertThrows(UnsupportedOperationException.class, () -> fs.provider().readAttributes(path, "foo:size")); - } - - @Test - public void readAttributesStrIOException() { - AzureFileSystem fs = createFS(config); - - // Path doesn't exist - assertThrows(IOException.class, () -> fs.provider().readAttributes(fs.getPath("path"), "basic:creationTime")); - } - - @Test - public void readAtrributesStrClosedFS() throws IOException { - AzureFileSystem fs = createFS(config); - AzurePath path = ((AzurePath) fs.getPath(getNonDefaultRootDir(fs), generateBlobName())); - BlockBlobClient blobClient = path.toBlobClient().getBlockBlobClient(); - blobClient.upload(DATA.getDefaultBinaryData()); - - fs.close(); - - assertThrows(ClosedFileSystemException.class, () -> fs.provider().readAttributes(path, "basic:*")); - } - - @ParameterizedTest - @MethodSource("setAttributesHeadersSupplier") - public void setAttributesHeaders(String cacheControl, String contentDisposition, String contentEncoding, - String contentLanguage, byte[] contentMD5, String contentType) throws IOException { - AzureFileSystem fs = createFS(config); - Path path = fs.getPath(generateBlobName()); - fs.provider().newOutputStream(path).close(); - BlobHttpHeaders headers = new BlobHttpHeaders().setCacheControl(cacheControl) - .setContentDisposition(contentDisposition) - .setContentEncoding(contentEncoding) - .setContentLanguage(contentLanguage) - .setContentMd5(contentMD5) - .setContentType(contentType); - - fs.provider().setAttribute(path, "azureBlob:blobHttpHeaders", headers); - headers = fs.provider().readAttributes(path, 
AzureBlobFileAttributes.class).blobHttpHeaders(); - - assertEquals(cacheControl, headers.getCacheControl()); - assertEquals(contentDisposition, headers.getContentDisposition()); - assertEquals(contentEncoding, headers.getContentEncoding()); - assertEquals(contentLanguage, headers.getContentLanguage()); - assertArraysEqual(contentMD5, headers.getContentMd5()); - assertEquals(contentType, headers.getContentType()); - } - - private static Stream setAttributesHeadersSupplier() throws NoSuchAlgorithmException { - return Stream.of( - Arguments.of(null, null, null, null, null, null), - Arguments.of("control", "disposition", "encoding", "language", - Base64.getEncoder().encode(MessageDigest.getInstance("MD5").digest(DATA.getDefaultBytes())), "type") - ); - } - - @ParameterizedTest - @CsvSource(value = {"null,null,null,null,200", "foo,bar,fizz,buzz,200", "i0,a,i_,a,200"}, nullValues = "null") - public void setAttributesMetadata(String key1, String value1, String key2, String value2) throws IOException { - AzureFileSystem fs = createFS(config); - Path path = fs.getPath(generateBlobName()); - OutputStream os = fs.provider().newOutputStream(path); - os.close(); - - Map metadata = new HashMap<>(); - if (key1 != null && value1 != null) { - metadata.put(key1, value1); - } - if (key2 != null && value2 != null) { - metadata.put(key2, value2); - } - - fs.provider().setAttribute(path, "azureBlob:metadata", metadata); - - assertEquals(metadata, fs.provider().readAttributes(path, AzureBlobFileAttributes.class).metadata()); - } - - @ParameterizedTest - @MethodSource("setAttributesTierSupplier") - public void setAttributesTier(AccessTier tier) throws IOException { - AzureFileSystem fs = createFS(config); - Path path = fs.getPath(generateBlobName()); - OutputStream os = fs.provider().newOutputStream(path); - os.close(); - - fs.provider().setAttribute(path, "azureBlob:tier", tier); - - assertEquals(tier, fs.provider().readAttributes(path, AzureBlobFileAttributes.class).accessTier()); - } - 
- private static Stream setAttributesTierSupplier() { - return Stream.of(AccessTier.HOT, AccessTier.COOL); - } - - @Test - public void setAttributesDirectory() throws IOException { - AzureFileSystem fs = createFS(config); - Path path = fs.getPath(generateBlobName()); - putDirectoryBlob(new AzureResource(path).getBlobClient().getBlockBlobClient()); - - assertDoesNotThrow(() -> fs.provider().setAttribute(path, "azureBlob:tier", AccessTier.COOL)); - } - - @ParameterizedTest - @ValueSource(strings = { - "azureBlob:metadata:foo", // Invalid format - "", // empty - "azureBasic:foo" // Invalid property - }) - public void setAttribuesIA(String attrStr) { - AzureFileSystem fs = createFS(config); - Path path = fs.getPath(generateBlobName()); - - assertThrows(IllegalArgumentException.class, () -> fs.provider().setAttribute(path, attrStr, "Foo")); - } - - @Test - public void setAttributesInvalidView() { - AzureFileSystem fs = createFS(config); - Path path = fs.getPath(generateBlobName()); - - assertThrows(UnsupportedOperationException.class, () -> fs.provider().setAttribute(path, "foo:size", "foo")); - } - - @Test - public void setAttributesIOException() { - AzureFileSystem fs = createFS(config); - - // Path does not exist - // Covers virtual directory, too - assertThrows(IOException.class, - () -> fs.provider().setAttribute(fs.getPath("path"), "azureBlob:metadata", Collections.emptyMap())); - } - - @Test - public void setAttributesFSClosed() throws IOException { - AzureFileSystem fs = createFS(config); - AzurePath path = ((AzurePath) fs.getPath(getNonDefaultRootDir(fs), generateBlobName())); - BlockBlobClient blobClient = path.toBlobClient().getBlockBlobClient(); - blobClient.upload(DATA.getDefaultBinaryData()); - - fs.close(); - - assertThrows(ClosedFileSystemException.class, - () -> fs.provider().setAttribute(path, "azureBlob:blobHttpHeaders", new BlobHttpHeaders())); - } - - private void basicSetupForCopyTest(FileSystem fs) { - // Generate resource names. 
- // Don't use default directory to ensure we honor the root. - String rootName = getNonDefaultRootDir(fs); - sourcePath = (AzurePath) fs.getPath(rootName, generateBlobName()); - destPath = (AzurePath) fs.getPath(rootName, generateBlobName()); - - // Generate clients to resources. - try { - sourceClient = sourcePath.toBlobClient(); - destinationClient = destPath.toBlobClient(); - } catch (IOException ex) { - throw new UncheckedIOException(ex); - } - } -} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureFileSystemTests.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureFileSystemTests.java deleted file mode 100644 index 0a1048a7247..00000000000 --- a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureFileSystemTests.java +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package com.azure.storage.blob.nio; - -import com.azure.core.credential.AzureSasCredential; -import com.azure.core.util.CoreUtils; -import com.azure.storage.common.sas.AccountSasPermission; -import com.azure.storage.common.sas.AccountSasResourceType; -import com.azure.storage.common.sas.AccountSasService; -import com.azure.storage.common.sas.AccountSasSignatureValues; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.Arguments; -import org.junit.jupiter.params.provider.CsvSource; -import org.junit.jupiter.params.provider.MethodSource; -import org.junit.jupiter.params.provider.ValueSource; - -import java.io.IOException; -import java.net.URI; -import java.nio.file.FileSystem; -import java.nio.file.FileSystemNotFoundException; -import java.nio.file.InvalidPathException; -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; -import java.util.stream.IntStream; -import java.util.stream.Stream; - -import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; - -public class AzureFileSystemTests extends BlobNioTestBase { - private Map config; - - @Override - protected void beforeTest() { - super.beforeTest(); - config = initializeConfigMap(); - } - - // We do not have a meaningful way of testing the configurations for the ServiceClient. 
- @ParameterizedTest - @CsvSource(value = {"1,false,false", "3,false,true", "3,true,false", "3,true,true"}) - public void create(int numContainers, boolean createContainers, boolean sasToken) throws IOException { - List containerNames = IntStream.range(0, numContainers) - .mapToObj(i -> generateContainerName()) - .collect(Collectors.toList()); - config.put(AzureFileSystem.AZURE_STORAGE_FILE_STORES, CoreUtils.stringJoin(",", containerNames)); - if (!sasToken) { - config.put(AzureFileSystem.AZURE_STORAGE_SHARED_KEY_CREDENTIAL, ENV.getPrimaryAccount().getCredential()); - } else { - config.put(AzureFileSystem.AZURE_STORAGE_SAS_TOKEN_CREDENTIAL, new AzureSasCredential( - primaryBlobServiceClient.generateAccountSas( - new AccountSasSignatureValues(testResourceNamer.now().plusDays(2), - AccountSasPermission.parse("rwcdl"), new AccountSasService().setBlobAccess(true), - new AccountSasResourceType().setContainer(true))))); - } - - AzureFileSystem fileSystem = new AzureFileSystem(new AzureFileSystemProvider(), - ENV.getPrimaryAccount().getBlobEndpoint(), config); - - - List actualContainerNames = new ArrayList<>(); - fileSystem.getFileStores().forEach(fs -> actualContainerNames.add(fs.name())); - - assertEquals(containerNames.size(), actualContainerNames.size()); - for (String containerName : containerNames) { - assertTrue(actualContainerNames.contains(containerName)); - assertTrue(primaryBlobServiceClient.getBlobContainerClient(containerName).exists()); - } - assertEquals(primaryBlobServiceAsyncClient.getAccountUrl(), fileSystem.getFileSystemUrl()); - } - - @ParameterizedTest - @CsvSource(value = {"true,false", "false,true"}) - public void createFailIa(boolean credential, boolean containers) { - if (containers) { - config.put(AzureFileSystem.AZURE_STORAGE_FILE_STORES, generateContainerName()); - } - if (credential) { - config.put(AzureFileSystem.AZURE_STORAGE_SHARED_KEY_CREDENTIAL, ENV.getPrimaryAccount().getKey()); - } - - assertThrows(IllegalArgumentException.class, - () 
-> new AzureFileSystem(new AzureFileSystemProvider(), ENV.getPrimaryAccount().getName(), config)); - } - - @Test - public void createFailContainerCheck() { - config.put(AzureFileSystem.AZURE_STORAGE_SAS_TOKEN_CREDENTIAL, new AzureSasCredential( - primaryBlobServiceClient.generateAccountSas( - new AccountSasSignatureValues(testResourceNamer.now().plusDays(2), - AccountSasPermission.parse("d"), new AccountSasService().setBlobAccess(true), - new AccountSasResourceType().setContainer(true))))); - config.put(AzureFileSystem.AZURE_STORAGE_FILE_STORES, generateContainerName()); - - assertThrows(IOException.class, () -> new AzureFileSystem(new AzureFileSystemProvider(), - ENV.getPrimaryAccount().getBlobEndpoint(), config)); - } - - @Test - public void createSkipContainerCheck() { - config.put(AzureFileSystem.AZURE_STORAGE_SAS_TOKEN_CREDENTIAL, new AzureSasCredential( - primaryBlobServiceClient.generateAccountSas( - new AccountSasSignatureValues(testResourceNamer.now().plusDays(2), - AccountSasPermission.parse("d"), new AccountSasService().setBlobAccess(true), - new AccountSasResourceType().setContainer(true))))); - config.put(AzureFileSystem.AZURE_STORAGE_FILE_STORES, generateContainerName()); - config.put(AzureFileSystem.AZURE_STORAGE_SKIP_INITIAL_CONTAINER_CHECK, true); - - // This would fail, but we skipped the check - assertDoesNotThrow(() -> - new AzureFileSystem(new AzureFileSystemProvider(), ENV.getPrimaryAccount().getBlobEndpoint(), config)); - } - - @Test - public void close() throws IOException { - AzureFileSystemProvider provider = new AzureFileSystemProvider(); - URI uri = getFileSystemUri(); - config.put(AzureFileSystem.AZURE_STORAGE_FILE_STORES, generateContainerName()); - config.put(AzureFileSystem.AZURE_STORAGE_SHARED_KEY_CREDENTIAL, ENV.getPrimaryAccount().getCredential()); - FileSystem fileSystem = provider.newFileSystem(uri, config); - fileSystem.close(); - - assertFalse(fileSystem.isOpen()); - assertThrows(FileSystemNotFoundException.class, () -> 
provider.getFileSystem(uri)); - assertDoesNotThrow(fileSystem::close); // Closing twice should have no effect - - // Creating a file system with the same ID after the old one is closed should work. - assertDoesNotThrow(() -> provider.newFileSystem(uri, config)); - assertNotNull(provider.getFileSystem(uri)); - } - - @ParameterizedTest - @MethodSource("getPathSupplier") - public void getPath(String path0, List pathArr, String resultStr) { - String[] arr = pathArr == null ? null : Arrays.copyOf(pathArr.toArray(), pathArr.size(), String[].class); - - assertEquals(resultStr, createFS(config).getPath(path0, arr).toString()); - } - - private static Stream getPathSupplier() { - return Stream.of( - Arguments.of("foo", null, "foo"), - Arguments.of("foo/bar", null, "foo/bar"), - Arguments.of("/foo/", null, "foo"), - Arguments.of("/foo/bar/", null, "foo/bar"), - Arguments.of("foo", Collections.singletonList("bar"), "foo/bar"), - Arguments.of("foo/bar/fizz/buzz", null, "foo/bar/fizz/buzz"), - Arguments.of("foo", Arrays.asList("bar", "fizz", "buzz"), "foo/bar/fizz/buzz"), - Arguments.of("foo", Arrays.asList("bar/fizz", "buzz"), "foo/bar/fizz/buzz"), - Arguments.of("foo", Arrays.asList("bar", "fizz/buzz"), "foo/bar/fizz/buzz"), - Arguments.of("root:/foo", null, "root:/foo"), - Arguments.of("root:/foo", Collections.singletonList("bar"), "root:/foo/bar"), - Arguments.of("///root:////foo", Arrays.asList("//bar///fizz//", "buzz"), "root:/foo/bar/fizz/buzz"), - Arguments.of("root:/", null, "root:"), - Arguments.of("", null, "") - ); - } - - @ParameterizedTest - @ValueSource(strings = {"root1:/dir1:", "root1:/d:ir", ":root1:/dir", "root1::/dir", "root:1/dir", "root1/dir:", - "root1:/foo/bar/dir:"}) - public void getPathFail(String path) { - assertThrows(InvalidPathException.class, () -> createFS(config).getPath(path)); - } - - @Test - public void isReadOnlyGetSeparator() { - AzureFileSystem fs = createFS(config); - - assertFalse(fs.isReadOnly()); - assertEquals("/", fs.getSeparator()); 
- } - - @Test - public void getRootDirsGetFileStores() { - AzureFileSystem fs = createFS(config); - String[] containers = ((String) config.get(AzureFileSystem.AZURE_STORAGE_FILE_STORES)).split(","); - List fileStoreNames = new ArrayList<>(); - fs.getFileStores().forEach(store -> fileStoreNames.add(store.name())); - List rootDirectories = new ArrayList<>(); - fs.getRootDirectories().forEach(rootDirectories::add); - - assertEquals(containers.length, rootDirectories.size()); - assertEquals(containers.length, fileStoreNames.size()); - for (String container : containers) { - assertTrue(rootDirectories.contains(fs.getPath(container + ":"))); - assertTrue(fileStoreNames.contains(container)); - } - } - - @ParameterizedTest - @CsvSource(value = {"basic,true", "azureBasic,true", "azureBlob,true", "posix,false"}) - public void supportsFileAttributeView(String view, boolean supports) { - assertEquals(supports, createFS(config).supportedFileAttributeViews().contains(view)); - } - - @Test - public void getDefaultDirectory() { - AzureFileSystem fs = createFS(config); - - assertEquals( - ((String) config.get(AzureFileSystem.AZURE_STORAGE_FILE_STORES)).split(",")[0] + AzurePath.ROOT_DIR_SUFFIX, - fs.getDefaultDirectory().toString()); - } -} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzurePathTests.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzurePathTests.java deleted file mode 100644 index d88f45ee36c..00000000000 --- a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzurePathTests.java +++ /dev/null @@ -1,285 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package com.azure.storage.blob.nio; - -import com.azure.storage.blob.BlobClient; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.parallel.ResourceLock; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.CsvSource; -import org.junit.jupiter.params.provider.ValueSource; - -import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; -import java.nio.file.FileSystemNotFoundException; -import java.nio.file.FileSystems; -import java.nio.file.Path; -import java.util.Iterator; -import java.util.Map; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertNotEquals; -import static org.junit.jupiter.api.Assertions.assertThrows; - -@ResourceLock("AzurePathTest") -public class AzurePathTests extends BlobNioTestBase { - private AzureFileSystem fs; - - // Just need one fs instance for creating the paths. 
- @Override - public void beforeTest() { - super.beforeTest(); - Map config = initializeConfigMap(); - config.put(AzureFileSystem.AZURE_STORAGE_SHARED_KEY_CREDENTIAL, ENV.getPrimaryAccount().getCredential()); - config.put(AzureFileSystem.AZURE_STORAGE_FILE_STORES, "jtcazurepath1,jtcazurepath2"); - try { - fs = (AzureFileSystem) new AzureFileSystemProvider().newFileSystem( - new URI("azb://?endpoint=" + ENV.getPrimaryAccount().getBlobEndpoint()), config); - } catch (IOException | URISyntaxException e) { - throw new RuntimeException(e); - } - } - - @Test - public void getFileSystem() { - Path path = fs.getPath("Foo"); - - assertEquals(fs, path.getFileSystem()); - } - - @ParameterizedTest - @CsvSource(value = {"foo,false,null", "foo/bar,false,null", "jtcazurepath1/foo,false,null", - "jtcazurepath1:/foo,true,jtcazurepath1:/", "fakeroot:/foo,true,fakeroot:/", - "jtcazurepath2:/,true,jtcazurepath2:/", "jtcazurepath2:,true,jtcazurepath2:/", "'',false,null"}, - nullValues = "null") - public void isAbsoluteGetRoot(String path, boolean absolute, String root) { - assertEquals(absolute, fs.getPath(path).isAbsolute()); - assertEquals((root == null ? null : fs.getPath(root)), fs.getPath(path).getRoot()); - } - - @ParameterizedTest - @CsvSource(value = {"root:/,null,null,0", "root:/foo,foo,root:,1", "root:/foo/bar,bar,root:/foo,2", - "foo,foo,null,1", "foo/,foo,null,1", "/foo,foo,null,1", "foo/bar,bar,foo,2", "foo/bar/baz,baz,foo/bar,3", - "foo/../bar/baz,baz,foo/../bar/,4", "foo/..,..,foo/,2", "foo/./bar,bar,foo/./,3", "foo/bar/.,.,foo/bar/,3", - "'','',null,1"}, nullValues = "null") - public void getFileNameGetParentGetNameCount(String path, String fileName, String parent, int nameCount) { - assertEquals((fileName == null ? null : fs.getPath(fileName)), fs.getPath(path).getFileName()); - assertEquals((parent == null ? 
null : fs.getPath(parent)), fs.getPath(path).getParent()); - assertEquals(nameCount, fs.getPath(path).getNameCount()); - } - - @ParameterizedTest - @CsvSource(value = {"0,foo", "1,bar", "2,baz"}) - public void getName(int index, String name) { - assertEquals(fs.getPath("root:/foo/bar/baz").getName(index), fs.getPath(name)); - assertEquals(fs.getPath("foo/bar/baz").getName(index), fs.getPath(name)); - } - - @ParameterizedTest - @ValueSource(ints = {-1, 2}) - public void getNameFail(int index) { - assertThrows(IllegalArgumentException.class, () -> fs.getPath("foo/bar").getName(index)); - - // Special case with no name elements - assertThrows(IllegalArgumentException.class, () -> fs.getPath("root:/").getName(0)); - } - - @ParameterizedTest - @CsvSource(value = {"0,1,foo", "0,3,foo/bar/fizz", "0,5,foo/bar/fizz/buzz/dir", "1,2,bar", "1,4,bar/fizz/buzz", - "1,5,bar/fizz/buzz/dir", "4,5,dir"}) - public void subPath(int begin, int end, String resultPath) { - assertEquals(fs.getPath(resultPath), fs.getPath("root:/foo/bar/fizz/buzz/dir").subpath(begin, end)); - assertEquals(fs.getPath(resultPath), fs.getPath("foo/bar/fizz/buzz/dir").subpath(begin, end)); - } - - // The javadocs define an equivalence between these two methods in special cases. 
- @Test - public void subPathGetParent() { - Path path = fs.getPath("foo/bar/fizz/buzz"); - - assertEquals(path.getParent(), path.subpath(0, path.getNameCount() - 1)); - } - - @ParameterizedTest - @CsvSource(value = {"-1,1", "5,5", "3,3", "3,1", "3,6"}) - public void subPathFail(int begin, int end) { - assertThrows(IllegalArgumentException.class, () -> fs.getPath("foo/bar/fizz/buzz/dir").subpath(begin, end)); - } - - @ParameterizedTest - @CsvSource(value = {"root:/foo,foo,false", "foo,root:/foo,false", "foo,foo,true", "root:/foo,root:/foo,true", - "root2:/foo,root:/foo,false", "root:/foo,root2:/foo,false", "foo/bar,foo,true", "foo/bar,foo/bar,true", - "foo/bar/fizz,foo,true", "foo/bar/fizz,f,false", "foo/bar/fizz,foo/bar/f,false", "foo,foo/bar,false", - "'',foo,false", "foo,'',false"}) - public void startsWith(String path, String otherPath, boolean startsWith) { - assertEquals(startsWith, fs.getPath(path).startsWith(fs.getPath(otherPath))); - assertEquals(startsWith, fs.getPath(path).startsWith(otherPath)); - - // If the paths are not from the same file system, false is always returned - assertFalse(fs.getPath("foo/bar").startsWith(FileSystems.getDefault().getPath("foo/bar"))); - } - - @ParameterizedTest - @CsvSource(value = {"root:/foo,foo,true", "foo,root:/foo,false", "foo,foo,true", "root:/foo,root:/foo,true", - "root2:/foo,root:/foo,false", "root:/foo,root2:/foo,false", "foo/bar,bar,true", "foo/bar,foo/bar,true", - "foo/bar/fizz,fizz,true", "foo/bar/fizz,z,false", "foo/bar/fizz,r/fizz,false", "foo,foo/bar,false", - "'',foo,false", "foo,'',false"}) - public void endsWith(String path, String otherPath, boolean endsWith) { - assertEquals(endsWith, fs.getPath(path).endsWith(fs.getPath(otherPath))); - assertEquals(endsWith, fs.getPath(path).endsWith(otherPath)); - - // If the paths are not from the same file system, false is always returned - assertFalse(fs.getPath("foo/bar").endsWith(FileSystems.getDefault().getPath("foo/bar"))); - } - - @ParameterizedTest - 
@CsvSource(value = {"foo/bar,foo/bar", ".,''", "..,..", "foo/..,''", "foo/bar/..,foo", "foo/../bar,bar", - "foo/./bar,foo/bar", "foo/bar/.,foo/bar", "foo/bar/fizz/../..,foo", "foo/bar/../fizz/.,foo/fizz", - "foo/../..,..", "foo/../../bar,../bar", "root:/foo/bar,root:/foo/bar", "root:/.,root:/", "root:/..,root:/", - "root:/../../..,root:/", "root:/foo/..,root:", "'',''"}) - public void normalize(String path, String resultPath) { - assertEquals(fs.getPath(resultPath), fs.getPath(path).normalize()); - } - - @ParameterizedTest - @CsvSource(value = {"foo/bar,root:/fizz/buzz,root:/fizz/buzz", "root:/foo/bar,root:/fizz/buzz,root:/fizz/buzz", - "foo/bar,'',foo/bar", "foo/bar,fizz/buzz,foo/bar/fizz/buzz", - "foo/bar/..,../../fizz/buzz,foo/bar/../../../fizz/buzz", - "root:/../foo/./,fizz/../buzz,root:/../foo/./fizz/../buzz", "'',foo/bar,foo/bar"}) - public void resolve(String path, String other, String resultPath) { - assertEquals(fs.getPath(resultPath), fs.getPath(path).resolve(fs.getPath(other))); - assertEquals(fs.getPath(resultPath), fs.getPath(path).resolve(other)); - } - - @ParameterizedTest - @CsvSource(value = {"foo,fizz,fizz", "foo/bar,root:/fizz,root:/fizz", "foo/bar,'',foo", "foo,'',''", "'',foo,foo", - "foo/bar,fizz,foo/fizz", "foo/bar/fizz,buzz/dir,foo/bar/buzz/dir", "root:/foo/bar,fizz,root:/foo/fizz", - "root:/foo,fizz,root:/fizz", "root:/,fizz,fizz"}) - public void resolveSibling(String path, String other, String resultPath) { - assertEquals(fs.getPath(resultPath), fs.getPath(path).resolveSibling(fs.getPath(other))); - assertEquals(fs.getPath(resultPath), fs.getPath(path).resolveSibling(other)); - } - - @ParameterizedTest - @CsvSource(value = {"foo/bar,foo/bar/fizz/buzz/,fizz/buzz,true", "foo/bar,foo/bar,'',true", - "root:/foo/bar,root:/foo/bar/fizz,fizz,false", "foo/dir,foo/fizz/buzz,../fizz/buzz,true", - "foo/bar/a/b/c,foo/bar/fizz,../../../fizz,true", "a/b/c,foo/bar/fizz,../../../foo/bar/fizz,true", - "foo/../bar,bar/./fizz,fizz,false", 
"root:,root:/foo/bar,foo/bar,false", "'',foo,foo,true", "foo,'',..,true"}) - public void relativize(String path, String other, String result, boolean equivalence) { - Path p = fs.getPath(path); - Path otherP = fs.getPath(other); - - assertEquals(fs.getPath(result), p.relativize(otherP)); - if (equivalence) { // Only applies when neither path has a root and both are normalized. - assertEquals(otherP, p.relativize(p.resolve(otherP))); - } - } - - @ParameterizedTest - @CsvSource(value = {"root:/foo/bar,foo/bar/fizz/buzz", "foo/bar,root:/foo/bar/fizz"}) - public void relativizeFail(String path, String other) { - assertThrows(IllegalArgumentException.class, () -> fs.getPath(path).relativize(fs.getPath(other))); - } - - @ParameterizedTest - @CsvSource(value = {"root:/foo/bar,root:/foo/bar", "foo/bar,jtcazurepath1:/foo/bar", "'',jtcazurepath1:"}) - public void toUriToAbsolute(String path, String expected) { - assertEquals(expected, fs.getPath(path).toAbsolutePath().toString()); - assertEquals(fs.provider().getScheme() + ":/" + expected, fs.getPath(path).toUri().toString()); - } - - @ParameterizedTest - @ValueSource(strings = {"root:/foo/bar", "foo/bar/fizz/buzz", "foo", "root:/"}) - public void iterator(String path) { - Path p = fs.getPath(path); - Iterator it = p.iterator(); - int i = 0; - - Iterator emptyIt = fs.getPath("").iterator(); - - while (it.hasNext()) { - assertEquals(p.getName(i), it.next()); - i++; - } - - assertEquals("", emptyIt.next().toString()); - assertFalse(emptyIt.hasNext()); - } - - @ParameterizedTest - @CsvSource(value = {"a/b/c,a/b,false", "a/b/c,foo/bar,false", "foo/bar,foo/bar,true", "'',foo,false"}) - public void compareToEquals(String path1, String path2, boolean equals) { - assertEquals(path1.compareTo(path2), fs.getPath(path1).compareTo(fs.getPath(path2))); - assertEquals(equals, fs.getPath(path1).equals(fs.getPath(path2))); - } - - @Test - public void compareToEqualsFails() { - Path path1 = fs.getPath("a/b"); - Path path2 = 
FileSystems.getDefault().getPath("a/b"); - - assertNotEquals(path1, path2); - assertThrows(ClassCastException.class, () -> path1.compareTo(path2)); - } - - @Test - public void getBlobClientRelative() throws IOException { - BlobClient client = ((AzurePath) fs.getPath("foo/bar")).toBlobClient(); - - assertEquals("foo/bar", client.getBlobName()); - assertEquals(rootNameToContainerName(getDefaultDir(fs)), client.getContainerName()); - } - - @Test - public void getBlobClientEmpty() { - assertThrows(IOException.class, () -> ((AzurePath) fs.getPath(getNonDefaultRootDir(fs))).toBlobClient()); - assertThrows(IOException.class, () -> ((AzurePath) fs.getPath("")).toBlobClient()); - } - - @Test - public void getBlobClientAbsolute() throws IOException { - Path path = fs.getPath(getNonDefaultRootDir(fs), "foo/bar"); - BlobClient client = ((AzurePath) path).toBlobClient(); - - assertEquals("foo/bar", client.getBlobName()); - assertEquals(rootNameToContainerName(getNonDefaultRootDir(fs)), client.getContainerName()); - } - - @Test - public void getBlobClientFail() { - // Can't get a client to a nonexistent root/container. - assertThrows(IOException.class, () -> ((AzurePath) fs.getPath("fakeRoot:", "foo/bar")).toBlobClient()); - } - - @ParameterizedTest - @CsvSource(value = { - "://myaccount.blob.core.windows.net/containername/blobname,containername:/blobname", - "://myaccount.blob.core.windows.net/containername/dirname/blobname,containername:/dirname/blobname", - "://myaccount.blob.core.windows.net/containername,containername:", - "://myaccount.blob.core.windows.net/,''", - }) - public void fromBlobUrl(String url, String path) throws URISyntaxException { - // Adjust the parameterized urls to point at real resources - String scheme = ENV.getPrimaryAccount().getBlobEndpoint().startsWith("https") ? 
"https" : "http"; - url = scheme + url; - url = url.replace("myaccount", ENV.getPrimaryAccount().getName()); - url = url.replace("containername", "jtcazurepath1"); - - path = path.replace("myaccount", ENV.getPrimaryAccount().getName()); - path = path.replace("containername", "jtcazurepath1"); - - AzurePath resultPath = AzurePath.fromBlobUrl((AzureFileSystemProvider) fs.provider(), url); - - assertEquals(fs, resultPath.getFileSystem()); - assertEquals(path, resultPath.toString()); - } - - @Test - public void fromBlobUrlNoOpenFileSystem() { - assertThrows(FileSystemNotFoundException.class, () -> AzurePath.fromBlobUrl(new AzureFileSystemProvider(), - "http://myaccount.blob.core.windows.net/container/blob")); - } -} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureResourceTests.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureResourceTests.java deleted file mode 100644 index e1f3eb74708..00000000000 --- a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureResourceTests.java +++ /dev/null @@ -1,291 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package com.azure.storage.blob.nio; - -import com.azure.storage.blob.BlobClient; -import com.azure.storage.blob.models.BlobErrorCode; -import com.azure.storage.blob.models.BlobProperties; -import com.azure.storage.blob.models.BlobRequestConditions; -import com.azure.storage.blob.models.BlobStorageException; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.Arguments; -import org.junit.jupiter.params.provider.CsvSource; -import org.junit.jupiter.params.provider.MethodSource; - -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.attribute.FileAttribute; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.time.OffsetDateTime; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.stream.Stream; - -import static com.azure.core.test.utils.TestUtils.assertArraysEqual; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.Mockito.mock; - -public class AzureResourceTests extends BlobNioTestBase { - private Map config; - - @Override - protected void beforeTest() { - super.beforeTest(); - config = initializeConfigMap(); - } - - @Test - public void constructor() throws IOException { - AzureFileSystem fs = createFS(config); - AzureResource resource = new AzureResource(fs.getPath(getNonDefaultRootDir(fs), "foo/bar")); - - assertEquals(getNonDefaultRootDir(fs) + "/foo/bar", resource.getPath().toString()); - assertEquals(resource.getPath().toBlobClient().getBlobUrl(), resource.getBlobClient().getBlobUrl()); - } - - @Test - public void noRoot() { - assertThrows(IllegalArgumentException.class, () -> new 
AzureResource(createFS(config).getPath("root:"))); - } - - @Test - public void instanceType() { - assertThrows(IllegalArgumentException.class, () -> new AzureResource(mock(Path.class))); - } - - @ParameterizedTest - @MethodSource("directoryStatusAndExistsSupplier") - public void directoryStatusAndExists(DirectoryStatus status, boolean isVirtual) throws IOException { - AzureFileSystem fs = createFS(config); - - // Generate resource names. - // In root1, the resource will be in the root. In root2, the resource will be several levels deep. Also - // root1 will be non-default directory and root2 is default directory. - AzurePath parentPath1 = (AzurePath) fs.getPath(rootNameToContainerName(getNonDefaultRootDir(fs)), - generateBlobName()); - AzurePath parentPath2 = (AzurePath) fs.getPath(getPathWithDepth(3), generateBlobName()); - - // Generate clients to resources. - BlobClient blobClient1 = parentPath1.toBlobClient(); - BlobClient blobClient2 = parentPath2.toBlobClient(); - BlobClient childClient1 = ((AzurePath) parentPath1.resolve(generateBlobName())).toBlobClient(); - BlobClient childClient2 = ((AzurePath) parentPath2.resolve(generateBlobName())).toBlobClient(); - - // Create resources as necessary - if (status == DirectoryStatus.NOT_A_DIRECTORY) { - blobClient1.upload(DATA.getDefaultBinaryData()); - blobClient2.upload(DATA.getDefaultBinaryData()); - } else if (status == DirectoryStatus.EMPTY) { - putDirectoryBlob(blobClient1.getBlockBlobClient()); - putDirectoryBlob(blobClient2.getBlockBlobClient()); - } else if (status == DirectoryStatus.NOT_EMPTY) { - if (!isVirtual) { - putDirectoryBlob(blobClient1.getBlockBlobClient()); - putDirectoryBlob(blobClient2.getBlockBlobClient()); - } - childClient1.upload(DATA.getDefaultBinaryData()); - childClient2.upload(DATA.getDefaultBinaryData()); - } - - boolean directoryExists = status == DirectoryStatus.EMPTY || status == DirectoryStatus.NOT_EMPTY; - assertEquals(status, new AzureResource(parentPath1).checkDirStatus()); - 
assertEquals(status, new AzureResource(parentPath2).checkDirStatus()); - assertEquals(directoryExists, new AzureResource(parentPath1).checkDirectoryExists()); - assertEquals(directoryExists, new AzureResource(parentPath2).checkDirectoryExists()); - } - - private static Stream directoryStatusAndExistsSupplier() { - return Stream.of(Arguments.of(DirectoryStatus.DOES_NOT_EXIST, false), - Arguments.of(DirectoryStatus.NOT_A_DIRECTORY, false), Arguments.of(DirectoryStatus.EMPTY, false), - Arguments.of(DirectoryStatus.NOT_EMPTY, true), Arguments.of(DirectoryStatus.NOT_EMPTY, false)); - } - - @Test - public void directoryStatusFilesWithSamePrefix() throws IOException { - AzureFileSystem fs = createFS(config); - // Create two files with same prefix. Both paths should have DirectoryStatus.NOT_A_DIRECTORY - String pathName = generateBlobName(); - Path path1 = fs.getPath("/foo/bar/" + pathName + ".txt"); - Path path2 = fs.getPath("/foo/bar/" + pathName + ".txt.backup"); - Files.createFile(path1); - Files.createFile(path2); - - assertEquals(DirectoryStatus.NOT_A_DIRECTORY, new AzureResource(path1).checkDirStatus()); - assertEquals(DirectoryStatus.NOT_A_DIRECTORY, new AzureResource(path2).checkDirStatus()); - } - - @Test - public void directoryStatusDirectoriesWithSamePrefix() throws IOException { - // Create two folders where one is a prefix of the others - AzureFileSystem fs = createFS(config); - String pathName = generateBlobName(); - String pathName2 = pathName + '2'; - Files.createDirectory(fs.getPath(pathName)); - Files.createDirectory(fs.getPath(pathName2)); - - // Both should be empty - assertEquals(DirectoryStatus.EMPTY, new AzureResource(fs.getPath(pathName)).checkDirStatus()); - assertEquals(DirectoryStatus.EMPTY, new AzureResource(fs.getPath(pathName2)).checkDirStatus()); - } - - @Test - public void directoryStatusFilesBetweenPrefixAndChild() throws IOException { - AzureFileSystem fs = createFS(config); - Path dirPath = fs.getPath(generateBlobName()); - Path 
childPath = fs.getPath(dirPath.toString(), generateBlobName()); - // Under an old listing scheme, it was possible for a file with the same name as a directory but with a trailing - // '+' to cut in between the parent and child in the listing as we did it and the listing may not register the - // child and erroneously return that the directory is empty. This ensures that listing is done in such a way as - // to account for this and return correctly that the directory is not empty. - Path middlePath = fs.getPath(dirPath + "+"); - - Files.createDirectory(dirPath); - Files.createFile(childPath); - Files.createFile(middlePath); - - assertEquals(DirectoryStatus.NOT_EMPTY, new AzureResource(dirPath).checkDirStatus()); - } - - @Test - public void parentDirExistsFalse() throws IOException { - assertFalse(new AzureResource(createFS(config).getPath(generateBlobName(), "bar")) - .checkParentDirectoryExists()); - } - - @Test - public void parentDirExistsVirtual() throws IOException { - AzureFileSystem fs = createFS(config); - String fileName = generateBlobName(); - String childName = generateBlobName(); - rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(fileName + fs.getSeparator() + childName) - .getAppendBlobClient() - .create(); - - assertTrue(new AzureResource(fs.getPath(fileName, childName)).checkParentDirectoryExists()); - } - - @Test - public void parentDirExistsConcrete() throws IOException { - AzureFileSystem fs = createFS(config); - String fileName = generateBlobName(); - putDirectoryBlob(rootNameToContainerClient(getDefaultDir(fs)).getBlobClient(fileName).getBlockBlobClient()); - - assertTrue(new AzureResource(fs.getPath(fileName, "bar")).checkParentDirectoryExists()); - } - - @Test - public void parentDirExistsRoot() throws IOException { - // No parent means the parent is implicitly the default root, which always exists - assertTrue(new AzureResource(createFS(config).getPath("foo")).checkParentDirectoryExists()); - - } - - @Test - public void 
parentDirExistsNonDefaultRoot() throws IOException { - // Checks for a bug where we would check the wrong root container for existence on a path with depth > 1 - AzureFileSystem fs = createFS(config); - String rootName = getNonDefaultRootDir(fs); - rootNameToContainerClient(rootName).getBlobClient("fizz/buzz/bazz").getAppendBlobClient().create(); - - assertTrue(new AzureResource(fs.getPath(rootName, "fizz/buzz")).checkParentDirectoryExists()); - } - - @ParameterizedTest - @CsvSource(value = {"false,false", "true,false", "false,true", "true,true"}) - public void putDirectoryBlob(boolean metadata, boolean properties) throws IOException, NoSuchAlgorithmException { - AzureResource resource = new AzureResource(createFS(config).getPath(generateBlobName())); - byte[] contentMd5 = MessageDigest.getInstance("MD5").digest(new byte[0]); - List> attributes = new ArrayList<>(); - if (metadata) { - attributes.add(new TestFileAttribute<>("fizz", "buzz")); - attributes.add(new TestFileAttribute<>("foo", "bar")); - } - if (properties) { - attributes.add(new TestFileAttribute<>("Content-Type", "myType")); - attributes.add(new TestFileAttribute<>("Content-Disposition", "myDisposition")); - attributes.add(new TestFileAttribute<>("Content-Language", "myLanguage")); - attributes.add(new TestFileAttribute<>("Content-Encoding", "myEncoding")); - attributes.add(new TestFileAttribute<>("Cache-Control", "myControl")); - attributes.add(new TestFileAttribute<>("Content-MD5", contentMd5)); - } - - if (metadata || properties) { - resource.setFileAttributes(attributes); - } - resource.putDirectoryBlob(null); - checkBlobIsDir(resource.getBlobClient()); - BlobProperties props = resource.getBlobClient().getProperties(); - - if (metadata) { - assertEquals("buzz", props.getMetadata().get("fizz")); - assertEquals("bar", props.getMetadata().get("foo")); - assertFalse(props.getMetadata().containsKey("Content-Type")); - assertFalse(props.getMetadata().containsKey("Content-Disposition")); - 
assertFalse(props.getMetadata().containsKey("Content-Language")); - assertFalse(props.getMetadata().containsKey("Content-Encoding")); - assertFalse(props.getMetadata().containsKey("Content-MD5")); - assertFalse(props.getMetadata().containsKey("Cache-Control")); - } - if (properties) { - assertEquals("myType", props.getContentType()); - assertEquals("myDisposition", props.getContentDisposition()); - assertEquals("myLanguage", props.getContentLanguage()); - assertEquals("myEncoding", props.getContentEncoding()); - assertArraysEqual(contentMd5, props.getContentMd5()); - assertEquals("myControl", props.getCacheControl()); - } - } - - @ParameterizedTest - @MethodSource("putDirectoryBlobACSupplier") - public void putDirectoryBlobAC(OffsetDateTime modified, OffsetDateTime unmodified, String match, String noneMatch) - throws IOException { - AzureResource resource = new AzureResource(createFS(config).getPath(generateBlobName())); - resource.getBlobClient().upload(DATA.getDefaultBinaryData()); - match = setupBlobMatchCondition(resource.getBlobClient(), match); - resource.putDirectoryBlob(new BlobRequestConditions() - .setIfMatch(match) - .setIfNoneMatch(noneMatch) - .setIfModifiedSince(modified) - .setIfUnmodifiedSince(unmodified)); - - checkBlobIsDir(resource.getBlobClient()); - } - - private static Stream putDirectoryBlobACSupplier() { - return Stream.of(Arguments.of(null, null, null, null), Arguments.of(OLD_DATE, null, null, null), - Arguments.of(null, NEW_DATE, null, null), Arguments.of(null, null, RECEIVED_ETAG, null), - Arguments.of(null, null, null, GARBAGE_ETAG)); - } - - @ParameterizedTest - @MethodSource("putDirectoryBlobACFailSupplier") - public void putDirectoryBlobACFail(OffsetDateTime modified, OffsetDateTime unmodified, String match, - String noneMatch) throws IOException { - AzureResource resource = new AzureResource(createFS(config).getPath(generateBlobName())); - resource.getBlobClient().upload(DATA.getDefaultBinaryData()); - noneMatch = 
setupBlobMatchCondition(resource.getBlobClient(), noneMatch); - BlobRequestConditions bac = new BlobRequestConditions() - .setIfMatch(match) - .setIfNoneMatch(noneMatch) - .setIfModifiedSince(modified) - .setIfUnmodifiedSince(unmodified); - - BlobStorageException e = assertThrows(BlobStorageException.class, () -> resource.putDirectoryBlob(bac)); - assertTrue(e.getErrorCode() == BlobErrorCode.CONDITION_NOT_MET - || e.getErrorCode() == BlobErrorCode.LEASE_ID_MISMATCH_WITH_BLOB_OPERATION); - } - - private static Stream putDirectoryBlobACFailSupplier() { - return Stream.of(Arguments.of(NEW_DATE, null, null, null), Arguments.of(null, OLD_DATE, null, null), - Arguments.of(null, null, GARBAGE_ETAG, null), Arguments.of(null, null, null, RECEIVED_ETAG)); - } -} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureSeekableByteChannelTests.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureSeekableByteChannelTests.java deleted file mode 100644 index 49f57d643b7..00000000000 --- a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/AzureSeekableByteChannelTests.java +++ /dev/null @@ -1,412 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package com.azure.storage.blob.nio; - -import com.azure.core.test.TestMode; -import com.azure.core.util.BinaryData; -import com.azure.storage.blob.BlobClient; -import com.azure.storage.blob.specialized.BlobOutputStream; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.Timeout; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.Arguments; -import org.junit.jupiter.params.provider.MethodSource; -import org.mockito.Mockito; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.ClosedChannelException; -import java.nio.channels.NonReadableChannelException; -import java.nio.channels.NonWritableChannelException; -import java.nio.channels.SeekableByteChannel; -import java.nio.file.ClosedFileSystemException; -import java.nio.file.Path; -import java.util.Random; -import java.util.concurrent.TimeUnit; -import java.util.stream.Stream; - -import static com.azure.core.test.utils.TestUtils.assertArraysEqual; -import static org.junit.jupiter.api.Assertions.assertArrayEquals; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; - -public class AzureSeekableByteChannelTests extends BlobNioTestBase { - private int sourceFileSize; - private byte[] fileBytes; - private File sourceFile; - private BlobClient bc; - private BlobClient writeBc; - private AzureSeekableByteChannel readByteChannel; - private AzureSeekableByteChannel writeByteChannel; - private FileInputStream fileStream; - private AzureFileSystem fs; - - @Override - protected void beforeTest() { - super.beforeTest(); - sourceFileSize = 5 * 1024 * 1024; - fileBytes = 
getRandomByteArray(sourceFileSize); - sourceFile = getRandomFile(fileBytes); - - cc.create(); - bc = cc.getBlobClient(generateBlobName()); - writeBc = cc.getBlobClient(generateBlobName()); - bc.upload(DATA.getDefaultBinaryData()); - fs = createFS(initializeConfigMap()); - AzurePath path = ((AzurePath) fs.getPath(getNonDefaultRootDir(fs), bc.getBlobName())); - AzurePath writePath = ((AzurePath) fs.getPath(writeBc.getContainerName() + ":", writeBc.getBlobName())); - - readByteChannel = new AzureSeekableByteChannel(new NioBlobInputStream(bc.openInputStream(), path), path); - // For writing, we don't want a blob to exist there yet - writeByteChannel = new AzureSeekableByteChannel( - new NioBlobOutputStream(writeBc.getBlockBlobClient().getBlobOutputStream(true), writePath), writePath); - try { - fileStream = new FileInputStream(sourceFile); - } catch (FileNotFoundException e) { - throw new RuntimeException(e); - } - } - - private void resetForLargeSource() { - if (getTestMode() != TestMode.PLAYBACK) { - // Base setup only uploads a small source to reduce size of session record. 
- BlobClient blobClient = getNonRecordingServiceClient() - .getBlobContainerClient(bc.getContainerName()) - .getBlobClient(bc.getBlobName()); - blobClient.upload(BinaryData.fromBytes(fileBytes), true); - } - - AzurePath path = ((AzurePath) fs.getPath(getNonDefaultRootDir(fs), bc.getBlobName())); - AzurePath writePath = ((AzurePath) fs.getPath(writeBc.getContainerName() + ":", writeBc.getBlobName())); - - readByteChannel = new AzureSeekableByteChannel(new NioBlobInputStream(bc.openInputStream(), path), path); - // For writing, we don't want a blob to exist there yet - writeByteChannel = new AzureSeekableByteChannel( - new NioBlobOutputStream(writeBc.getBlockBlobClient().getBlobOutputStream(true), writePath), writePath); - } - - @Test - public void read() throws IOException { - resetForLargeSource(); - ByteArrayOutputStream os = new ByteArrayOutputStream(); - int count = 0; - Random rand = new Random(); - - while (count < sourceFileSize) { - ByteBuffer buffer = ByteBuffer.allocate(rand.nextInt(1024 * 1024)); - int readAmount = readByteChannel.read(buffer); - os.write(buffer.array(), 0, readAmount); // limit the write in case we allocated more than we needed - count += readAmount; - } - - assertArrayEquals(fileBytes, os.toByteArray()); - } - - @Test - @Timeout(value = 60, unit = TimeUnit.SECONDS) // fail if test runs >= 1 minute - public void readLoopUntilEof() throws IOException { - resetForLargeSource(); - ByteArrayOutputStream os = new ByteArrayOutputStream(sourceFileSize); - Random rand = new Random(); - - while (true) { // ensures test duration is bounded - ByteBuffer buffer = ByteBuffer.allocate(rand.nextInt(1024 * 1024)); - int readAmount = readByteChannel.read(buffer); - if (readAmount == -1) { - break; // reached EOF - } - os.write(buffer.array(), 0, readAmount); // limit the write in case we allocated more than we needed - } - - assertArrayEquals(fileBytes, os.toByteArray()); - } - - @Test - public void readRespectDestBufferPos() throws IOException { - 
resetForLargeSource(); - Random rand = new Random(); - int initialOffset = rand.nextInt(512) + 1; // always > 0 - byte[] randArray = new byte[2 * initialOffset + sourceFileSize]; - rand.nextBytes(randArray); // fill with random bytes - - // copy same random bytes, but in this copy some will eventually be overwritten by read() - byte[] destArray = new byte[randArray.length]; - System.arraycopy(randArray, 0, destArray, 0, randArray.length); - ByteBuffer dest = ByteBuffer.wrap(destArray); - dest.position(initialOffset); // will have capacity on either side that should not be touched - - int readAmount = 0; - while (readAmount != -1) { - assert dest.position() != 0; - readAmount = readByteChannel.read(dest); // backed by an array, but position != 0 - } - - assertEquals(initialOffset + sourceFileSize, dest.position()); - // destination content should match file content at initial read position - assertArraysEqual(fileBytes, 0, destArray, initialOffset, sourceFileSize); - // destination content should be untouched prior to initial position - assertArraysEqual(randArray, 0, destArray, 0, initialOffset); - // destination content should be untouched past end of read - assertArraysEqual(randArray, initialOffset + sourceFileSize, destArray, initialOffset + sourceFileSize, - initialOffset); - } - - @Test - public void readFSClosed() throws IOException { - fs.close(); - - assertThrows(ClosedFileSystemException.class, () -> readByteChannel.read(ByteBuffer.allocate(1))); - } - - @Test - public void write() throws IOException { - resetForLargeSource(); - int count = 0; - Random rand = new Random(); - writeByteChannel.write(ByteBuffer.wrap(fileBytes)); - - while (count < sourceFileSize) { - int writeAmount = Math.min(rand.nextInt(1024 * 1024), sourceFileSize - count); - byte[] buffer = new byte[writeAmount]; - fileStream.read(buffer); - writeByteChannel.write(ByteBuffer.wrap(buffer)); - count += writeAmount; - } - - writeByteChannel.close(); - 
compareInputStreams(writeBc.openInputStream(), new ByteArrayInputStream(fileBytes), sourceFileSize); - } - - @Test - public void writeRespectSrcBufferPos() throws IOException { - resetForLargeSource(); - Random rand = new Random(); - int initialOffset = rand.nextInt(512) + 1; // always > 0 - byte[] srcBufferContent = new byte[2 * initialOffset + sourceFileSize]; - rand.nextBytes(srcBufferContent); // fill with random bytes - - // place expected file content into source buffer at random location, retain other random bytes - System.arraycopy(fileBytes, 0, srcBufferContent, initialOffset, sourceFileSize); - ByteBuffer srcBuffer = ByteBuffer.wrap(srcBufferContent); - srcBuffer.position(initialOffset); - srcBuffer.limit(initialOffset + sourceFileSize); - - // This test aims to observe the actual bytes written by the ByteChannel to the underlying OutputStream, - // not just the number of bytes allegedly written as reported by its position. It would prefer to examine - // the OutputStream directly, but the channel requires the specific NioBlobOutputStream implementation - // and does not accept something generic like a ByteArrayOutputStream. NioBlobOutputStream is final, so - // it cannot be subclassed or mocked and has little state of its own -- writes go to a BlobOutputStream. - // That class is abstract, but its constructor is not accessible outside its package and cannot normally - // be subclassed to provide custom behavior, but a runtime mocking framework like Mockito can. This is - // the nearest accessible observation point, so the test mocks a BlobOutputStream such that all write - // methods store data in ByteArrayOutputStream which it can later examine for its size and content. 
- ByteArrayOutputStream actualOutput = new ByteArrayOutputStream(sourceFileSize); - BlobOutputStream blobOutputStream = Mockito.mock( - BlobOutputStream.class, Mockito.withSettings().useConstructor(4096 /* block size */)); - Mockito.doAnswer(invocation -> { - actualOutput.write(invocation.getArgument(0)); - return null; - }).when(blobOutputStream).write(Mockito.anyInt()); - Mockito.doAnswer(invoked -> { - actualOutput.write(invoked.getArgument(0)); - return null; - }).when(blobOutputStream).write(Mockito.any(byte[].class)); - Mockito.doAnswer(invoked -> { - actualOutput.write(invoked.getArgument(0), invoked.getArgument(1), invoked.getArgument(2)); - return null; - }).when(blobOutputStream).write(Mockito.any(byte[].class), Mockito.anyInt(), Mockito.anyInt()); - Path path = writeByteChannel.getPath(); - writeByteChannel = new AzureSeekableByteChannel(new NioBlobOutputStream(blobOutputStream, path), path); - - int written = 0; - while (written < sourceFileSize) { - written += writeByteChannel.write(srcBuffer); - } - writeByteChannel.close(); - - assertEquals(initialOffset + sourceFileSize, srcBuffer.position()); // src buffer position SHOULD be updated - assertEquals(srcBuffer.position(), srcBuffer.limit()); // limit SHOULD be unchanged (still at end of content) - // the above report back to the caller, but this verifies the correct bytes are going to the blob: - assertArraysEqual(fileBytes, 0, actualOutput.toByteArray(), 0, sourceFileSize); - } - - @Test - public void writeFSClosed() throws IOException { - fs.close(); - - assertThrows(ClosedFileSystemException.class, () -> writeByteChannel.write(ByteBuffer.allocate(1))); - } - - @Test - public void positionRead() throws IOException { - resetForLargeSource(); - int bufferSize = sourceFileSize / 10; - ByteBuffer dest = ByteBuffer.allocate(bufferSize); - - assertEquals(0, readByteChannel.position()); - - for (int i = 0; i < 10; i++) { - readByteChannel.read(dest); - assertEquals((i + 1) * bufferSize, 
readByteChannel.position()); - dest.flip(); - } - } - - @Test - public void positionSizeWrite() throws IOException { - resetForLargeSource(); - int bufferSize = sourceFileSize / 10; - ByteBuffer src = getRandomData(bufferSize); - - assertEquals(0, writeByteChannel.position()); - assertEquals(0, writeByteChannel.size()); - - for (int i = 0; i < 10; i++) { - writeByteChannel.write(src); - assertEquals((i + 1) * bufferSize, writeByteChannel.position()); - assertEquals(writeByteChannel.position(), writeByteChannel.size()); - src.flip(); - } - } - - @Test - public void positionFSClosed() throws IOException { - fs.close(); - - assertThrows(ClosedFileSystemException.class, readByteChannel::position); - assertThrows(ClosedFileSystemException.class, writeByteChannel::position); - } - - @ParameterizedTest - @MethodSource("seekSupplier") - public void seek(int readCount0, int seekPos1, int readCount1, int seekPos2, int readCount2) throws IOException { - resetForLargeSource(); - ByteBuffer streamContent = ByteBuffer.allocate(readCount0); - readByteChannel(readByteChannel, streamContent); - compareInputStreams(fileStream, new ByteArrayInputStream(streamContent.array()), readCount0); - - readByteChannel.position(seekPos1); - assertEquals(seekPos1, readByteChannel.position()); - - fileStream = new FileInputStream(sourceFile); - fileStream.skip(seekPos1); - streamContent = ByteBuffer.allocate(readCount1); - readByteChannel(readByteChannel, streamContent); - compareInputStreams(fileStream, new ByteArrayInputStream(streamContent.array()), readCount1); - - readByteChannel.position(seekPos2); - assertEquals(seekPos2, readByteChannel.position()); - - fileStream = new FileInputStream(sourceFile); - fileStream.skip(seekPos2); - streamContent = ByteBuffer.allocate(readCount2); - readByteChannel(readByteChannel, streamContent); - compareInputStreams(fileStream, new ByteArrayInputStream(streamContent.array()), readCount2); - } - - private static Stream seekSupplier() { - return Stream.of( - 
Arguments.of(1024, 1024, (2 * 1024 * 1024) - 1024, 3 * 1024 * 1024, 2 * 1024 * 1024), // Only ever seek in place. Read whole blob - Arguments.of(1024, (5 * 1024 * 1024) - 1024, 1024, 2048, 1024), // Seek forward then seek backward - Arguments.of(2 * 1024 * 1024, 1024, 1024, (5 * 1024 * 1024) - 1024, 1024) // Seek backward then seek forward - ); - } - - private static void readByteChannel(SeekableByteChannel channel, ByteBuffer dst) throws IOException { - while (dst.remaining() > 0) { - if (channel.read(dst) == -1) { // Prevent infinite read - break; - } - } - } - - @Test - public void seekOutOfBounds() throws IOException { - assertThrows(IllegalArgumentException.class, () -> readByteChannel.position(-1)); - - readByteChannel.position(sourceFileSize); // position is 0-based, so seeking to size --> EOF - assertEquals(-1, readByteChannel.read(ByteBuffer.allocate(1))); // Seeking to the end and then reading should indicate EOF - } - - @Test - public void seekFSClosed() throws IOException { - fs.close(); - - assertThrows(ClosedFileSystemException.class, () -> readByteChannel.position(0)); - } - - @Test - public void sizeRead() throws IOException { - bc.upload(DATA.getDefaultBinaryData(), true); - AzurePath path = ((AzurePath) fs.getPath(getNonDefaultRootDir(fs), bc.getBlobName())); - readByteChannel = new AzureSeekableByteChannel(new NioBlobInputStream(bc.openInputStream(), path), path); - - assertEquals(DATA.getDefaultDataSize(), readByteChannel.size()); - } - - @Test - public void sizeFSClosed() throws IOException { - fs.close(); - - assertThrows(ClosedFileSystemException.class, readByteChannel::size); - assertThrows(ClosedFileSystemException.class, writeByteChannel::size); - } - - @Test - public void close() throws IOException { - readByteChannel.close(); - writeByteChannel.close(); - - assertThrows(ClosedChannelException.class, () -> readByteChannel.read(ByteBuffer.allocate(1))); - assertThrows(ClosedChannelException.class, readByteChannel::size); - 
assertThrows(ClosedChannelException.class, readByteChannel::position); - assertThrows(ClosedChannelException.class, () -> writeByteChannel.write(ByteBuffer.allocate(1))); - assertThrows(ClosedChannelException.class, writeByteChannel::size); - assertThrows(ClosedChannelException.class, writeByteChannel::position); - } - - @Test - public void closeFSClose() throws IOException { - fs.close(); - - assertThrows(ClosedFileSystemException.class, readByteChannel::close); - assertThrows(ClosedFileSystemException.class, writeByteChannel::close); - } - - @Test - public void isOpen() throws IOException { - assertTrue(readByteChannel.isOpen()); - assertTrue(writeByteChannel.isOpen()); - - readByteChannel.close(); - writeByteChannel.close(); - - assertFalse(readByteChannel.isOpen()); - assertFalse(writeByteChannel.isOpen()); - } - - @Test - public void isOpenFSClosed() throws IOException { - fs.close(); - - assertThrows(ClosedFileSystemException.class, readByteChannel::isOpen); - assertThrows(ClosedFileSystemException.class, writeByteChannel::isOpen); - } - - @Test - public void unsupportedOperations() { - assertThrows(NonWritableChannelException.class, () -> readByteChannel.write(ByteBuffer.allocate(1))); - assertThrows(NonReadableChannelException.class, () -> writeByteChannel.read(ByteBuffer.allocate(1))); - assertThrows(NonReadableChannelException.class, () -> writeByteChannel.position(5)); - assertThrows(UnsupportedOperationException.class, () -> readByteChannel.truncate(0)); - assertThrows(UnsupportedOperationException.class, () -> writeByteChannel.truncate(0)); - } -} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/BlobNioTestBase.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/BlobNioTestBase.java deleted file mode 100644 index 3bc1dd68d35..00000000000 --- 
a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/BlobNioTestBase.java +++ /dev/null @@ -1,416 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.storage.blob.nio; - -import static com.azure.core.test.utils.TestUtils.assertArraysEqual; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; - -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.io.UncheckedIOException; -import java.lang.reflect.Method; -import java.net.URI; -import java.net.URISyntaxException; -import java.net.http.HttpClient; -import java.nio.ByteBuffer; -import java.nio.charset.StandardCharsets; -import java.nio.file.FileSystem; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.attribute.FileAttribute; -import java.time.Duration; -import java.time.OffsetDateTime; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Random; -import java.util.UUID; -import java.util.concurrent.TimeUnit; -import java.util.zip.CRC32; - -import com.azure.core.client.traits.HttpTrait; -import com.azure.core.http.netty.NettyAsyncHttpClientBuilder; -import com.azure.core.http.okhttp.OkHttpAsyncHttpClientBuilder; -import com.azure.core.http.policy.HttpPipelinePolicy; -import com.azure.core.http.rest.Response; -import com.azure.core.test.TestMode; -import com.azure.core.test.TestProxyTestBase; -import com.azure.core.test.models.CustomMatcher; -import com.azure.core.test.models.TestProxySanitizer; -import com.azure.core.test.models.TestProxySanitizerType; -import com.azure.core.util.ServiceVersion; -import com.azure.storage.blob.BlobClient; -import com.azure.storage.blob.BlobContainerAsyncClient; -import 
com.azure.storage.blob.BlobContainerClient; -import com.azure.storage.blob.BlobServiceAsyncClient; -import com.azure.storage.blob.BlobServiceClient; -import com.azure.storage.blob.BlobServiceClientBuilder; -import com.azure.storage.blob.models.BlobContainerItem; -import com.azure.storage.blob.models.BlockBlobItem; -import com.azure.storage.blob.models.ListBlobContainersOptions; -import com.azure.storage.blob.specialized.BlobClientBase; -import com.azure.storage.blob.specialized.BlockBlobClient; -import com.azure.storage.common.StorageSharedKeyCredential; -import com.azure.storage.common.implementation.Constants; -import com.azure.storage.common.test.shared.ServiceVersionValidationPolicy; -import com.azure.storage.common.test.shared.TestAccount; -import com.azure.storage.common.test.shared.TestDataFactory; -import com.azure.storage.common.test.shared.TestEnvironment; - -import okhttp3.ConnectionPool; - -public class BlobNioTestBase extends TestProxyTestBase { - protected static final TestEnvironment ENV = TestEnvironment.getInstance(); - protected static final TestDataFactory DATA = TestDataFactory.getInstance(); - private static final HttpClient NETTY_HTTP_CLIENT = new NettyAsyncHttpClientBuilder().build(); - private static final HttpClient OK_HTTP_CLIENT = new OkHttpAsyncHttpClientBuilder() - .connectionPool(new ConnectionPool(50, 5, TimeUnit.MINUTES)) - .build(); - - // Used to generate stable container names for recording tests requiring multiple containers. - private int entityNo = 0; - - // both sync and async clients point to same container - protected BlobContainerClient cc; - protected BlobContainerAsyncClient ccAsync; - protected BlobServiceClient primaryBlobServiceClient; - protected BlobServiceAsyncClient primaryBlobServiceAsyncClient; - protected BlobServiceClient alternateBlobServiceClient; - protected String containerName; - protected String prefix; - - - // The values below are used to create data-driven tests for access conditions. 
- protected static final OffsetDateTime OLD_DATE = OffsetDateTime.now().minusDays(1); - protected static final OffsetDateTime NEW_DATE = OffsetDateTime.now().plusDays(1); - protected static final String GARBAGE_ETAG = "garbage"; - // Note that this value is only used to check if we depend on the received ETag. This value will not actually be - // used. - protected static final String RECEIVED_ETAG = "received"; - - @Override - protected void beforeTest() { - super.beforeTest(); - prefix = getCrc32(testContextManager.getTestPlaybackRecordingName()); - - primaryBlobServiceClient = getServiceClient(ENV.getPrimaryAccount()); - primaryBlobServiceAsyncClient = getServiceAsyncClient(ENV.getPrimaryAccount()); - alternateBlobServiceClient = getServiceClient(ENV.getPrimaryAccount()); - - containerName = generateContainerName(); - cc = primaryBlobServiceClient.getBlobContainerClient(containerName); - ccAsync = primaryBlobServiceAsyncClient.getBlobContainerAsyncClient(containerName); - - if (getTestMode() != TestMode.LIVE) { - interceptorManager.addSanitizers( - Collections.singletonList(new TestProxySanitizer("sig=(.*)", "REDACTED", TestProxySanitizerType.URL))); - // Ignore changes to the order of query parameters and wholly ignore the 'sv' (service version) query parameter - // in SAS tokens. 
- interceptorManager.addMatchers(Collections.singletonList(new CustomMatcher() - .setComparingBodies(false) - .setExcludedHeaders(Arrays.asList("x-ms-copy-source", "If-Match", "x-ms-range", "If-Modified-Since", - "If-Unmodified-Since")) - .setQueryOrderingIgnored(true) - .setIgnoredQueryParameters(Arrays.asList("sv")))); - } - } - - @Override - protected void afterTest() { - super.afterTest(); - - if (getTestMode() == TestMode.PLAYBACK) { - return; - } - - BlobServiceClient cleanupClient = getNonRecordingServiceClient(); - ListBlobContainersOptions options = new ListBlobContainersOptions().setPrefix(prefix); - for (BlobContainerItem container : cleanupClient.listBlobContainers(options, Duration.ofSeconds(120))) { - BlobContainerClient containerClient = cleanupClient.getBlobContainerClient(container.getName()); - - containerClient.delete(); - } - } - - protected BlobServiceClient getNonRecordingServiceClient() { - return new BlobServiceClientBuilder() - .httpClient(getHttpClient()) - .credential(ENV.getPrimaryAccount().getCredential()) - .endpoint(ENV.getPrimaryAccount().getBlobEndpoint()) - .buildClient(); - } - - protected BlobServiceClient getServiceClient(TestAccount account) { - return getServiceClient(account.getCredential(), account.getBlobEndpoint()); - } - - protected BlobServiceClient getServiceClient(StorageSharedKeyCredential credential, String endpoint, - HttpPipelinePolicy... policies) { - return getServiceClientBuilder(credential, endpoint, policies).buildClient(); - } - - protected BlobServiceAsyncClient getServiceAsyncClient(TestAccount account) { - return getServiceClientBuilder(account.getCredential(), account.getBlobEndpoint()) - .buildAsyncClient(); - } - - protected BlobServiceClientBuilder getServiceClientBuilder(StorageSharedKeyCredential credential, String endpoint, - HttpPipelinePolicy... 
policies) { - BlobServiceClientBuilder builder = new BlobServiceClientBuilder() - .endpoint(endpoint); - - for (HttpPipelinePolicy policy : policies) { - builder.addPolicy(policy); - } - - instrument(builder); - - if (credential != null) { - builder.credential(credential); - } - - return builder; - } - - protected Map initializeConfigMap(HttpPipelinePolicy... policies) { - Map config = new HashMap<>(); - config.put(AzureFileSystem.AZURE_STORAGE_HTTP_CLIENT, getHttpClient()); - List policyList = new ArrayList<>(Arrays.asList(policies)); - if (getTestMode() == TestMode.RECORD) { - policyList.add(interceptorManager.getRecordPolicy()); - } - config.put(AzureFileSystem.AZURE_STORAGE_HTTP_POLICIES, policyList.toArray(new HttpPipelinePolicy[0])); - - return config; - } - - protected URI getFileSystemUri() { - try { - return new URI("azb://?endpoint=" + ENV.getPrimaryAccount().getBlobEndpoint()); - } catch (URISyntaxException ex) { - throw new RuntimeException(ex); - } - } - - protected String generateContainerName() { - return generateResourceName(entityNo++); - } - - protected String generateBlobName() { - return generateResourceName(entityNo++); - } - - private String generateResourceName(int entityNo) { - return testResourceNamer.randomName(prefix + entityNo, 63); - } - - protected AzureFileSystem createFS(Map config) { - config.put(AzureFileSystem.AZURE_STORAGE_FILE_STORES, generateContainerName() + "," + generateContainerName()); - config.put(AzureFileSystem.AZURE_STORAGE_SHARED_KEY_CREDENTIAL, - ENV.getPrimaryAccount().getCredential()); - - try { - return new AzureFileSystem(new AzureFileSystemProvider(), ENV.getPrimaryAccount().getBlobEndpoint(), - config); - } catch (IOException ex) { - throw new UncheckedIOException(ex); - } - } - - protected byte[] getRandomByteArray(int size) { - long seed = UUID.fromString(testResourceNamer.randomUuid()).getMostSignificantBits() & Long.MAX_VALUE; - Random rand = new Random(seed); - byte[] data = new byte[size]; - 
rand.nextBytes(data); - return data; - } - - /* - Size must be an int because ByteBuffer sizes can only be an int. Long is not supported. - */ - protected ByteBuffer getRandomData(int size) { - return ByteBuffer.wrap(getRandomByteArray(size)); - } - - /* - We only allow int because anything larger than 2GB (which would require a long) is left to stress/perf. - */ - protected File getRandomFile(byte[] bytes) { - try { - File file = File.createTempFile(UUID.randomUUID().toString(), ".txt"); - file.deleteOnExit(); - Files.write(file.toPath(), bytes); - - return file; - } catch (IOException ex) { - throw new UncheckedIOException(ex); - } - } - - protected static void compareInputStreams(InputStream stream1, InputStream stream2, long count) { - long pos = 0L; - int defaultReadBuffer = 128 * Constants.KB; - try (InputStream s1 = stream1; InputStream s2 = stream2) { - // If the amount we are going to read is smaller than the default buffer size use that instead. - int bufferSize = (int) Math.min(defaultReadBuffer, count); - - while (pos < count) { - // Number of bytes we expect to read. - int expectedReadCount = (int) Math.min(bufferSize, count - pos); - byte[] buffer1 = new byte[expectedReadCount]; - byte[] buffer2 = new byte[expectedReadCount]; - - int readCount1 = s1.read(buffer1); - int readCount2 = s2.read(buffer2); - - // Use Arrays.equals as it is more optimized than Groovy/Spock's '==' for arrays. 
- assertEquals(readCount1, readCount2); - assertArraysEqual(buffer1, buffer2); - - pos += expectedReadCount; - } - - int verificationRead = s2.read(); - assertEquals(count, pos); - assertEquals(-1, verificationRead); - } catch (IOException ex) { - throw new UncheckedIOException(ex); - } - } - - protected String rootNameToContainerName(String root) { - return root.substring(0, root.length() - 1); - } - - protected BlobContainerClient rootNameToContainerClient(String root) { - return primaryBlobServiceClient.getBlobContainerClient(rootNameToContainerName(root)); - } - - protected String getNonDefaultRootDir(FileSystem fs) { - for (Path dir : fs.getRootDirectories()) { - if (!dir.equals(((AzureFileSystem) fs).getDefaultDirectory())) { - return dir.toString(); - } - } - throw new RuntimeException("File system only contains the default directory"); - } - - protected String getDefaultDir(FileSystem fs) { - return ((AzureFileSystem) fs).getDefaultDirectory().toString(); - } - - protected String getPathWithDepth(int depth) { - StringBuilder pathStr = new StringBuilder(); - for (int i = 0; i < depth; i++) { - pathStr.append(generateBlobName()).append(AzureFileSystem.PATH_SEPARATOR); - } - return pathStr.toString(); - } - - protected Response putDirectoryBlob(BlockBlobClient blobClient) { - return blobClient.commitBlockListWithResponse(Collections.emptyList(), null, - Collections.singletonMap(AzureResource.DIR_METADATA_MARKER, "true"), null, null, null, null); - } - - /** - * This will retrieve the etag to be used in testing match conditions. The result will typically be assigned to the - * ifMatch condition when testing success and the ifNoneMatch condition when testing failure. - * - * @param bc The URL to the blob to get the etag on. - * @param match The ETag value for this test. If {@code receivedEtag} is passed, that will signal that the test is - * expecting the blob's actual etag for this test, so it is retrieved. 
- * @return The appropriate etag value to run the current test. - */ - protected String setupBlobMatchCondition(BlobClientBase bc, String match) { - return RECEIVED_ETAG.equals(match) ? bc.getProperties().getETag() : match; - } - - protected void checkBlobIsDir(BlobClient blobClient) { - assertTrue(Boolean.parseBoolean(blobClient.getPropertiesWithResponse(null, null, null) - .getValue().getMetadata().get(AzureResource.DIR_METADATA_MARKER))); - } - - static class TestFileAttribute implements FileAttribute { - private final String name; - private final T value; - - TestFileAttribute(String name, T value) { - this.name = name; - this.value = value; - } - - @Override - public String name() { - return this.name; - } - - @Override - public T value() { - return this.value; - } - } - - private static String getCrc32(String input) { - CRC32 crc32 = new CRC32(); - crc32.update(input.getBytes(StandardCharsets.UTF_8)); - return String.format(Locale.US, "%08X", crc32.getValue()).toLowerCase(); - } - - @SuppressWarnings("unchecked") - protected , E extends Enum> T instrument(T builder) { - builder.httpClient(getHttpClient()); - if (getTestMode() == TestMode.RECORD) { - builder.addPolicy(interceptorManager.getRecordPolicy()); - } - - - if (ENV.getServiceVersion() != null) { - try { - Method serviceVersionMethod = Arrays.stream(builder.getClass().getDeclaredMethods()) - .filter(method -> "serviceVersion".equals(method.getName()) - && method.getParameterCount() == 1 - && ServiceVersion.class.isAssignableFrom(method.getParameterTypes()[0])) - .findFirst() - .orElseThrow(() -> new RuntimeException("Unable to find serviceVersion method for builder: " - + builder.getClass())); - Class serviceVersionClass = (Class) serviceVersionMethod.getParameterTypes()[0]; - ServiceVersion serviceVersion = (ServiceVersion) Enum.valueOf(serviceVersionClass, - ENV.getServiceVersion()); - serviceVersionMethod.invoke(builder, serviceVersion); - builder.addPolicy(new 
ServiceVersionValidationPolicy(serviceVersion.getVersion())); - } catch (ReflectiveOperationException ex) { - throw new RuntimeException(ex); - } - } - - builder.httpLogOptions(BlobServiceClientBuilder.getDefaultHttpLogOptions()); - - return builder; - } - - protected HttpClient getHttpClient() { - if (getTestMode() != TestMode.PLAYBACK) { - switch (ENV.getHttpClientType()) { - case NETTY: - return NETTY_HTTP_CLIENT; - case OK_HTTP: - return OK_HTTP_CLIENT; - default: - throw new IllegalArgumentException("Unknown http client type: " + ENV.getHttpClientType()); - } - } else { - return interceptorManager.getPlaybackClient(); - } - } - - public static boolean liveOnly() { - return ENV.getTestMode() == TestMode.LIVE; - } -} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/CompositeTests.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/CompositeTests.java deleted file mode 100644 index a750af92d55..00000000000 --- a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/CompositeTests.java +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package com.azure.storage.blob.nio; - -import com.azure.storage.blob.BlobClient; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.Arguments; -import org.junit.jupiter.params.provider.MethodSource; - -import java.io.IOException; -import java.io.OutputStream; -import java.nio.file.FileVisitResult; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.SimpleFileVisitor; -import java.nio.file.StandardCopyOption; -import java.nio.file.attribute.BasicFileAttributes; -import java.util.Map; -import java.util.stream.Stream; - -import static com.azure.core.test.utils.TestUtils.assertArraysEqual; -import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; - -public class CompositeTests extends BlobNioTestBase { - private Map config; - - @Override - protected void beforeTest() { - super.beforeTest(); - config = initializeConfigMap(); - } - - @Test - public void filesCreateDirs() throws IOException { - AzureFileSystem fs = createFS(config); - Path dirs = fs.getPath("mydir1/mydir2/mydir3"); - Files.createDirectories(dirs); - - assertTrue(Files.isDirectory(fs.getPath("mydir1"))); - assertTrue(Files.isDirectory(fs.getPath("mydir1/mydir2"))); - assertTrue(Files.isDirectory(fs.getPath("mydir1/mydir2/mydir3"))); - } - - @Test - public void filesCreate() throws IOException { - AzureFileSystem fs = createFS(config); - Path path = Files.createFile(fs.getPath(generateBlobName())); - - assertDoesNotThrow(() -> fs.provider().checkAccess(path)); - } - - @Test - public void filesCopy() throws IOException { - AzureFileSystem fs = createFS(config); - Path dest = fs.getPath("dest"); - byte[] resultArr = new byte[DATA.getDefaultDataSize()]; - Files.copy(DATA.getDefaultInputStream(), dest); - fs.provider().newInputStream(dest).read(resultArr); - - 
assertArraysEqual(DATA.getDefaultBytes(), resultArr); - - Path dest2 = fs.getPath("dest2"); - OutputStream outStream = fs.provider().newOutputStream(dest2); - Files.copy(dest, outStream); - outStream.close(); - resultArr = new byte[DATA.getDefaultDataSize()]; - fs.provider().newInputStream(dest2).read(resultArr); - - assertArraysEqual(DATA.getDefaultBytes(), resultArr); - - Path dest3 = fs.getPath("dest3"); - Files.copy(dest, dest3, StandardCopyOption.COPY_ATTRIBUTES); - resultArr = new byte[DATA.getDefaultDataSize()]; - fs.provider().newInputStream(dest3).read(resultArr); - - assertArraysEqual(DATA.getDefaultBytes(), resultArr); - } - - // Bug: https://github.com/Azure/azure-sdk-for-java/issues/20325 - @Test - public void filesReadAllBytes() throws IOException { - AzureFileSystem fs = createFS(config); - String pathName = generateBlobName(); - Path path1 = fs.getPath("/foo/bar/" + pathName); - Path path2 = fs.getPath("/foo/bar/" + pathName + ".backup"); - Files.createFile(path1); - Files.createFile(path2); - - assertDoesNotThrow(() -> Files.readAllBytes(path1)); - } - - @Test - public void filesDeleteEmptyDirectory() throws IOException { - // Create two folders where one is a prefix of the others - AzureFileSystem fs = createFS(config); - String pathName = generateBlobName(); - String pathName2 = pathName + '2'; - Files.createDirectory(fs.getPath(pathName)); - Files.createDirectory(fs.getPath(pathName2)); - - // Delete the one that is a prefix to ensure the other one does not interfere - assertDoesNotThrow(() -> Files.delete(fs.getPath(pathName))); - } - - @ParameterizedTest - @MethodSource("filesExistsSupplier") - public void filesExists(DirectoryStatus status, boolean isVirtual) throws IOException { - AzureFileSystem fs = createFS(config); - - // Generate resource names. - AzurePath path = (AzurePath) fs.getPath(rootNameToContainerName(getNonDefaultRootDir(fs)), generateBlobName()); - - // Generate clients to resources. 
- BlobClient blobClient = path.toBlobClient(); - BlobClient childClient1 = ((AzurePath) path.resolve(generateBlobName())).toBlobClient(); - - // Create resources as necessary - if (status == DirectoryStatus.NOT_A_DIRECTORY) { - blobClient.upload(DATA.getDefaultBinaryData()); - } else if (status == DirectoryStatus.NOT_EMPTY) { - if (!isVirtual) { - putDirectoryBlob(blobClient.getBlockBlobClient()); - } - childClient1.upload(DATA.getDefaultBinaryData()); - } - - assertEquals(status != DirectoryStatus.DOES_NOT_EXIST, Files.exists(path)); - } - - private static Stream filesExistsSupplier() { - return Stream.of(Arguments.of(DirectoryStatus.DOES_NOT_EXIST, false), - Arguments.of(DirectoryStatus.NOT_A_DIRECTORY, false), Arguments.of(DirectoryStatus.NOT_EMPTY, true), - Arguments.of(DirectoryStatus.NOT_EMPTY, false)); - } - - @Test - public void filesWalkFileTree() throws IOException { - AzureFileSystem fs = createFS(config); - /* - file1 - cDir1 - cDir2 - |__file2 - |__cDir3 - |__vDir1 - |__file3 - vDir2 - |__file4 - |__cDir4 - |__vDir3 - |__file5 - */ - String baseDir = "a"; - - // Create files and directories - ((AzurePath) fs.getPath("a/file1")).toBlobClient().upload(DATA.getDefaultBinaryData()); - ((AzurePath) fs.getPath("a/cDir2/file2")).toBlobClient().upload(DATA.getDefaultBinaryData()); - ((AzurePath) fs.getPath("a/cDir2/vDir1/file3")).toBlobClient().upload(DATA.getDefaultBinaryData()); - ((AzurePath) fs.getPath("a/vDir2/file4")).toBlobClient().upload(DATA.getDefaultBinaryData()); - ((AzurePath) fs.getPath("a/vDir2/vDir3/file5")).toBlobClient().upload(DATA.getDefaultBinaryData()); - - putDirectoryBlob(((AzurePath) fs.getPath(baseDir)).toBlobClient().getBlockBlobClient()); - putDirectoryBlob(((AzurePath) fs.getPath("a/cDir1")).toBlobClient().getBlockBlobClient()); - putDirectoryBlob(((AzurePath) fs.getPath("a/cDir2")).toBlobClient().getBlockBlobClient()); - putDirectoryBlob(((AzurePath) fs.getPath("a/cDir2/cDir3")).toBlobClient().getBlockBlobClient()); - 
putDirectoryBlob(((AzurePath) fs.getPath("a/vDir2/cDir4")).toBlobClient().getBlockBlobClient()); - - TestFileVisitor visitor = new TestFileVisitor<>(); - // System.out.println(Files.readAttributes(fs.getPath(baseDir), AzureBasicFileAttributes.class).isDirectory()); - Files.walkFileTree(fs.getPath(baseDir), visitor); - - // might need to make this work on root directories as well, which would probably mean inspecting the path and - // adding an isRoot method - assertEquals(5, visitor.fileCount); - assertEquals(8, visitor.directoryCount); // includes baseDir - } - - static class TestFileVisitor extends SimpleFileVisitor { - private int fileCount = 0; - private int directoryCount = 0; - - @Override - public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) { - fileCount++; - return FileVisitResult.CONTINUE; - } - - @Override - public FileVisitResult postVisitDirectory(Path dir, IOException exc) { - directoryCount++; - return FileVisitResult.CONTINUE; - } - } -} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/NioBlobInputStreamTests.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/NioBlobInputStreamTests.java deleted file mode 100644 index 2bf4026f80d..00000000000 --- a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/NioBlobInputStreamTests.java +++ /dev/null @@ -1,243 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package com.azure.storage.blob.nio; - -import com.azure.core.test.TestMode; -import com.azure.core.util.BinaryData; -import com.azure.storage.blob.BlobClient; -import com.azure.storage.blob.models.BlobStorageException; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.function.Executable; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.CsvSource; -import org.junit.jupiter.params.provider.MethodSource; -import org.junit.jupiter.params.provider.ValueSource; - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.nio.file.ClosedFileSystemException; -import java.nio.file.Files; -import java.util.function.Function; -import java.util.stream.Stream; - -import static com.azure.core.test.utils.TestUtils.assertArraysEqual; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertInstanceOf; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; - -public class NioBlobInputStreamTests extends BlobNioTestBase { - private byte[] fileBytes; - private File sourceFile; - private BlobClient bc; - private NioBlobInputStream nioStream; - private FileInputStream fileStream; - private AzureFileSystem fs; - - @Override - protected void beforeTest() { - super.beforeTest(); - fileBytes = getRandomByteArray(5 * 1024 * 1024); - sourceFile = getRandomFile(fileBytes); - - cc.create(); - bc = cc.getBlobClient(generateBlobName()); - bc.upload(DATA.getDefaultBinaryData()); - fs = createFS(initializeConfigMap()); - AzurePath path = ((AzurePath) fs.getPath(getNonDefaultRootDir(fs), bc.getBlobName())); - - nioStream = new NioBlobInputStream(bc.openInputStream(), path); - try { - fileStream = new FileInputStream(sourceFile); - } catch (FileNotFoundException e) { - throw new RuntimeException(e); - } - } - - private void 
resetForLargeSource() { - if (getTestMode() != TestMode.PLAYBACK) { - // Base setup only uploads a small source to reduce size of session record. - BlobClient blobClient = getNonRecordingServiceClient() - .getBlobContainerClient(bc.getContainerName()) - .getBlobClient(bc.getBlobName()); - blobClient.upload(BinaryData.fromBytes(fileBytes), true); - } - - // Base setup only uploads a small source to reduce size of session record. - AzurePath path = ((AzurePath) fs.getPath(getNonDefaultRootDir(fs), bc.getBlobName())); - nioStream = new NioBlobInputStream(bc.openInputStream(), path); - } - - @Override - protected void afterTest() { - super.afterTest(); - sourceFile.delete(); - } - - @Test - public void readWholeFile() throws IOException { - resetForLargeSource(); - compareInputStreams(nioStream, fileStream, Files.size(sourceFile.toPath())); - } - - @Test - public void readMin() throws IOException { - resetForLargeSource(); - for (int i = 0; i < 100; i++) { - assertEquals(fileStream.read(), nioStream.read()); - } - } - - @ParameterizedTest - @ValueSource(ints = {0, 100, 4 * 1024 * 1024}) - public void readBuff(int size) throws IOException { - resetForLargeSource(); - byte[] nioBytes = new byte[size]; - nioStream.read(nioBytes); - - assertArraysEqual(fileBytes, 0, nioBytes, 0, size); - } - - @Test - public void readBuffOffsetLen() throws IOException { - resetForLargeSource(); - byte[] nioBytes = new byte[100]; - nioStream.read(nioBytes, 5, 50); - - assertArraysEqual(fileBytes, 0, nioBytes, 5, 50); - } - - @ParameterizedTest - @CsvSource(value = {"-1,5", "3,-1", "0,11", "3,8"}) - public void readBuffOffsetLenFail(int off, int len) { - byte[] b = new byte[10]; - - assertThrows(IndexOutOfBoundsException.class, () -> nioStream.read(b, off, len)); - } - - @ParameterizedTest - @MethodSource("readFailSupplier") - public void readFail(Function methodCall) throws IOException { - resetForLargeSource(); - bc.delete(); - nioStream.read(new byte[4 * 1024 * 1024]); // Must read 
through the initial download to trigger failed response - - IOException e = assertThrows(IOException.class, methodCall.apply(nioStream)); - assertInstanceOf(BlobStorageException.class, e.getCause()); - } - - private static Stream> readFailSupplier() { - return Stream.of(nioStream -> nioStream::read, nioStream -> () -> nioStream.read(new byte[5]), - nioStream -> () -> nioStream.read(new byte[5], 0, 4)); - } - - @Test - public void readFSClosed() throws IOException { - fs.close(); - - assertThrows(ClosedFileSystemException.class, nioStream::read); - assertThrows(ClosedFileSystemException.class, () -> nioStream.read(new byte[1])); - assertThrows(ClosedFileSystemException.class, () -> nioStream.read(new byte[10], 2, 5)); - } - - - @ParameterizedTest - @CsvSource(value = {"0,0", "0,50", "50,0", "50,50", "50,5242780", "5242780,50"}) - public void markAndReset(int markAfter, int resetAfter) throws IOException { - resetForLargeSource(); - byte[] b = new byte[markAfter]; - nioStream.read(b); - fileStream.skip(markAfter); // Position the file stream where we expect to be after resetting. 
- - // Read some bytes past the mark - nioStream.mark(Integer.MAX_VALUE); - - nioStream.read(new byte[resetAfter]); - - // Reset to the mark - nioStream.reset(); - - compareInputStreams(nioStream, fileStream, sourceFile.length() - markAfter); - } - - @Test - public void markReadLimit() throws IOException { - nioStream.mark(5); - nioStream.read(new byte[6]); - - assertThrows(IOException.class, nioStream::reset); - } - - @Test - public void resetFail() throws IOException { - // Mark never set - nioStream.read(); - - assertThrows(IOException.class, nioStream::reset); - } - - @Test - public void resetFSClosed() throws IOException { - nioStream.mark(5); - fs.close(); - - assertThrows(ClosedFileSystemException.class, nioStream::reset); - } - - @Test - public void markSupported() { - assertTrue(nioStream.markSupported()); - } - - @ParameterizedTest - @ValueSource(ints = {0, 10, 4 * 1024 * 1024, (5 * 1024 * 1024) - 1}) - public void skip(int skip) throws IOException { - resetForLargeSource(); - nioStream.skip(skip); - fileStream.skip(skip); - - compareInputStreams(nioStream, fileStream, Files.size(sourceFile.toPath()) - skip); - } - - @Test - public void skipFSClosed() throws IOException { - fs.close(); - - assertThrows(ClosedFileSystemException.class, () -> nioStream.skip(5)); - } - - @Test - public void close() throws IOException { - nioStream.close(); - - assertThrows(IOException.class, nioStream::read); - assertThrows(IOException.class, () -> nioStream.read(new byte[5])); - assertThrows(IOException.class, () -> nioStream.read(new byte[5], 0, 4)); - } - - @Test - public void closeFSClosed() throws IOException { - fs.close(); - - assertThrows(ClosedFileSystemException.class, nioStream::close); - } - - @ParameterizedTest - @CsvSource(value = {"0,4194304", "5,4194299", "5242880,0"}) - public void available(int readAmount, int available) throws IOException { - resetForLargeSource(); - nioStream.read(new byte[readAmount]); - - assertEquals(available, nioStream.available()); 
- } - - @Test - public void availableFSClosed() throws IOException { - fs.close(); - - assertThrows(ClosedFileSystemException.class, nioStream::available); - } -} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/NioBlobOutputStreamTests.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/NioBlobOutputStreamTests.java deleted file mode 100644 index 753b6674733..00000000000 --- a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/NioBlobOutputStreamTests.java +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.storage.blob.nio; - -import com.azure.storage.blob.models.BlobErrorCode; -import com.azure.storage.blob.models.BlobStorageException; -import com.azure.storage.blob.models.BlockListType; -import com.azure.storage.blob.models.ParallelTransferOptions; -import com.azure.storage.blob.options.BlockBlobOutputStreamOptions; -import com.azure.storage.blob.specialized.BlockBlobClient; -import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.condition.EnabledIf; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.CsvSource; - -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.nio.file.ClosedFileSystemException; - -import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertThrows; - -public class NioBlobOutputStreamTests extends BlobNioTestBase { - private static final int BLOCK_SIZE = 50; - private static final int MAX_SINGLE_UPLOAD_SIZE = 200; - - private BlockBlobClient bc; - private NioBlobOutputStream nioStream; 
- private AzureFileSystem fs; - - @Override - protected void beforeTest() { - super.beforeTest(); - cc.create(); - bc = cc.getBlobClient(generateBlobName()).getBlockBlobClient(); - - fs = createFS(initializeConfigMap()); - AzurePath path = ((AzurePath) fs.getPath(getNonDefaultRootDir(fs), bc.getBlobName())); - - nioStream = new NioBlobOutputStream(bc.getBlobOutputStream(new ParallelTransferOptions(BLOCK_SIZE, null, null, - MAX_SINGLE_UPLOAD_SIZE), null, null, null, null), path); - } - - @Test - public void writeMin() throws IOException { - nioStream.write(1); - nioStream.close(); - - assertEquals(1, bc.getProperties().getBlobSize()); - - InputStream inputStream = bc.openInputStream(); - - assertEquals(1, inputStream.read()); - assertEquals(-1, inputStream.read()); - } - - @EnabledIf("com.azure.storage.blob.nio.BlobNioTestBase#liveOnly") // Because we upload in blocks - @Disabled("failing in ci") - public void writeMinError() throws IOException { - // Create an append blob at the destination to ensure writes fail. Customers should eventually be notified via - // writing that there was an error - cc.getBlobClient(bc.getBlobName()).getAppendBlobClient().create(); - - // Write enough data to force making network requests. - nioStream.write(getRandomByteArray(MAX_SINGLE_UPLOAD_SIZE + 1)); - // Issue a spurious request: A more reliable way than sleeping to ensure the previous stage block has enough - // time to round trip. 
- bc.getProperties(); - - assertThrows(IOException.class, () -> nioStream.write(1)); - } - - @Test - public void writeArray() throws IOException { - int dataSize = 100; - byte[] data = getRandomByteArray(dataSize); - nioStream.write(data); - nioStream.close(); - - assertEquals(dataSize, bc.getProperties().getBlobSize()); - compareInputStreams(bc.openInputStream(), new ByteArrayInputStream(data), dataSize); - } - - @EnabledIf("com.azure.storage.blob.nio.BlobNioTestBase#liveOnly") // Because we upload in blocks - @Disabled("failing in ci") - public void writeArrayError() throws IOException { - // Create an append blob at the destination to ensure writes fail. Customers should eventually be notified via - // writing that there was an error - cc.getBlobClient(bc.getBlobName()).getAppendBlobClient().create(); - - /* - Write enough data to force making network requests. The error will not be thrown until the next time a method - on the stream is called. - */ - nioStream.write(getRandomByteArray(MAX_SINGLE_UPLOAD_SIZE + 1)); - // Issue a spurious request: A more reliable way than sleeping to ensure the previous stage block has enough - // time to round trip. 
- bc.getProperties(); - - assertThrows(IOException.class, () -> nioStream.write(new byte[1])); - } - - @ParameterizedTest - @CsvSource(value = {"0,100", "20,80", "20,40"}) - public void writeOffsetLen(int offset, int len) throws IOException { - int dataSize = 100; - byte[] data = getRandomByteArray(dataSize); - - nioStream.write(data, offset, len); - nioStream.close(); - - assertEquals(len, bc.getProperties().getBlobSize()); - compareInputStreams(bc.openInputStream(), new ByteArrayInputStream(data, offset, len), dataSize); - } - - // To ensure the error isn't being wrapped unnecessarily - @Test - public void writeOffsetLenIOB() { - assertThrows(IndexOutOfBoundsException.class, () -> nioStream.write(new byte[5], -1, 6)); - } - - @EnabledIf("com.azure.storage.blob.nio.BlobNioTestBase#liveOnly") // Because we upload in blocks - @Disabled("failing in ci") - public void writeOffsetLenNetworkError() throws IOException { - // Create an append blob at the destination to ensure writes fail. Customers should eventually be notified via - // writing that there was an error - cc.getBlobClient(bc.getBlobName()).getAppendBlobClient().create(); - - // Write enough data to force making network requests. - nioStream.write(getRandomByteArray(MAX_SINGLE_UPLOAD_SIZE + 1)); - // Issue a spurious request: A more reliable way than sleeping to ensure the previous stage block has enough - // time to round trip. 
- bc.getProperties(); - - assertThrows(IOException.class, () -> nioStream.write(new byte[1], 0, 1)); - } - - @Test - public void writeFSClosed() throws IOException { - fs.close(); - - assertThrows(ClosedFileSystemException.class, () -> nioStream.write(5)); - assertThrows(ClosedFileSystemException.class, () -> nioStream.write(new byte[5])); - assertThrows(ClosedFileSystemException.class, () -> nioStream.write(new byte[5], 2, 1)); - } - - // Flush does not actually flush data right now - @Test - public void flush() throws IOException { - nioStream.write(1); - nioStream.flush(); - - BlobStorageException e = assertThrows(BlobStorageException.class, () -> bc.listBlocks(BlockListType.ALL)); - assertEquals(BlobErrorCode.BLOB_NOT_FOUND, e.getErrorCode()); - } - - // Flush should at least check the stream state - @EnabledIf("com.azure.storage.blob.nio.BlobNioTestBase#liveOnly") // Because we upload in blocks - @Disabled("failing in ci") - public void flushError() throws IOException { - // Create an append blob at the destination to ensure writes fail. Customers should eventually be notified via - // writing that there was an error - cc.getBlobClient(bc.getBlobName()).getAppendBlobClient().create(); - - // Write enough data to force making network requests. - nioStream.write(getRandomByteArray(MAX_SINGLE_UPLOAD_SIZE + 1)); - // Issue a spurious request: A more reliable way than sleeping to ensure the previous stage block has enough - // time to round trip. 
- bc.getProperties(); - - assertThrows(IOException.class, nioStream::flush); - } - - @Test - public void flushClosedFS() throws IOException { - nioStream.write(1); - fs.close(); - - assertThrows(ClosedFileSystemException.class, nioStream::flush); - } - - @Test - public void close() throws IOException { - nioStream.close(); - - assertThrows(IOException.class, () -> nioStream.write(1)); - } - - @Test - public void closeError() throws IOException { - // now calling close multiple times does not cause any error - nioStream.close(); - assertDoesNotThrow(nioStream::close); - } - - @Test - public void closeDoesNotThrowError() throws IOException { - bc = cc.getBlobClient(generateBlobName()).getBlockBlobClient(); - OutputStream nioStream = new NioBlobOutputStream(bc.getBlobOutputStream(new BlockBlobOutputStreamOptions()), - fs.getPath(getNonDefaultRootDir(fs), bc.getBlobName())); - - nioStream.write(1); - nioStream.close(); - // assert no error is thrown since close handles multiple close requests now - assertDoesNotThrow(nioStream::close); - } - - @Test - public void closeFSClosed() throws IOException { - fs.close(); - - assertThrows(ClosedFileSystemException.class, nioStream::close); - } -} diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/NioClientBuilderTests.java b/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/NioClientBuilderTests.java deleted file mode 100644 index 413a8062352..00000000000 --- a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/test/java/com/azure/storage/blob/nio/NioClientBuilderTests.java +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package com.azure.storage.blob.nio; - -import com.azure.core.http.HttpClient; -import com.azure.core.http.HttpHeaderName; -import com.azure.core.http.HttpMethod; -import com.azure.core.http.HttpPipeline; -import com.azure.core.http.HttpRequest; -import com.azure.core.http.HttpResponse; -import com.azure.core.test.http.MockHttpResponse; -import com.azure.core.util.CoreUtils; -import com.azure.storage.blob.implementation.util.BlobUserAgentModificationPolicy; -import com.azure.storage.common.StorageSharedKeyCredential; -import org.junit.jupiter.api.Test; -import reactor.core.publisher.Mono; -import reactor.test.StepVerifier; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; - -public class NioClientBuilderTests { - private static final Map PROPERTIES = - CoreUtils.getProperties("azure-storage-blob-nio.properties"); - private static final String CLIENT_NAME = PROPERTIES.getOrDefault("name", "UnknownName"); - private static final String CLIENT_VERSION = PROPERTIES.getOrDefault("version", "UnknownVersion"); - - static HttpRequest request(String url) { - return new HttpRequest(HttpMethod.HEAD, url); - } - - @Test - public void azureFileSystemServiceClient() throws IOException { - Map config = new HashMap<>(); - config.put(AzureFileSystem.AZURE_STORAGE_FILE_STORES, "containerName"); - config.put(AzureFileSystem.AZURE_STORAGE_HTTP_CLIENT, new UAStringTestClient("azsdk-java-azure-storage-blob/\\d+\\.\\d+\\.\\d+[-beta\\.\\d+]* azsdk-java-" + CLIENT_NAME + "/" + CLIENT_VERSION + " " + "(.)*")); - config.put(AzureFileSystem.AZURE_STORAGE_SHARED_KEY_CREDENTIAL, new StorageSharedKeyCredential("accountName", "accountKey")); - - AzureFileSystem fileSystem = new AzureFileSystem(new AzureFileSystemProvider(), "https://accountName.blob.core.windows.net", config); - 
HttpPipeline pipeline = fileSystem.getBlobServiceClient().getHttpPipeline(); - - verifyPipelineAndResponse(pipeline, fileSystem.getBlobServiceClient().getAccountUrl()); - } - - @Test - public void azureFileStoreContainerClient() throws IOException { - Map config = new HashMap<>(); - config.put(AzureFileSystem.AZURE_STORAGE_FILE_STORES, "containerName"); - config.put(AzureFileSystem.AZURE_STORAGE_HTTP_CLIENT, new UAStringTestClient("azsdk-java-azure-storage-blob/\\d+\\.\\d+\\.\\d+[-beta\\.\\d+]* azsdk-java-" + CLIENT_NAME + "/" + CLIENT_VERSION + " " + "(.)*")); - config.put(AzureFileSystem.AZURE_STORAGE_SHARED_KEY_CREDENTIAL, new StorageSharedKeyCredential("accountName", "accountKey")); - AzureFileSystem fileSystem = new AzureFileSystem(new AzureFileSystemProvider(), "https://accountName.blob.core.windows.net", config); - AzureFileStore fileStore = (AzureFileStore) fileSystem.getFileStore("containerName"); - HttpPipeline pipeline = fileStore.getContainerClient().getHttpPipeline(); - - verifyPipelineAndResponse(pipeline, fileStore.getContainerClient().getBlobContainerUrl()); - } - - @Test - public void azResourceBlobClient() throws IOException { - Map config = new HashMap<>(); - config.put(AzureFileSystem.AZURE_STORAGE_FILE_STORES, "containerName"); - config.put(AzureFileSystem.AZURE_STORAGE_HTTP_CLIENT, new UAStringTestClient("azsdk-java-azure-storage-blob/\\d+\\.\\d+\\.\\d+[-beta\\.\\d+]* azsdk-java-" + CLIENT_NAME + "/" + CLIENT_VERSION + " " + "(.)*")); - config.put(AzureFileSystem.AZURE_STORAGE_SHARED_KEY_CREDENTIAL, new StorageSharedKeyCredential("accountName", "accountKey")); - AzureFileSystem fileSystem = new AzureFileSystem(new AzureFileSystemProvider(), "https://accountName.blob.core.windows.net", config); - AzurePath path = (AzurePath) fileSystem.getPath("blobName"); - AzureResource resource = new AzureResource(path); - HttpPipeline pipeline = resource.getBlobClient().getHttpPipeline(); - - verifyPipelineAndResponse(pipeline, 
resource.getBlobClient().getBlobUrl()); - } - - private static void verifyPipelineAndResponse(HttpPipeline pipeline, String url) { - boolean foundPolicy = false; - for (int i = 0; i < pipeline.getPolicyCount(); i++) { - foundPolicy |= (pipeline.getPolicy(i) instanceof BlobUserAgentModificationPolicy); - } - - assertTrue(foundPolicy); - StepVerifier.create(pipeline.send(request(url))) - .assertNext(response -> assertEquals(200, response.getStatusCode())) - .verifyComplete(); - } - - private static final class UAStringTestClient implements HttpClient { - private final Pattern pattern; - - UAStringTestClient(String regex) { - this.pattern = Pattern.compile(regex); - } - - @Override - public Mono send(HttpRequest request) { - if (CoreUtils.isNullOrEmpty(request.getHeaders().getValue(HttpHeaderName.USER_AGENT))) { - throw new RuntimeException("Failed to set 'User-Agent' header."); - } - Matcher matcher = pattern.matcher(request.getHeaders().getValue(HttpHeaderName.USER_AGENT)); - assertTrue(matcher.matches()); - return Mono.just(new MockHttpResponse(request, 200)); - } - } -} From f9511a17099ff364f22e5575f144927c9094f629 Mon Sep 17 00:00:00 2001 From: Christian Freitas Date: Thu, 29 Jun 2023 15:18:12 -0400 Subject: [PATCH 3/7] Remove slf4j dependency from nio --- project/Dependencies.scala | 1 - 1 file changed, 1 deletion(-) diff --git a/project/Dependencies.scala b/project/Dependencies.scala index c941a157df8..8a5208a579c 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -219,7 +219,6 @@ object Dependencies { "org.junit.jupiter" % "junit-jupiter-api" % "5.9.3", "io.projectreactor" % "reactor-test" % "3.4.29", "cglib" % "cglib-nodep" % "3.2.7", - "org.slf4j" % "slf4j-simple" % slf4jV, "com.azure" % "azure-core-http-okhttp" % "1.11.10", "org.mockito" % "mockito-core" % "4.11.0", "com.github.sbt" % "junit-interface" % "0.13.2" % Test // For running junit tests associated with this library From 8f905f6a4fe53e44cb1f2e4540dc953206ebff83 Mon Sep 17 
00:00:00 2001 From: Christian Freitas Date: Tue, 11 Jul 2023 16:28:31 -0400 Subject: [PATCH 4/7] Move nio library to top level project for compilation --- .../storage/blob/nio => azure-blob-nio}/CHANGELOG.md | 0 .../storage/blob/nio => azure-blob-nio}/DesignDoc.md | 0 .../storage/blob/nio => azure-blob-nio}/README.md | 0 .../storage/blob/nio => azure-blob-nio}/assets.json | 0 .../blob/nio/AzureBasicFileAttributeView.java | 0 .../storage/blob/nio/AzureBasicFileAttributes.java | 0 .../storage/blob/nio/AzureBlobFileAttributeView.java | 0 .../storage/blob/nio/AzureBlobFileAttributes.java | 0 .../azure/storage/blob/nio/AzureDirectoryStream.java | 0 .../com/azure/storage/blob/nio/AzureFileStore.java | 0 .../com/azure/storage/blob/nio/AzureFileSystem.java | 0 .../storage/blob/nio/AzureFileSystemProvider.java | 0 .../java/com/azure/storage/blob/nio/AzurePath.java | 0 .../com/azure/storage/blob/nio/AzureResource.java | 0 .../storage/blob/nio/AzureSeekableByteChannel.java | 0 .../com/azure/storage/blob/nio/DirectoryStatus.java | 0 .../com/azure/storage/blob/nio/LoggingUtility.java | 0 .../azure/storage/blob/nio/NioBlobInputStream.java | 0 .../azure/storage/blob/nio/NioBlobOutputStream.java | 0 .../com/azure/storage/blob/nio/package-info.java | 0 .../services/java.nio.file.spi.FileSystemProvider | 0 .../main/resources/azure-storage-blob-nio.properties | 0 .../com/azure/storage/blob/nio/ReadmeSamples.java | 0 build.sbt | 5 +++++ .../cromwell/cloudsupport/azure/AzureUtils.scala | 2 +- .../filesystems/blob/BlobPathBuilderSpec.scala | 12 ++++++------ 26 files changed, 12 insertions(+), 7 deletions(-) rename {filesystems/blob/src/main/java/com/azure/storage/blob/nio => azure-blob-nio}/CHANGELOG.md (100%) rename {filesystems/blob/src/main/java/com/azure/storage/blob/nio => azure-blob-nio}/DesignDoc.md (100%) rename {filesystems/blob/src/main/java/com/azure/storage/blob/nio => azure-blob-nio}/README.md (100%) rename {filesystems/blob/src/main/java/com/azure/storage/blob/nio => 
azure-blob-nio}/assets.json (100%) rename {filesystems/blob/src/main/java/com/azure/storage/blob/nio => azure-blob-nio}/src/main/java/com/azure/storage/blob/nio/AzureBasicFileAttributeView.java (100%) rename {filesystems/blob/src/main/java/com/azure/storage/blob/nio => azure-blob-nio}/src/main/java/com/azure/storage/blob/nio/AzureBasicFileAttributes.java (100%) rename {filesystems/blob/src/main/java/com/azure/storage/blob/nio => azure-blob-nio}/src/main/java/com/azure/storage/blob/nio/AzureBlobFileAttributeView.java (100%) rename {filesystems/blob/src/main/java/com/azure/storage/blob/nio => azure-blob-nio}/src/main/java/com/azure/storage/blob/nio/AzureBlobFileAttributes.java (100%) rename {filesystems/blob/src/main/java/com/azure/storage/blob/nio => azure-blob-nio}/src/main/java/com/azure/storage/blob/nio/AzureDirectoryStream.java (100%) rename {filesystems/blob/src/main/java/com/azure/storage/blob/nio => azure-blob-nio}/src/main/java/com/azure/storage/blob/nio/AzureFileStore.java (100%) rename {filesystems/blob/src/main/java/com/azure/storage/blob/nio => azure-blob-nio}/src/main/java/com/azure/storage/blob/nio/AzureFileSystem.java (100%) rename {filesystems/blob/src/main/java/com/azure/storage/blob/nio => azure-blob-nio}/src/main/java/com/azure/storage/blob/nio/AzureFileSystemProvider.java (100%) rename {filesystems/blob/src/main/java/com/azure/storage/blob/nio => azure-blob-nio}/src/main/java/com/azure/storage/blob/nio/AzurePath.java (100%) rename {filesystems/blob/src/main/java/com/azure/storage/blob/nio => azure-blob-nio}/src/main/java/com/azure/storage/blob/nio/AzureResource.java (100%) rename {filesystems/blob/src/main/java/com/azure/storage/blob/nio => azure-blob-nio}/src/main/java/com/azure/storage/blob/nio/AzureSeekableByteChannel.java (100%) rename {filesystems/blob/src/main/java/com/azure/storage/blob/nio => azure-blob-nio}/src/main/java/com/azure/storage/blob/nio/DirectoryStatus.java (100%) rename 
{filesystems/blob/src/main/java/com/azure/storage/blob/nio => azure-blob-nio}/src/main/java/com/azure/storage/blob/nio/LoggingUtility.java (100%) rename {filesystems/blob/src/main/java/com/azure/storage/blob/nio => azure-blob-nio}/src/main/java/com/azure/storage/blob/nio/NioBlobInputStream.java (100%) rename {filesystems/blob/src/main/java/com/azure/storage/blob/nio => azure-blob-nio}/src/main/java/com/azure/storage/blob/nio/NioBlobOutputStream.java (100%) rename {filesystems/blob/src/main/java/com/azure/storage/blob/nio => azure-blob-nio}/src/main/java/com/azure/storage/blob/nio/package-info.java (100%) rename {filesystems/blob/src/main/java/com/azure/storage/blob/nio => azure-blob-nio}/src/main/resources/META-INF/services/java.nio.file.spi.FileSystemProvider (100%) rename {filesystems/blob/src/main/java/com/azure/storage/blob/nio => azure-blob-nio}/src/main/resources/azure-storage-blob-nio.properties (100%) rename {filesystems/blob/src/main/java/com/azure/storage/blob/nio => azure-blob-nio}/src/samples/java/com/azure/storage/blob/nio/ReadmeSamples.java (100%) diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/CHANGELOG.md b/azure-blob-nio/CHANGELOG.md similarity index 100% rename from filesystems/blob/src/main/java/com/azure/storage/blob/nio/CHANGELOG.md rename to azure-blob-nio/CHANGELOG.md diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/DesignDoc.md b/azure-blob-nio/DesignDoc.md similarity index 100% rename from filesystems/blob/src/main/java/com/azure/storage/blob/nio/DesignDoc.md rename to azure-blob-nio/DesignDoc.md diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/README.md b/azure-blob-nio/README.md similarity index 100% rename from filesystems/blob/src/main/java/com/azure/storage/blob/nio/README.md rename to azure-blob-nio/README.md diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/assets.json b/azure-blob-nio/assets.json similarity index 100% rename from 
filesystems/blob/src/main/java/com/azure/storage/blob/nio/assets.json rename to azure-blob-nio/assets.json diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureBasicFileAttributeView.java b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureBasicFileAttributeView.java similarity index 100% rename from filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureBasicFileAttributeView.java rename to azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureBasicFileAttributeView.java diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureBasicFileAttributes.java b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureBasicFileAttributes.java similarity index 100% rename from filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureBasicFileAttributes.java rename to azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureBasicFileAttributes.java diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureBlobFileAttributeView.java b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureBlobFileAttributeView.java similarity index 100% rename from filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureBlobFileAttributeView.java rename to azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureBlobFileAttributeView.java diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureBlobFileAttributes.java b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureBlobFileAttributes.java similarity index 100% rename from filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureBlobFileAttributes.java rename to 
azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureBlobFileAttributes.java diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureDirectoryStream.java b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureDirectoryStream.java similarity index 100% rename from filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureDirectoryStream.java rename to azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureDirectoryStream.java diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureFileStore.java b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureFileStore.java similarity index 100% rename from filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureFileStore.java rename to azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureFileStore.java diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureFileSystem.java b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureFileSystem.java similarity index 100% rename from filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureFileSystem.java rename to azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureFileSystem.java diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureFileSystemProvider.java b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureFileSystemProvider.java similarity index 100% rename from filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureFileSystemProvider.java rename to azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureFileSystemProvider.java diff --git 
a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzurePath.java b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzurePath.java similarity index 100% rename from filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzurePath.java rename to azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzurePath.java diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureResource.java b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureResource.java similarity index 100% rename from filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureResource.java rename to azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureResource.java diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureSeekableByteChannel.java b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureSeekableByteChannel.java similarity index 100% rename from filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/AzureSeekableByteChannel.java rename to azure-blob-nio/src/main/java/com/azure/storage/blob/nio/AzureSeekableByteChannel.java diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/DirectoryStatus.java b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/DirectoryStatus.java similarity index 100% rename from filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/DirectoryStatus.java rename to azure-blob-nio/src/main/java/com/azure/storage/blob/nio/DirectoryStatus.java diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/LoggingUtility.java 
b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/LoggingUtility.java similarity index 100% rename from filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/LoggingUtility.java rename to azure-blob-nio/src/main/java/com/azure/storage/blob/nio/LoggingUtility.java diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/NioBlobInputStream.java b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/NioBlobInputStream.java similarity index 100% rename from filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/NioBlobInputStream.java rename to azure-blob-nio/src/main/java/com/azure/storage/blob/nio/NioBlobInputStream.java diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/NioBlobOutputStream.java b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/NioBlobOutputStream.java similarity index 100% rename from filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/NioBlobOutputStream.java rename to azure-blob-nio/src/main/java/com/azure/storage/blob/nio/NioBlobOutputStream.java diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/package-info.java b/azure-blob-nio/src/main/java/com/azure/storage/blob/nio/package-info.java similarity index 100% rename from filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/java/com/azure/storage/blob/nio/package-info.java rename to azure-blob-nio/src/main/java/com/azure/storage/blob/nio/package-info.java diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/resources/META-INF/services/java.nio.file.spi.FileSystemProvider b/azure-blob-nio/src/main/resources/META-INF/services/java.nio.file.spi.FileSystemProvider similarity index 100% rename from 
filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/resources/META-INF/services/java.nio.file.spi.FileSystemProvider rename to azure-blob-nio/src/main/resources/META-INF/services/java.nio.file.spi.FileSystemProvider diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/resources/azure-storage-blob-nio.properties b/azure-blob-nio/src/main/resources/azure-storage-blob-nio.properties similarity index 100% rename from filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/main/resources/azure-storage-blob-nio.properties rename to azure-blob-nio/src/main/resources/azure-storage-blob-nio.properties diff --git a/filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/samples/java/com/azure/storage/blob/nio/ReadmeSamples.java b/azure-blob-nio/src/samples/java/com/azure/storage/blob/nio/ReadmeSamples.java similarity index 100% rename from filesystems/blob/src/main/java/com/azure/storage/blob/nio/src/samples/java/com/azure/storage/blob/nio/ReadmeSamples.java rename to azure-blob-nio/src/samples/java/com/azure/storage/blob/nio/ReadmeSamples.java diff --git a/build.sbt b/build.sbt index 48f986badd7..6b2641e024f 100644 --- a/build.sbt +++ b/build.sbt @@ -97,12 +97,16 @@ lazy val cloudSupport = project .dependsOn(common) .dependsOn(common % "test->test") +lazy val azureBlobNio = (project in file("azure-blob-nio")) + .withLibrarySettings("cromwell-azure-blobNio", azureBlobNioDependencies) + lazy val azureBlobFileSystem = (project in file("filesystems/blob")) .withLibrarySettings("cromwell-azure-blobFileSystem", blobFileSystemDependencies) .dependsOn(core) .dependsOn(core % "test->test") .dependsOn(common % "test->test") .dependsOn(cloudSupport) + .dependsOn(azureBlobNio) lazy val awsS3FileSystem = (project in file("filesystems/s3")) .withLibrarySettings("cromwell-aws-s3filesystem", s3FileSystemDependencies) @@ -408,6 +412,7 @@ lazy val root = (project in file(".")) .aggregate(`cromwell-drs-localizer`) .aggregate(awsBackend) 
.aggregate(awsS3FileSystem) + .aggregate(azureBlobNio) .aggregate(azureBlobFileSystem) .aggregate(backend) .aggregate(centaur) diff --git a/cloudSupport/src/main/scala/cromwell/cloudsupport/azure/AzureUtils.scala b/cloudSupport/src/main/scala/cromwell/cloudsupport/azure/AzureUtils.scala index 09cf5f3869d..ed2c8593990 100644 --- a/cloudSupport/src/main/scala/cromwell/cloudsupport/azure/AzureUtils.scala +++ b/cloudSupport/src/main/scala/cromwell/cloudsupport/azure/AzureUtils.scala @@ -32,7 +32,7 @@ object AzureUtils { .authorityHost(azureProfile.getEnvironment.getActiveDirectoryEndpoint) .build - def authenticateWithSubscription(sub: String) = AzureResourceManager.authenticate(azureCredentialBuilder, azureProfile).withSubscription(sub) + def authenticateWithSubscription(sub: String) = AzureResourceManager.authenticate(azureCredentialBuilder, azureProfile).withTenantId("/tenants/fad90753-2022-4456-9b0a-c7e5b934e408").withSubscription(sub) def authenticateWithDefaultSubscription = AzureResourceManager.authenticate(azureCredentialBuilder, azureProfile).withDefaultSubscription() diff --git a/filesystems/blob/src/test/scala/cromwell/filesystems/blob/BlobPathBuilderSpec.scala b/filesystems/blob/src/test/scala/cromwell/filesystems/blob/BlobPathBuilderSpec.scala index 4012e241eb3..2e511ba31e6 100644 --- a/filesystems/blob/src/test/scala/cromwell/filesystems/blob/BlobPathBuilderSpec.scala +++ b/filesystems/blob/src/test/scala/cromwell/filesystems/blob/BlobPathBuilderSpec.scala @@ -100,7 +100,7 @@ class BlobPathBuilderSpec extends AnyFlatSpec with Matchers with MockSugar { new BlobPathBuilder(store, endpoint)(fsm) } - ignore should "resolve an absolute path string correctly to a path" in { + it should "resolve an absolute path string correctly to a path" in { val builder = makeBlobPathBuilder(endpoint, store) val rootString = s"${endpoint.value}/${store.value}/cromwell-execution" val blobRoot: BlobPath = builder build rootString getOrElse fail() @@ -109,7 +109,7 @@ class 
BlobPathBuilderSpec extends AnyFlatSpec with Matchers with MockSugar { otherFile.toAbsolutePath.pathAsString should equal ("https://coaexternalstorage.blob.core.windows.net/inputs/cromwell-execution/test/inputFile.txt") } - ignore should "build a blob path from a test string and read a file" in { + it should "build a blob path from a test string and read a file" in { val builder = makeBlobPathBuilder(endpoint, store) val endpointHost = BlobPathBuilder.parseURI(endpoint.value).map(_.getHost).getOrElse(fail("Could not parse URI")) val evalPath = "/test/inputFile.txt" @@ -125,7 +125,7 @@ class BlobPathBuilderSpec extends AnyFlatSpec with Matchers with MockSugar { fileText should include ("This is my test file!!!! Did it work?") } - ignore should "build duplicate blob paths in the same filesystem" in { + it should "build duplicate blob paths in the same filesystem" in { val builder = makeBlobPathBuilder(endpoint, store) val evalPath = "/test/inputFile.txt" val testString = endpoint.value + "/" + store + evalPath @@ -138,7 +138,7 @@ class BlobPathBuilderSpec extends AnyFlatSpec with Matchers with MockSugar { fileText should include ("This is my test file!!!! 
Did it work?") } - ignore should "resolve a path without duplicating container name" in { + it should "resolve a path without duplicating container name" in { val builder = makeBlobPathBuilder(endpoint, store) val rootString = s"${endpoint.value}/${store.value}/cromwell-execution" val blobRoot: BlobPath = builder build rootString getOrElse fail() @@ -147,7 +147,7 @@ class BlobPathBuilderSpec extends AnyFlatSpec with Matchers with MockSugar { otherFile.toAbsolutePath.pathAsString should equal ("https://coaexternalstorage.blob.core.windows.net/inputs/cromwell-execution/test/inputFile.txt") } - ignore should "correctly remove a prefix from the blob path" in { + it should "correctly remove a prefix from the blob path" in { val builder = makeBlobPathBuilder(endpoint, store) val rootString = s"${endpoint.value}/${store.value}/cromwell-execution/" val execDirString = s"${endpoint.value}/${store.value}/cromwell-execution/abc123/myworkflow/task1/def4356/execution/" @@ -160,7 +160,7 @@ class BlobPathBuilderSpec extends AnyFlatSpec with Matchers with MockSugar { blobFile.pathStringWithoutPrefix(blobFile) should equal ("") } - ignore should "not change a path if it doesn't start with a prefix" in { + it should "not change a path if it doesn't start with a prefix" in { val builder = makeBlobPathBuilder(endpoint, store) val otherRootString = s"${endpoint.value}/${store.value}/foobar/" val fileString = s"${endpoint.value}/${store.value}/cromwell-execution/abc123/myworkflow/task1/def4356/execution/stdout" From 052859a52604bca7e20cad458e16529189e6ca06 Mon Sep 17 00:00:00 2001 From: Christian Freitas Date: Tue, 11 Jul 2023 20:24:18 -0400 Subject: [PATCH 5/7] Clean up --- .../cromwell/cloudsupport/azure/AzureUtils.scala | 2 +- .../filesystems/blob/BlobPathBuilderSpec.scala | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/cloudSupport/src/main/scala/cromwell/cloudsupport/azure/AzureUtils.scala 
b/cloudSupport/src/main/scala/cromwell/cloudsupport/azure/AzureUtils.scala index ed2c8593990..09cf5f3869d 100644 --- a/cloudSupport/src/main/scala/cromwell/cloudsupport/azure/AzureUtils.scala +++ b/cloudSupport/src/main/scala/cromwell/cloudsupport/azure/AzureUtils.scala @@ -32,7 +32,7 @@ object AzureUtils { .authorityHost(azureProfile.getEnvironment.getActiveDirectoryEndpoint) .build - def authenticateWithSubscription(sub: String) = AzureResourceManager.authenticate(azureCredentialBuilder, azureProfile).withTenantId("/tenants/fad90753-2022-4456-9b0a-c7e5b934e408").withSubscription(sub) + def authenticateWithSubscription(sub: String) = AzureResourceManager.authenticate(azureCredentialBuilder, azureProfile).withSubscription(sub) def authenticateWithDefaultSubscription = AzureResourceManager.authenticate(azureCredentialBuilder, azureProfile).withDefaultSubscription() diff --git a/filesystems/blob/src/test/scala/cromwell/filesystems/blob/BlobPathBuilderSpec.scala b/filesystems/blob/src/test/scala/cromwell/filesystems/blob/BlobPathBuilderSpec.scala index 2e511ba31e6..4012e241eb3 100644 --- a/filesystems/blob/src/test/scala/cromwell/filesystems/blob/BlobPathBuilderSpec.scala +++ b/filesystems/blob/src/test/scala/cromwell/filesystems/blob/BlobPathBuilderSpec.scala @@ -100,7 +100,7 @@ class BlobPathBuilderSpec extends AnyFlatSpec with Matchers with MockSugar { new BlobPathBuilder(store, endpoint)(fsm) } - it should "resolve an absolute path string correctly to a path" in { + ignore should "resolve an absolute path string correctly to a path" in { val builder = makeBlobPathBuilder(endpoint, store) val rootString = s"${endpoint.value}/${store.value}/cromwell-execution" val blobRoot: BlobPath = builder build rootString getOrElse fail() @@ -109,7 +109,7 @@ class BlobPathBuilderSpec extends AnyFlatSpec with Matchers with MockSugar { otherFile.toAbsolutePath.pathAsString should equal 
("https://coaexternalstorage.blob.core.windows.net/inputs/cromwell-execution/test/inputFile.txt") } - it should "build a blob path from a test string and read a file" in { + ignore should "build a blob path from a test string and read a file" in { val builder = makeBlobPathBuilder(endpoint, store) val endpointHost = BlobPathBuilder.parseURI(endpoint.value).map(_.getHost).getOrElse(fail("Could not parse URI")) val evalPath = "/test/inputFile.txt" @@ -125,7 +125,7 @@ class BlobPathBuilderSpec extends AnyFlatSpec with Matchers with MockSugar { fileText should include ("This is my test file!!!! Did it work?") } - it should "build duplicate blob paths in the same filesystem" in { + ignore should "build duplicate blob paths in the same filesystem" in { val builder = makeBlobPathBuilder(endpoint, store) val evalPath = "/test/inputFile.txt" val testString = endpoint.value + "/" + store + evalPath @@ -138,7 +138,7 @@ class BlobPathBuilderSpec extends AnyFlatSpec with Matchers with MockSugar { fileText should include ("This is my test file!!!! 
Did it work?") } - it should "resolve a path without duplicating container name" in { + ignore should "resolve a path without duplicating container name" in { val builder = makeBlobPathBuilder(endpoint, store) val rootString = s"${endpoint.value}/${store.value}/cromwell-execution" val blobRoot: BlobPath = builder build rootString getOrElse fail() @@ -147,7 +147,7 @@ class BlobPathBuilderSpec extends AnyFlatSpec with Matchers with MockSugar { otherFile.toAbsolutePath.pathAsString should equal ("https://coaexternalstorage.blob.core.windows.net/inputs/cromwell-execution/test/inputFile.txt") } - it should "correctly remove a prefix from the blob path" in { + ignore should "correctly remove a prefix from the blob path" in { val builder = makeBlobPathBuilder(endpoint, store) val rootString = s"${endpoint.value}/${store.value}/cromwell-execution/" val execDirString = s"${endpoint.value}/${store.value}/cromwell-execution/abc123/myworkflow/task1/def4356/execution/" @@ -160,7 +160,7 @@ class BlobPathBuilderSpec extends AnyFlatSpec with Matchers with MockSugar { blobFile.pathStringWithoutPrefix(blobFile) should equal ("") } - it should "not change a path if it doesn't start with a prefix" in { + ignore should "not change a path if it doesn't start with a prefix" in { val builder = makeBlobPathBuilder(endpoint, store) val otherRootString = s"${endpoint.value}/${store.value}/foobar/" val fileString = s"${endpoint.value}/${store.value}/cromwell-execution/abc123/myworkflow/task1/def4356/execution/stdout" From f2fc27f1c2180ca276e1bf6d4bca5c4f87726231 Mon Sep 17 00:00:00 2001 From: Christian Freitas Date: Wed, 12 Jul 2023 10:04:28 -0400 Subject: [PATCH 6/7] Fix kotlin module merge strategy --- project/Dependencies.scala | 2 +- project/Merging.scala | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 75b93ea1435..1a7681da1e3 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ 
-223,7 +223,7 @@ object Dependencies { "cglib" % "cglib-nodep" % "3.2.7", "com.azure" % "azure-core-http-okhttp" % "1.11.10", "org.mockito" % "mockito-core" % "4.11.0", - "com.github.sbt" % "junit-interface" % "0.13.2" % Test // For running junit tests associated with this library + "com.github.sbt" % "junit-interface" % "0.13.2" ) val azureDependencies: List[ModuleID] = List( diff --git a/project/Merging.scala b/project/Merging.scala index 5ecd951014b..91b135e59eb 100644 --- a/project/Merging.scala +++ b/project/Merging.scala @@ -12,6 +12,11 @@ object Merging { // Merge mozilla/public-suffix-list.txt if duplicated case PathList(ps@_*) if ps.last == "public-suffix-list.txt" => MergeStrategy.last + // Merge kotlin modules if duplicated + case PathList(ps@_*) if ps.last == "kotlin-stdlib-common.kotlin_module" => + MergeStrategy.last + case PathList(ps@_*) if ps.last == "kotlin-stdlib.kotlin_module" => + MergeStrategy.last // AWS SDK v2 configuration files - can be discarded case PathList(ps@_*) if Set("codegen.config" , "service-2.json" , "waiters-2.json" , "customization.config" , "examples-1.json" , "paginators-1.json").contains(ps.last) => MergeStrategy.discard From 6fecc1253d40b47398d664705a8f28c56fe1a728 Mon Sep 17 00:00:00 2001 From: Christian Freitas Date: Wed, 12 Jul 2023 16:06:04 -0400 Subject: [PATCH 7/7] Add README and clean up sdk documentation --- azure-blob-nio/CHANGELOG.md | 134 --------------- azure-blob-nio/DesignDoc.md | 235 ------------------------- azure-blob-nio/README.md | 332 +----------------------------------- 3 files changed, 2 insertions(+), 699 deletions(-) delete mode 100644 azure-blob-nio/CHANGELOG.md delete mode 100644 azure-blob-nio/DesignDoc.md diff --git a/azure-blob-nio/CHANGELOG.md b/azure-blob-nio/CHANGELOG.md deleted file mode 100644 index af31b4e0c74..00000000000 --- a/azure-blob-nio/CHANGELOG.md +++ /dev/null @@ -1,134 +0,0 @@ -# Release History - -## 12.0.0-beta.20 (Unreleased) - -### Features Added -- Added support for 
2021-12-02 service version. - -### Breaking Changes - -### Bugs Fixed - -### Other Changes -- Migrate test recordings to assets repo. - -## 12.0.0-beta.19 (2022-05-06) - -### Features Added -- Added support for 2021-06-08 service version. - -## 12.0.0-beta.18 (2022-04-07) - -### Other Changes -#### Dependency Updates -- Updated blob dependency to 12.16.0 - -## 12.0.0-beta.17 (2022-03-09) - -### Features Added -- Enabled support for Files.exists() -- Enabled support for Files.walkFileTree() - -### Breaking Changes -- `AzureFileSystemProvider.readAttributes()` no longer throws an IOException for virtual directories and instead returns a set of attributes that are all empty except for an `isVirtual` property set to true. - -### Other Changes -- Enabling support for Files.exists() to support virtual directories required supporting virtual directories in reading file attributes. This required introducing a perf hit in the way of an extra getProps request - -#### Dependency Updates - -- Updated blob dependency to 12.15.0 - -## 12.0.0-beta.16 (2022-02-11) - -### Other Changes - -#### Dependency Updates - -- Upgraded `azure-storage-blob` from `12.15.0-beta.3` to version `12.14.4`. - -## 12.0.0-beta.15 (2022-02-09) - -### Features Added -- Added support for 2021-04-10 service version. -- Added `AzurePath.fromBlobUrl` to help convert from a blob url to an AzurePath -- Added a configuration option `AZURE_STORAGE_SKIP_INITIAL_CONTAINER_CHECK` to skip the initial container check in cases where the authentication method used will not have necessary permissions. - -### Bugs Fixed -- Fixed a bug that would prevent deleting an empty directory in the case where one directory name was a prefix of the other. - - -## 12.0.0-beta.14 (2022-01-14) - -### Other Changes - -#### Dependency Updates - -- Upgraded `azure-core` from `1.23.0` to version `1.24.1`. -- Upgraded `azure-core-http-netty` from `1.11.3` to version `1.11.6`. 
-- Upgraded `azure-storage-blob` from `12.15.0-beta.2` to version `12.14.3`. - -## 12.0.0-beta.13 (2021-12-07) - -### Features Added -- Added support for 2021-02-12 service version. - -## 12.0.0-beta.12 (2021-11-10) - -### Other Changes - -#### Dependency Updates - -- Upgraded `azure-core` from `1.21.0` to version `1.22.0`. -- Upgraded `azure-core-http-netty` from `1.11.1` to version `1.11.2`. -- Upgraded `azure-storage-blob` from `12.15.0-beta.1` to version `12.14.2. - -## 12.0.0-beta.11 (2021-11-05) - -### Features Added -- Added support for the 2020-12-06 service version. - -### Bugs Fixed -- Fixes an off-by-one error in read() returns 0 bytes read instead of -1 (EOF) when reading at channel position == size. -- Fixes a bug where read() (and write()) do not respect initial position (and limit) of provided ByteBuffer when backed by an array - -## 12.0.0-beta.10 (2021-10-12) - -### Other Changes -#### Dependency Updates -- Updated `azure-storage-blob` to version `12.14.1` - -## 12.0.0-beta.9 (2021-09-15) -### Other changes -- Updated `azure-storage-blob` to version `12.14.0` - -## 12.0.0-beta.8 (2021-07-28) -- Added support for the 2020-10-02 service version. 
- -## 12.0.0-beta.7 (2021-06-09) -### Dependency Updates -- Updated `azure-storage-blob` to version `12.12.0` - -## 12.0.0-beta.6 (2021-04-29) -- Update `azure-storage-blob` to version `12.11.0` - -## 12.0.0-beta.5 (2021-04-16) -- Fixed a bug where a file would be determined to be a directory if another file with the same prefix exists - -## 12.0.0-beta.4 (2021-03-29) -- Made AzurePath.toBlobClient public -- Added support for Azurite -- Change FileSystem configuration to accept an endpoint and credential types instead of a string for the account name, key, and token - -## 12.0.0-beta.3 (2021-02-10) -- Added support for FileSystemProvider.checkAccess method -- Added support for file key on AzureBasicFileAttributes and AzureBlobFileAttributes -- Added support for SeekableByteChannel -- When an operation is performed on a closed FileSystem, a ClosedFileSystemException is thrown instead of an IOException -- Adjusted the required flags for opening an outputstream - -## 12.0.0-beta.2 (2020-08-13) -- Added checks to ensure file system has not been closed before operating on data - -## 12.0.0-beta.1 (2020-07-17) -- Initial Release. Please see the README for more information. diff --git a/azure-blob-nio/DesignDoc.md b/azure-blob-nio/DesignDoc.md deleted file mode 100644 index 583d89c1280..00000000000 --- a/azure-blob-nio/DesignDoc.md +++ /dev/null @@ -1,235 +0,0 @@ -# Azure Storage NIO Design Doc - -# Background - -Please refer to the [Project Overview](https://microsoft-my.sharepoint.com/:w:/p/frley/EQfMXjgWA4NPrAE9IIt7PUsBC-sahzFdMkc6im0Y4R4cww) for highlevel background on this project. - -## NIO - -The [nio package](https://docs.oracle.com/javase/7/docs/api/java/nio/file/package-summary.html) is reasonably large and has several subpackages. The docs are quite thorough in specifying expected behavior for implementing the interfaces and extending the abstract types. 
- -Oracle has written a [tutorial](https://docs.oracle.com/javase/tutorial/essential/io/fileio.html) on this package that can be helpful for getting started and understanding the fundamentals of how customers may use the FileSystem APIs. - -## Providers - -Java frequently works with a Service Provider Interface (SPI) architecture. This architecture is largely built on the [ServiceLoader](https://docs.oracle.com/javase/8/docs/api/java/util/ServiceLoader.html) type. In short, the JDK will define a static factory type, in this case [FileSystems](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystems.html), that is used to instantiate providers, or implementations of the given service. When a client issues a request to the factory for a new instance, the ServiceLoader is invoked to search for installed implementations of this type. The requirements for installation are somewhat specific to the service, but in this case the type must be on the classpath and the package must have a resource file pointing to the implementation type. Once the ServiceLoader loads all available instances, it will query each to see if it fits the criteria that satisfies the client's request. In the case of FileSystems, it will look for a FileSystemProvider that uses a scheme which matches that of the passed URI. Upon finding the appropriate implementation, the service API is interacted with as normal. - -# Entry, Configuration, and Authentication - -## Entry - -The JVM relies on the [FileSystems](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystems.html) API to dynamically load FileSystems. 
Assuming our package is [properly configured](https://docs.oracle.com/javase/8/docs/api/java/util/ServiceLoader.html) and loaded on the classpath (probably via a Maven dependency), a customer need only call [newFileSystem](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystems.html#newFileSystem(java.net.URI,%20java.util.Map)) to create a new FileSystem backed by Azure Blob Storage or [getFileSystem](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystems.html#getFileSystem(java.net.URI)) to retrieve an existing instance. - -A FileSystem is an abstract concept that may be distributed across one or more accounts. In the simple case, a FileSystem corresponds to an account and will be uniquely identified by the account name. E.g. a FileSystem backed by Azure Storage account "xclientdev" will be identified by the URI "azb://?account=xclientdev". Using the account as the analog for a FileSystem allows containers to be used as different FileStores (the equivalent of different drives on Windows). - -If data required by the FileSystem is spread across multiple accounts, the FileSystem will be uniquely identified by a UUID. In this case, the URI must be of the format "azb://?fileSystemId=\<UUID\>". The difference in query parameter will signal to the FileSystem that its storage is distributed across accounts. The account name and fileSystemId will be used to index the open FileSystems in the same way, so these values cannot be shared between two different FileSystems. The difference in query parameter is only a hint to the FileSystem. (See "Configuration and Authentication" below for further information on how this affects configuration). - -The scheme used for Azure Storage's implementation will be "azb". We specify 'b' as it is more flexible. 
This will leave room for later implementations to be built on top of Datalake ("azd") which will enable scenarios like working with [POSIX permissions](https://docs.oracle.com/javase/tutorial/essential/io/fileAttr.html#posix). It could also allow for loading a provider backed by Azure Share Files ("azs") for a fuller set of FileSystem features. - -A best effort attempt to make a request to the storage account will be made upon initialization by making a call to [getContainerProperties](https://docs.microsoft.com/rest/api/storageservices/get-container-properties) for each container specified (See "Configuration and Authentication" below). Failure to complete this connection on any container will result in an IOException and failure to load the FileSystem. Because this is a best effort check, it merely validates the existence of and minimal authorization to the FileSystem. It does not guarantee that there are sufficient permissions for all FileSystem operations. - -Once a FileSystem instance has been loaded and returned, a customer may perform their normal FileSystem operations backed by Azure Blob Storage. - -## Configuration and Authentication - -A FileSystem will be configured and authenticated via the options map available on [newFile](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystems.html#newFileSystem(java.net.URI,%20java.util.Map))[S](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystems.html#newFileSystem(java.net.URI,%20java.util.Map))[ystem](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystems.html#newFileSystem(java.net.URI,%20java.util.Map)). It is left to the customer how to build this map (e.g. 
querying another source, reading a file, etc.), but if only one account is used to back the FileSystem, it must specify one of the following keys with an appropriate value for authentication: - -- AzureStorageAccountKey: String -- AzureStorageSasToken: String - -The map is queried in the above order, and the first one found is the authentication method used. If a Sas token is used, the customer must take care that it has appropriate permissions to perform the actions demanded of the FileSystem in a given workflow, including the initial connection check specified above. Furthermore, it must have an expiry time set after the client is expected to finish using the FileSystem. No token refresh is currently offered by the FileSystem implementation, though it is possible one may be offered in the future through some means of specifying a refresh period and location to read the new token at the correct time in the options. If the FileSystem is backed by multiple accounts, a SasToken must be attached to each container as specified below. - -A client must also specify the FileStores that they would like to configure. FileStores will correspond to containers, and the listed containers will be created if they do not already exist. Existing data will be preserved and if it is in one of the listed containers may be accessed via the FileSystem APIs, though care should be taken to ensure that the hierarchy is structured in a way intelligible to this implementation or behavior will be undefined (See "Directories" below). Any containers otherwise existing in the account will be ignored. The list of specified containers will be the return value for the name property on each value returned from [getFileStores](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystem.html#getFileStores()). The result of [getRootDirectories](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystem.html#getRootDirectories()) will be "\<containerName\>:/". 
This implies that all paths in this FileSystem will be prefixed with "\<containerName\>:/", or, more completely, a URI to a file in a FileSystem backed by Azure Blob Storage will always have the prefix "azb://\<containerName\>:/". The colon indicates a FileStore and is therefore disallowed in path elements other than the root directory. - -This entry must use the key "AzureStorageFileStores" and the value is an Iterable\<String\>. The format of each entry depends on the URI used to create the FileSystem. If the "account" parameter was used, the strings are simply container names. The same credentials will be applied to each container. If the "fileSystemId" parameter was used, the auth parameters will be ignored, and each container name must be fully qualified with the host and include a sas token that can access the container. E.g. "account.blob.core.windows.net/c1?\<sasToken\>". In either case, the first container listed will be considered the default and hence its root directory will be the default directory for the FileSystem. - -The following options allow for configuring the underlying blob client. If they are not specified, defaults from the blob sdk will be used: - -- AzureStorageHttpLogDetailLevel: com.azure.core.http.policy.HttpLogLevelDetail -- AzureStorageMaxTries: Integer -- AzureStorageTryTimeout: Integer -- AzureStorageRetryDelayInMs: Long -- AzureStorageMaxRetryDelayInMs: Long -- AzureStorageRetryPolicyType: com.azure.storage.common.policy.RetryPolicyType -- AzureStorageSecondaryHost: String -- AzureStorageUploadBlockSize: Long -- AzureStorageDownloadResumeRetries: Integer -- AzureStorageUseHttps: Boolean - -Using this map for configuration will allow for future extensibility. See the "Open Questions/Future Development" section below for more details. 
- -# Technical Details - -## Concurrent Use of Account and Containers by Other Applications - -Taken from the java.nio [package overview](https://docs.oracle.com/javase/7/docs/api/java/nio/file/package-summary.html): - -The view of the files and file system provided by classes in this package are guaranteed to be consistent with other views provided by other instances in the same Java virtual machine. The view may or may not, however, be consistent with the view of the file system as seen by other concurrently running programs due to caching performed by the underlying operating system and delays induced by network-filesystem protocols. This is true regardless of the language in which these other programs are written, and whether they are running on the same machine or on some other machine. The exact nature of any such inconsistencies are system-dependent and are therefore unspecified. - -Likewise for the AzureFileSystem, the view of the FileSystem from within an instance of the JVM will be consistent, but the AzureFileSystem makes no guarantees on behavior or state should other processes operate on the same data. The AzureFileSystem will assume that it has exclusive access to the resources stored in Azure Blob Storage and will behave without regard for potential interfering applications. - -Moreover, even from within a given application, it should be remembered that using a remote FileSystem introduces higher latency. Because of this, particular care must be taken when managing concurrency. Race conditions are more likely to manifest, network failures occur more frequently than disk failures, and other such distributed application scenarios must be considered when working with this FileSystem. While the AzureFileSystem will ensure it takes appropriate steps towards robustness and reliability, the application developer must also design around these failure scenarios and have fallback and retry options available. 
- -## Limitations - -It is important to recognize that Azure Blob Storage is not a true FileSystem, nor is it the goal of this project to force Azure Blob Storage to act like a full-fledged FileSystem. While providing FileSystem APIs on top of Azure Blob Storage can offer convenience and ease of access in certain cases, trying to force the Storage service to work in scenarios it is not designed for is bound to introduce performance and stability problems. To that end, this project will only offer APIs that can be sensibly and cleanly built on top of Azure Blob Storage APIs. We recognize that this will leave some scenarios unsupported indefinitely, but we would rather offer a product that works predictably and reliably in its well defined scenarios than eagerly support all possible scenarios at the expense of quality. - -Azure Storage has other storage offerings, such as Azure Datalake and Azure Files. Each of these has semantics that approach a traditional FileSystem more closely than Azure Blobs. Should there arise a need for greater nio support on top of Azure Storage, we may choose to implement these APIs on top of one of those services as well. - -## File Open Options - -Due to the above limitations, not all file I/O operations can be supported. In particular, random writes on existing data are not feasible on top of Azure Blob Storage. (See the "Open Questions/Future Development" section for a discussion on random IO. See the write()/close() operation notes in the "API" section below for more information on the implementation of writing). - -Due to these constraints, writing is only permitted in very specific scenarios. The supported [StandardOpenOptions](https://docs.oracle.com/javase/7/docs/api/java/nio/file/StandardOpenOption.html) are as follows: - -- APPEND: It should be possible to append to existing blobs by writing new blocks, retrieving the existing block list, and appending the new blocks to the list before committing. 
-- CREATE -- CREATE\_NEW -- DELETE\_ON\_CLOSE -- DSYNC: Every write requires a getBlockList + commitBlockList -- READ: Random reads are supported and fairly straightforward with Azure Blob Storage. -- SYNC -- TRUNCATE\_EXISTING: We would not follow the specified behavior exactly as we would simply commit a block list over the existing blob. This has the same result upon closing but does not actually involve a truncate operation. -- WRITE: Must be specified with APPEND to ensure that any write operations will not be random. If TRUNCATE\_EXISTING is specified, we will write as normal and blow away the old blob with a commitBlockList upon closing. - -## Directories - -Azure Blob Storage does not support actual directories. Virtual directories are supported by specifying a blob name that includes one or more path separators. Blobs may then be listed using a prefix and by specifying the delimiter to approximate a directory structure. The delimiter in this case is '/'. - -This project will use the same directory notation as blobFuse and other existing tools. Specifically, when creating a directory a zero-length blob whose name is the desired path and has a metadata value of "is\_hdi\_folder=true" will be created. Operations targeting directories will target blobs with these properties. In cases where there is existing data in the containers that appears to use virtual directories (determined by the presence of path separators) but does not have the empty blob and metadata markers, behavior will be undefined as specified above. One notable example is the case where deleting the only blob in a "directory" that does not have this marker will actually delete the directory because there will be no marker blob present to persist the path. 
- -## Optimistic Concurrency - -Though there are limitations on how much safety we can guarantee because of the limitations of a remote Storage system, we should attempt to be safe wherever possible and use ETag-locking to ensure we are giving a consistent view of a given file when required. - -# Release Criteria and Timeline - -## Preview Criteria - -In order to preview, the AzureFileSystem must implement the full set of features necessary to support the [Cromwell](https://github.com/broadinstitute/cromwell) scientific engine. Integration into this engine represents our MVP scenario and will give us a live environment in which we can exercise the preview for stress and perf. The set of APIs that must be included are listed below. Unless otherwise specified, their behavior will be as defined in the Oracle javadocs for the given type. Notes about behavior particular to our implementation are included inline. Anything not included in this list but included in the java.nio package will throw an UnsupportedOperationException unless otherwise specified by the Oracle docs. Release of the first preview should be targeted for the end of April. - -## GA Criteria - -In order to release a GA version, the AzureFileSystem must: - -- Be fully integrated into the Azure Sdk Repo. This includes CI checks, docs, samples, and any other infrastructure specified in the repo guidelines. -- Have a fully functional and thorough test suite with sufficient test coverage. Testing should include unit testing on any internal types and scenarios tests that include loading the FileSystemProvider and interacting with it as if it were a production environment (this may require a separate package that simply runs an end to end test). -- A CaseRunner should be written and tested on the Developer Experience team's perf framework. -- At least two extra customers of reasonable size should have engaged with the product in a meaningful way. 
We should engage the customers who requested this project on the azure-storage-java repo. - -Per Microsoft's guidelines and assuming all criteria are met, the product should GA no later than six months after preview. Additional time may be required for customer adoption, however. - -# APIs for Preview - -## [FileSystemProvider](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html) - -Note that this type contains the core implementations for the FileSystem apis and [Files](https://docs.oracle.com/javase/7/docs/api/java/nio/file/Files.html) methods delegate here. It is also important that these methods are threadsafe. - -- [checkAccess](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#checkAccess(java.nio.file.Path,%20java.nio.file.AccessMode...)): AccessDeniedException is thrown in all cases where the Execute option is passed. In all other cases, no AccessDeniedException will ever be thrown as Azure Blob Storage does not keep track of permissions on a per-blob basis, and it is assumed that the authentication method provided is sufficient for accessing the blobs in the desired manner. While it would be feasible to test read access by attempting a read, it would not be safe to do the same for write access, and in this case it is preferable to keep the assumption consistent, so we check neither. Similarly, we could check the query string of a sas token for permissions, but we cannot do the same for token authentication, and we choose here to be consistent in our assumption for clarity. -- [copy](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#copy(java.nio.file.Path,%20java.nio.file.Path,%20java.nio.file.CopyOption...)): COPY\_ATTRIBUTES must be true as it is impossible not to copy blob properties; if this option is not passed, an UnsupportedOperationException (UOE) will be thrown. 
All copies within an account are atomic, so ATOMIC\_MOVE should be supported and in fact will always be the case even when unspecified for a FileSystem using one account. If the FileSystem uses multiple accounts, the account name of the source and destination will be compared, and an IOException will be thrown if they do not match. If REPLACE\_EXISTING is not passed, we will use an If-None-Match:"\*" condition on the destination to prevent overwrites. The authentication method used on each will be the same as configured on entry. Note that copies between accounts are implicitly disallowed because we cannot copy from outside the FileSystem. -- [createDirectory](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#createDirectory(java.nio.file.Path,%20java.nio.file.attribute.FileAttribute...)): Use Etag conditions to fulfill the required atomicity of check + create. See the section on directory behavior above. -- [delete](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#delete(java.nio.file.Path)) -- [deleteIfExists](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#deleteIfExists(java.nio.file.Path)) -- [getFileAttributeView](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#getFileAttributeView(java.nio.file.Path,%20java.lang.Class,%20java.nio.file.LinkOption...)): Please see the AttributeView section below. -- [getFileStore](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#getFileStore(java.nio.file.Path)): The FileStore (container) does not depend on the existence of the file (blob). See the [FileStore](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileStore.html) section below. 
-- [getFileSystem](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#getFileSystem(java.net.URI)): Once a FileSystem is closed, it will be removed from the FileSystemProvider's internal map. Therefore, trying to retrieve a closed FileSystem will throw a FileSystemNotFoundException. Note that it is possible to create a second instance of a FileSystem with the same URI if the first one was closed. -- [getPath](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#getPath(java.net.URI)): See the Path section below. -- [getScheme](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#getScheme()): Returns "azb". -- [isSameFile](https://docs.oracle.com/javase/7/docs/api/java/nio/file/Files.html#isSameFile(java.nio.file.Path,%20java.nio.file.Path)) -- [move](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#move(java.nio.file.Path,%20java.nio.file.Path,%20java.nio.file.CopyOption...)): Implemented as a copy and a delete. An AtomicMoveNotSupportedException will be thrown if the ATOMIC\_MOVE flag is passed. The same authentication method will be applied to both the source and the destination. We cannot copy the LMT of the source; the LMT will be updated as the copy time on the new blob, which is in violation of the javadocs but we do not have an alternative. -- [newDirectoryStream](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#newDirectoryStream(java.nio.file.Path,%20java.nio.file.DirectoryStream.Filter)): See the DirectoryStream section below. -- [newFileSystem](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#newFileSystem(java.net.URI,%20java.util.Map)): See the FileSystem section below. 
-- [newInputStream](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#newInputStream(java.nio.file.Path,%20java.nio.file.OpenOption...)): See the InputStream section below. -- [newOutputStream](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#newOutputStream(java.nio.file.Path,%20java.nio.file.OpenOption...)): See the OutputStream section below. -- [readAttributes](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#readAttributes(java.nio.file.Path,%20java.lang.Class,%20java.nio.file.LinkOption...)) -- [readAttributes](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#readAttributes(java.nio.file.Path,%20java.lang.String,%20java.nio.file.LinkOption...)) -- [setAttribute](https://docs.oracle.com/javase/7/docs/api/java/nio/file/spi/FileSystemProvider.html#setAttribute(java.nio.file.Path,%20java.lang.String,%20java.lang.Object,%20java.nio.file.LinkOption...)) - -## [Path](https://docs.oracle.com/javase/7/docs/api/java/nio/file/Path.html) - -Note the need to support empty paths. Most methods in this type are straightforward and do not need further commentary. In this section we list only the methods that will **NOT** be supported. - -- [register](https://docs.oracle.com/javase/7/docs/api/java/nio/file/Path.html#register(java.nio.file.WatchService,%20java.nio.file.WatchEvent.Kind...)) (both overloads; support may come at a later date) -- [toRealPath](https://docs.oracle.com/javase/7/docs/api/java/nio/file/Path.html#toRealPath(java.nio.file.LinkOption...)) (pending sym link support) - -## [InputStream](https://docs.oracle.com/javase/7/docs/api/java/io/InputStream.html)/[OutputStream](https://docs.oracle.com/javase/7/docs/api/java/io/OutputStream.html) - -We should be able to reuse BlobInputStream and BlobOutputStream from the blob package for these types. See above notes on OpenOptions for details on which options may be passed. 
- -## [DirectoryStream](https://docs.oracle.com/javase/7/docs/api/java/nio/file/DirectoryStream.html) - -A blob listing with a prefix and delimiter should suffice as we already return an Iterable. - -## [FileSystem](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystem.html) - -A FileSystem is backed by an account. - -- [close](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystem.html#close()): The need for throwing a possible exception will require maintaining a "closed" Boolean. Because this closes all associated channels, etc., child objects will need to maintain a reference to their parent FileSystem and query it before performing any operations. Because we don't hold onto any system resources outside of making network requests, outstanding operations can be allowed to finish and the channel will be considered closed upon the next attempted operation when the parent FileSystem is queried. -- [getFileStores](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystem.html#getFileStores()): No permissions are checked. The list of FileStores will be the list passed in upon configuration. An exists call will be made on the container before returning it to ensure it is still viable. -- [getPath](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystem.html#getPath(java.lang.String,%20java.lang.String...)): See the Path section above. -- [getRootDirectories](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystem.html#getRootDirectories()): Returns the same list as getFileStores, but each element has a '/' appended to it. -- [getSeparator](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystem.html#getSeparator()): Returns '/'. -- [isOpen](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystem.html#isOpen()) -- [isReadOnly](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystem.html#isReadOnly()): Returns false. 
-- [provider](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystem.html#provider()) -- [supportedFileAttributeViews](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystem.html#supportedFileAttributeViews()): See the AttributeViews section below. - -## [FileStore](https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileStore.html) - -A FileStore is backed by a container. As mentioned above, a list of containers is passed in upon initialization of the FileSystem. Because there is no limit to the storage space of a container, unallocated/usable/total space is MAX\_LONG. Other methods are self-evident. - -## AttributeViews - -- [BasicFileAttributeView](https://docs.oracle.com/javase/7/docs/api/java/nio/file/attribute/BasicFileAttributeView.html): - - [setTimes](https://docs.oracle.com/javase/7/docs/api/java/nio/file/attribute/BasicFileAttributeView.html#setTimes(java.nio.file.attribute.FileTime,%20java.nio.file.attribute.FileTime,%20java.nio.file.attribute.FileTime)): a copy in place can be used to update the LMT. UnsupportedOperationException thrown for other time values - - Symlink support pending -- [UserDefinedFileAttributeView](https://docs.oracle.com/javase/7/docs/api/java/nio/file/attribute/UserDefinedFileAttributeView.html): Stored as metadata on the blob. Both keys and values are Strings. RuntimePermission("accessUserDefinedAttributes") is not required. -- AzureStorageFileAttributeView: A new type that will allow clients to set Storage related properties such as tier. - -## [File](https://docs.oracle.com/javase/7/docs/api/java/io/File.html) - -Many of these methods are implemented by deferring to the Files implementation of many of these methods (paying attention to differences in behavior). Here again, only the methods that are NOT implemented are listed as most of these methods can be deferred to another type and are therefore fairly transparent to implement. 
- -- [isHidden](https://docs.oracle.com/javase/7/docs/api/java/io/File.html#isHidden()) -- [setWritable](https://docs.oracle.com/javase/7/docs/api/java/io/File.html#setWritable(boolean,%20boolean))/[setReadable](https://docs.oracle.com/javase/7/docs/api/java/io/File.html#setReadable(boolean,%20boolean))/[setExecutable](https://docs.oracle.com/javase/7/docs/api/java/io/File.html#setExecutable(boolean,%20boolean))/[setLastModified](https://docs.oracle.com/javase/7/docs/api/java/io/File.html#setLastModified(long))/[setReadOnly](https://docs.oracle.com/javase/7/docs/api/java/io/File.html#setReadOnly()) - -## [AsynchronousFileChannel](https://docs.oracle.com/javase/7/docs/api/java/nio/channels/AsynchronousFileChannel.html) - -- [force](https://docs.oracle.com/javase/7/docs/api/java/nio/channels/AsynchronousFileChannel.html#force(boolean)): No-op as we don't keep a local cache, so all writes go directly to the service. -- [open](https://docs.oracle.com/javase/7/docs/api/java/nio/channels/AsynchronousFileChannel.html#open(java.nio.file.Path,%20java.nio.file.OpenOption...)): See the above OpenOptions section for more information. Opening with the ExecutorService is not initially supported. -- [read](https://docs.oracle.com/javase/7/docs/api/java/nio/channels/AsynchronousFileChannel.html#read(java.nio.ByteBuffer,%20long)): CompletionEventHandler not initially supported. -- [size](https://docs.oracle.com/javase/7/docs/api/java/nio/channels/AsynchronousFileChannel.html#size()) -- [write](https://docs.oracle.com/javase/7/docs/api/java/nio/channels/AsynchronousFileChannel.html#write(java.nio.ByteBuffer,%20long,%20A,%20java.nio.channels.CompletionHandler)): CompletionEventHandler not initially supported. Additional checks are required before closing. Each write will add an entry to a (threadsafe) set of Strings that represent the range. At the time of closing, the set will be examined to ensure it forms a continuous range from 0 to the size of the blob. 
If it does not, an IOException will be thrown. If it does, the ranges will be converted to blockIDs and the list will be committed. This will enable parallel write scenarios for writing an entire file while ensuring that there is no random IO happening. Note that the docs do not specify the APPEND option for the open API. In this case, TRUNCATE\_EXISTING must be specified. - -## [SeekableByteChannel](https://docs.oracle.com/javase/7/docs/api/java/nio/channels/SeekableByteChannel.html) - -See the above OpenOptions section for more information. - -- [position](https://docs.oracle.com/javase/7/docs/api/java/nio/channels/SeekableByteChannel.html#position(long)): If the position is set to any value other than the current size of the file, attempting to write will throw an UnsupportedOperationException. In read-only workloads, the position may be set wherever the client desires. Reading may fail even after a write if the channel is opened to a new blob because the data will not have been committed yet. -- [read](https://docs.oracle.com/javase/7/docs/api/java/nio/channels/SeekableByteChannel.html#read(java.nio.ByteBuffer)) -- [size](https://docs.oracle.com/javase/7/docs/api/java/nio/channels/SeekableByteChannel.html#size()) -- [write](https://docs.oracle.com/javase/7/docs/api/java/nio/channels/SeekableByteChannel.html#write(java.nio.ByteBuffer)) - -## [FileChannel](https://docs.oracle.com/javase/7/docs/api/java/nio/channels/FileChannel.html) - -Note that this implements a SeekableByteChannel. Many of the methods should be deferred to an internal instance or this type should extend from our implementation of SeekableByteChannel. As such, its seeking and writing behavior is the same as SeekableByteChannel. Mapping is not supported. Locking is not supported. Note the possible connection between a FileChannel and the stream that created it; where possible, a FileChannel should reference the position of the stream rather than maintaining its own pointer. 
- -# Open Questions/Further Development - -The following are not immediately necessary but may reasonably be implemented at a later time. - -- Symbolic links (createSymbolicLink) could be an empty blob with metadata field like x-ms-meta-link-target:path. Must be wary of link chains and circular links. -- Hard links (createLink) -- Hidden files (isHidden) could be a metadata field like x-ms-meta-is-hidden:true -- Random IO (newAsynchronousFileChannel, newSeekableByteChannel, newFileChannel). It would be theoretically possible to implement this functionality by downloading the file, working on a locally cached copy, and reuploading the file, but that incurs significant performance costs on large files (caching also introduces a significant amount of complexity when trying to maintain cache correctness and consistency in multithreaded environments). Because our MVP scenario is in workloads with large files, there is not much benefit to this option. Another alternative would be to use a system wherein blocks roughly correlate to pages in traditional file I/O: the blockIds correspond to the range of data they hold. A random write would require downloading only a few blocks containing that range, making the desired edits, uploading the edited blocks, and re-committing the block list. This, too, introduces a large amount of complexity, a high number of round trip requests, and can be unsafe in multithreaded environments. -- Watches on directory events. -- PathMatcher (glob syntax?) -- File locks (leases? Can only be over the whole file. Can only be exclusive.) -- Read only FileSystem/Containers. Marking certain stores as read only could be configured in the initialization step if necessary. It could be a locally maintained list or we could require that the customer preconfigure the containers to be WORM containers. -- Opening AsyncFileChannel with the ExecutorService; reading with a CompletionEventHandler -- FileOwnership and POSIX permissions require ADLS. 
Random I/O may be improved with the use of Azure Files. -- Should we support AAD tokens? If so, we should probably look at azcopy for token refresh strategies. -- Which version should we release for GA? Should we jump to v12 to be consistent with other Storage offerings? -- Allowing customers access to single blobs. It is possible that customers may only need to access one blob from a given account. If that is the case, their credentials will likely be scoped just to that blob, and even checking the existence of a container upon initialization will be too restrictive. We can add an AzureStorageSkipInitialConnectionCheck parameter that bypasses this check and trusts the user's credentials, allowing them access just to that blob. -- Providers built on other services. See comments in the "Entry" section. -- Some possible options for new flags include flags to optimize for certain behavior, to allow the filesystem to use all extant containers as FileStores rather than being restricted to the specified list, toggle the creation of specified containers, or to allow for specifying a CDN that can override the account name found in the URI. diff --git a/azure-blob-nio/README.md b/azure-blob-nio/README.md index 8bd92fcac36..ad6c553eabf 100644 --- a/azure-blob-nio/README.md +++ b/azure-blob-nio/README.md @@ -1,333 +1,5 @@ # Azure Storage Blob NIO FileSystemProvider -This package allows you to interact with Azure Blob Storage through the standard Java NIO Filesystem APIs. 
+[This is a copy of the NIO Filesystem implementation version 12.0.0-beta.19](https://github.com/Azure/azure-sdk-for-java/tree/2490e1e19e8531fe0a6378f40e299e7ec64cf3aa/sdk/storage/azure-storage-blob-nio) -[Source code][source] | [API reference documentation][docs] | [REST API documentation][rest_docs] | [Product documentation][product_docs] | [Samples][samples] - -## Getting started - -### Prerequisites - -- [Java Development Kit (JDK)][jdk] with version 8 or above -- [Azure Subscription][azure_subscription] -- [Create Storage Account][storage_account] - -### Include the package - -[//]: # ({x-version-update-start;com.azure:azure-storage-blob-nio;current}) -```xml - - com.azure - azure-storage-blob-nio - 12.0.0-beta.19 - -``` -[//]: # ({x-version-update-end}) - -### Create a Storage Account -To create a Storage Account you can use the [Azure Portal][storage_account_create_portal] or [Azure CLI][storage_account_create_cli]. - -```bash -az storage account create \ - --resource-group \ - --name \ - --location -``` - -### Authenticate the client - -The simplest way to interact with the Storage Service is to create an instance of the [FileSystem][file_system] class using the [FileSystems][file_systems] API. -To make this possible you'll need the Account SAS (shared access signature) string of the Storage Account or a Shared Key. Learn more at [SAS Token][sas_token] and [Shared Key][shared_key] - -#### Get credentials - -##### SAS Token - -a. Use the Azure CLI snippet below to get the SAS token from the Storage Account. 
- -```bash -az storage blob generate-sas \ - --account-name {Storage Account name} \ - --container-name {container name} \ - --name {blob name} \ - --permissions {permissions to grant} \ - --expiry {datetime to expire the SAS token} \ - --services {storage services the SAS allows} \ - --resource-types {resource types the SAS allows} -``` - -Example: - -```bash -CONNECTION_STRING= - -az storage blob generate-sas \ - --account-name MyStorageAccount \ - --container-name MyContainer \ - --name MyBlob \ - --permissions racdw \ - --expiry 2020-06-15 -``` - -b. Alternatively, get the Account SAS Token from the Azure Portal. - -1. Go to your Storage Account -2. Select `Shared access signature` from the menu on the left -3. Click on `Generate SAS and connection string` (after setup) - -##### **Shared Key Credential** - -Use Account name and Account key. Account name is your Storage Account name. - -1. Go to your Storage Account -2. Select `Access keys` from the menu on the left -3. Under `key1`/`key2` copy the contents of the `Key` field - -## Key concepts - -NIO on top of Blob Storage is designed for: - -- Working with Blob Storage as though it were a local file system -- Random access reads on large blobs without downloading the entire blob -- Uploading full files as blobs -- Creating and navigating a directory structure within an account -- Reading and setting attributes on blobs - -## Design Notes -It is important to recognize that Azure Blob Storage is not a true FileSystem, nor is it the goal of this project to -force Azure Blob Storage to act like a full-fledged FileSystem. While providing FileSystem APIs on top of Azure Blob -Storage can offer convenience and ease of access in certain cases, trying to force the Storage service to work in -scenarios it is not designed for is bound to introduce performance and stability problems. - -To that end, this project will only offer APIs that can be sensibly and cleanly built on top of Azure Blob Storage APIs. 
-We recognize that this will leave some scenarios unsupported indefinitely, but we would rather offer a product that -works predictably and reliably in its well defined scenarios than eagerly support all possible scenarios at the expense -of quality. Even still, supporting some fundamentally required use cases, such as directories, can result in unexpected -behavior due to the difference between blob storage and a file system. The javadocs on each type and method should -therefore be read and understood for ways in which they may diverge from the standard specified by the JDK. - -Moreover, even from within a given application, it should be remembered that using a remote FileSystem introduces higher -latency. Because of this, particular care must be taken when managing concurrency. Race conditions are more likely to -manifest, network failures occur more frequently than disk failures, and other such distributed application scenarios -must be considered when working with this FileSystem. While the AzureFileSystem will ensure it takes appropriate steps -towards robustness and reliability, the application developer must also design around these failure scenarios and have -fallback and retry options available. - -The view of the FileSystem from within an instance of the JVM will be consistent, but the AzureFileSystem makes no -guarantees on behavior or state should other processes operate on the same data. The AzureFileSystem will assume that it -has exclusive access to the resources stored in Azure Blob Storage and will behave without regard for potential -interfering applications. - -Finally, this implementation has currently chosen to always read/write directly to/from Azure Storage without a local -cache. Our team has determined that with the tradeoffs of complexity, correctness, safety, performance, debuggability, -etc. one option is not inherently better than the other and that this choice most directly addresses the current known -use cases for this project. 
While this has consequences for every API, of particular note is the limitations on writing -data. Data may only be written as an entire file (i.e. random IO or appends are not supported), and data is not -committed or available to be read until the write stream is closed. - -## Examples - -The following sections provide several code snippets covering some of the most common Azure Storage Blob NIO tasks, including: - -- [URI format](#uri-format) -- [Create a `FileSystem`](#create-a-filesystem) -- [Create a directory](#create-a-directory) -- [Iterate over directory contents](#iterate-over-directory-contents) -- [Read a file](#read-a-file) -- [Write to a file](#write-to-a-file) -- [Copy a file](#copy-a-file) -- [Delete a file](#delete-a-file) -- [Read attributes on a file](#read-attributes-on-a-file) -- [Write attributes to a file](#write-attributes-to-a-file) - -### URI format -URIs are the fundamental way of identifying a resource. This package defines its URI format as follows: - -The scheme for this provider is `"azb"`, and the format of the URI to identify an `AzureFileSystem` is -`"azb://?endpoint="`. The endpoint of the Storage account is used to uniquely identify the filesystem. - -The root component, if it is present, is the first element of the path and is denoted by a `':'` as the last character. -Hence, only one instance of `':'` may appear in a path string, and it may only be the last character of the first -element in the path. The root component is used to identify which container a path belongs to. - -All other path elements, including separators, are considered as the blob name. `AzurePath#fromBlobUrl` -may be used to convert a typical http url pointing to a blob into an `AzurePath` object pointing to the same resource. - -### Create a `FileSystem` - -Create a `FileSystem` using the [`shared key`](#get-credentials) retrieved above. - -Note that you can further configure the file system using constants available in `AzureFileSystem`. 
-Please see the docs for `AzureFileSystemProvider` for a full explanation of initializing and configuring a filesystem - -```java readme-sample-createAFileSystem -Map config = new HashMap<>(); -String stores = ","; // A comma separated list of container names -StorageSharedKeyCredential credential = new StorageSharedKeyCredential(" attributes = Files.readAttributes(filePath, "azureBlob:metadata,headers"); -``` - -### Write attributes to a file - -Set attributes of a file through the `AzureBlobFileAttributeView`. - -```java readme-sample-writeAttributesToAFile -AzureBlobFileAttributeView view = Files.getFileAttributeView(filePath, AzureBlobFileAttributeView.class); -view.setMetadata(Collections.emptyMap()); -``` - -Or set an attribute dynamically by specifying the attribute as a string. - -```java readme-sample-writeAttributesToAFileString -Files.setAttribute(filePath, "azureBlob:blobHttpHeaders", new BlobHttpHeaders()); -``` - -## Troubleshooting - -When using the NIO implementation for Azure Blob Storage, errors returned by the service are manifested as an -`IOException` which wraps a `BlobStorageException` having the same HTTP status codes returned for -[REST API][error_codes] requests. For example, if you try to read a file that doesn't exist in your Storage Account, a -`404` error is returned, indicating `Not Found`. - -### Default HTTP Client -All client libraries by default use the Netty HTTP client. Adding the above dependency will automatically configure -the client library to use the Netty HTTP client. Configuring or changing the HTTP client is detailed in the -[HTTP clients wiki](https://github.com/Azure/azure-sdk-for-java/wiki/HTTP-clients). - -### Default SSL library -All client libraries, by default, use the Tomcat-native Boring SSL library to enable native-level performance for SSL -operations. 
The Boring SSL library is an uber jar containing native libraries for Linux / macOS / Windows, and provides -better performance compared to the default SSL implementation within the JDK. For more information, including how to -reduce the dependency size, refer to the [performance tuning][performance_tuning] section of the wiki. - -## Continued development - -This project is still actively being developed in an effort to move from preview to GA. Below is a list of features that -are not currently supported but are under consideration and may be added before GA. We welcome feedback and input on -which of these may be most useful and are open to suggestions for items not included in this list. While all of these -items are being considered, they have not been investigated and designed and therefore we cannot confirm their -feasibility within Azure Blob Storage. Therefore, it may be the case that further investigation reveals a feature may -not be possible or otherwise may conflict with established design goals and therefore will not ultimately be supported. - -- Symbolic links -- Hard links -- Hidden files -- Random writes -- File locks -- Read only files or file stores -- Watches on directory events -- Support for other Azure Storage services such as ADLS Gen 2 (Datalake) and Azure Files (shares) -- Token authentication -- Multi-account filesystems -- Delegating access to single files -- Normalizing directory structure of data upon loading a FileSystem -- Local caching -- Other `OpenOptions` such as append or dsync -- Flags to toggle certain behaviors such as FileStore (container) creation, etc. - -## Contributing - -This project welcomes contributions and suggestions. Most contributions require you to agree to a [Contributor License Agreement (CLA)][cla] declaring that you have the right to, and actually do, grant us the rights to use your contribution. 
- -When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. - -This project has adopted the [Microsoft Open Source Code of Conduct][coc]. For more information see the [Code of Conduct FAQ][coc_faq] or contact [opencode@microsoft.com][coc_contact] with any additional questions or comments. - - -[source]: https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/storage/azure-storage-blob-nio/src -[samples_readme]: https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/storage/azure-storage-blob-nio/src/samples/README.md -[docs]: https://azure.github.io/azure-sdk-for-java/ -[rest_docs]: https://docs.microsoft.com/rest/api/storageservices/blob-service-rest-api -[product_docs]: https://docs.microsoft.com/azure/storage/blobs/storage-blobs-overview -[sas_token]: https://docs.microsoft.com/azure/storage/common/storage-dotnet-shared-access-signature-part-1 -[shared_key]: https://docs.microsoft.com/rest/api/storageservices/authorize-with-shared-key -[jdk]: https://docs.microsoft.com/java/azure/jdk/ -[azure_subscription]: https://azure.microsoft.com/free/ -[storage_account]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-portal -[storage_account_create_cli]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-cli -[storage_account_create_portal]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-portal -[identity]: https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/identity/azure-identity/README.md -[error_codes]: https://docs.microsoft.com/rest/api/storageservices/blob-service-error-codes -[samples]: https://docs.oracle.com/javase/tutorial/essential/io/fileio.html -[cla]: https://cla.microsoft.com -[coc]: 
https://opensource.microsoft.com/codeofconduct/ -[coc_faq]: https://opensource.microsoft.com/codeofconduct/faq/ -[coc_contact]: mailto:opencode@microsoft.com -[performance_tuning]: https://github.com/Azure/azure-sdk-for-java/wiki/Performance-Tuning -[file_system]: https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystem.html -[file_systems]: https://docs.oracle.com/javase/7/docs/api/java/nio/file/FileSystems.html - -![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-java%2Fsdk%2Fstorage%2Fazure-storage-blob%2FREADME.png) +For more information on the initial design and commit history see the Azure SDK repository linked above. Changes to this repo were necessary to support some of the specific needs Cromwell as an App on Azure has as a system in Terra. This is something that has some precedent as it has been done for other filesystems in the past.