From c25255358f13d4998a9515b53ba951a5ebef0e74 Mon Sep 17 00:00:00 2001
From: Anthony Shoumikhin
Date: Mon, 16 Sep 2024 10:43:30 -0700
Subject: [PATCH] Move type arg to the end to match Aten constructors. (#5379)

Summary:
Pull Request resolved: https://github.com/pytorch/executorch/pull/5379

.

Reviewed By: kirklandsign

Differential Revision: D62701089

fbshipit-source-id: 3f05961a43db9e6e372ee039c2d832227951fbf6
---
 extension/tensor/tensor_impl_ptr.cpp        |   8 +-
 extension/tensor/tensor_impl_ptr.h          | 204 +++++++++++++++---
 extension/tensor/tensor_ptr.h               | 201 ++++++++++++-----
 extension/tensor/tensor_ptr_maker.cpp       |   2 +-
 extension/tensor/tensor_ptr_maker.h         |  64 +++---
 .../tensor/test/tensor_impl_ptr_test.cpp    |  63 ++----
 extension/tensor/test/tensor_ptr_test.cpp   |  24 +--
 7 files changed, 388 insertions(+), 178 deletions(-)

diff --git a/extension/tensor/tensor_impl_ptr.cpp b/extension/tensor/tensor_impl_ptr.cpp
index cbc49299b9..358acfd185 100644
--- a/extension/tensor/tensor_impl_ptr.cpp
+++ b/extension/tensor/tensor_impl_ptr.cpp
@@ -54,11 +54,11 @@ struct TensorImplPtrDeleter final {
 } // namespace
 
 TensorImplPtr make_tensor_impl_ptr(
-    exec_aten::ScalarType type,
     std::vector<exec_aten::SizesType> sizes,
     void* data,
     std::vector<exec_aten::DimOrderType> dim_order,
     std::vector<exec_aten::StridesType> strides,
+    exec_aten::ScalarType type,
     exec_aten::TensorShapeDynamism dynamism,
     std::function<void(void*)> deleter) {
   const auto dim = sizes.size();
@@ -129,24 +129,24 @@ TensorImplPtr make_tensor_impl_ptr(
 }
 
 TensorImplPtr make_tensor_impl_ptr(
-    exec_aten::ScalarType scalar_type,
     std::vector<exec_aten::SizesType> sizes,
     std::vector<uint8_t> data,
     std::vector<exec_aten::DimOrderType> dim_order,
     std::vector<exec_aten::StridesType> strides,
+    exec_aten::ScalarType type,
     exec_aten::TensorShapeDynamism dynamism) {
   ET_CHECK_MSG(
       data.size() >= exec_aten::compute_numel(sizes.data(), sizes.size()) *
-              exec_aten::elementSize(scalar_type),
+              exec_aten::elementSize(type),
       "Data size is smaller than required by sizes and scalar type.");
   auto raw_data_ptr = data.data();
   auto data_ptr = std::make_shared<std::vector<uint8_t>>(std::move(data));
   return make_tensor_impl_ptr(
-      scalar_type,
       std::move(sizes),
       raw_data_ptr,
       std::move(dim_order),
      std::move(strides),
+      type,
       dynamism,
       [data_ptr = std::move(data_ptr)](void*) {});
 }
diff --git a/extension/tensor/tensor_impl_ptr.h b/extension/tensor/tensor_impl_ptr.h
index 83bf534d73..8857dd1aca 100644
--- a/extension/tensor/tensor_impl_ptr.h
+++ b/extension/tensor/tensor_impl_ptr.h
@@ -21,15 +21,14 @@ namespace extension {
 #ifndef USE_ATEN_LIB
 /**
- * A smart pointer type for managing the lifecycle of a TensorImpl.
+ * A smart pointer for managing the lifecycle of a TensorImpl.
  *
- * TensorImplPtr uses a shared pointer because multiple Tensor objects might
- * share the same underlying data and metadata. This shared ownership model
- * ensures that the TensorImpl is only destroyed when all references to it are
- * gone, providing a safe and efficient way to manage shared tensor
- * implementations. This abstraction is designed to be a safer and more
- * convenient alternative to the original TensorImpl, which does not
- * manage metadata by design.
+ * TensorImplPtr uses a shared pointer since multiple Tensor objects may
+ * share the same underlying data and metadata. This shared ownership ensures
+ * that the TensorImpl is destroyed only when all references to it are gone,
+ * providing a safe and efficient way to manage shared tensor implementations.
+ * It serves as a safer, more convenient alternative to the original TensorImpl,
+ * which does not manage its metadata by design.
*/ using TensorImplPtr = std::shared_ptr; #else @@ -48,23 +47,23 @@ using TensorImplPtr = * Creates a TensorImplPtr that manages a newly created TensorImpl with the * specified properties. * - * @param type The scalar type of the tensor elements. * @param sizes A vector specifying the size of each dimension. * @param data A pointer to the data buffer. * @param dim_order A vector specifying the order of dimensions. * @param strides A vector specifying the strides of each dimension. + * @param type The scalar type of the tensor elements. * @param dynamism Specifies the mutability of the tensor's shape. * @param deleter A custom deleter function for managing the lifetime of the - * data buffer. If provided, this deleter will be called when the managed - * TensorImpl object is destroyed. + * data buffer. If provided, this deleter is called when the managed TensorImpl + * is destroyed. * @return A TensorImplPtr managing the newly created TensorImpl. */ TensorImplPtr make_tensor_impl_ptr( - exec_aten::ScalarType type, std::vector sizes, void* data, - std::vector dim_order = {}, - std::vector strides = {}, + std::vector dim_order, + std::vector strides, + exec_aten::ScalarType type = exec_aten::ScalarType::Float, exec_aten::TensorShapeDynamism dynamism = exec_aten::TensorShapeDynamism::DYNAMIC_BOUND, std::function deleter = nullptr); @@ -73,37 +72,64 @@ TensorImplPtr make_tensor_impl_ptr( * Creates a TensorImplPtr that manages a newly created TensorImpl with the * specified properties. * - * This template overload is specialized for cases where the tensor data is - * provided as a vector. The scalar type is automatically deduced from the - * vector's data type. The deleter ensures that the data vector is properly - * managed and its lifetime is tied to the TensorImpl. + * @param sizes A vector specifying the size of each dimension. + * @param data A pointer to the data buffer. + * @param type The scalar type of the tensor elements. + * @param dynamism Specifies the mutability of the tensor's shape. + * @param deleter A custom deleter function for managing the lifetime of the + * data buffer. If provided, this deleter is called when the managed TensorImpl + * is destroyed. + * @return A TensorImplPtr managing the newly created TensorImpl. + */ +inline TensorImplPtr make_tensor_impl_ptr( + std::vector sizes, + void* data, + exec_aten::ScalarType type = exec_aten::ScalarType::Float, + exec_aten::TensorShapeDynamism dynamism = + exec_aten::TensorShapeDynamism::DYNAMIC_BOUND, + std::function deleter = nullptr) { + return make_tensor_impl_ptr( + std::move(sizes), data, {}, {}, type, dynamism, std::move(deleter)); +} + +/** + * Creates a TensorImplPtr that manages a newly created TensorImpl with the + * specified properties. + * + * This template overload is specialized for cases where tensor data is provided + * as a vector. The scalar type is automatically deduced from the vector's data + * type. The deleter ensures that the data vector is properly managed, with its + * lifetime tied to the TensorImpl. * * @tparam T The C++ type of the tensor elements, deduced from the vector. * @param sizes A vector specifying the size of each dimension. * @param data A vector containing the tensor's data. * @param dim_order A vector specifying the order of dimensions. * @param strides A vector specifying the strides of each dimension. + * @param type The scalar type of the tensor elements. * @param dynamism Specifies the mutability of the tensor's shape. 
* @return A TensorImplPtr that manages the newly created TensorImpl. */ -template +template < + typename T = float, + exec_aten::ScalarType deduced_type = runtime::CppTypeToScalarType::value> inline TensorImplPtr make_tensor_impl_ptr( std::vector sizes, std::vector data, std::vector dim_order = {}, std::vector strides = {}, + exec_aten::ScalarType type = deduced_type, exec_aten::TensorShapeDynamism dynamism = exec_aten::TensorShapeDynamism::DYNAMIC_BOUND) { - constexpr exec_aten::ScalarType scalar_type = - runtime::CppTypeToScalarType::value; + ET_CHECK_MSG(type == deduced_type, "Type does not match the deduced type."); const auto raw_data_ptr = data.data(); auto data_ptr = std::make_shared>(std::move(data)); return make_tensor_impl_ptr( - scalar_type, std::move(sizes), raw_data_ptr, std::move(dim_order), std::move(strides), + type, dynamism, [data_ptr = std::move(data_ptr)](void*) {}); } @@ -119,17 +145,109 @@ inline TensorImplPtr make_tensor_impl_ptr( * * @tparam T The C++ type of the tensor elements, deduced from the vector. * @param data A vector containing the tensor's data. + * @param type The scalar type of the tensor elements. * @param dynamism Specifies the mutability of the tensor's shape. * @return A TensorImplPtr that manages the newly created TensorImpl. */ -template +template < + typename T = float, + exec_aten::ScalarType deduced_type = runtime::CppTypeToScalarType::value> inline TensorImplPtr make_tensor_impl_ptr( std::vector data, + exec_aten::ScalarType type = deduced_type, exec_aten::TensorShapeDynamism dynamism = exec_aten::TensorShapeDynamism::DYNAMIC_BOUND) { + ET_CHECK_MSG(type == deduced_type, "Type does not match the deduced type."); std::vector sizes{exec_aten::SizesType(data.size())}; return make_tensor_impl_ptr( - std::move(sizes), std::move(data), {0}, {1}, dynamism); + std::move(sizes), std::move(data), {0}, {1}, type, dynamism); +} + +/** + * Creates a TensorImplPtr that manages a newly created TensorImpl with the + * specified properties. + * + * This template overload is specialized for cases where tensor data is provided + * as an initializer list. The scalar type is automatically deduced from the + * initializer list's data type. The deleter ensures that the data is properly + * managed, with its lifetime tied to the TensorImpl. + * + * @tparam T The C++ type of the tensor elements, deduced from the initializer + * list. + * @param sizes A vector specifying the size of each dimension. + * @param list An initializer list containing the tensor's data. + * @param dim_order A vector specifying the order of dimensions. + * @param strides A vector specifying the strides of each dimension. + * @param type The scalar type of the tensor elements. + * @param dynamism Specifies the mutability of the tensor's shape. + * @return A TensorImplPtr that manages the newly created TensorImpl. 
+ */ +template < + typename T = float, + exec_aten::ScalarType deduced_type = runtime::CppTypeToScalarType::value> +inline TensorImplPtr make_tensor_impl_ptr( + std::vector sizes, + std::initializer_list list, + std::vector dim_order = {}, + std::vector strides = {}, + exec_aten::ScalarType type = deduced_type, + exec_aten::TensorShapeDynamism dynamism = + exec_aten::TensorShapeDynamism::DYNAMIC_BOUND) { + ET_CHECK_MSG(type == deduced_type, "Type does not match the deduced type."); + auto data = std::vector(std::move(list)); + const auto raw_data_ptr = data.data(); + auto data_ptr = std::make_shared>(std::move(data)); + return make_tensor_impl_ptr( + std::move(sizes), + raw_data_ptr, + std::move(dim_order), + std::move(strides), + type, + dynamism, + [data_ptr = std::move(data_ptr)](void*) {}); +} + +/** + * Creates a TensorImplPtr that manages a newly created TensorImpl with the + * specified properties. + * + * This template overload is specialized for cases where the tensor data is + * provided as an initializer list. The scalar type is automatically deduced + * from the initializer list's data type. The deleter ensures that the data is + * properly managed and its lifetime is tied to the TensorImpl. + * + * @tparam T The C++ type of the tensor elements, deduced from the initializer + * list. + * @param sizes A vector specifying the size of each dimension. + * @param list An initializer list containing the tensor's data. + * @param type The scalar type of the tensor elements. + * @param dynamism Specifies the mutability of the tensor's shape. + * @return A TensorImplPtr that manages the newly created TensorImpl. + */ +template < + typename T = float, + exec_aten::ScalarType deduced_type = runtime::CppTypeToScalarType::value> +inline TensorImplPtr make_tensor_impl_ptr( + std::initializer_list list, + exec_aten::ScalarType type = deduced_type, + exec_aten::TensorShapeDynamism dynamism = + exec_aten::TensorShapeDynamism::DYNAMIC_BOUND) { + ET_CHECK_MSG(type == deduced_type, "Type does not match the deduced type."); + std::vector sizes{exec_aten::SizesType(list.size())}; + return make_tensor_impl_ptr( + std::move(sizes), std::move(list), {0}, {1}, type, dynamism); +} + +/** + * Creates a TensorImplPtr to manage a Tensor with a single scalar value. + * + * @tparam T The C++ type of the scalar value. + * @param value The scalar value used for the Tensor. + * @return A TensorImplPtr managing the newly created TensorImpl. + */ +template +inline TensorImplPtr make_tensor_impl_ptr(T value) { + return make_tensor_impl_ptr({}, std::vector{value}); } /** @@ -137,25 +255,49 @@ inline TensorImplPtr make_tensor_impl_ptr( * specified properties. * * This overload accepts a raw memory buffer stored in a std::vector - * and a scalar type to interpret the data. The vector is managed, and the - * memory's lifetime is tied to the TensorImpl. + * and a scalar type to interpret the data. The vector is managed, and its + * lifetime is tied to the TensorImpl. * - * @param scalar_type The scalar type of the tensor elements. * @param sizes A vector specifying the size of each dimension. - * @param data A vector containing the raw memory for the tensor's data. + * @param data A vector containing the raw memory buffer for the tensor's data. * @param dim_order A vector specifying the order of dimensions. * @param strides A vector specifying the strides of each dimension. + * @param type The scalar type of the tensor elements. * @param dynamism Specifies the mutability of the tensor's shape. 
* @return A TensorImplPtr managing the newly created TensorImpl. */ TensorImplPtr make_tensor_impl_ptr( - exec_aten::ScalarType scalar_type, std::vector sizes, std::vector data, - std::vector dim_order = {}, - std::vector strides = {}, + std::vector dim_order, + std::vector strides, + exec_aten::ScalarType type = exec_aten::ScalarType::Float, exec_aten::TensorShapeDynamism dynamism = exec_aten::TensorShapeDynamism::DYNAMIC_BOUND); +/** + * Creates a TensorImplPtr that manages a newly created TensorImpl with the + * specified properties. + * + * This overload accepts a raw memory buffer stored in a std::vector + * and a scalar type to interpret the data. The vector is managed, and the + * memory's lifetime is tied to the TensorImpl. + * + * @param sizes A vector specifying the size of each dimension. + * @param data A vector containing the raw memory for the tensor's data. + * @param type The scalar type of the tensor elements. + * @param dynamism Specifies the mutability of the tensor's shape. + * @return A TensorImplPtr managing the newly created TensorImpl. + */ +inline TensorImplPtr make_tensor_impl_ptr( + std::vector sizes, + std::vector data, + exec_aten::ScalarType type = exec_aten::ScalarType::Float, + exec_aten::TensorShapeDynamism dynamism = + exec_aten::TensorShapeDynamism::DYNAMIC_BOUND) { + return make_tensor_impl_ptr( + std::move(sizes), std::move(data), {}, {}, type, dynamism); +} + } // namespace extension } // namespace executorch diff --git a/extension/tensor/tensor_ptr.h b/extension/tensor/tensor_ptr.h index 17e18742be..41dc6282eb 100644 --- a/extension/tensor/tensor_ptr.h +++ b/extension/tensor/tensor_ptr.h @@ -17,13 +17,12 @@ namespace extension { #ifndef USE_ATEN_LIB namespace internal { /** - * Custom deleter for TensorPtr that ensures the associated TensorImplPtr - * is properly managed. + * Custom deleter for TensorPtr that ensures proper management of the associated + * TensorImplPtr. * - * Since Tensor does not own its TensorImpl, this deleter is responsible for - * managing the lifecycle of the TensorImplPtr, ensuring that the dynamic - * metadata (sizes, dim_order, strides) is properly released when the Tensor is - * destroyed. + * Since Tensor does not own its TensorImpl, this deleter manages the + * TensorImplPtr lifecycle, ensuring dynamic metadata (sizes, dim_order, + * strides) is released appropriately when the Tensor is destroyed. */ struct TensorPtrDeleter final { TensorImplPtr tensor_impl; @@ -40,13 +39,13 @@ struct TensorPtrDeleter final { } // namespace internal /** - * A smart pointer type for managing the lifecycle of a Tensor. + * A smart pointer for managing the lifecycle of a Tensor. * - * TensorPtr uses a unique pointer to enforce that each Tensor object has - * distinct ownership. This abstraction serves as a more convenient and safer - * replacement for the standard Tensor, which does not manage its - * metadata by design. Using TensorPtr simplifies memory management and ensures - * that the underlying TensorImpl is safely shared among tensors when needed. + * TensorPtr uses a unique pointer to ensure each Tensor object has distinct + * ownership. This abstraction simplifies memory management and serves as a + * safer alternative to the standard Tensor, which does not manage its metadata + * by design. It ensures that the underlying TensorImpl can be safely shared + * among tensors as needed. 
*/ using TensorPtr = std::unique_ptr; @@ -62,13 +61,13 @@ using TensorPtr = std::unique_ptr; #endif // USE_ATEN_LIB /** - * Creates a new TensorPtr that manages a newly created Tensor with the given + * Creates a TensorPtr to manage a newly created Tensor with the given * TensorImplPtr. * - * This function wraps the provided TensorImplPtr in a TensorPtr, ensuring that - * the Tensor object's lifecycle is properly managed. The TensorPtr will - * uniquely own the Tensor object, while the underlying TensorImplPtr may be - * shared with other Tensors. + * This function wraps the provided TensorImplPtr in a TensorPtr, ensuring the + * Tensor object’s lifecycle is managed correctly. The TensorPtr uniquely owns + * the Tensor object, while the underlying TensorImplPtr can be shared with + * other Tensors. * * @param tensor_impl A TensorImplPtr to the TensorImpl to be managed. * @return A TensorPtr that manages the newly created Tensor. @@ -84,13 +83,12 @@ inline TensorPtr make_tensor_ptr(TensorImplPtr tensor_impl) { } /** - * Creates a new TensorPtr that shares the same TensorImplPtr as an existing + * Creates a TensorPtr that shares the same TensorImplPtr as an existing * TensorPtr. * - * This function creates a new TensorPtr that shares the - * underlying TensorImpl with the provided TensorPtr, ensuring that the - * underlying data and metadata are not duplicated but safely shared between the - * tensor objects. + * This function returns a TensorPtr that shares the underlying TensorImpl + * with the provided TensorPtr, ensuring that the underlying data and metadata + * are shared safely without duplication between the tensor objects. * * @param tensor A TensorPtr to the existing Tensor from which to create a copy. * @return A new TensorPtr that shares the underlying TensorImplPtr with the @@ -105,17 +103,15 @@ inline TensorPtr make_tensor_ptr(const TensorPtr& tensor) { } /** - * Creates a TensorPtr that manages a new Tensor with the same properties + * Creates a TensorPtr to manage a new Tensor with the same properties * as the given Tensor, sharing the same data without owning it. * - * @param tensor The Tensor whose properties are to be used to create a new - * TensorPtr. - * @return A new TensorPtr that manages a Tensor with the same properties as the + * @param tensor The Tensor whose properties are used to create a new TensorPtr. + * @return A new TensorPtr managing a Tensor with the same properties as the * original. */ inline TensorPtr make_tensor_ptr(const exec_aten::Tensor& tensor) { return make_tensor_ptr(make_tensor_impl_ptr( - tensor.scalar_type(), std::vector( tensor.sizes().begin(), tensor.sizes().end()), tensor.mutable_data_ptr(), @@ -124,11 +120,13 @@ inline TensorPtr make_tensor_ptr(const exec_aten::Tensor& tensor) { tensor.dim_order().begin(), tensor.dim_order().end()), std::vector( tensor.strides().begin(), tensor.strides().end()), + tensor.scalar_type(), tensor.shape_dynamism() #else // USE_ATEN_LIB {}, std::vector( - tensor.strides().begin(), tensor.strides().end()) + tensor.strides().begin(), tensor.strides().end()), + tensor.scalar_type() #endif // USE_ATEN_LIB )); } @@ -136,11 +134,11 @@ inline TensorPtr make_tensor_ptr(const exec_aten::Tensor& tensor) { /** * Creates a TensorPtr that manages a Tensor with the specified properties. * - * @param type The scalar type of the tensor elements. * @param sizes A vector specifying the size of each dimension. * @param data A pointer to the data buffer. * @param dim_order A vector specifying the order of dimensions. 
* @param strides A vector specifying the strides of the tensor. + * @param type The scalar type of the tensor elements. * @param dynamism Specifies the mutability of the tensor's shape. * @param deleter A custom deleter function for managing the lifetime of the * data buffer. If provided, this deleter will be called when the managed Tensor @@ -148,24 +146,47 @@ inline TensorPtr make_tensor_ptr(const exec_aten::Tensor& tensor) { * @return A TensorPtr that manages the newly created Tensor. */ inline TensorPtr make_tensor_ptr( - const exec_aten::ScalarType type, std::vector sizes, void* data, - std::vector dim_order = {}, - std::vector strides = {}, + std::vector dim_order, + std::vector strides, + const exec_aten::ScalarType type = exec_aten::ScalarType::Float, const exec_aten::TensorShapeDynamism dynamism = exec_aten::TensorShapeDynamism::DYNAMIC_BOUND, std::function deleter = nullptr) { return make_tensor_ptr(make_tensor_impl_ptr( - type, std::move(sizes), data, std::move(dim_order), std::move(strides), + type, dynamism, std::move(deleter))); } +/** + * Creates a TensorPtr that manages a Tensor with the specified properties. + * + * @param sizes A vector specifying the size of each dimension. + * @param data A pointer to the data buffer. + * @param type The scalar type of the tensor elements. + * @param dynamism Specifies the mutability of the tensor's shape. + * @param deleter A custom deleter function for managing the lifetime of the + * data buffer. If provided, this deleter will be called when the managed Tensor + * object is destroyed. + * @return A TensorPtr that manages the newly created Tensor. + */ +inline TensorPtr make_tensor_ptr( + std::vector sizes, + void* data, + const exec_aten::ScalarType type = exec_aten::ScalarType::Float, + const exec_aten::TensorShapeDynamism dynamism = + exec_aten::TensorShapeDynamism::DYNAMIC_BOUND, + std::function deleter = nullptr) { + return make_tensor_ptr(make_tensor_impl_ptr( + std::move(sizes), data, {}, {}, type, dynamism, std::move(deleter))); +} + /** * Creates a TensorPtr that manages a Tensor with the specified properties. * @@ -178,15 +199,19 @@ inline TensorPtr make_tensor_ptr( * @param data A vector containing the tensor's data. * @param dim_order A vector specifying the order of dimensions. * @param strides A vector specifying the strides of each dimension. + * @param type The scalar type of the tensor elements. * @param dynamism Specifies the mutability of the tensor's shape. * @return A TensorPtr that manages the newly created TensorImpl. */ -template -TensorPtr make_tensor_ptr( +template < + typename T = float, + exec_aten::ScalarType deduced_type = runtime::CppTypeToScalarType::value> +inline TensorPtr make_tensor_ptr( std::vector sizes, std::vector data, std::vector dim_order = {}, std::vector strides = {}, + exec_aten::ScalarType type = deduced_type, exec_aten::TensorShapeDynamism dynamism = exec_aten::TensorShapeDynamism::DYNAMIC_BOUND) { return make_tensor_ptr(make_tensor_impl_ptr( @@ -194,6 +219,7 @@ TensorPtr make_tensor_ptr( std::move(data), std::move(dim_order), std::move(strides), + type, dynamism)); } @@ -209,12 +235,52 @@ TensorPtr make_tensor_ptr( * @param dynamism Specifies the mutability of the tensor's shape. * @return A TensorPtr that manages the newly created TensorImpl. 
*/ -template -TensorPtr make_tensor_ptr( +template < + typename T = float, + exec_aten::ScalarType deduced_type = runtime::CppTypeToScalarType::value> +inline TensorPtr make_tensor_ptr( std::vector data, + exec_aten::ScalarType type = deduced_type, exec_aten::TensorShapeDynamism dynamism = exec_aten::TensorShapeDynamism::DYNAMIC_BOUND) { - return make_tensor_ptr(make_tensor_impl_ptr(std::move(data), dynamism)); + return make_tensor_ptr(make_tensor_impl_ptr(std::move(data), type, dynamism)); +} + +/** + * Creates a TensorPtr that manages a Tensor with the specified properties. + * + * This template overload is specialized for cases where the tensor data is + * provided as an initializer list. The scalar type is automatically deduced + * from the initializer list's data type. + * + * @tparam T The C++ type of the tensor elements, deduced from the initializer + * list. + * @param sizes A vector specifying the size of each dimension. + * @param list An initializer list containing the tensor's data. + * @param dim_order A vector specifying the order of dimensions. + * @param strides A vector specifying the strides of each dimension. + * @param type The scalar type of the tensor elements. + * @param dynamism Specifies the mutability of the tensor's shape. + * @return A TensorPtr that manages the newly created TensorImpl. + */ +template < + typename T = float, + exec_aten::ScalarType deduced_type = runtime::CppTypeToScalarType::value> +inline TensorPtr make_tensor_ptr( + std::vector sizes, + std::initializer_list list, + std::vector dim_order = {}, + std::vector strides = {}, + exec_aten::ScalarType type = deduced_type, + exec_aten::TensorShapeDynamism dynamism = + exec_aten::TensorShapeDynamism::DYNAMIC_BOUND) { + return make_tensor_ptr(make_tensor_impl_ptr( + std::move(sizes), + std::move(list), + std::move(dim_order), + std::move(strides), + type, + dynamism)); } /** @@ -226,16 +292,19 @@ TensorPtr make_tensor_ptr( * * @tparam T The C++ type of the tensor elements, deduced from the initializer * list. - * @param data An initializer list containing the tensor's data. + * @param list An initializer list containing the tensor's data. * @param dynamism Specifies the mutability of the tensor's shape. * @return A TensorPtr that manages the newly created TensorImpl. */ -template -TensorPtr make_tensor_ptr( - std::initializer_list data, +template < + typename T = float, + exec_aten::ScalarType deduced_type = runtime::CppTypeToScalarType::value> +inline TensorPtr make_tensor_ptr( + std::initializer_list list, + exec_aten::ScalarType type = deduced_type, exec_aten::TensorShapeDynamism dynamism = exec_aten::TensorShapeDynamism::DYNAMIC_BOUND) { - return make_tensor_ptr(make_tensor_impl_ptr(std::vector(data), dynamism)); + return make_tensor_ptr(make_tensor_impl_ptr(std::move(list), type, dynamism)); } /** @@ -246,8 +315,8 @@ TensorPtr make_tensor_ptr( * @return A TensorPtr that manages the newly created TensorImpl. */ template -TensorPtr make_tensor_ptr(T value) { - return make_tensor_ptr(make_tensor_impl_ptr({}, std::vector{value})); +inline TensorPtr make_tensor_ptr(T value) { + return make_tensor_ptr(make_tensor_impl_ptr(value)); } /** @@ -257,31 +326,54 @@ TensorPtr make_tensor_ptr(T value) { * and a scalar type to interpret the data. The vector is managed, and the * memory's lifetime is tied to the TensorImpl. * - * @param scalar_type The scalar type of the tensor elements. * @param sizes A vector specifying the size of each dimension. 
* @param data A vector containing the raw memory for the tensor's data. * @param dim_order A vector specifying the order of dimensions. * @param strides A vector specifying the strides of each dimension. + * @param type The scalar type of the tensor elements. * @param dynamism Specifies the mutability of the tensor's shape. * @return A TensorPtr managing the newly created Tensor. */ inline TensorPtr make_tensor_ptr( - exec_aten::ScalarType scalar_type, std::vector sizes, std::vector data, - std::vector dim_order = {}, - std::vector strides = {}, + std::vector dim_order, + std::vector strides, + exec_aten::ScalarType type = exec_aten::ScalarType::Float, exec_aten::TensorShapeDynamism dynamism = exec_aten::TensorShapeDynamism::DYNAMIC_BOUND) { return make_tensor_ptr(make_tensor_impl_ptr( - scalar_type, std::move(sizes), std::move(data), std::move(dim_order), std::move(strides), + type, dynamism)); } +/** + * Creates a TensorPtr that manages a Tensor with the specified properties. + * + * This overload accepts a raw memory buffer stored in a std::vector + * and a scalar type to interpret the data. The vector is managed, and the + * memory's lifetime is tied to the TensorImpl. + * + * @param sizes A vector specifying the size of each dimension. + * @param data A vector containing the raw memory for the tensor's data. + * @param type The scalar type of the tensor elements. + * @param dynamism Specifies the mutability of the tensor's shape. + * @return A TensorPtr managing the newly created Tensor. + */ +inline TensorPtr make_tensor_ptr( + std::vector sizes, + std::vector data, + exec_aten::ScalarType type = exec_aten::ScalarType::Float, + exec_aten::TensorShapeDynamism dynamism = + exec_aten::TensorShapeDynamism::DYNAMIC_BOUND) { + return make_tensor_ptr( + make_tensor_impl_ptr(std::move(sizes), std::move(data), type, dynamism)); +} + /** * Creates a TensorPtr that manages a new Tensor with the same properties * as the given Tensor, but with a copy of the data owned by the returned @@ -293,7 +385,6 @@ inline TensorPtr make_tensor_ptr( */ inline TensorPtr clone_tensor_ptr(const exec_aten::Tensor& tensor) { return make_tensor_ptr(make_tensor_impl_ptr( - tensor.scalar_type(), std::vector( tensor.sizes().begin(), tensor.sizes().end()), std::vector( @@ -304,11 +395,13 @@ inline TensorPtr clone_tensor_ptr(const exec_aten::Tensor& tensor) { tensor.dim_order().begin(), tensor.dim_order().end()), std::vector( tensor.strides().begin(), tensor.strides().end()), + tensor.scalar_type(), tensor.shape_dynamism() #else // USE_ATEN_LIB {}, std::vector( - tensor.strides().begin(), tensor.strides().end()) + tensor.strides().begin(), tensor.strides().end()), + tensor.scalar_type() #endif // USE_ATEN_LIB )); } @@ -326,11 +419,11 @@ inline TensorPtr clone_tensor_ptr(const TensorPtr& tensor) { } /** - * Resizes the Tensor managed by the given TensorPtr to the new sizes provided. + * Resizes the Tensor managed by the provided TensorPtr to the new sizes. * * @param tensor A TensorPtr managing the Tensor to resize. * @param sizes A vector representing the new sizes for each dimension. - * @return Error::Ok on success, or an appropriate error code otherwise. + * @return Error::Ok on success, or an appropriate error code on failure. 
*/ ET_NODISCARD runtime::Error resize_tensor_ptr( diff --git a/extension/tensor/tensor_ptr_maker.cpp b/extension/tensor/tensor_ptr_maker.cpp index 1a09fea4ca..cbea6da1e7 100644 --- a/extension/tensor/tensor_ptr_maker.cpp +++ b/extension/tensor/tensor_ptr_maker.cpp @@ -105,11 +105,11 @@ TensorPtr empty_strided( exec_aten::compute_numel(sizes.data(), sizes.size()) * exec_aten::elementSize(type)); return make_tensor_ptr( - type, std::move(sizes), std::move(data), {}, std::move(strides), + type, dynamism); } diff --git a/extension/tensor/tensor_ptr_maker.h b/extension/tensor/tensor_ptr_maker.h index 8146135b15..3f2d267a4e 100644 --- a/extension/tensor/tensor_ptr_maker.h +++ b/extension/tensor/tensor_ptr_maker.h @@ -15,13 +15,13 @@ namespace extension { /** * A helper class for creating TensorPtr instances from raw data and tensor - * properties. Note that the TensorPtr created by this class will not own the - * data, so it must outlive the TensorPtr. + * properties. Note that the TensorPtr created by this class does not own the + * data, so the data must outlive the TensorPtr. * - * TensorPtrMaker provides a fluent interface for specifying various properties - * of a tensor, such as its type, sizes, data pointer, dimension order, strides, - * and shape dynamism. The final tensor is created by invoking make_tensor_ptr() - * or converting TensorPtrMaker to TensorPtr. + * TensorPtrMaker provides a fluent interface for specifying various tensor + * properties, such as type, sizes, data pointer, dimension order, strides, and + * shape dynamism. The final tensor is created by invoking make_tensor_ptr() or + * by converting TensorPtrMaker to TensorPtr. */ class TensorPtrMaker final { public: @@ -99,11 +99,11 @@ class TensorPtrMaker final { */ TensorPtr make_tensor_ptr() && { return ::executorch::extension::make_tensor_ptr( - type_, std::move(sizes_), data_, std::move(dim_order_), std::move(strides_), + type_, dynamism_, std::move(deleter_)); } @@ -167,16 +167,16 @@ inline TensorPtrMaker for_blob( * Creates a TensorPtr from a raw data pointer and tensor sizes, with an * optional dynamism setting. * - * This function is a convenient way to create a tensor from existing data, with - * the option to specify whether the tensor's shape is static, dynamic, or - * bounded. + * This function provides a convenient way to create a tensor from existing + * data, with the option to specify whether the tensor's shape is static or + * dynamic. * - * @param data A pointer to the raw data to be used by the tensor. It must + * @param data A pointer to the raw data used by the tensor. The data must * outlive the TensorPtr created by this function. * @param sizes A vector specifying the size of each dimension. * @param type The scalar type of the tensor elements. * @param dynamism Specifies whether the tensor's shape is static or dynamic. - * @return A TensorPtr instance that manages the newly created Tensor. + * @return A TensorPtr instance managing the newly created Tensor. */ inline TensorPtr from_blob( void* data, @@ -195,15 +195,16 @@ inline TensorPtr from_blob( * * This function allows for the creation of a tensor from existing data, with * the option to specify custom strides for each dimension and whether the - * tensor's shape is static, dynamic, or bounded. + * tensor’s shape is static, dynamic, or bounded. * - * @param data A pointer to the raw data to be used by the tensor. It must + * @param data A pointer to the raw data used by the tensor. The data must * outlive the TensorPtr created by this function. 
* @param sizes A vector specifying the size of each dimension. * @param strides A vector specifying the stride for each dimension. * @param type The scalar type of the tensor elements. - * @param dynamism Specifies whether the tensor's shape is static or dynamic. - * @return A TensorPtr instance that manages the newly created Tensor. + * @param dynamism Specifies whether the tensor's shape is static, dynamic, or + * bounded. + * @return A TensorPtr instance managing the newly created Tensor. */ inline TensorPtr from_blob( void* data, @@ -306,9 +307,10 @@ TensorPtr empty_strided( * This function allocates memory for the tensor elements but does not * initialize them with any specific values. * - * @param other A reference to another tensor, whose size and properties will be + * @param other A reference to another tensor, whose size and properties are * used. - * @param type The scalar type of the tensor elements. + * @param type The scalar type of the tensor elements. If not provided, the + * scalar type of the other tensor is used. * @param dynamism Specifies whether the tensor's shape is static or dynamic. * @return A TensorPtr instance managing the newly created Tensor. */ @@ -397,7 +399,7 @@ inline TensorPtr full_like( * Creates a TensorPtr filled with the specified value. * * @param sizes A vector specifying the size of each dimension. - * @param fill_value The value to fill the tensor with. + * @param fill_value The value used to fill the tensor. * @param type The scalar type of the tensor elements. * @param dynamism Specifies whether the tensor's shape is static or dynamic. * @return A TensorPtr instance managing the newly created Tensor. @@ -412,11 +414,10 @@ inline TensorPtr full( } /** - * Creates a TensorPtr that holds a scalar value. + * Creates a TensorPtr holding a scalar value. * - * @param value The scalar value to create the tensor with. + * @param value The scalar value for the tensor. * @param type The scalar type of the tensor elements. - * @param dynamism Specifies whether the tensor's shape is static or dynamic. * @return A TensorPtr instance managing the newly created scalar Tensor. */ inline TensorPtr scalar_tensor( @@ -429,10 +430,10 @@ inline TensorPtr scalar_tensor( * Creates a TensorPtr filled with ones, with the same size and properties as * another tensor. * - * @param other A reference to another tensor, whose size and properties will be + * @param other A reference to another tensor, whose size and properties are * used. - * @param type The scalar type of the tensor elements. If not specified, the - * scalar type of the `other` tensor is used. + * @param type The scalar type of the tensor elements. If not provided, the + * scalar type of the other tensor is used. * @param dynamism Specifies whether the tensor's shape is static or dynamic. * @return A TensorPtr instance managing the newly created Tensor. */ @@ -553,7 +554,8 @@ inline TensorPtr rand( } /** - * Creates a TensorPtr filled with random values from a normal distribution. + * Creates a TensorPtr filled with random values between 0 and 1, with specified + * strides. * * @param sizes A vector specifying the size of each dimension. * @param strides A vector specifying the stride for each dimension. @@ -594,7 +596,8 @@ inline TensorPtr randn_like( } /** - * Creates a TensorPtr filled with random values from a normal distribution. + * Creates a TensorPtr filled with random values sampled from a normal + * distribution. * * @param sizes A vector specifying the size of each dimension. 
* @param type The scalar type of the tensor elements. @@ -661,10 +664,11 @@ inline TensorPtr randint_like( } /** - * Creates a TensorPtr filled with random integer values in the given range. + * Creates a TensorPtr filled with random integer values within the specified + * range. * - * @param low The lower bound (inclusive) of the random values. - * @param high The upper bound (exclusive) of the random values. + * @param low The inclusive lower bound of the random values. + * @param high The exclusive upper bound of the random values. * @param sizes A vector specifying the size of each dimension. * @param type The scalar type of the tensor elements. * @param dynamism Specifies whether the tensor's shape is static or dynamic. diff --git a/extension/tensor/test/tensor_impl_ptr_test.cpp b/extension/tensor/test/tensor_impl_ptr_test.cpp index 642e873870..b345258c2c 100644 --- a/extension/tensor/test/tensor_impl_ptr_test.cpp +++ b/extension/tensor/test/tensor_impl_ptr_test.cpp @@ -25,8 +25,7 @@ class TensorImplPtrTest : public ::testing::Test { TEST_F(TensorImplPtrTest, ScalarTensorCreation) { float scalar_data = 3.14f; - auto tensor_impl = - make_tensor_impl_ptr(exec_aten::ScalarType::Float, {}, &scalar_data); + auto tensor_impl = make_tensor_impl_ptr({}, &scalar_data); EXPECT_EQ(tensor_impl->numel(), 1); EXPECT_EQ(tensor_impl->dim(), 0); @@ -48,8 +47,7 @@ TEST_F(TensorImplPtrTest, ScalarTensorOwningData) { TEST_F(TensorImplPtrTest, TensorImplCreation) { float data[20] = {2}; - auto tensor_impl = make_tensor_impl_ptr( - exec_aten::ScalarType::Float, {4, 5}, data, {0, 1}, {5, 1}); + auto tensor_impl = make_tensor_impl_ptr({4, 5}, data, {0, 1}, {5, 1}); EXPECT_EQ(tensor_impl->dim(), 2); EXPECT_EQ(tensor_impl->size(0), 4); @@ -63,8 +61,7 @@ TEST_F(TensorImplPtrTest, TensorImplCreation) { TEST_F(TensorImplPtrTest, TensorImplSharedOwnership) { float data[20] = {2}; - auto tensor_impl1 = - make_tensor_impl_ptr(exec_aten::ScalarType::Float, {4, 5}, data); + auto tensor_impl1 = make_tensor_impl_ptr({4, 5}, data); auto tensor_impl2 = tensor_impl1; EXPECT_EQ(tensor_impl1.get(), tensor_impl2.get()); @@ -77,8 +74,7 @@ TEST_F(TensorImplPtrTest, TensorImplSharedOwnership) { TEST_F(TensorImplPtrTest, TensorImplInferredDimOrderAndStrides) { float data[12] = {0}; - auto tensor_impl = make_tensor_impl_ptr( - exec_aten::ScalarType::Float, {3, 4}, data, {}, {4, 1}); + auto tensor_impl = make_tensor_impl_ptr({3, 4}, data, {}, {4, 1}); EXPECT_EQ(tensor_impl->dim(), 2); EXPECT_EQ(tensor_impl->size(0), 3); @@ -90,8 +86,7 @@ TEST_F(TensorImplPtrTest, TensorImplInferredDimOrderAndStrides) { TEST_F(TensorImplPtrTest, TensorImplInferredDimOrderCustomStrides) { float data[12] = {0}; - auto tensor_impl = make_tensor_impl_ptr( - exec_aten::ScalarType::Float, {3, 4}, data, {}, {1, 3}); + auto tensor_impl = make_tensor_impl_ptr({3, 4}, data, {}, {1, 3}); EXPECT_EQ(tensor_impl->dim(), 2); EXPECT_EQ(tensor_impl->size(0), 3); @@ -102,8 +97,7 @@ TEST_F(TensorImplPtrTest, TensorImplInferredDimOrderCustomStrides) { TEST_F(TensorImplPtrTest, TensorImplDefaultDimOrderAndStrides) { float data[24] = {0}; - auto tensor_impl = - make_tensor_impl_ptr(exec_aten::ScalarType::Float, {2, 3, 4}, data); + auto tensor_impl = make_tensor_impl_ptr({2, 3, 4}, data); EXPECT_EQ(tensor_impl->dim(), 3); EXPECT_EQ(tensor_impl->size(0), 2); @@ -117,17 +111,12 @@ TEST_F(TensorImplPtrTest, TensorImplDefaultDimOrderAndStrides) { TEST_F(TensorImplPtrTest, TensorImplMismatchStridesAndDimOrder) { float data[12] = {0}; ET_EXPECT_DEATH( - { - auto _ = 
make_tensor_impl_ptr( - exec_aten::ScalarType::Float, {3, 4}, data, {1, 0}, {1, 4}); - }, - ""); + { auto _ = make_tensor_impl_ptr({3, 4}, data, {1, 0}, {1, 4}); }, ""); } TEST_F(TensorImplPtrTest, TensorImplCustomDimOrderAndStrides) { float data[12] = {0}; - auto tensor_impl = make_tensor_impl_ptr( - exec_aten::ScalarType::Float, {3, 4}, data, {1, 0}, {1, 3}); + auto tensor_impl = make_tensor_impl_ptr({3, 4}, data, {1, 0}, {1, 3}); EXPECT_EQ(tensor_impl->dim(), 2); EXPECT_EQ(tensor_impl->size(0), 3); @@ -140,16 +129,14 @@ TEST_F(TensorImplPtrTest, TensorImplInvalidDimOrder) { ET_EXPECT_DEATH( { float data[20] = {2}; - auto _ = make_tensor_impl_ptr( - exec_aten::ScalarType::Float, {4, 5}, data, {2, 1}); + auto _ = make_tensor_impl_ptr({4, 5}, data, {2, 1}, {1, 4}); }, ""); } TEST_F(TensorImplPtrTest, TensorImplCustomDeleter) { float data[20] = {4}; - auto tensor_impl = - make_tensor_impl_ptr(exec_aten::ScalarType::Float, {4, 5}, data); + auto tensor_impl = make_tensor_impl_ptr({4, 5}, data); TensorImplPtr copied_tensor_impl = tensor_impl; EXPECT_EQ(tensor_impl.use_count(), copied_tensor_impl.use_count()); @@ -163,11 +150,11 @@ TEST_F(TensorImplPtrTest, TensorImplDataDeleterReleasesCapturedSharedPtr) { std::shared_ptr data_ptr( new float[10], [](float* ptr) { delete[] ptr; }); auto tensor_impl = make_tensor_impl_ptr( - exec_aten::ScalarType::Float, {4, 5}, data_ptr.get(), {}, {}, + exec_aten::ScalarType::Float, exec_aten::TensorShapeDynamism::DYNAMIC_BOUND, [data_ptr, &deleter_called](void*) mutable { deleter_called = true; }); @@ -275,8 +262,7 @@ TEST_F(TensorImplPtrTest, TensorImplAmbiguityWithMixedVectors) { TEST_F(TensorImplPtrTest, SharedDataManagement) { auto data = std::make_shared>(100, 1.0f); - auto tensor_impl1 = make_tensor_impl_ptr( - exec_aten::ScalarType::Float, {10, 10}, data->data()); + auto tensor_impl1 = make_tensor_impl_ptr({10, 10}, data->data()); auto tensor_impl2 = tensor_impl1; EXPECT_EQ(tensor_impl1.get(), tensor_impl2.get()); @@ -298,11 +284,11 @@ TEST_F(TensorImplPtrTest, CustomDeleterWithSharedData) { bool deleter_called = false; { auto tensor_impl = make_tensor_impl_ptr( - exec_aten::ScalarType::Float, {10, 10}, data->data(), {}, {}, + exec_aten::ScalarType::Float, exec_aten::TensorShapeDynamism::DYNAMIC_BOUND, [data, &deleter_called](void*) mutable { deleter_called = true; @@ -339,8 +325,7 @@ TEST_F(TensorImplPtrTest, TensorImplUint8BufferWithFloatScalarType) { float_data[2] = 3.0f; float_data[3] = 4.0f; - auto tensor_impl = make_tensor_impl_ptr( - exec_aten::ScalarType::Float, {2, 2}, std::move(data)); + auto tensor_impl = make_tensor_impl_ptr({2, 2}, std::move(data)); EXPECT_EQ(tensor_impl->dim(), 2); EXPECT_EQ(tensor_impl->size(0), 2); @@ -358,18 +343,14 @@ TEST_F(TensorImplPtrTest, TensorImplUint8BufferTooSmallExpectDeath) { std::vector data( 2 * exec_aten::elementSize(exec_aten::ScalarType::Float)); ET_EXPECT_DEATH( - { - auto tensor_impl = make_tensor_impl_ptr( - exec_aten::ScalarType::Float, {2, 2}, std::move(data)); - }, + { auto tensor_impl = make_tensor_impl_ptr({2, 2}, std::move(data)); }, ""); } TEST_F(TensorImplPtrTest, TensorImplUint8BufferTooLarge) { std::vector data( 4 * exec_aten::elementSize(exec_aten::ScalarType::Float)); - auto tensor_impl = make_tensor_impl_ptr( - exec_aten::ScalarType::Float, {2, 2}, std::move(data)); + auto tensor_impl = make_tensor_impl_ptr({2, 2}, std::move(data)); EXPECT_EQ(tensor_impl->dim(), 2); EXPECT_EQ(tensor_impl->size(0), 2); @@ -381,15 +362,7 @@ TEST_F(TensorImplPtrTest, TensorImplUint8BufferTooLarge) { 
TEST_F(TensorImplPtrTest, StridesAndDimOrderMustMatchSizes) { float data[12] = {0}; ET_EXPECT_DEATH( - { - auto _ = make_tensor_impl_ptr( - exec_aten::ScalarType::Float, {3, 4}, data, {}, {1}); - }, - ""); + { auto _ = make_tensor_impl_ptr({3, 4}, data, {}, {1}); }, ""); ET_EXPECT_DEATH( - { - auto _ = make_tensor_impl_ptr( - exec_aten::ScalarType::Float, {3, 4}, data, {0}, {4, 1}); - }, - ""); + { auto _ = make_tensor_impl_ptr({3, 4}, data, {0}, {4, 1}); }, ""); } diff --git a/extension/tensor/test/tensor_ptr_test.cpp b/extension/tensor/test/tensor_ptr_test.cpp index 7fabf9ab8f..00614f24eb 100644 --- a/extension/tensor/test/tensor_ptr_test.cpp +++ b/extension/tensor/test/tensor_ptr_test.cpp @@ -24,7 +24,7 @@ class TensorPtrTest : public ::testing::Test { TEST_F(TensorPtrTest, ScalarTensorCreation) { float scalar_data = 3.14f; - auto tensor = make_tensor_ptr(exec_aten::ScalarType::Float, {}, &scalar_data); + auto tensor = make_tensor_ptr({}, &scalar_data); EXPECT_EQ(tensor->numel(), 1); EXPECT_EQ(tensor->dim(), 0); @@ -80,8 +80,7 @@ TEST_F(TensorPtrTest, ScalarTensorSingleValueCreation) { TEST_F(TensorPtrTest, CreateTensorWithStridesAndDimOrder) { float data[20] = {2}; - auto tensor = make_tensor_ptr( - exec_aten::ScalarType::Float, {4, 5}, data, {0, 1}, {5, 1}); + auto tensor = make_tensor_ptr({4, 5}, data, {0, 1}, {5, 1}); EXPECT_EQ(tensor->dim(), 2); EXPECT_EQ(tensor->size(0), 4); EXPECT_EQ(tensor->size(1), 5); @@ -93,7 +92,7 @@ TEST_F(TensorPtrTest, CreateTensorWithStridesAndDimOrder) { TEST_F(TensorPtrTest, TensorSharingImpl) { float data[20] = {2}; - auto tensor1 = make_tensor_ptr(exec_aten::ScalarType::Float, {4, 5}, data); + auto tensor1 = make_tensor_ptr({4, 5}, data); auto tensor2 = make_tensor_ptr(tensor1); EXPECT_EQ(tensor1->unsafeGetTensorImpl(), tensor2->unsafeGetTensorImpl()); } @@ -103,8 +102,7 @@ TEST_F(TensorPtrTest, TensorImplLifetime) { EXPECT_EQ(tensor, nullptr); { float data[20] = {2}; - auto tensor_impl = - make_tensor_impl_ptr(exec_aten::ScalarType::Float, {4, 5}, data); + auto tensor_impl = make_tensor_impl_ptr({4, 5}, data); tensor = make_tensor_ptr(tensor_impl); } EXPECT_EQ(tensor->dim(), 2); @@ -114,10 +112,10 @@ TEST_F(TensorPtrTest, TensorImplLifetime) { TEST_F(TensorPtrTest, TensorWithZeroDimensionAndElements) { float data[20] = {2}; - auto tensor = make_tensor_ptr(exec_aten::ScalarType::Float, {}, data); + auto tensor = make_tensor_ptr({}, data); EXPECT_EQ(tensor->dim(), 0); EXPECT_EQ(tensor->numel(), 1); - tensor = make_tensor_ptr(exec_aten::ScalarType::Float, {0, 5}, data); + tensor = make_tensor_ptr({0, 5}, data); EXPECT_EQ(tensor->dim(), 2); EXPECT_EQ(tensor->numel(), 0); } @@ -125,11 +123,11 @@ TEST_F(TensorPtrTest, TensorWithZeroDimensionAndElements) { TEST_F(TensorPtrTest, TensorResize) { float data[20] = {2}; auto tensor = make_tensor_ptr( - exec_aten::ScalarType::Float, {4, 5}, data, {}, {}, + exec_aten::ScalarType::Float, exec_aten::TensorShapeDynamism::DYNAMIC_UNBOUND); EXPECT_EQ(resize_tensor_ptr(tensor, {5, 4}), Error::Ok); EXPECT_EQ(tensor->size(0), 5); @@ -138,7 +136,7 @@ TEST_F(TensorPtrTest, TensorResize) { TEST_F(TensorPtrTest, TensorDataAccess) { float data[6] = {1, 2, 3, 4, 5, 6}; - auto tensor = make_tensor_ptr(exec_aten::ScalarType::Float, {2, 3}, data); + auto tensor = make_tensor_ptr({2, 3}, data); EXPECT_EQ(tensor->const_data_ptr()[0], 1); EXPECT_EQ(tensor->const_data_ptr()[5], 6); tensor->mutable_data_ptr()[0] = 10; @@ -149,11 +147,11 @@ TEST_F(TensorPtrTest, TensorWithCustomDataDeleter) { auto deleter_called = false; float* data = 
      new float[20]();
   auto tensor = make_tensor_ptr(
-      exec_aten::ScalarType::Float,
       {4, 5},
       data,
       {},
       {},
+      exec_aten::ScalarType::Float,
       exec_aten::TensorShapeDynamism::DYNAMIC_BOUND,
       [&deleter_called](void* ptr) {
         deleter_called = true;
@@ -169,11 +167,11 @@ TEST_F(TensorPtrTest, TensorManagesMovedVector) {
   std::vector<float> data(20, 3.0f);
   auto* data_ptr = data.data();
   auto tensor = make_tensor_ptr(
-      exec_aten::ScalarType::Float,
       {4, 5},
       data_ptr,
       {},
       {},
+      exec_aten::ScalarType::Float,
       exec_aten::TensorShapeDynamism::DYNAMIC_BOUND,
       [moved_data = std::move(data), &deleter_called](void*) mutable {
         deleter_called = true;
@@ -191,11 +189,11 @@ TEST_F(TensorPtrTest, TensorDeleterReleasesCapturedSharedPtr) {
   bool deleter_called = false;
   std::shared_ptr<float> data_ptr(
       new float[10], [](float* ptr) { delete[] ptr; });
   auto tensor = make_tensor_ptr(
-      exec_aten::ScalarType::Float,
       {4, 5},
       data_ptr.get(),
       {},
       {},
+      exec_aten::ScalarType::Float,
       exec_aten::TensorShapeDynamism::DYNAMIC_BOUND,
       [data_ptr, &deleter_called](void*) mutable { deleter_called = true; });
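
For reference, a minimal usage sketch of the new argument order (not part of the
diff itself; it assumes the public extension/tensor headers and mirrors the call
patterns exercised by the updated tests above). The scalar type now trails
sizes, data, dim_order, and strides, defaulting to Float for raw pointers and to
the deduced element type for vector-backed data:

// Sketch only: assumes <executorch/extension/tensor/tensor_ptr.h> is the
// include path for these factories and that the exec_aten aliases it pulls in
// are visible.
#include <vector>

#include <executorch/extension/tensor/tensor_ptr.h>

using executorch::extension::make_tensor_ptr;

void example_new_argument_order() {
  float data[12] = {0};

  // Previously: make_tensor_ptr(exec_aten::ScalarType::Float, {3, 4}, data);
  // Now the type argument is trailing and defaults to Float:
  auto a = make_tensor_ptr({3, 4}, data);

  // The type can still be passed explicitly, after dim_order and strides:
  auto b = make_tensor_ptr(
      {3, 4}, data, {0, 1}, {4, 1}, exec_aten::ScalarType::Float);

  // For vector-backed data the type is deduced from the element type:
  auto c = make_tensor_ptr({2, 3}, std::vector<float>{1, 2, 3, 4, 5, 6});
}

Passing a scalar type that conflicts with the deduced element type trips the
ET_CHECK_MSG("Type does not match the deduced type.") guard added to the
templated overloads in tensor_impl_ptr.h.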
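
The owning overload that takes a raw std::vector<uint8_t> buffer follows the
same pattern; a sketch under the same assumptions, showing the type argument
moving from the front of the call to right after the data:

#include <cstdint>
#include <vector>

#include <executorch/extension/tensor/tensor_ptr.h>

using executorch::extension::make_tensor_ptr;

void example_raw_buffer() {
  // A 2x2 float tensor backed by a moved-in byte buffer. Previously this was
  // make_tensor_ptr(exec_aten::ScalarType::Float, {2, 2}, std::move(data));
  std::vector<uint8_t> data(4 * sizeof(float));
  auto tensor = make_tensor_ptr(
      {2, 2}, std::move(data), exec_aten::ScalarType::Float);
}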