diff --git a/runtime/core/evalue.h b/runtime/core/evalue.h
index d2e57c35d4..8003a89cbf 100644
--- a/runtime/core/evalue.h
+++ b/runtime/core/evalue.h
@@ -26,8 +26,8 @@ struct evalue_to_const_ref_overload_return {
 };
 
 template <>
-struct evalue_to_const_ref_overload_return<exec_aten::Tensor> {
-  using type = const exec_aten::Tensor&;
+struct evalue_to_const_ref_overload_return<executorch::aten::Tensor> {
+  using type = const executorch::aten::Tensor&;
 };
 
 template <typename T>
@@ -36,8 +36,8 @@ struct evalue_to_ref_overload_return {
 };
 
 template <>
-struct evalue_to_ref_overload_return<exec_aten::Tensor> {
-  using type = exec_aten::Tensor&;
+struct evalue_to_ref_overload_return<executorch::aten::Tensor> {
+  using type = executorch::aten::Tensor&;
 };
 
 } // namespace internal
@@ -67,18 +67,19 @@ class BoxedEvalueList {
   /*
    * Constructs and returns the list of T specified by the EValue pointers
    */
-  exec_aten::ArrayRef<T> get() const;
+  executorch::aten::ArrayRef<T> get() const;
 
  private:
   // Source of truth for the list
-  exec_aten::ArrayRef<EValue*> wrapped_vals_;
+  executorch::aten::ArrayRef<EValue*> wrapped_vals_;
   // Same size as wrapped_vals
   mutable T* unwrapped_vals_;
 };
 
 template <>
-exec_aten::ArrayRef<exec_aten::optional<exec_aten::Tensor>>
-BoxedEvalueList<exec_aten::optional<exec_aten::Tensor>>::get() const;
+executorch::aten::ArrayRef<executorch::aten::optional<executorch::aten::Tensor>>
+BoxedEvalueList<executorch::aten::optional<executorch::aten::Tensor>>::get()
+    const;
 
 // Aggregate typing system similar to IValue only slimmed down with less
 // functionality, no dependencies on atomic, and fewer supported types to better
@@ -96,18 +97,18 @@ struct EValue {
     bool as_bool;
     // TODO(jakeszwe): convert back to pointers to optimize size of this
     // struct
-    exec_aten::ArrayRef<char> as_string;
-    exec_aten::ArrayRef<double> as_double_list;
-    exec_aten::ArrayRef<bool> as_bool_list;
+    executorch::aten::ArrayRef<char> as_string;
+    executorch::aten::ArrayRef<double> as_double_list;
+    executorch::aten::ArrayRef<bool> as_bool_list;
     BoxedEvalueList<int64_t> as_int_list;
-    BoxedEvalueList<exec_aten::Tensor> as_tensor_list;
-    BoxedEvalueList<exec_aten::optional<exec_aten::Tensor>>
+    BoxedEvalueList<executorch::aten::Tensor> as_tensor_list;
+    BoxedEvalueList<executorch::aten::optional<executorch::aten::Tensor>>
         as_list_optional_tensor;
   } copyable_union;
 
   // Since a Tensor just holds a TensorImpl*, there's no value to use Tensor*
   // here.
-  exec_aten::Tensor as_tensor;
+  executorch::aten::Tensor as_tensor;
 
   Payload() {}
   ~Payload() {}
@@ -197,7 +198,7 @@ struct EValue {
 
   /****** Scalar Type ******/
   /// Construct an EValue using the implicit value of a Scalar.
-  /*implicit*/ EValue(exec_aten::Scalar s) {
+  /*implicit*/ EValue(executorch::aten::Scalar s) {
     if (s.isIntegral(false)) {
       tag = Tag::Int;
       payload.copyable_union.as_int = s.to<int64_t>();
@@ -216,7 +217,7 @@ struct EValue {
     return tag == Tag::Int || tag == Tag::Double || tag == Tag::Bool;
   }
 
-  exec_aten::Scalar toScalar() const {
+  executorch::aten::Scalar toScalar() const {
     // Convert from implicit value to Scalar using implicit constructors.
 
     if (isDouble()) {
@@ -231,11 +232,11 @@ struct EValue {
   }
 
   /****** Tensor Type ******/
-  /*implicit*/ EValue(exec_aten::Tensor t) : tag(Tag::Tensor) {
+  /*implicit*/ EValue(executorch::aten::Tensor t) : tag(Tag::Tensor) {
     // When built in aten mode, at::Tensor has a non trivial constructor
     // destructor, so regular assignment to a union field is UB. Instead we must
     // go through placement new (which causes a refcount bump).
-    new (&payload.as_tensor) exec_aten::Tensor(t);
+    new (&payload.as_tensor) executorch::aten::Tensor(t);
   }
 
   // Template constructor that allows construction from types that can be
@@ -261,35 +262,36 @@ struct EValue {
     return tag == Tag::Tensor;
   }
 
-  exec_aten::Tensor toTensor() && {
+  executorch::aten::Tensor toTensor() && {
     ET_CHECK_MSG(isTensor(), "EValue is not a Tensor.");
     auto res = std::move(payload.as_tensor);
     clearToNone();
     return res;
   }
 
-  exec_aten::Tensor& toTensor() & {
+  executorch::aten::Tensor& toTensor() & {
     ET_CHECK_MSG(isTensor(), "EValue is not a Tensor.");
     return payload.as_tensor;
   }
 
-  const exec_aten::Tensor& toTensor() const& {
+  const executorch::aten::Tensor& toTensor() const& {
     ET_CHECK_MSG(isTensor(), "EValue is not a Tensor.");
     return payload.as_tensor;
   }
 
   /****** String Type ******/
   /*implicit*/ EValue(const char* s, size_t size) : tag(Tag::String) {
-    payload.copyable_union.as_string = exec_aten::ArrayRef<char>(s, size);
+    payload.copyable_union.as_string =
+        executorch::aten::ArrayRef<char>(s, size);
   }
 
   bool isString() const {
     return tag == Tag::String;
   }
 
-  exec_aten::string_view toString() const {
+  executorch::aten::string_view toString() const {
     ET_CHECK_MSG(isString(), "EValue is not a String.");
-    return exec_aten::string_view(
+    return executorch::aten::string_view(
         payload.copyable_union.as_string.data(),
         payload.copyable_union.as_string.size());
   }
@@ -303,13 +305,13 @@ struct EValue {
     return tag == Tag::ListInt;
   }
 
-  exec_aten::ArrayRef<int64_t> toIntList() const {
+  executorch::aten::ArrayRef<int64_t> toIntList() const {
     ET_CHECK_MSG(isIntList(), "EValue is not an Int List.");
     return payload.copyable_union.as_int_list.get();
   }
 
   /****** Bool List Type ******/
-  /*implicit*/ EValue(exec_aten::ArrayRef<bool> b) : tag(Tag::ListBool) {
+  /*implicit*/ EValue(executorch::aten::ArrayRef<bool> b) : tag(Tag::ListBool) {
     payload.copyable_union.as_bool_list = b;
   }
 
@@ -317,13 +319,14 @@ struct EValue {
     return tag == Tag::ListBool;
   }
 
-  exec_aten::ArrayRef<bool> toBoolList() const {
+  executorch::aten::ArrayRef<bool> toBoolList() const {
     ET_CHECK_MSG(isBoolList(), "EValue is not a Bool List.");
     return payload.copyable_union.as_bool_list;
   }
 
   /****** Double List Type ******/
-  /*implicit*/ EValue(exec_aten::ArrayRef<double> d) : tag(Tag::ListDouble) {
+  /*implicit*/ EValue(executorch::aten::ArrayRef<double> d)
+      : tag(Tag::ListDouble) {
     payload.copyable_union.as_double_list = d;
   }
 
@@ -331,13 +334,13 @@ struct EValue {
     return tag == Tag::ListDouble;
   }
 
-  exec_aten::ArrayRef<double> toDoubleList() const {
+  executorch::aten::ArrayRef<double> toDoubleList() const {
     ET_CHECK_MSG(isDoubleList(), "EValue is not a Double List.");
     return payload.copyable_union.as_double_list;
   }
 
   /****** Tensor List Type ******/
-  /*implicit*/ EValue(BoxedEvalueList<exec_aten::Tensor> t)
+  /*implicit*/ EValue(BoxedEvalueList<executorch::aten::Tensor> t)
       : tag(Tag::ListTensor) {
     payload.copyable_union.as_tensor_list = t;
   }
 
@@ -346,13 +349,14 @@ struct EValue {
     return tag == Tag::ListTensor;
   }
 
-  exec_aten::ArrayRef<exec_aten::Tensor> toTensorList() const {
+  executorch::aten::ArrayRef<executorch::aten::Tensor> toTensorList() const {
     ET_CHECK_MSG(isTensorList(), "EValue is not a Tensor List.");
     return payload.copyable_union.as_tensor_list.get();
   }
 
   /****** List Optional Tensor Type ******/
-  /*implicit*/ EValue(BoxedEvalueList<exec_aten::optional<exec_aten::Tensor>> t)
+  /*implicit*/ EValue(
+      BoxedEvalueList<executorch::aten::optional<executorch::aten::Tensor>> t)
       : tag(Tag::ListOptionalTensor) {
     payload.copyable_union.as_list_optional_tensor = t;
   }
 
@@ -361,34 +365,39 @@ struct EValue {
     return tag == Tag::ListOptionalTensor;
  }
 
-  exec_aten::ArrayRef<exec_aten::optional<exec_aten::Tensor>>
+  executorch::aten::ArrayRef<
+      executorch::aten::optional<executorch::aten::Tensor>>
   toListOptionalTensor() const {
     return
payload.copyable_union.as_list_optional_tensor.get(); } /****** ScalarType Type ******/ - exec_aten::ScalarType toScalarType() const { + executorch::aten::ScalarType toScalarType() const { ET_CHECK_MSG(isInt(), "EValue is not a ScalarType."); - return static_cast(payload.copyable_union.as_int); + return static_cast( + payload.copyable_union.as_int); } /****** MemoryFormat Type ******/ - exec_aten::MemoryFormat toMemoryFormat() const { + executorch::aten::MemoryFormat toMemoryFormat() const { ET_CHECK_MSG(isInt(), "EValue is not a MemoryFormat."); - return static_cast(payload.copyable_union.as_int); + return static_cast( + payload.copyable_union.as_int); } /****** Layout Type ******/ - exec_aten::Layout toLayout() const { + executorch::aten::Layout toLayout() const { ET_CHECK_MSG(isInt(), "EValue is not a Layout."); - return static_cast(payload.copyable_union.as_int); + return static_cast(payload.copyable_union.as_int); } /****** Device Type ******/ - exec_aten::Device toDevice() const { + executorch::aten::Device toDevice() const { ET_CHECK_MSG(isInt(), "EValue is not a Device."); - return exec_aten::Device( - static_cast(payload.copyable_union.as_int), -1); + return executorch::aten::Device( + static_cast( + payload.copyable_union.as_int), + -1); } template @@ -403,9 +412,9 @@ struct EValue { * an uninitialized state. */ template - inline exec_aten::optional toOptional() const { + inline executorch::aten::optional toOptional() const { if (this->isNone()) { - return exec_aten::nullopt; + return executorch::aten::nullopt; } return this->to(); } @@ -421,7 +430,7 @@ struct EValue { void moveFrom(EValue&& rhs) noexcept { if (rhs.isTensor()) { new (&payload.as_tensor) - exec_aten::Tensor(std::move(rhs.payload.as_tensor)); + executorch::aten::Tensor(std::move(rhs.payload.as_tensor)); rhs.payload.as_tensor.~Tensor(); } else { payload.copyable_union = rhs.payload.copyable_union; @@ -451,7 +460,7 @@ struct EValue { EValue(const Payload& p, Tag t) : tag(t) { if (isTensor()) { - new (&payload.as_tensor) exec_aten::Tensor(p.as_tensor); + new (&payload.as_tensor) executorch::aten::Tensor(p.as_tensor); } else { payload.copyable_union = p.copyable_union; } @@ -480,60 +489,64 @@ struct EValue { return static_cast(this->method_name()); \ } -EVALUE_DEFINE_TO(exec_aten::Scalar, toScalar) +EVALUE_DEFINE_TO(executorch::aten::Scalar, toScalar) EVALUE_DEFINE_TO(int64_t, toInt) EVALUE_DEFINE_TO(bool, toBool) EVALUE_DEFINE_TO(double, toDouble) -EVALUE_DEFINE_TO(exec_aten::string_view, toString) -EVALUE_DEFINE_TO(exec_aten::ScalarType, toScalarType) -EVALUE_DEFINE_TO(exec_aten::MemoryFormat, toMemoryFormat) -EVALUE_DEFINE_TO(exec_aten::Layout, toLayout) -EVALUE_DEFINE_TO(exec_aten::Device, toDevice) +EVALUE_DEFINE_TO(executorch::aten::string_view, toString) +EVALUE_DEFINE_TO(executorch::aten::ScalarType, toScalarType) +EVALUE_DEFINE_TO(executorch::aten::MemoryFormat, toMemoryFormat) +EVALUE_DEFINE_TO(executorch::aten::Layout, toLayout) +EVALUE_DEFINE_TO(executorch::aten::Device, toDevice) // Tensor and Optional Tensor EVALUE_DEFINE_TO( - exec_aten::optional, - toOptional) -EVALUE_DEFINE_TO(exec_aten::Tensor, toTensor) + executorch::aten::optional, + toOptional) +EVALUE_DEFINE_TO(executorch::aten::Tensor, toTensor) // IntList and Optional IntList -EVALUE_DEFINE_TO(exec_aten::ArrayRef, toIntList) +EVALUE_DEFINE_TO(executorch::aten::ArrayRef, toIntList) EVALUE_DEFINE_TO( - exec_aten::optional>, - toOptional>) + executorch::aten::optional>, + toOptional>) // DoubleList and Optional DoubleList 
-EVALUE_DEFINE_TO(exec_aten::ArrayRef, toDoubleList) +EVALUE_DEFINE_TO(executorch::aten::ArrayRef, toDoubleList) EVALUE_DEFINE_TO( - exec_aten::optional>, - toOptional>) + executorch::aten::optional>, + toOptional>) // BoolList and Optional BoolList -EVALUE_DEFINE_TO(exec_aten::ArrayRef, toBoolList) +EVALUE_DEFINE_TO(executorch::aten::ArrayRef, toBoolList) EVALUE_DEFINE_TO( - exec_aten::optional>, - toOptional>) + executorch::aten::optional>, + toOptional>) // TensorList and Optional TensorList -EVALUE_DEFINE_TO(exec_aten::ArrayRef, toTensorList) EVALUE_DEFINE_TO( - exec_aten::optional>, - toOptional>) + executorch::aten::ArrayRef, + toTensorList) +EVALUE_DEFINE_TO( + executorch::aten::optional< + executorch::aten::ArrayRef>, + toOptional>) // List of Optional Tensor EVALUE_DEFINE_TO( - exec_aten::ArrayRef>, + executorch::aten::ArrayRef< + executorch::aten::optional>, toListOptionalTensor) #undef EVALUE_DEFINE_TO template -exec_aten::ArrayRef BoxedEvalueList::get() const { - for (typename exec_aten::ArrayRef::size_type i = 0; +executorch::aten::ArrayRef BoxedEvalueList::get() const { + for (typename executorch::aten::ArrayRef::size_type i = 0; i < wrapped_vals_.size(); i++) { ET_CHECK(wrapped_vals_[i] != nullptr); unwrapped_vals_[i] = wrapped_vals_[i]->template to(); } - return exec_aten::ArrayRef{unwrapped_vals_, wrapped_vals_.size()}; + return executorch::aten::ArrayRef{unwrapped_vals_, wrapped_vals_.size()}; } } // namespace runtime diff --git a/runtime/core/event_tracer.h b/runtime/core/event_tracer.h index ff483b1f77..5cf4f74a38 100644 --- a/runtime/core/event_tracer.h +++ b/runtime/core/event_tracer.h @@ -287,7 +287,7 @@ class EventTracer { virtual void log_intermediate_output_delegate( const char* name, DebugHandle delegate_debug_index, - const exec_aten::Tensor& output) = 0; + const executorch::aten::Tensor& output) = 0; /** * Log an intermediate tensor array output from a delegate. @@ -307,7 +307,7 @@ class EventTracer { virtual void log_intermediate_output_delegate( const char* name, DebugHandle delegate_debug_index, - const ArrayRef output) = 0; + const ArrayRef output) = 0; /** * Log an intermediate int output from a delegate. diff --git a/runtime/core/event_tracer_hooks_delegate.h b/runtime/core/event_tracer_hooks_delegate.h index 477187e002..b2369fc216 100644 --- a/runtime/core/event_tracer_hooks_delegate.h +++ b/runtime/core/event_tracer_hooks_delegate.h @@ -176,8 +176,8 @@ inline void event_tracer_log_output_delegate( static_assert( std::is_same::value || std::is_same::value || std::is_same::value || - std::is_same::value || - std::is_same>::value, + std::is_same::value || + std::is_same>::value, "Unsupported type for intermediate output"); event_tracer->log_intermediate_output_delegate( name, delegate_debug_id, output); diff --git a/runtime/core/exec_aten/testing_util/tensor_factory.h b/runtime/core/exec_aten/testing_util/tensor_factory.h index 9037b074a0..2ea21e9583 100644 --- a/runtime/core/exec_aten/testing_util/tensor_factory.h +++ b/runtime/core/exec_aten/testing_util/tensor_factory.h @@ -56,7 +56,7 @@ inline size_t sizes_to_numel(const std::vector& sizes) { inline bool check_strides( const std::vector sizes, - const std::vector strides) { + const std::vector strides) { if (sizes.size() != strides.size()) { // The length of stride vector shall equal to size vector. 
return false; @@ -149,14 +149,14 @@ inline bool check_dim_order( return true; } -inline std::vector strides_from_dim_order( +inline std::vector strides_from_dim_order( const std::vector& sizes, const std::vector& dim_order) { bool legal = check_dim_order(sizes, dim_order); ET_CHECK_MSG(legal, "The input dim_order variable is illegal."); size_t ndim = sizes.size(); - std::vector strides(ndim); + std::vector strides(ndim); strides[dim_order[ndim - 1]] = 1; for (int i = ndim - 2; i >= 0; --i) { uint8_t cur_dim = dim_order[i]; @@ -191,7 +191,7 @@ inline std::vector channels_last_dim_order(size_t dims) { // Note that this USE_ATEN_LIB section uses ATen-specific namespaces instead of // exec_aten because we know that we're working with ATen, and many of these -// names aren't mapped into exec_aten::. +// names aren't mapped into executorch::aten::. namespace internal { @@ -260,7 +260,7 @@ class TensorFactory { at::Tensor make( const std::vector& sizes, const std::vector& data, - const std::vector strides = {}, + const std::vector strides = {}, ET_UNUSED TensorShapeDynamism dynamism = TensorShapeDynamism::DYNAMIC_UNBOUND) { auto expected_numel = internal::sizes_to_numel(sizes); @@ -371,7 +371,7 @@ class TensorFactory { for (uint8_t i = 0; i < sizes.size(); i++) { contiguous_dim_order[i] = i; } - std::vector contiguous_strides = + std::vector contiguous_strides = internal::strides_from_dim_order(sizes, contiguous_dim_order); for (int32_t i = 0; i < input.dim(); i++) { @@ -527,7 +527,7 @@ class TensorFactory { */ at::Tensor empty_strided( const std::vector& sizes, - const std::vector& strides, + const std::vector& strides, ET_UNUSED TensorShapeDynamism dynamism = TensorShapeDynamism::DYNAMIC_UNBOUND) { auto sizes64 = vec_32_to_64(sizes); @@ -623,7 +623,7 @@ inline void validate_strides( // Note that this !USE_ATEN_LIB section uses ExecuTorch-specific namespaces // instead of exec_aten to make it clear that we're dealing with ETensor, and -// because many of these names aren't mapped into exec_aten::. +// because many of these names aren't mapped into executorch::aten::. namespace internal { @@ -740,7 +740,7 @@ class TensorFactory { torch::executor::Tensor make( const std::vector& sizes, const std::vector& data, - const std::vector strides = {}, + const std::vector strides = {}, TensorShapeDynamism dynamism = TensorShapeDynamism::STATIC) { std::vector default_strides; // Generate strides from the tensor dimensions, assuming contiguous data if @@ -1019,7 +1019,7 @@ class TensorFactory { std::vector sizes_; std::vector data_; std::vector dim_order_; - std::vector strides_; + std::vector strides_; torch::executor::TensorImpl impl_; }; @@ -1040,7 +1040,7 @@ class TensorFactory { * (and Tensors they contain), and must live longer than those TensorLists and * Tensors. */ -template +template class TensorListFactory final { public: TensorListFactory() = default; @@ -1051,15 +1051,18 @@ class TensorListFactory final { * provided Tensors, but filled with zero elements. The dtypes of the template * entries are ignored. 
*/ - exec_aten::TensorList zeros_like( - const std::vector& templates) { - memory_.emplace_back(std::make_unique>()); + executorch::aten::TensorList zeros_like( + const std::vector& templates) { + memory_.emplace_back( + std::make_unique>()); auto& vec = memory_.back(); std::for_each( - templates.begin(), templates.end(), [&](const exec_aten::Tensor& t) { + templates.begin(), + templates.end(), + [&](const executorch::aten::Tensor& t) { vec->push_back(tf_.zeros_like(t)); }); - return exec_aten::TensorList(vec->data(), vec->size()); + return executorch::aten::TensorList(vec->data(), vec->size()); } private: @@ -1069,7 +1072,7 @@ class TensorListFactory final { * vector of pointers so that the elements won't move if the vector needs to * resize/realloc. */ - std::vector>> memory_; + std::vector>> memory_; }; } // namespace testing diff --git a/runtime/core/exec_aten/testing_util/tensor_util.h b/runtime/core/exec_aten/testing_util/tensor_util.h index 3d1aca3478..4284176c2d 100644 --- a/runtime/core/exec_aten/testing_util/tensor_util.h +++ b/runtime/core/exec_aten/testing_util/tensor_util.h @@ -68,8 +68,8 @@ constexpr double kDefaultBFloat16Atol = 1e-2; * outside of the specified tolerance of each other. */ bool tensors_are_close( - const exec_aten::Tensor& a, - const exec_aten::Tensor& b, + const executorch::aten::Tensor& a, + const executorch::aten::Tensor& b, double rtol = internal::kDefaultRtol, std::optional opt_atol = std::nullopt); @@ -99,8 +99,8 @@ bool tensors_are_close( * outside of the specified tolerance of each other. */ bool tensor_data_is_close( - const exec_aten::Tensor& a, - const exec_aten::Tensor& b, + const executorch::aten::Tensor& a, + const executorch::aten::Tensor& b, double rtol = internal::kDefaultRtol, std::optional opt_atol = std::nullopt); @@ -110,9 +110,9 @@ bool tensor_data_is_close( * i. */ bool tensor_lists_are_close( - const exec_aten::Tensor* tensors_a, + const executorch::aten::Tensor* tensors_a, size_t num_tensors_a, - const exec_aten::Tensor* tensors_b, + const executorch::aten::Tensor* tensors_b, size_t num_tensors_b, double rtol = internal::kDefaultRtol, std::optional opt_atol = std::nullopt); diff --git a/runtime/core/exec_aten/util/tensor_util.h b/runtime/core/exec_aten/util/tensor_util.h index 53b65e3f16..eb57f3e099 100644 --- a/runtime/core/exec_aten/util/tensor_util.h +++ b/runtime/core/exec_aten/util/tensor_util.h @@ -122,22 +122,22 @@ }) /// Asserts that all tensors have the same dtype. 
-#define ET_CHECK_SAME_DTYPE2(a__, b__) \ - ({ \ - const ::exec_aten::ScalarType a_type__ = (a__).scalar_type(); \ - const ::exec_aten::ScalarType b_type__ = (b__).scalar_type(); \ - ET_CHECK_MSG( \ - a_type__ == b_type__, \ - ET_TENSOR_CHECK_PREFIX__ ": dtype={%" PRId8 ", %" PRId8 "}", \ - static_cast(a_type__), \ - static_cast(b_type__)); \ +#define ET_CHECK_SAME_DTYPE2(a__, b__) \ + ({ \ + const ::executorch::aten::ScalarType a_type__ = (a__).scalar_type(); \ + const ::executorch::aten::ScalarType b_type__ = (b__).scalar_type(); \ + ET_CHECK_MSG( \ + a_type__ == b_type__, \ + ET_TENSOR_CHECK_PREFIX__ ": dtype={%" PRId8 ", %" PRId8 "}", \ + static_cast(a_type__), \ + static_cast(b_type__)); \ }) #define ET_CHECK_SAME_DTYPE3(a__, b__, c__) \ ({ \ - const ::exec_aten::ScalarType a_type__ = (a__).scalar_type(); \ - const ::exec_aten::ScalarType b_type__ = (b__).scalar_type(); \ - const ::exec_aten::ScalarType c_type__ = (c__).scalar_type(); \ + const ::executorch::aten::ScalarType a_type__ = (a__).scalar_type(); \ + const ::executorch::aten::ScalarType b_type__ = (b__).scalar_type(); \ + const ::executorch::aten::ScalarType c_type__ = (c__).scalar_type(); \ ET_CHECK_MSG( \ a_type__ == b_type__ && b_type__ == c_type__, \ ET_TENSOR_CHECK_PREFIX__ ": dtype={%" PRId8 ", %" PRId8 ", %" PRId8 \ @@ -159,8 +159,8 @@ const size_t b_numel__ = (b__).numel(); \ const size_t a_dim__ = (a__).dim(); \ const size_t b_dim__ = (b__).dim(); \ - const ::exec_aten::ScalarType a_type__ = (a__).scalar_type(); \ - const ::exec_aten::ScalarType b_type__ = (b__).scalar_type(); \ + const ::executorch::aten::ScalarType a_type__ = (a__).scalar_type(); \ + const ::executorch::aten::ScalarType b_type__ = (b__).scalar_type(); \ \ ET_CHECK_MSG( \ a_numel__ == b_numel__ && \ @@ -186,48 +186,48 @@ } \ }) -#define ET_CHECK_SAME_SHAPE_AND_DTYPE3(a__, b__, c__) \ - ({ \ - const size_t a_numel__ = (a__).numel(); \ - const size_t b_numel__ = (b__).numel(); \ - const size_t c_numel__ = (c__).numel(); \ - const size_t a_dim__ = (a__).dim(); \ - const size_t b_dim__ = (b__).dim(); \ - const size_t c_dim__ = (c__).dim(); \ - const ::exec_aten::ScalarType a_type__ = (a__).scalar_type(); \ - const ::exec_aten::ScalarType b_type__ = (b__).scalar_type(); \ - const ::exec_aten::ScalarType c_type__ = (c__).scalar_type(); \ - \ - ET_CHECK_MSG( \ - a_numel__ == b_numel__ && b_numel__ == c_numel__ && \ - ((a_numel__ == 1 && b_numel__ == 1 && c_numel__ == 1) || \ - (a_dim__ == b_dim__ && b_dim__ == c_dim__)) && \ - a_type__ == b_type__ && b_type__ == c_type__, \ - ET_TENSOR_CHECK_PREFIX__ \ - ": numel={%zu, %zu, %zu}, dim={%zu, %zu, %zu}, " \ - "dtype={%" PRId8 ", %" PRId8 ", %" PRId8 "}", \ - a_numel__, \ - b_numel__, \ - c_numel__, \ - a_dim__, \ - b_dim__, \ - c_dim__, \ - static_cast(a_type__), \ - static_cast(b_type__), \ - static_cast(c_type__)); \ - for (size_t dim__ = 0; dim__ < ET_MIN3(a_dim__, b_dim__, c_dim__); \ - ++dim__) { \ - size_t a_size__ = (a__).size(dim__); \ - size_t b_size__ = (b__).size(dim__); \ - size_t c_size__ = (c__).size(dim__); \ - ET_CHECK_MSG( \ - a_size__ == b_size__ && b_size__ == c_size__, \ - ET_TENSOR_CHECK_PREFIX__ " at size(%zu): {%zu, %zu, %zu}", \ - dim__, \ - a_size__, \ - b_size__, \ - c_size__); \ - } \ +#define ET_CHECK_SAME_SHAPE_AND_DTYPE3(a__, b__, c__) \ + ({ \ + const size_t a_numel__ = (a__).numel(); \ + const size_t b_numel__ = (b__).numel(); \ + const size_t c_numel__ = (c__).numel(); \ + const size_t a_dim__ = (a__).dim(); \ + const size_t b_dim__ = (b__).dim(); \ + const size_t c_dim__ = 
(c__).dim(); \ + const ::executorch::aten::ScalarType a_type__ = (a__).scalar_type(); \ + const ::executorch::aten::ScalarType b_type__ = (b__).scalar_type(); \ + const ::executorch::aten::ScalarType c_type__ = (c__).scalar_type(); \ + \ + ET_CHECK_MSG( \ + a_numel__ == b_numel__ && b_numel__ == c_numel__ && \ + ((a_numel__ == 1 && b_numel__ == 1 && c_numel__ == 1) || \ + (a_dim__ == b_dim__ && b_dim__ == c_dim__)) && \ + a_type__ == b_type__ && b_type__ == c_type__, \ + ET_TENSOR_CHECK_PREFIX__ \ + ": numel={%zu, %zu, %zu}, dim={%zu, %zu, %zu}, " \ + "dtype={%" PRId8 ", %" PRId8 ", %" PRId8 "}", \ + a_numel__, \ + b_numel__, \ + c_numel__, \ + a_dim__, \ + b_dim__, \ + c_dim__, \ + static_cast(a_type__), \ + static_cast(b_type__), \ + static_cast(c_type__)); \ + for (size_t dim__ = 0; dim__ < ET_MIN3(a_dim__, b_dim__, c_dim__); \ + ++dim__) { \ + size_t a_size__ = (a__).size(dim__); \ + size_t b_size__ = (b__).size(dim__); \ + size_t c_size__ = (c__).size(dim__); \ + ET_CHECK_MSG( \ + a_size__ == b_size__ && b_size__ == c_size__, \ + ET_TENSOR_CHECK_PREFIX__ " at size(%zu): {%zu, %zu, %zu}", \ + dim__, \ + a_size__, \ + b_size__, \ + c_size__); \ + } \ }) /** @@ -235,9 +235,10 @@ */ #define ET_CHECK_CONTIGUOUS(a__) \ ({ \ - const ::exec_aten::ArrayRef strides = \ - a__.strides(); \ - const ::exec_aten::ArrayRef sizes = a__.sizes(); \ + const ::executorch::aten::ArrayRef \ + strides = a__.strides(); \ + const ::executorch::aten::ArrayRef sizes = \ + a__.sizes(); \ ET_CHECK_MSG( \ strides[strides.size() - 1] == 1, \ "The stride of the last dimension shall be 1 for contiguous tensor, " \ @@ -268,10 +269,10 @@ "Two tensors shall have same number of strides, but not %zu and %zu.", \ a__.dim(), \ b__.dim()); \ - const ::exec_aten::ArrayRef a_strides = \ - a__.strides(); \ - const ::exec_aten::ArrayRef b_strides = \ - b__.strides(); \ + const ::executorch::aten::ArrayRef \ + a_strides = a__.strides(); \ + const ::executorch::aten::ArrayRef \ + b_strides = b__.strides(); \ for (size_t i = 0; i < a__.dim(); i++) { \ ET_CHECK_MSG( \ a_strides[i] == b_strides[i], \ @@ -298,12 +299,12 @@ a__.dim(), \ b__.dim(), \ c__.dim()); \ - const ::exec_aten::ArrayRef a_strides = \ - a__.strides(); \ - const ::exec_aten::ArrayRef b_strides = \ - b__.strides(); \ - const ::exec_aten::ArrayRef c_strides = \ - c__.strides(); \ + const ::executorch::aten::ArrayRef \ + a_strides = a__.strides(); \ + const ::executorch::aten::ArrayRef \ + b_strides = b__.strides(); \ + const ::executorch::aten::ArrayRef \ + c_strides = c__.strides(); \ for (size_t i = 0; i < a__.dim(); i++) { \ ET_CHECK_MSG( \ a_strides[i] == b_strides[i] && b_strides[i] == c_strides[i], \ @@ -434,7 +435,7 @@ inline bool dim_is_valid(int64_t dim, int64_t upper_bound) { * the zero dimensional tensors in some kernels, that treat them as 1D tensors * with a single element. */ -inline ssize_t nonzero_dim(const exec_aten::Tensor& tensor) { +inline ssize_t nonzero_dim(const executorch::aten::Tensor& tensor) { return tensor.dim() == 0 ? 1 : tensor.dim(); } @@ -444,13 +445,15 @@ inline ssize_t nonzero_dim(const exec_aten::Tensor& tensor) { * the zero dimensional tensors in some kernels, that treat them as 1D tensors * with a single element. */ -inline ssize_t nonempty_size(const exec_aten::Tensor& tensor, ssize_t dim) { +inline ssize_t nonempty_size( + const executorch::aten::Tensor& tensor, + ssize_t dim) { return tensor.dim() == 0 ? 
1 : tensor.size(dim); } inline bool tensor_can_cast_to( - exec_aten::Tensor a, - exec_aten::ScalarType dtype) { + executorch::aten::Tensor a, + executorch::aten::ScalarType dtype) { ET_LOG_MSG_AND_RETURN_IF_FALSE( torch::executor::canCast(a.scalar_type(), dtype), "Tensor of dtype %s cannot cast to dtype %s", @@ -460,16 +463,18 @@ inline bool tensor_can_cast_to( return true; } -inline bool tensor_is_bool_type(exec_aten::Tensor t) { +inline bool tensor_is_bool_type(executorch::aten::Tensor t) { ET_LOG_MSG_AND_RETURN_IF_FALSE( - t.scalar_type() == exec_aten::ScalarType::Bool, + t.scalar_type() == executorch::aten::ScalarType::Bool, "Expected to find bool type, but tensor has type %s", torch::executor::toString(t.scalar_type())); return true; } -inline bool tensor_is_type(exec_aten::Tensor t, exec_aten::ScalarType dtype) { +inline bool tensor_is_type( + executorch::aten::Tensor t, + executorch::aten::ScalarType dtype) { ET_LOG_MSG_AND_RETURN_IF_FALSE( t.scalar_type() == dtype, "Expected to find %s type, but tensor has type %s", @@ -480,7 +485,7 @@ inline bool tensor_is_type(exec_aten::Tensor t, exec_aten::ScalarType dtype) { } inline bool tensor_is_integral_type( - exec_aten::Tensor t, + executorch::aten::Tensor t, bool includeBool = false) { ET_LOG_MSG_AND_RETURN_IF_FALSE( torch::executor::isIntegralType(t.scalar_type(), includeBool), @@ -490,7 +495,7 @@ inline bool tensor_is_integral_type( return true; } -inline bool tensor_is_floating_type(exec_aten::Tensor t) { +inline bool tensor_is_floating_type(executorch::aten::Tensor t) { ET_LOG_MSG_AND_RETURN_IF_FALSE( torch::executor::isFloatingType(t.scalar_type()), "Expected to find a floating type, but tensor has type %s", @@ -499,7 +504,7 @@ inline bool tensor_is_floating_type(exec_aten::Tensor t) { return true; } -inline bool tensor_is_real_type(exec_aten::Tensor t) { +inline bool tensor_is_real_type(executorch::aten::Tensor t) { ET_LOG_MSG_AND_RETURN_IF_FALSE( torch::executor::isRealType(t.scalar_type()), "Expected to find a real type, but tensor has type %s", @@ -508,7 +513,7 @@ inline bool tensor_is_real_type(exec_aten::Tensor t) { return true; } -inline bool tensor_is_realh_type(exec_aten::Tensor t) { +inline bool tensor_is_realh_type(executorch::aten::Tensor t) { ET_LOG_MSG_AND_RETURN_IF_FALSE( torch::executor::isRealHType(t.scalar_type()), "Expected to find a real type, but tensor has type %s", @@ -517,7 +522,7 @@ inline bool tensor_is_realh_type(exec_aten::Tensor t) { return true; } -inline bool tensor_is_realhbf16_type(exec_aten::Tensor t) { +inline bool tensor_is_realhbf16_type(executorch::aten::Tensor t) { ET_LOG_MSG_AND_RETURN_IF_FALSE( executorch::runtime::isRealHBF16Type(t.scalar_type()), "Expected to find a real type, but tensor has type %s", @@ -526,7 +531,7 @@ inline bool tensor_is_realhbf16_type(exec_aten::Tensor t) { return true; } -inline bool tensor_is_realhb_type(exec_aten::Tensor t) { +inline bool tensor_is_realhb_type(executorch::aten::Tensor t) { ET_LOG_MSG_AND_RETURN_IF_FALSE( torch::executor::isRealHBType(t.scalar_type()), "Expected to find a real type, but tensor has type %s", @@ -535,7 +540,7 @@ inline bool tensor_is_realhb_type(exec_aten::Tensor t) { return true; } -inline bool tensor_is_realhbbf16_type(exec_aten::Tensor t) { +inline bool tensor_is_realhbbf16_type(executorch::aten::Tensor t) { ET_LOG_MSG_AND_RETURN_IF_FALSE( executorch::runtime::isRealHBBF16Type(t.scalar_type()), "Expected to find a real type, but tensor has type %s", @@ -544,7 +549,7 @@ inline bool tensor_is_realhbbf16_type(exec_aten::Tensor t) { 
return true; } -inline bool tensor_is_complex_type(exec_aten::Tensor t) { +inline bool tensor_is_complex_type(executorch::aten::Tensor t) { ET_LOG_MSG_AND_RETURN_IF_FALSE( torch::executor::isComplexType(t.scalar_type()), "Expected to find a complex type, but tensor has type %s", @@ -553,7 +558,7 @@ inline bool tensor_is_complex_type(exec_aten::Tensor t) { return true; } -inline bool tensor_is_bits_type(exec_aten::Tensor t) { +inline bool tensor_is_bits_type(executorch::aten::Tensor t) { ET_LOG_MSG_AND_RETURN_IF_FALSE( torch::executor::isBitsType(t.scalar_type()), "Expected to find a bits type, but tensor has type %s", @@ -562,7 +567,9 @@ inline bool tensor_is_bits_type(exec_aten::Tensor t) { return true; } -inline bool tensors_have_same_dtype(exec_aten::Tensor a, exec_aten::Tensor b) { +inline bool tensors_have_same_dtype( + executorch::aten::Tensor a, + executorch::aten::Tensor b) { ET_LOG_MSG_AND_RETURN_IF_FALSE( a.scalar_type() == b.scalar_type(), ET_TENSOR_CHECK_PREFIX__ ": dtype={%s, %s}", @@ -572,9 +579,9 @@ inline bool tensors_have_same_dtype(exec_aten::Tensor a, exec_aten::Tensor b) { } inline bool tensors_have_same_dtype( - exec_aten::Tensor a, - exec_aten::Tensor b, - exec_aten::Tensor c) { + executorch::aten::Tensor a, + executorch::aten::Tensor b, + executorch::aten::Tensor c) { ET_LOG_MSG_AND_RETURN_IF_FALSE( a.scalar_type() == b.scalar_type() && b.scalar_type() == c.scalar_type(), ET_TENSOR_CHECK_PREFIX__ ": dtype={%s, %s, %s}", @@ -584,7 +591,7 @@ inline bool tensors_have_same_dtype( return true; } -inline bool tensor_is_rank(exec_aten::Tensor t, size_t rank) { +inline bool tensor_is_rank(executorch::aten::Tensor t, size_t rank) { ET_LOG_MSG_AND_RETURN_IF_FALSE( t.dim() == rank, "Expected tensor.dim() to be %zu, but got %zu", @@ -595,7 +602,7 @@ inline bool tensor_is_rank(exec_aten::Tensor t, size_t rank) { } inline bool tensor_has_rank_greater_or_equal_to( - exec_aten::Tensor t, + executorch::aten::Tensor t, size_t rank) { ET_LOG_MSG_AND_RETURN_IF_FALSE( t.dim() >= rank, @@ -607,7 +614,7 @@ inline bool tensor_has_rank_greater_or_equal_to( } inline bool tensor_has_rank_smaller_or_equal_to( - exec_aten::Tensor t, + executorch::aten::Tensor t, size_t rank) { ET_LOG_MSG_AND_RETURN_IF_FALSE( t.dim() <= rank, @@ -618,7 +625,7 @@ inline bool tensor_has_rank_smaller_or_equal_to( return true; } -inline bool tensor_has_dim(exec_aten::Tensor t, int64_t d) { +inline bool tensor_has_dim(executorch::aten::Tensor t, int64_t d) { if (t.dim() == 0) { ET_LOG_MSG_AND_RETURN_IF_FALSE( d == 0 || d == -1, @@ -634,14 +641,15 @@ inline bool tensor_has_dim(exec_aten::Tensor t, int64_t d) { return true; } -inline bool tensor_has_non_empty_dim(exec_aten::Tensor t, int64_t d) { +inline bool tensor_has_non_empty_dim(executorch::aten::Tensor t, int64_t d) { const size_t udim = ET_NORMALIZE_IX(d, t.dim()); ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(t, d)); ET_LOG_AND_RETURN_IF_FALSE(t.size(udim) != 0); return true; } -inline bool tensor_dim_has_index(exec_aten::Tensor t, int64_t d, int64_t ix) { +inline bool +tensor_dim_has_index(executorch::aten::Tensor t, int64_t d, int64_t ix) { // Indexing ops don't support zero-dim tensors ET_CHECK(t.dim() != 0); if (d < 0) { @@ -661,9 +669,9 @@ inline bool tensor_dim_has_index(exec_aten::Tensor t, int64_t d, int64_t ix) { } inline bool tensors_have_same_size_at_dims( - exec_aten::Tensor a, + executorch::aten::Tensor a, size_t dim_a, - exec_aten::Tensor b, + executorch::aten::Tensor b, size_t dim_b) { ET_LOG_MSG_AND_RETURN_IF_FALSE( dim_a < a.dim(), @@ -687,7 +695,9 
@@ inline bool tensors_have_same_size_at_dims( return true; } -inline bool tensors_have_same_shape(exec_aten::Tensor a, exec_aten::Tensor b) { +inline bool tensors_have_same_shape( + executorch::aten::Tensor a, + executorch::aten::Tensor b) { if (a.numel() == 1 && b.numel() == 1) { // PyTorch operators treat all scalar tensors as the same shape even if // they have different dims. @@ -717,9 +727,9 @@ inline bool tensors_have_same_shape(exec_aten::Tensor a, exec_aten::Tensor b) { } inline bool tensors_have_same_shape( - exec_aten::Tensor a, - exec_aten::Tensor b, - exec_aten::Tensor c) { + executorch::aten::Tensor a, + executorch::aten::Tensor b, + executorch::aten::Tensor c) { if (a.numel() == 1 && b.numel() == 1 && c.numel() == 1) { // PyTorch operators treat all scalar tensors as the same shape even if // they have different dims. @@ -755,21 +765,21 @@ inline bool tensors_have_same_shape( } inline bool tensors_have_same_shape_and_dtype( - exec_aten::Tensor a, - exec_aten::Tensor b) { + executorch::aten::Tensor a, + executorch::aten::Tensor b) { return tensors_have_same_shape(a, b) && tensors_have_same_dtype(a, b); } inline bool tensors_have_same_shape_and_dtype( - exec_aten::Tensor a, - exec_aten::Tensor b, - exec_aten::Tensor c) { + executorch::aten::Tensor a, + executorch::aten::Tensor b, + executorch::aten::Tensor c) { return tensors_have_same_shape(a, b, c) && tensors_have_same_dtype(a, b, c); } inline bool tensor_has_expected_size( - exec_aten::Tensor a, - exec_aten::ArrayRef expected_sizes) { + executorch::aten::Tensor a, + executorch::aten::ArrayRef expected_sizes) { if (!(a.sizes() == expected_sizes)) { ET_LOG( Error, @@ -793,8 +803,8 @@ inline bool tensor_has_expected_size( } inline bool tensors_have_same_strides( - exec_aten::Tensor a, - exec_aten::Tensor b) { + executorch::aten::Tensor a, + executorch::aten::Tensor b) { if (a.strides() != b.strides()) { ET_LOG( Error, @@ -816,9 +826,9 @@ inline bool tensors_have_same_strides( } inline bool tensors_have_same_strides( - exec_aten::Tensor a, - exec_aten::Tensor b, - exec_aten::Tensor c) { + executorch::aten::Tensor a, + executorch::aten::Tensor b, + executorch::aten::Tensor c) { if (!(a.strides() == b.strides() && b.strides() == c.strides())) { ET_LOG( Error, @@ -841,7 +851,7 @@ inline bool tensors_have_same_strides( return true; } -inline bool tensor_is_contiguous(exec_aten::Tensor t) { +inline bool tensor_is_contiguous(executorch::aten::Tensor t) { const auto strides = t.strides(); const auto sizes = t.sizes(); // If tensor is 0-dim (i.e. a scalar tensor) it is contiguous @@ -867,7 +877,9 @@ inline bool tensor_is_contiguous(exec_aten::Tensor t) { return true; } -inline bool tensors_have_same_rank(exec_aten::Tensor a, exec_aten::Tensor b) { +inline bool tensors_have_same_rank( + executorch::aten::Tensor a, + executorch::aten::Tensor b) { ET_LOG_MSG_AND_RETURN_IF_FALSE( a.dim() == b.dim(), ET_TENSOR_CHECK_PREFIX__ ": rank={%zd, %zd}", @@ -876,7 +888,7 @@ inline bool tensors_have_same_rank(exec_aten::Tensor a, exec_aten::Tensor b) { return true; } -inline bool tensor_is_scalar(exec_aten::Tensor t) { +inline bool tensor_is_scalar(executorch::aten::Tensor t) { return t.dim() == 0 && t.numel() == 1; } @@ -891,7 +903,9 @@ inline bool tensor_is_scalar(exec_aten::Tensor t) { constexpr size_t kTensorDimensionLimit = 16; /// Returns the product of dim[0:dim), not including dim. 
-inline size_t getLeadingDims(const exec_aten::Tensor& tensor, int64_t dim) { +inline size_t getLeadingDims( + const executorch::aten::Tensor& tensor, + int64_t dim) { ET_CHECK_MSG( dim >= 0 && dim <= tensor.dim(), "Ending dimension %" PRId64 @@ -906,7 +920,9 @@ inline size_t getLeadingDims(const exec_aten::Tensor& tensor, int64_t dim) { } /// Returns the product of dim[dim+1:]. -inline size_t getTrailingDims(const exec_aten::Tensor& tensor, int64_t dim) { +inline size_t getTrailingDims( + const executorch::aten::Tensor& tensor, + int64_t dim) { ET_CHECK_MSG( dim >= -1 && dim < tensor.dim(), "Starting dimension %" PRId64 @@ -931,7 +947,7 @@ inline size_t getTrailingDims(const exec_aten::Tensor& tensor, int64_t dim) { * in the tensor. */ inline size_t coordinateToIndex( - const exec_aten::Tensor& tensor, + const executorch::aten::Tensor& tensor, const size_t* const coordinate) { size_t index = 0; for (int d = 0; d < tensor.dim(); ++d) { @@ -946,7 +962,7 @@ inline size_t coordinateToIndex( * repeated calls to coordinateToIndex. */ inline void memoizeTrailingDims( - const exec_aten::Tensor& tensor, + const executorch::aten::Tensor& tensor, size_t trailing_dims_memo[kTensorDimensionLimit]) { const auto tensorDim = tensor.dim(); size_t dims = 1; @@ -962,7 +978,7 @@ inline void memoizeTrailingDims( * memoizeTrailingDims. */ inline size_t coordinateToIndexWithTrailingDimsMemo( - const exec_aten::Tensor& tensor, + const executorch::aten::Tensor& tensor, const size_t* const coordinate, const size_t trailing_dims_memo[kTensorDimensionLimit]) { size_t index = 0; @@ -984,7 +1000,7 @@ inline size_t coordinateToIndexWithTrailingDimsMemo( * @returns void */ inline void indexToCoordinate( - const exec_aten::Tensor& tensor, + const executorch::aten::Tensor& tensor, size_t index, size_t* coordinate) { ET_CHECK(index < tensor.numel()); @@ -1011,12 +1027,12 @@ template < typename std::enable_if< std::is_integral::value && !std::is_same::value, bool>::type = true> -bool extract_scalar_tensor(exec_aten::Tensor tensor, INT_T* out_val) { +bool extract_scalar_tensor(executorch::aten::Tensor tensor, INT_T* out_val) { if (tensor.numel() != 1) { return false; } #define CASE_INT_DTYPE(TENSOR_CTYPE, TENSOR_DTYPE) \ - case exec_aten::ScalarType::TENSOR_DTYPE: { \ + case executorch::aten::ScalarType::TENSOR_DTYPE: { \ const TENSOR_CTYPE val = tensor.const_data_ptr()[0]; \ if (val < std::numeric_limits::lowest() || \ val > std::numeric_limits::max()) { \ @@ -1048,12 +1064,12 @@ template < typename FLOAT_T, typename std::enable_if::value, bool>:: type = true> -bool extract_scalar_tensor(exec_aten::Tensor tensor, FLOAT_T* out_val) { +bool extract_scalar_tensor(executorch::aten::Tensor tensor, FLOAT_T* out_val) { if (tensor.numel() != 1) { return false; } #define CASE_REAL_DTYPE(TENSOR_CTYPE, TENSOR_DTYPE) \ - case exec_aten::ScalarType::TENSOR_DTYPE: { \ + case executorch::aten::ScalarType::TENSOR_DTYPE: { \ /* ET_FORALL_REAL_TYPES guarantees TENSOR_CTYPE is a real type. 
*/ \ double val = \ static_cast(tensor.const_data_ptr()[0]); \ @@ -1086,8 +1102,8 @@ template < typename BOOL_T, typename std::enable_if::value, bool>::type = true> -bool extract_scalar_tensor(exec_aten::Tensor tensor, BOOL_T* out_val) { - if (tensor.scalar_type() != exec_aten::ScalarType::Bool) { +bool extract_scalar_tensor(executorch::aten::Tensor tensor, BOOL_T* out_val) { + if (tensor.scalar_type() != executorch::aten::ScalarType::Bool) { return false; } if (tensor.numel() != 1) { @@ -1107,33 +1123,35 @@ namespace internal { * Share t_src's data_ptr with t_dst. */ ET_NODISCARD Error share_tensor_data( - const exec_aten::Tensor& t_dst, - const exec_aten::Tensor& t_src); + const executorch::aten::Tensor& t_dst, + const executorch::aten::Tensor& t_src); /** * Copy t_src's data_ptr to t_dst. */ ET_NODISCARD Error copy_tensor_data( - const exec_aten::Tensor& t_dst, - const exec_aten::Tensor& t_src); + const executorch::aten::Tensor& t_dst, + const executorch::aten::Tensor& t_src); /** * Set the data_ptr of t to buffer. */ -ET_NODISCARD Error -set_tensor_data(const exec_aten::Tensor& t, void* buffer, size_t buffer_size); +ET_NODISCARD Error set_tensor_data( + const executorch::aten::Tensor& t, + void* buffer, + size_t buffer_size); /** * Reset tensor's data_ptr, clear all the storage for at::Tensor. */ -void reset_data_ptr(const exec_aten::Tensor& tensor); +void reset_data_ptr(const executorch::aten::Tensor& tensor); /** * Resize tensor impl */ ET_NODISCARD Error resize_tensor_impl( - exec_aten::TensorImpl* impl, - exec_aten::ArrayRef new_sizes); + executorch::aten::TensorImpl* impl, + executorch::aten::ArrayRef new_sizes); } // namespace internal @@ -1147,8 +1165,8 @@ ET_NODISCARD Error resize_tensor_impl( * passed in through runtimeContext. */ ET_NODISCARD inline Error resize_tensor( - exec_aten::Tensor t, - exec_aten::ArrayRef new_sizes) { + executorch::aten::Tensor t, + executorch::aten::ArrayRef new_sizes) { return internal::resize_tensor_impl(t.unsafeGetTensorImpl(), new_sizes); } @@ -1163,16 +1181,19 @@ ET_NODISCARD inline Error resize_tensor( */ template < typename T, - typename std:: - enable_if::value, int>::type = 0> + typename std::enable_if< + !std::is_same::value, + int>::type = 0> ET_NODISCARD inline Error resize_tensor( - exec_aten::Tensor t, - exec_aten::ArrayRef new_sizes) { + executorch::aten::Tensor t, + executorch::aten::ArrayRef new_sizes) { // Need to cast the input array to an array of Tensor::SizesType - std::array new_sizes_casted{}; + std::array + new_sizes_casted{}; size_t new_sizes_ndim = new_sizes.size(); for (size_t i = 0; i < new_sizes_ndim; ++i) { - new_sizes_casted[i] = static_cast(new_sizes[i]); + new_sizes_casted[i] = + static_cast(new_sizes[i]); } return internal::resize_tensor_impl( @@ -1181,8 +1202,8 @@ ET_NODISCARD inline Error resize_tensor( /// DEPRECATED: Use `resize_tensor()` instead, which can fail non-fatally. ET_DEPRECATED inline void resize( - exec_aten::Tensor t, - exec_aten::ArrayRef new_sizes) { + executorch::aten::Tensor t, + executorch::aten::ArrayRef new_sizes) { Error err = resize_tensor(t, new_sizes); ET_CHECK_MSG( err == Error::Ok, "Could not resize Tensor; see logs for details"); @@ -1195,22 +1216,22 @@ ET_DEPRECATED inline void resize( * @param out_dim_order_size Size of the DimOrderType array. 
*/ ET_NODISCARD Error get_dim_order( - const exec_aten::Tensor& tensor, - exec_aten::DimOrderType* out_dim_order, + const executorch::aten::Tensor& tensor, + executorch::aten::DimOrderType* out_dim_order, size_t out_dim_order_size); /** * Checks whether a tensor has a valid dim order. If the dim order could not * be determined, then this function returns false by default. */ -bool tensor_has_valid_dim_order(exec_aten::Tensor t); +bool tensor_has_valid_dim_order(executorch::aten::Tensor t); /** * Checks whether a tensor has either the default of channels last dim order. * If the dim order could not be determined, then this function returns false * by default. */ -bool tensor_is_default_or_channels_last_dim_order(exec_aten::Tensor t); +bool tensor_is_default_or_channels_last_dim_order(executorch::aten::Tensor t); /** * Checks whether a tensor has the default dimension order. @@ -1219,7 +1240,7 @@ bool tensor_is_default_or_channels_last_dim_order(exec_aten::Tensor t); * @param t The tensor to check the dimension order of. * @return True if the tensor has the default dimension order, false otherwise. */ -bool tensor_is_default_dim_order(exec_aten::Tensor t); +bool tensor_is_default_dim_order(executorch::aten::Tensor t); /** * Checks whether a tensor has the channels last dimension order. @@ -1229,7 +1250,7 @@ bool tensor_is_default_dim_order(exec_aten::Tensor t); * @return True if the tensor has the channels last dimension order, false * otherwise. */ -bool tensor_is_channels_last_dim_order(exec_aten::Tensor t); +bool tensor_is_channels_last_dim_order(executorch::aten::Tensor t); /** * Asserts that four tensors have the same dim_order @@ -1239,7 +1260,7 @@ bool tensor_is_channels_last_dim_order(exec_aten::Tensor t); * */ bool tensors_have_same_dim_order( - const exec_aten::ArrayRef tensor_list); + const executorch::aten::ArrayRef tensor_list); /** * Asserts that two tensors have the same dim_order @@ -1249,9 +1270,9 @@ bool tensors_have_same_dim_order( */ inline bool tensors_have_same_dim_order( - const exec_aten::Tensor& a, - const exec_aten::Tensor& b) { - exec_aten::Tensor tensor_list[2] = {a, b}; + const executorch::aten::Tensor& a, + const executorch::aten::Tensor& b) { + executorch::aten::Tensor tensor_list[2] = {a, b}; return tensors_have_same_dim_order(tensor_list); } @@ -1264,10 +1285,10 @@ inline bool tensors_have_same_dim_order( */ inline bool tensors_have_same_dim_order( - const exec_aten::Tensor& a, - const exec_aten::Tensor& b, - const exec_aten::Tensor& c) { - exec_aten::Tensor tensor_list[3] = {a, b, c}; + const executorch::aten::Tensor& a, + const executorch::aten::Tensor& b, + const executorch::aten::Tensor& c) { + executorch::aten::Tensor tensor_list[3] = {a, b, c}; return tensors_have_same_dim_order(tensor_list); } @@ -1280,11 +1301,11 @@ inline bool tensors_have_same_dim_order( */ inline bool tensors_have_same_dim_order( - const exec_aten::Tensor& a, - const exec_aten::Tensor& b, - const exec_aten::Tensor& c, - const exec_aten::Tensor& d) { - exec_aten::Tensor tensor_list[4] = {a, b, c, d}; + const executorch::aten::Tensor& a, + const executorch::aten::Tensor& b, + const executorch::aten::Tensor& c, + const executorch::aten::Tensor& d) { + executorch::aten::Tensor tensor_list[4] = {a, b, c, d}; return tensors_have_same_dim_order(tensor_list); } @@ -1297,8 +1318,8 @@ inline bool tensors_have_same_dim_order( * @param ndim Number of dimensions in the tensor. 
*/ inline size_t calculate_linear_index( - const exec_aten::SizesType* coordinate, - const exec_aten::StridesType* strides, + const executorch::aten::SizesType* coordinate, + const executorch::aten::StridesType* strides, const size_t ndim) { size_t index = 0; for (size_t i = 0; i < ndim; i++) { diff --git a/runtime/executor/method.h b/runtime/executor/method.h index 66e3c96d29..8b3330fb5a 100644 --- a/runtime/executor/method.h +++ b/runtime/executor/method.h @@ -115,7 +115,7 @@ class Method final { * @returns Error::Ok on success, non-Ok on failure. */ ET_NODISCARD Error - set_inputs(const exec_aten::ArrayRef& input_evalues); + set_inputs(const executorch::aten::ArrayRef& input_evalues); /** * Sets the data buffer of the specified method output to the provided value. diff --git a/runtime/executor/method_meta.h b/runtime/executor/method_meta.h index 7817583fc3..569b93c0f9 100644 --- a/runtime/executor/method_meta.h +++ b/runtime/executor/method_meta.h @@ -50,7 +50,7 @@ class TensorInfo final { /** * Returns the scalar type of the input/output. */ - exec_aten::ScalarType scalar_type() const; + executorch::aten::ScalarType scalar_type() const; /** * Returns whether the tensor's memory was planned during export. @@ -69,7 +69,7 @@ class TensorInfo final { TensorInfo( Span sizes, Span dim_order, - exec_aten::ScalarType scalar_type, + executorch::aten::ScalarType scalar_type, const bool is_memory_planned); /** @@ -89,7 +89,7 @@ class TensorInfo final { Span dim_order_; /// The scalar type of the tensor. - exec_aten::ScalarType scalar_type_; + executorch::aten::ScalarType scalar_type_; /// Whether the tensor's memory was planned during export. bool is_memory_planned_; diff --git a/runtime/executor/tensor_parser.h b/runtime/executor/tensor_parser.h index 10b6795511..1d860bfc30 100644 --- a/runtime/executor/tensor_parser.h +++ b/runtime/executor/tensor_parser.h @@ -18,12 +18,12 @@ namespace executorch { namespace runtime { namespace deserialization { -ET_NODISCARD Result parseTensor( +ET_NODISCARD Result parseTensor( const Program* program, MemoryManager* memory_manager, const executorch_flatbuffer::Tensor* s_tensor); -ET_NODISCARD Result> parseTensorList( +ET_NODISCARD Result> parseTensorList( const flatbuffers::Vector* tensor_indices, EValue* values_, MemoryManager* memory_manager); @@ -32,7 +32,7 @@ ET_NODISCARD Result> parseTensorList( // list of optionals: list of optional Tensor, list of optional float etc, so we // just use a template to avoid boilerplate. template -ET_NODISCARD Result>> +ET_NODISCARD Result>> parseListOptionalType( const flatbuffers::Vector* value_indices, EValue* values_, @@ -42,7 +42,7 @@ parseListOptionalType( auto* optional_tensor_list = ET_ALLOCATE_LIST_OR_RETURN_ERROR( memory_manager->method_allocator(), - exec_aten::optional, + executorch::aten::optional, value_indices->size()); size_t output_idx = 0; @@ -57,19 +57,19 @@ parseListOptionalType( // copy assignment is not defined if its non trivial. if (index == -1) { new (&optional_tensor_list[output_idx]) - exec_aten::optional(exec_aten::nullopt); + executorch::aten::optional(executorch::aten::nullopt); // no value to point to. BoxedEvalueList for optional tensor will convert // this to nullopt. // TODO(T161156879): do something less hacky here. 
evalp_list[output_idx] = nullptr; } else { new (&optional_tensor_list[output_idx]) - exec_aten::optional(values_[index].toOptional()); + executorch::aten::optional(values_[index].toOptional()); evalp_list[output_idx] = &values_[static_cast(index)]; } output_idx++; } - return BoxedEvalueList>( + return BoxedEvalueList>( evalp_list, optional_tensor_list, value_indices->size()); } diff --git a/runtime/kernel/operator_registry.h b/runtime/kernel/operator_registry.h index 6d6c77bd76..e4c5d6706e 100644 --- a/runtime/kernel/operator_registry.h +++ b/runtime/kernel/operator_registry.h @@ -50,11 +50,13 @@ using OpFunction = void (*)(KernelRuntimeContext&, EValue**); * Used by the Executor to hold the tensor metadata info and retrieve kernel. */ struct TensorMeta { - exec_aten::ScalarType dtype_; - Span dim_order_; + executorch::aten::ScalarType dtype_; + Span dim_order_; TensorMeta() = default; - TensorMeta(exec_aten::ScalarType dtype, Span order) + TensorMeta( + executorch::aten::ScalarType dtype, + Span order) : dtype_(dtype), dim_order_(order) {} bool operator==(const TensorMeta& other) const { diff --git a/runtime/kernel/test/test_util.h b/runtime/kernel/test/test_util.h index 0c6c651af3..082635bd0e 100644 --- a/runtime/kernel/test/test_util.h +++ b/runtime/kernel/test/test_util.h @@ -19,13 +19,14 @@ namespace runtime { namespace testing { inline void make_kernel_key( - std::vector< - std::pair>> - tensors, + std::vector>> tensors, char* buf) { std::vector meta; for (auto& t : tensors) { - Span dim_order(t.second.data(), t.second.size()); + Span dim_order( + t.second.data(), t.second.size()); meta.emplace_back(t.first, dim_order); } Span metadata(meta.data(), meta.size());
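Reviewer note: a minimal call-site sketch of what this rename means for downstream code, assuming the standard ExecuTorch headers shown below; the namespace `example` and the helper names `unwrap_tensor_arg` / `is_float_tensor` are illustrative only and not part of this patch.

// Illustrative sketch only -- not part of this diff. Code that previously
// spelled exec_aten::Tensor / exec_aten::ScalarType reads like this after the
// rename to the executorch::aten:: namespace.
#include <executorch/runtime/core/evalue.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>

namespace example { // hypothetical namespace for this sketch

using executorch::aten::ScalarType;
using executorch::aten::Tensor;
using executorch::runtime::EValue;

// Unwraps a boxed tensor argument the way a kernel would; toTensor() itself
// checks the tag and aborts with a message if the EValue is not a Tensor.
inline const Tensor& unwrap_tensor_arg(const EValue& value) {
  return value.toTensor();
}

// Dtype check written with the renamed ScalarType spelling.
inline bool is_float_tensor(const Tensor& t) {
  return t.scalar_type() == ScalarType::Float;
}

} // namespace example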