diff --git a/examples/models/llama2/custom_ops/op_sdpa.cpp b/examples/models/llama2/custom_ops/op_sdpa.cpp
index 6638852f7d2..1bb92d7da80 100644
--- a/examples/models/llama2/custom_ops/op_sdpa.cpp
+++ b/examples/models/llama2/custom_ops/op_sdpa.cpp
@@ -523,22 +523,22 @@ bool validate_flash_attention_args(
       "Attention mask must be a 2D tensor");
 
   ET_LOG_MSG_AND_RETURN_IF_FALSE(
-      is_default_dim_order(query.dim_order().data(), query.dim()),
-      "key cache must be in default dim order");
+      is_contiguous_dim_order(query.dim_order().data(), query.dim()),
+      "query must be in contiguous dim order");
 
   ET_LOG_MSG_AND_RETURN_IF_FALSE(
-      is_default_dim_order(key.dim_order().data(), key.dim()),
-      "value cache must be in default dim order");
+      is_contiguous_dim_order(key.dim_order().data(), key.dim()),
+      "key must be in contiguous dim order");
 
   ET_LOG_MSG_AND_RETURN_IF_FALSE(
-      is_default_dim_order(value.dim_order().data(), value.dim()),
-      "value cache must be in default dim order");
+      is_contiguous_dim_order(value.dim_order().data(), value.dim()),
+      "value must be in contiguous dim order");
 
   if (attn_mask.has_value()) {
     ET_LOG_MSG_AND_RETURN_IF_FALSE(
-        is_default_dim_order(
+        is_contiguous_dim_order(
             attn_mask.value().dim_order().data(), attn_mask.value().dim()),
-        "value cache must be in default dim order");
+        "attn_mask must be in contiguous dim order");
   }
 
   return true;
@@ -590,14 +590,14 @@ bool validate_cache_params(
       seq_length,
       v_cache.size(2));
 
-  // Make sure they are in default dim order
+  // Make sure they are in contiguous dim order
   ET_LOG_MSG_AND_RETURN_IF_FALSE(
-      is_default_dim_order(k_cache.dim_order().data(), k_cache.dim()),
-      "key cache must be in default dim order");
+      is_contiguous_dim_order(k_cache.dim_order().data(), k_cache.dim()),
+      "key cache must be in contiguous dim order");
 
   ET_LOG_MSG_AND_RETURN_IF_FALSE(
-      is_default_dim_order(v_cache.dim_order().data(), v_cache.dim()),
-      "value cache must be in default dim order");
+      is_contiguous_dim_order(v_cache.dim_order().data(), v_cache.dim()),
+      "value cache must be in contiguous dim order");
 
   return true;
 }
@@ -615,9 +615,9 @@ void update_cache(
       "projected_value must have batch size of 1");
   ET_CHECK_MSG(cache.size(1) == 1, "cache must have batch size of 1");
   ET_CHECK_MSG(
-      is_default_dim_order(
+      is_contiguous_dim_order(
           projected_value.dim_order().data(), projected_value.dim()),
-      "projected value must be in default dim order");
+      "projected value must be in contiguous dim order");
 
   const void* projected_value_data = projected_value.const_data_ptr();
   void* cache_data = cache.mutable_data_ptr();
diff --git a/kernels/portable/cpu/op_native_batch_norm.cpp b/kernels/portable/cpu/op_native_batch_norm.cpp
index 26eb5d90a7b..2e613c0a637 100644
--- a/kernels/portable/cpu/op_native_batch_norm.cpp
+++ b/kernels/portable/cpu/op_native_batch_norm.cpp
@@ -66,10 +66,10 @@ std::tuple<Tensor&, Tensor&, Tensor&> _native_batch_norm_legit_no_training_out(
       InvalidArgument,
       ret_val);
 
-  // For now, only support the default dim order
+  // For now, only support the contiguous dim order
   ET_KERNEL_CHECK(
       ctx,
-      is_default_dim_order(in.dim_order().data(), in.dim_order().size()),
+      is_contiguous_dim_order(in.dim_order().data(), in.dim_order().size()),
       InvalidArgument,
       ret_val);
 
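Context for the two kernels above (illustrative commentary, not part of the diff): update_cache in op_sdpa.cpp and the batch-norm kernel walk tensor memory through raw data pointers (const_data_ptr / mutable_data_ptr), and flat pointer arithmetic only matches logical indexing when elements are packed in the identity dim order {0, 1, 2, ...}. That is the property the renamed checks guard. A minimal sketch of the idea, using simplified flat buffers and a hypothetical update_cache_sketch helper rather than the real op:

// Hypothetical, simplified stand-in for update_cache: copy one decode step's
// projected values (shape [1, dim]) into row `seq_pos` of a flat
// [seq_len, dim] cache. A single memcpy is only correct because both
// buffers are densely packed in the identity (contiguous) dim order.
#include <cstddef>
#include <cstring>
#include <vector>

void update_cache_sketch(
    const std::vector<float>& projected_value, // shape [1, dim], contiguous
    std::vector<float>& cache, // shape [seq_len, dim], contiguous
    std::size_t dim,
    std::size_t seq_pos) {
  std::memcpy(
      cache.data() + seq_pos * dim, // start of row seq_pos
      projected_value.data(),
      dim * sizeof(float));
}

int main() {
  std::vector<float> cache(3 * 4, 0.f); // seq_len = 3, dim = 4
  const std::vector<float> step = {1.f, 2.f, 3.f, 4.f};
  update_cache_sketch(step, cache, 4, 1); // fill row 1
  return cache[4] == 1.f ? 0 : 1; // row 1 starts at flat index 4
}

If the cache were, say, channels-last, the same bytes would land at the wrong logical positions, which is why the checks reject every non-identity order up front.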
diff --git a/runtime/core/exec_aten/testing_util/tensor_factory.h b/runtime/core/exec_aten/testing_util/tensor_factory.h
index 7ec4d5dc735..993bd8f6bdd 100644
--- a/runtime/core/exec_aten/testing_util/tensor_factory.h
+++ b/runtime/core/exec_aten/testing_util/tensor_factory.h
@@ -292,7 +292,7 @@ class TensorFactory {
    * size of this vector must be equal to the product of the elements of
    * `sizes`.
    * @param[in] dim_order The dim order describing how tensor memory is laid
-   * out. If empty or not specificed, the function will use a default dim order
+   * out. If empty or not specified, the function will use a contiguous dim order
    * of {0, 1, 2, 3, ...}
    *
    * @return A new Tensor with the specified shape and data.
@@ -706,7 +706,7 @@ class TensorFactory {
    * size of this vector must be equal to the product of the elements of
    * `sizes`.
    * @param[in] dim_order The dim order describing how tensor memory is laid
-   * out. If empty or not specificed, the function will use a default dim order
+   * out. If empty or not specified, the function will use a contiguous dim order
    * of {0, 1, 2, 3, ...}
    *
    * @return A new Tensor with the specified shape and data.
diff --git a/runtime/core/exec_aten/util/dim_order_util.h b/runtime/core/exec_aten/util/dim_order_util.h
index 31175d1e6cc..33aa4f86a86 100644
--- a/runtime/core/exec_aten/util/dim_order_util.h
+++ b/runtime/core/exec_aten/util/dim_order_util.h
@@ -29,14 +29,14 @@ bool validate_dim_order(const DimOrderType* dim_order, const size_t dims) {
 } // namespace
 
 /**
- * Check if a given dim_order array is equivalent to the default dim order of
+ * Check if a given dim_order array is equivalent to the contiguous dim order of
  * {0, 1, 2, 3, ...}
  *
  * @param[in] dim_order pointer to dim_order array
 * @param[in] dims length of the dim_order array
 */
 template <typename DimOrderType>
-inline bool is_default_dim_order(
+inline bool is_contiguous_dim_order(
     const DimOrderType* dim_order,
     const size_t dims) {
   for (int i = 0; i < dims; ++i) {
diff --git a/runtime/core/exec_aten/util/tensor_util.h b/runtime/core/exec_aten/util/tensor_util.h
index c5c663e28c5..196e7d9107c 100644
--- a/runtime/core/exec_aten/util/tensor_util.h
+++ b/runtime/core/exec_aten/util/tensor_util.h
@@ -315,7 +315,7 @@
 #define ET_CHECK_DEFAULT_OR_CHANNELSLAST_DIMORDER(t__)              \
   ({                                                                \
     ET_CHECK_MSG(                                                   \
-        is_default_dim_order(                                       \
+        is_contiguous_dim_order(                                    \
             t__.dim_order().data(), t__.dim_order().size()) ||      \
         is_channels_last_dim_order(                                 \
             t__.dim_order().data(), t__.dim_order().size()),        \
diff --git a/runtime/core/exec_aten/util/tensor_util_aten.cpp b/runtime/core/exec_aten/util/tensor_util_aten.cpp
index f08189cb8b3..d3d5417f963 100644
--- a/runtime/core/exec_aten/util/tensor_util_aten.cpp
+++ b/runtime/core/exec_aten/util/tensor_util_aten.cpp
@@ -59,7 +59,7 @@ inline bool tensor_is_default_or_channels_last_dim_order(at::Tensor t) {
       get_dim_order(t, dim_order, t.dim()) == Error::Ok,
       "Failed to retrieve dim order from tensor!");
 
-  bool ret_val = is_default_dim_order(dim_order, t.dim()) ||
+  bool ret_val = is_contiguous_dim_order(dim_order, t.dim()) ||
       is_channels_last_dim_order(dim_order, t.dim());
 
   if (!ret_val) {
diff --git a/runtime/core/exec_aten/util/tensor_util_portable.cpp b/runtime/core/exec_aten/util/tensor_util_portable.cpp
index 8795833c374..ad7c93f0a39 100644
--- a/runtime/core/exec_aten/util/tensor_util_portable.cpp
+++ b/runtime/core/exec_aten/util/tensor_util_portable.cpp
@@ -55,7 +55,7 @@ bool tensor_has_valid_dim_order(torch::executor::Tensor t) {
 
 bool tensor_is_default_or_channels_last_dim_order(torch::executor::Tensor t) {
   bool ret_val =
-      is_default_dim_order(t.dim_order().data(), t.dim_order().size()) ||
+      is_contiguous_dim_order(t.dim_order().data(), t.dim_order().size()) ||
       is_channels_last_dim_order(t.dim_order().data(), t.dim_order().size());
 
   if (!ret_val) {
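For readers new to dim orders (illustrative commentary, not part of the diff): a dim order lists a tensor's dimensions from outermost to innermost, so the identity order {0, 1, 2, 3, ...} is exactly the dense layout PyTorch calls contiguous. That equivalence is what motivates renaming is_default_dim_order to is_contiguous_dim_order alongside is_channels_last_dim_order. A sketch of how a dim order determines strides, assuming nothing from the runtime (strides_from_dim_order is an illustrative helper, not an ExecuTorch API):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative helper (not an ExecuTorch API): compute strides for a tensor
// of the given sizes laid out according to dim_order, which lists dimensions
// outermost-first. The innermost dimension gets stride 1.
std::vector<std::size_t> strides_from_dim_order(
    const std::vector<std::size_t>& sizes,
    const std::vector<std::uint8_t>& dim_order) {
  std::vector<std::size_t> strides(sizes.size());
  std::size_t running = 1;
  // Walk the dim order backwards, innermost to outermost.
  for (std::size_t i = dim_order.size(); i > 0; --i) {
    const std::uint8_t d = dim_order[i - 1];
    strides[d] = running;
    running *= sizes[d];
  }
  return strides;
}

int main() {
  const std::vector<std::size_t> sizes = {2, 3, 4, 5}; // N, C, H, W
  // Identity (contiguous) order -> standard NCHW strides {60, 20, 5, 1}.
  assert((strides_from_dim_order(sizes, {0, 1, 2, 3}) ==
          std::vector<std::size_t>{60, 20, 5, 1}));
  // Channels-last order {0, 2, 3, 1} -> NHWC strides {60, 1, 15, 3}.
  assert((strides_from_dim_order(sizes, {0, 2, 3, 1}) ==
          std::vector<std::size_t>{60, 1, 15, 3}));
  return 0;
}

These two layouts are exactly the branches ET_CHECK_DEFAULT_OR_CHANNELSLAST_DIMORDER accepts after the rename.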
diff --git a/runtime/core/exec_aten/util/test/dim_order_util_test.cpp b/runtime/core/exec_aten/util/test/dim_order_util_test.cpp
index f1e9309710a..28e768be654 100644
--- a/runtime/core/exec_aten/util/test/dim_order_util_test.cpp
+++ b/runtime/core/exec_aten/util/test/dim_order_util_test.cpp
@@ -236,7 +236,7 @@ TEST(TensorUtilTest, IsDefaultDimOrderTest) {
     std::vector<exec_aten::DimOrderType> dim_order(i);
     std::iota(dim_order.begin(), dim_order.end(), 0);
 
-    EXPECT_TRUE(torch::executor::is_default_dim_order(
+    EXPECT_TRUE(torch::executor::is_contiguous_dim_order(
         dim_order.data(), dim_order.size()));
 
     // As a bonus, check that is_channels_last returns false
@@ -252,7 +252,7 @@ TEST(TensorUtilTest, IsDefaultDimOrderFailCasesTest) {
     std::iota(dim_order.begin(), dim_order.end(), 0);
     std::swap(dim_order[0], dim_order[1]);
 
-    EXPECT_FALSE(torch::executor::is_default_dim_order(
+    EXPECT_FALSE(torch::executor::is_contiguous_dim_order(
         dim_order.data(), dim_order.size()));
   }
 
@@ -263,7 +263,7 @@ TEST(TensorUtilTest, IsDefaultDimOrderFailCasesTest) {
       dim_order[d] = (d + 1) % i;
     }
 
-    EXPECT_FALSE(torch::executor::is_default_dim_order(
+    EXPECT_FALSE(torch::executor::is_contiguous_dim_order(
         dim_order.data(), dim_order.size()));
   }
 }
@@ -276,8 +276,8 @@ TEST(TensorUtilTest, IsChannelsLastDimOrderTest) {
   EXPECT_TRUE(torch::executor::is_channels_last_dim_order(dim_order_5d, 5));
 
   // As a bonus, check that is_default returns false
-  EXPECT_FALSE(torch::executor::is_default_dim_order(dim_order_4d, 4));
-  EXPECT_FALSE(torch::executor::is_default_dim_order(dim_order_5d, 5));
+  EXPECT_FALSE(torch::executor::is_contiguous_dim_order(dim_order_4d, 4));
+  EXPECT_FALSE(torch::executor::is_contiguous_dim_order(dim_order_5d, 5));
 }
 
 TEST(TensorUtilTest, IsChannelsLastDimOrderFailCasesTest) {
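The tests keep their old IsDefault* names but now exercise the renamed helper. As a compile-and-run companion to them, here is a standalone mirror of the behaviors they pin down, assuming only the C++ standard library (is_contiguous_dim_order_sketch is a local stand-in, not the torch::executor function):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <numeric>
#include <utility>
#include <vector>

template <typename DimOrderType>
bool is_contiguous_dim_order_sketch(
    const DimOrderType* dim_order, std::size_t dims) {
  // Contiguous means the identity permutation {0, 1, ..., dims - 1}.
  for (std::size_t i = 0; i < dims; ++i) {
    if (dim_order[i] != static_cast<DimOrderType>(i)) {
      return false;
    }
  }
  return true;
}

int main() {
  // Identity orders built with std::iota pass, as in IsDefaultDimOrderTest.
  std::vector<std::uint8_t> order(4);
  std::iota(order.begin(), order.end(), 0);
  assert(is_contiguous_dim_order_sketch(order.data(), order.size()));

  // Swapping the two leading dims breaks it, as in the first fail case.
  std::swap(order[0], order[1]);
  assert(!is_contiguous_dim_order_sketch(order.data(), order.size()));

  // Channels-last orders are valid dim orders but not contiguous, so the
  // contiguous and channels-last predicates are mutually exclusive here.
  const std::uint8_t channels_last_4d[] = {0, 2, 3, 1};
  const std::uint8_t channels_last_5d[] = {0, 2, 3, 4, 1};
  assert(!is_contiguous_dim_order_sketch(channels_last_4d, 4));
  assert(!is_contiguous_dim_order_sketch(channels_last_5d, 5));
  return 0;
}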