rename default dim order as contiguous dim order (#2157)
Summary:

bypass-github-export-checks

Differential Revision: D54285070
Gasoonjia authored and facebook-github-bot committed Mar 5, 2024
1 parent bee1d04 commit 775d8b0
Showing 8 changed files with 29 additions and 29 deletions.
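Note: the rename is mechanical. Every call to is_default_dim_order becomes is_contiguous_dim_order, presumably to match PyTorch's "contiguous" memory-format terminology. A dim order lists tensor dimensions from outermost to innermost in memory, so the contiguous order {0, 1, 2, 3, ...} is ordinary dense row-major layout. The standalone sketch below (not part of this commit; names and values are illustrative only) derives strides from a dim order to show how the contiguous order {0, 1, 2, 3} differs from the channels-last order {0, 2, 3, 1}:

// Standalone illustration (not from this commit): derive strides from a dim
// order to show why {0, 1, 2, 3} is the "contiguous" (row-major) order.
#include <cstddef>
#include <cstdio>
#include <vector>

// dim_order lists dimensions from outermost to innermost in memory.
std::vector<size_t> strides_from_dim_order(
    const std::vector<size_t>& sizes,
    const std::vector<size_t>& dim_order) {
  std::vector<size_t> strides(sizes.size());
  size_t running = 1;
  for (size_t i = dim_order.size(); i > 0; --i) {
    strides[dim_order[i - 1]] = running;
    running *= sizes[dim_order[i - 1]];
  }
  return strides;
}

int main() {
  const std::vector<size_t> sizes = {2, 3, 4, 5}; // N, C, H, W
  // Contiguous order {0, 1, 2, 3}   -> strides {60, 20, 5, 1}
  // Channels-last order {0, 2, 3, 1} -> strides {60, 1, 15, 3}
  for (const auto& order :
       {std::vector<size_t>{0, 1, 2, 3}, std::vector<size_t>{0, 2, 3, 1}}) {
    for (size_t s : strides_from_dim_order(sizes, order)) {
      std::printf("%zu ", s);
    }
    std::printf("\n");
  }
  return 0;
}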
examples/models/llama2/custom_ops/op_sdpa.cpp (30 changes: 15 additions & 15 deletions)
@@ -526,22 +526,22 @@ bool validate_flash_attention_args(
       "Attention mask must be a 2D tensor");
 
   ET_LOG_MSG_AND_RETURN_IF_FALSE(
-      is_default_dim_order(query.dim_order().data(), query.dim()),
-      "key cache must be in default dim order");
+      is_contiguous_dim_order(query.dim_order().data(), query.dim()),
+      "key cache must be in contiguous dim order");
 
   ET_LOG_MSG_AND_RETURN_IF_FALSE(
-      is_default_dim_order(key.dim_order().data(), key.dim()),
-      "value cache must be in default dim order");
+      is_contiguous_dim_order(key.dim_order().data(), key.dim()),
+      "value cache must be in contiguous dim order");
 
   ET_LOG_MSG_AND_RETURN_IF_FALSE(
-      is_default_dim_order(value.dim_order().data(), value.dim()),
-      "value cache must be in default dim order");
+      is_contiguous_dim_order(value.dim_order().data(), value.dim()),
+      "value cache must be in contiguous dim order");
 
   if (attn_mask.has_value()) {
     ET_LOG_MSG_AND_RETURN_IF_FALSE(
-        is_default_dim_order(
+        is_contiguous_dim_order(
             attn_mask.value().dim_order().data(), attn_mask.value().dim()),
-        "value cache must be in default dim order");
+        "value cache must be in contiguous dim order");
   }
 
   return true;
@@ -593,14 +593,14 @@ bool validate_cache_params(
       seq_length,
       v_cache.size(2));
 
-  // Make sure they are in default dim order
+  // Make sure they are in contiguous dim order
   ET_LOG_MSG_AND_RETURN_IF_FALSE(
-      is_default_dim_order(k_cache.dim_order().data(), k_cache.dim()),
-      "key cache must be in default dim order");
+      is_contiguous_dim_order(k_cache.dim_order().data(), k_cache.dim()),
+      "key cache must be in contiguous dim order");
 
   ET_LOG_MSG_AND_RETURN_IF_FALSE(
-      is_default_dim_order(v_cache.dim_order().data(), v_cache.dim()),
-      "value cache must be in default dim order");
+      is_contiguous_dim_order(v_cache.dim_order().data(), v_cache.dim()),
+      "value cache must be in contiguous dim order");
 
   return true;
 }
@@ -618,9 +618,9 @@ void update_cache(
       "projected_value must have batch size of 1");
   ET_CHECK_MSG(cache.size(1) == 1, "cache must have batch size of 1");
   ET_CHECK_MSG(
-      is_default_dim_order(
+      is_contiguous_dim_order(
           projected_value.dim_order().data(), projected_value.dim()),
-      "projected value must be in default dim order");
+      "projected value must be in contiguous dim order");
   const void* projected_value_data = projected_value.const_data_ptr();
   void* cache_data = cache.mutable_data_ptr();
 
kernels/portable/cpu/op_native_batch_norm.cpp (4 changes: 2 additions & 2 deletions)
@@ -66,10 +66,10 @@ std::tuple<Tensor&, Tensor&, Tensor&> _native_batch_norm_legit_no_training_out(
       InvalidArgument,
       ret_val);
 
-  // For now, only support the default dim order
+  // For now, only support the contiguous dim order
   ET_KERNEL_CHECK(
       ctx,
-      is_default_dim_order(in.dim_order().data(), in.dim_order().size()),
+      is_contiguous_dim_order(in.dim_order().data(), in.dim_order().size()),
       InvalidArgument,
       ret_val);
 
runtime/core/exec_aten/testing_util/tensor_factory.h (4 changes: 2 additions & 2 deletions)
@@ -292,7 +292,7 @@ class TensorFactory {
    * size of this vector must be equal to the product of the elements of
    * `sizes`.
    * @param[in] dim_order The dim order describing how tensor memory is laid
-   * out. If empty or not specificed, the function will use a default dim order
+   * out. If empty or not specificed, the function will use a contiguous dim order
    * of {0, 1, 2, 3, ...}
    *
    * @return A new Tensor with the specified shape and data.
@@ -706,7 +706,7 @@ class TensorFactory {
    * size of this vector must be equal to the product of the elements of
    * `sizes`.
    * @param[in] dim_order The dim order describing how tensor memory is laid
-   * out. If empty or not specificed, the function will use a default dim order
+   * out. If empty or not specificed, the function will use a contiguous dim order
    * of {0, 1, 2, 3, ...}
    *
    * @return A new Tensor with the specified shape and data.
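Note: the doc-comment fix above belongs to the factory methods that accept an explicit dim order. A hedged usage sketch follows, assuming a make_with_dimorder-style overload as these comments describe; the exact method names and signatures in the real header may differ:

// Hypothetical test snippet; assumes TensorFactory exposes a
// make_with_dimorder(sizes, data, dim_order) overload as described by the
// doc comment above. Exact signatures may differ in the actual header.
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>

using torch::executor::testing::TensorFactory;

void dim_order_factory_example() {
  TensorFactory<exec_aten::ScalarType::Float> tf;

  // Empty dim_order: the factory falls back to the contiguous order
  // {0, 1, 2, 3, ...}.
  auto contiguous = tf.make_with_dimorder(
      /*sizes=*/{1, 2, 2, 2}, /*data=*/{1, 2, 3, 4, 5, 6, 7, 8});

  // Explicit channels-last order for a 4-D tensor (data ordering semantics
  // are assumed here: values are supplied in memory order).
  auto channels_last = tf.make_with_dimorder(
      {1, 2, 2, 2}, {1, 5, 2, 6, 3, 7, 4, 8}, /*dim_order=*/{0, 2, 3, 1});
}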
runtime/core/exec_aten/util/dim_order_util.h (4 changes: 2 additions & 2 deletions)
@@ -29,14 +29,14 @@ bool validate_dim_order(const DimOrderType* dim_order, const size_t dims) {
 } // namespace
 
 /**
- * Check if a given dim_order array is equivalent to the default dim order of
+ * Check if a given dim_order array is equivalent to the contiguous dim order of
  * {0, 1, 2, 3, ...}
  *
  * @param[in] dim_order pointer to dim_order array
  * @param[in] dims length of the dim_order array
  */
 template <typename DimOrderType>
-inline bool is_default_dim_order(
+inline bool is_contiguous_dim_order(
     const DimOrderType* dim_order,
     const size_t dims) {
   for (int i = 0; i < dims; ++i) {
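Note: the hunk cuts off at the loop header. The body is not shown in this diff; below is a minimal sketch of the whole helper, assuming it simply checks that each entry equals its index as the doc comment implies (the real implementation may differ in small details):

// Sketch of is_contiguous_dim_order; the loop body is truncated in the hunk
// above, so this is reconstructed from the doc comment rather than copied.
template <typename DimOrderType>
inline bool is_contiguous_dim_order(
    const DimOrderType* dim_order,
    const size_t dims) {
  for (int i = 0; i < dims; ++i) {
    // The contiguous (default) order is exactly {0, 1, 2, ..., dims - 1}.
    if (dim_order[i] != i) {
      return false;
    }
  }
  return true;
}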
runtime/core/exec_aten/util/tensor_util.h (2 changes: 1 addition & 1 deletion)
@@ -315,7 +315,7 @@
 #define ET_CHECK_DEFAULT_OR_CHANNELSLAST_DIMORDER(t__)             \
   ({                                                               \
     ET_CHECK_MSG(                                                  \
-        is_default_dim_order(                                      \
+        is_contiguous_dim_order(                                   \
            t__.dim_order().data(), t__.dim_order().size()) ||      \
         is_channels_last_dim_order(                                \
            t__.dim_order().data(), t__.dim_order().size()),        \
runtime/core/exec_aten/util/tensor_util_aten.cpp (2 changes: 1 addition & 1 deletion)
@@ -59,7 +59,7 @@ inline bool tensor_is_default_or_channels_last_dim_order(at::Tensor t) {
       get_dim_order(t, dim_order, t.dim()) == Error::Ok,
       "Failed to retrieve dim order from tensor!");
 
-  bool ret_val = is_default_dim_order(dim_order, t.dim()) ||
+  bool ret_val = is_contiguous_dim_order(dim_order, t.dim()) ||
       is_channels_last_dim_order(dim_order, t.dim());
 
   if (!ret_val) {
runtime/core/exec_aten/util/tensor_util_portable.cpp (2 changes: 1 addition & 1 deletion)
@@ -55,7 +55,7 @@ bool tensor_has_valid_dim_order(torch::executor::Tensor t) {
 
 bool tensor_is_default_or_channels_last_dim_order(torch::executor::Tensor t) {
   bool ret_val =
-      is_default_dim_order(t.dim_order().data(), t.dim_order().size()) ||
+      is_contiguous_dim_order(t.dim_order().data(), t.dim_order().size()) ||
       is_channels_last_dim_order(t.dim_order().data(), t.dim_order().size());
 
   if (!ret_val) {
runtime/core/exec_aten/util/test/dim_order_util_test.cpp (10 changes: 5 additions & 5 deletions)
@@ -236,7 +236,7 @@ TEST(TensorUtilTest, IsDefaultDimOrderTest) {
     std::vector<exec_aten::DimOrderType> dim_order(i);
     std::iota(dim_order.begin(), dim_order.end(), 0);
 
-    EXPECT_TRUE(torch::executor::is_default_dim_order(
+    EXPECT_TRUE(torch::executor::is_contiguous_dim_order(
         dim_order.data(), dim_order.size()));
 
     // As a bonus, check that is_channels_last returns false
@@ -252,7 +252,7 @@ TEST(TensorUtilTest, IsDefaultDimOrderFailCasesTest) {
     std::iota(dim_order.begin(), dim_order.end(), 0);
     std::swap(dim_order[0], dim_order[1]);
 
-    EXPECT_FALSE(torch::executor::is_default_dim_order(
+    EXPECT_FALSE(torch::executor::is_contiguous_dim_order(
         dim_order.data(), dim_order.size()));
   }
 
@@ -263,7 +263,7 @@ TEST(TensorUtilTest, IsDefaultDimOrderFailCasesTest) {
       dim_order[d] = (d + 1) % i;
     }
 
-    EXPECT_FALSE(torch::executor::is_default_dim_order(
+    EXPECT_FALSE(torch::executor::is_contiguous_dim_order(
         dim_order.data(), dim_order.size()));
   }
 }
@@ -276,8 +276,8 @@ TEST(TensorUtilTest, IsChannelsLastDimOrderTest) {
   EXPECT_TRUE(torch::executor::is_channels_last_dim_order(dim_order_5d, 5));
 
   // As a bonus, check that is_default returns false
-  EXPECT_FALSE(torch::executor::is_default_dim_order(dim_order_4d, 4));
-  EXPECT_FALSE(torch::executor::is_default_dim_order(dim_order_5d, 5));
+  EXPECT_FALSE(torch::executor::is_contiguous_dim_order(dim_order_4d, 4));
+  EXPECT_FALSE(torch::executor::is_contiguous_dim_order(dim_order_5d, 5));
 }
 
 TEST(TensorUtilTest, IsChannelsLastDimOrderFailCasesTest) {
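Note: the dim_order_4d and dim_order_5d arrays used in IsChannelsLastDimOrderTest are defined outside the hunk. Presumably they hold the channels-last orders; a hedged sketch of what such definitions would look like:

// Assumed definitions (not visible in this hunk): channels-last dim orders
// for 4-D (NCHW logical, NHWC memory) and 5-D (NCDHW logical, NDHWC memory).
exec_aten::DimOrderType dim_order_4d[] = {0, 2, 3, 1};
exec_aten::DimOrderType dim_order_5d[] = {0, 2, 3, 4, 1};
// With these values, is_channels_last_dim_order(...) returns true and
// is_contiguous_dim_order(...) returns false, matching the expectations above.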
