s/exec_aten::/executorch::aten::/ for extension/**/*.h (#6032)
Summary:
Pull Request resolved: #6032

Migrate all extension headers to use the new aten namespace, so that they act as good examples for users. The .cpp code can migrate later.
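For illustration, a minimal before/after sketch of what this rename looks like at a use site (the include line and the dtype_of helper are illustrative assumptions, not code from this PR):

#include <executorch/runtime/core/exec_aten/exec_aten.h> // assumed include for the ATen-compatible types

// Before: the deprecated alias namespace.
// using exec_aten::Tensor;
// using exec_aten::ScalarType;

// After: the namespace these extension headers now demonstrate.
using executorch::aten::Tensor;
using executorch::aten::ScalarType;

// Hypothetical helper, only to show the types in a signature.
inline ScalarType dtype_of(const Tensor& t) {
  return t.scalar_type();
}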

Reviewed By: lucylq

Differential Revision: D64079593

fbshipit-source-id: 62164cd6ec3238e21e896813f185d0c1446ef527
dbort authored and facebook-github-bot committed Oct 10, 2024
1 parent 192ca82 commit 69c2c76
Showing 9 changed files with 219 additions and 204 deletions.
2 changes: 1 addition & 1 deletion extension/android/jni/jni_layer_constants.h
@@ -37,7 +37,7 @@ constexpr static int kTensorDTypeBits4x2 = 20;
constexpr static int kTensorDTypeBits8 = 21;
constexpr static int kTensorDTypeBits16 = 22;

-using exec_aten::ScalarType;
+using executorch::aten::ScalarType;

const std::unordered_map<ScalarType, int> scalar_type_to_java_dtype = {
{ScalarType::Byte, kTensorDTypeUInt8},
25 changes: 13 additions & 12 deletions extension/kernel_util/make_boxed_from_unboxed_functor.h
@@ -66,12 +66,12 @@ struct decay_if_not_tensor final {
using type = std::decay_t<T>;
};
template <>
-struct decay_if_not_tensor<exec_aten::Tensor&> final {
-using type = exec_aten::Tensor&;
+struct decay_if_not_tensor<executorch::aten::Tensor&> final {
+using type = executorch::aten::Tensor&;
};
template <>
-struct decay_if_not_tensor<const exec_aten::Tensor&> final {
-using type = const exec_aten::Tensor&;
+struct decay_if_not_tensor<const executorch::aten::Tensor&> final {
+using type = const executorch::aten::Tensor&;
};

template <class T>
@@ -82,29 +82,30 @@ struct evalue_to_arg final {
};

template <>
-struct evalue_to_arg<exec_aten::Tensor&> final {
-static exec_aten::Tensor& call(executorch::runtime::EValue& v) {
+struct evalue_to_arg<executorch::aten::Tensor&> final {
+static executorch::aten::Tensor& call(executorch::runtime::EValue& v) {
return v.toTensor();
}
};

template <>
-struct evalue_to_arg<const exec_aten::Tensor&> final {
-static const exec_aten::Tensor& call(executorch::runtime::EValue& v) {
+struct evalue_to_arg<const executorch::aten::Tensor&> final {
+static const executorch::aten::Tensor& call(executorch::runtime::EValue& v) {
return v.toTensor();
}
};

template <class T>
-struct evalue_to_arg<exec_aten::optional<T>> final {
-static exec_aten::optional<T> call(executorch::runtime::EValue& v) {
+struct evalue_to_arg<executorch::aten::optional<T>> final {
+static executorch::aten::optional<T> call(executorch::runtime::EValue& v) {
return v.toOptional<T>();
}
};

template <class T>
-struct evalue_to_arg<exec_aten::ArrayRef<exec_aten::optional<T>>> final {
-static exec_aten::ArrayRef<exec_aten::optional<T>> call(
+struct evalue_to_arg<executorch::aten::ArrayRef<executorch::aten::optional<T>>>
+final {
+static executorch::aten::ArrayRef<executorch::aten::optional<T>> call(
executorch::runtime::EValue& v) {
return v.toListOptionalTensor();
}
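To make the trait machinery above concrete, here is a standalone compile-time sketch of the same decay_if_not_tensor pattern. It deliberately re-declares a toy Tensor and the trait in a local namespace instead of using the library's actual declarations, so the names below are illustrative only:

#include <type_traits>

namespace sketch {

struct Tensor {}; // stand-in for executorch::aten::Tensor

// Every argument type decays, except Tensor references, which keep their
// reference-ness so a kernel can read or mutate the EValue's tensor in place.
template <class T>
struct decay_if_not_tensor {
  using type = std::decay_t<T>;
};
template <>
struct decay_if_not_tensor<Tensor&> {
  using type = Tensor&;
};
template <>
struct decay_if_not_tensor<const Tensor&> {
  using type = const Tensor&;
};

} // namespace sketch

static_assert(std::is_same_v<sketch::decay_if_not_tensor<int&>::type, int>);
static_assert(std::is_same_v<
              sketch::decay_if_not_tensor<sketch::Tensor&>::type,
              sketch::Tensor&>);
static_assert(std::is_same_v<
              sketch::decay_if_not_tensor<const sketch::Tensor&>::type,
              const sketch::Tensor&>);

int main() {} // the assertions above are purely compile-time checks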
2 changes: 1 addition & 1 deletion extension/llm/runner/image_prefiller.h
@@ -30,7 +30,7 @@ class ImagePrefiller {
* It's passed as reference and will be updated inside this function.
* @return The next token of the LLM Module after prefill.
*/
-virtual ::executorch::runtime::Result<exec_aten::Tensor> prefill(
+virtual ::executorch::runtime::Result<executorch::aten::Tensor> prefill(
Image& image,
int64_t& start_pos) = 0;

5 changes: 3 additions & 2 deletions extension/llm/runner/text_decoder_runner.h
@@ -37,7 +37,7 @@ class TextDecoderRunner {
* Module.
* @return The output of the LLM Module. This will be a tensor of logits.
*/
-virtual ::executorch::runtime::Result<exec_aten::Tensor> step(
+virtual ::executorch::runtime::Result<executorch::aten::Tensor> step(
TensorPtr& input,
TensorPtr& start_pos);

@@ -66,7 +66,8 @@ class TextDecoderRunner {
* @param logits_tensor The logits tensor.
* @return The next token.
*/
-inline int32_t logits_to_token(const exec_aten::Tensor& logits_tensor) {
+inline int32_t logits_to_token(
+const executorch::aten::Tensor& logits_tensor) {
int32_t result = 0;
ET_SWITCH_THREE_TYPES(
Float,
11 changes: 6 additions & 5 deletions extension/llm/runner/text_token_generator.h
@@ -53,7 +53,7 @@ class TextTokenGenerator {
int64_t pos = start_pos; // position in the sequence

std::vector<uint64_t> token_data; // allocate space for the tokens
-std::vector<exec_aten::SizesType> token_shape;
+std::vector<executorch::aten::SizesType> token_shape;

// Token after prefill
uint64_t cur_token = tokens.back();
@@ -70,9 +70,10 @@ }
}

// initialize tensor wrappers
-auto tokens_managed =
-from_blob(token_data.data(), token_shape, exec_aten::ScalarType::Long);
-auto start_pos_managed = from_blob(&pos, {1}, exec_aten::ScalarType::Long);
+auto tokens_managed = from_blob(
+token_data.data(), token_shape, executorch::aten::ScalarType::Long);
+auto start_pos_managed =
+from_blob(&pos, {1}, executorch::aten::ScalarType::Long);

should_stop_ = false;

@@ -83,7 +84,7 @@ }
text_decoder_runner_->step(tokens_managed, start_pos_managed);

ET_CHECK_OK_OR_RETURN_ERROR(logits_res.error());
-exec_aten::Tensor& logits_tensor = logits_res.get();
+executorch::aten::Tensor& logits_tensor = logits_res.get();

prev_token = cur_token;

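The generator loop above also shows the from_blob calling convention with the new namespace. A minimal sketch of wrapping caller-owned buffers the same way; the include path and the executorch::extension qualification are assumptions, while the argument pattern mirrors the code above:

#include <cstdint>
#include <vector>

#include <executorch/extension/tensor/tensor.h> // assumed umbrella header providing from_blob

int main() {
  std::vector<uint64_t> token_data = {1, 2, 3};
  std::vector<executorch::aten::SizesType> token_shape = {1, 3};

  // Wrap existing memory without copying; the buffers must outlive the TensorPtrs.
  auto tokens = executorch::extension::from_blob(
      token_data.data(), token_shape, executorch::aten::ScalarType::Long);

  int64_t pos = 0;
  auto start_pos = executorch::extension::from_blob(
      &pos, {1}, executorch::aten::ScalarType::Long);
  return 0;
}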
112 changes: 60 additions & 52 deletions extension/tensor/tensor_ptr.h
@@ -22,7 +22,7 @@ namespace extension {
/**
* A smart pointer type for managing the lifecycle of a Tensor.
*/
-using TensorPtr = std::shared_ptr<exec_aten::Tensor>;
+using TensorPtr = std::shared_ptr<executorch::aten::Tensor>;

/**
* Creates a TensorPtr that manages a Tensor with the specified properties.
@@ -39,13 +39,14 @@ using TensorPtr = std::shared_ptr<exec_aten::Tensor>;
* @return A TensorPtr that manages the newly created Tensor.
*/
TensorPtr make_tensor_ptr(
-std::vector<exec_aten::SizesType> sizes,
+std::vector<executorch::aten::SizesType> sizes,
void* data,
-std::vector<exec_aten::DimOrderType> dim_order,
-std::vector<exec_aten::StridesType> strides,
-const exec_aten::ScalarType type = exec_aten::ScalarType::Float,
-const exec_aten::TensorShapeDynamism dynamism =
-exec_aten::TensorShapeDynamism::DYNAMIC_BOUND,
+std::vector<executorch::aten::DimOrderType> dim_order,
+std::vector<executorch::aten::StridesType> strides,
+const executorch::aten::ScalarType type =
+executorch::aten::ScalarType::Float,
+const executorch::aten::TensorShapeDynamism dynamism =
+executorch::aten::TensorShapeDynamism::DYNAMIC_BOUND,
std::function<void(void*)> deleter = nullptr);

/**
@@ -61,11 +62,12 @@ TensorPtr make_tensor_ptr(
* @return A TensorPtr that manages the newly created Tensor.
*/
inline TensorPtr make_tensor_ptr(
-std::vector<exec_aten::SizesType> sizes,
+std::vector<executorch::aten::SizesType> sizes,
void* data,
-const exec_aten::ScalarType type = exec_aten::ScalarType::Float,
-const exec_aten::TensorShapeDynamism dynamism =
-exec_aten::TensorShapeDynamism::DYNAMIC_BOUND,
+const executorch::aten::ScalarType type =
+executorch::aten::ScalarType::Float,
+const executorch::aten::TensorShapeDynamism dynamism =
+executorch::aten::TensorShapeDynamism::DYNAMIC_BOUND,
std::function<void(void*)> deleter = nullptr) {
return make_tensor_ptr(
std::move(sizes), data, {}, {}, type, dynamism, std::move(deleter));
@@ -93,15 +95,16 @@ inline TensorPtr make_tensor_ptr(
*/
template <
typename T = float,
-exec_aten::ScalarType deduced_type = runtime::CppTypeToScalarType<T>::value>
+executorch::aten::ScalarType deduced_type =
+runtime::CppTypeToScalarType<T>::value>
inline TensorPtr make_tensor_ptr(
-std::vector<exec_aten::SizesType> sizes,
+std::vector<executorch::aten::SizesType> sizes,
std::vector<T> data,
-std::vector<exec_aten::DimOrderType> dim_order = {},
-std::vector<exec_aten::StridesType> strides = {},
-exec_aten::ScalarType type = deduced_type,
-exec_aten::TensorShapeDynamism dynamism =
-exec_aten::TensorShapeDynamism::DYNAMIC_BOUND) {
+std::vector<executorch::aten::DimOrderType> dim_order = {},
+std::vector<executorch::aten::StridesType> strides = {},
+executorch::aten::ScalarType type = deduced_type,
+executorch::aten::TensorShapeDynamism dynamism =
+executorch::aten::TensorShapeDynamism::DYNAMIC_BOUND) {
if (type != deduced_type) {
ET_CHECK_MSG(
runtime::canCast(deduced_type, type),
@@ -157,13 +160,15 @@ inline TensorPtr make_tensor_ptr(
*/
template <
typename T = float,
-exec_aten::ScalarType deduced_type = runtime::CppTypeToScalarType<T>::value>
+executorch::aten::ScalarType deduced_type =
+runtime::CppTypeToScalarType<T>::value>
inline TensorPtr make_tensor_ptr(
std::vector<T> data,
-exec_aten::ScalarType type = deduced_type,
-exec_aten::TensorShapeDynamism dynamism =
-exec_aten::TensorShapeDynamism::DYNAMIC_BOUND) {
-std::vector<exec_aten::SizesType> sizes{exec_aten::SizesType(data.size())};
+executorch::aten::ScalarType type = deduced_type,
+executorch::aten::TensorShapeDynamism dynamism =
+executorch::aten::TensorShapeDynamism::DYNAMIC_BOUND) {
+std::vector<executorch::aten::SizesType> sizes{
+executorch::aten::SizesType(data.size())};
return make_tensor_ptr(
std::move(sizes), std::move(data), {0}, {1}, type, dynamism);
}
@@ -192,15 +197,16 @@ inline TensorPtr make_tensor_ptr(
*/
template <
typename T = float,
-exec_aten::ScalarType deduced_type = runtime::CppTypeToScalarType<T>::value>
+executorch::aten::ScalarType deduced_type =
+runtime::CppTypeToScalarType<T>::value>
inline TensorPtr make_tensor_ptr(
-std::vector<exec_aten::SizesType> sizes,
+std::vector<executorch::aten::SizesType> sizes,
std::initializer_list<T> list,
-std::vector<exec_aten::DimOrderType> dim_order = {},
-std::vector<exec_aten::StridesType> strides = {},
-exec_aten::ScalarType type = deduced_type,
-exec_aten::TensorShapeDynamism dynamism =
-exec_aten::TensorShapeDynamism::DYNAMIC_BOUND) {
+std::vector<executorch::aten::DimOrderType> dim_order = {},
+std::vector<executorch::aten::StridesType> strides = {},
+executorch::aten::ScalarType type = deduced_type,
+executorch::aten::TensorShapeDynamism dynamism =
+executorch::aten::TensorShapeDynamism::DYNAMIC_BOUND) {
return make_tensor_ptr(
std::move(sizes),
std::vector<T>(std::move(list)),
@@ -231,13 +237,15 @@ inline TensorPtr make_tensor_ptr(
*/
template <
typename T = float,
-exec_aten::ScalarType deduced_type = runtime::CppTypeToScalarType<T>::value>
+executorch::aten::ScalarType deduced_type =
+runtime::CppTypeToScalarType<T>::value>
inline TensorPtr make_tensor_ptr(
std::initializer_list<T> list,
-exec_aten::ScalarType type = deduced_type,
-exec_aten::TensorShapeDynamism dynamism =
-exec_aten::TensorShapeDynamism::DYNAMIC_BOUND) {
-std::vector<exec_aten::SizesType> sizes{exec_aten::SizesType(list.size())};
+executorch::aten::ScalarType type = deduced_type,
+executorch::aten::TensorShapeDynamism dynamism =
+executorch::aten::TensorShapeDynamism::DYNAMIC_BOUND) {
+std::vector<executorch::aten::SizesType> sizes{
+executorch::aten::SizesType(list.size())};
return make_tensor_ptr(
std::move(sizes), std::move(list), {0}, {1}, type, dynamism);
}
@@ -270,13 +278,13 @@ inline TensorPtr make_tensor_ptr(T value) {
* @return A TensorPtr managing the newly created Tensor.
*/
TensorPtr make_tensor_ptr(
-std::vector<exec_aten::SizesType> sizes,
+std::vector<executorch::aten::SizesType> sizes,
std::vector<uint8_t> data,
-std::vector<exec_aten::DimOrderType> dim_order,
-std::vector<exec_aten::StridesType> strides,
-exec_aten::ScalarType type = exec_aten::ScalarType::Float,
-exec_aten::TensorShapeDynamism dynamism =
-exec_aten::TensorShapeDynamism::DYNAMIC_BOUND);
+std::vector<executorch::aten::DimOrderType> dim_order,
+std::vector<executorch::aten::StridesType> strides,
+executorch::aten::ScalarType type = executorch::aten::ScalarType::Float,
+executorch::aten::TensorShapeDynamism dynamism =
+executorch::aten::TensorShapeDynamism::DYNAMIC_BOUND);

/**
* Creates a TensorPtr that manages a Tensor with the specified properties.
@@ -292,11 +300,11 @@ TensorPtr make_tensor_ptr(
* @return A TensorPtr managing the newly created Tensor.
*/
inline TensorPtr make_tensor_ptr(
-std::vector<exec_aten::SizesType> sizes,
+std::vector<executorch::aten::SizesType> sizes,
std::vector<uint8_t> data,
-exec_aten::ScalarType type = exec_aten::ScalarType::Float,
-exec_aten::TensorShapeDynamism dynamism =
-exec_aten::TensorShapeDynamism::DYNAMIC_BOUND) {
+executorch::aten::ScalarType type = executorch::aten::ScalarType::Float,
+executorch::aten::TensorShapeDynamism dynamism =
+executorch::aten::TensorShapeDynamism::DYNAMIC_BOUND) {
return make_tensor_ptr(
std::move(sizes), std::move(data), {}, {}, type, dynamism);
}
Expand All @@ -309,21 +317,21 @@ inline TensorPtr make_tensor_ptr(
* @return A new TensorPtr managing a Tensor with the same properties as the
* original.
*/
-inline TensorPtr make_tensor_ptr(const exec_aten::Tensor& tensor) {
+inline TensorPtr make_tensor_ptr(const executorch::aten::Tensor& tensor) {
return make_tensor_ptr(
-std::vector<exec_aten::SizesType>(
+std::vector<executorch::aten::SizesType>(
tensor.sizes().begin(), tensor.sizes().end()),
tensor.mutable_data_ptr(),
#ifndef USE_ATEN_LIB
-std::vector<exec_aten::DimOrderType>(
+std::vector<executorch::aten::DimOrderType>(
tensor.dim_order().begin(), tensor.dim_order().end()),
-std::vector<exec_aten::StridesType>(
+std::vector<executorch::aten::StridesType>(
tensor.strides().begin(), tensor.strides().end()),
tensor.scalar_type(),
tensor.shape_dynamism()
#else // USE_ATEN_LIB
{},
-std::vector<exec_aten::StridesType>(
+std::vector<executorch::aten::StridesType>(
tensor.strides().begin(), tensor.strides().end()),
tensor.scalar_type()
#endif // USE_ATEN_LIB
@@ -339,7 +347,7 @@ inline TensorPtr make_tensor_ptr(const exec_aten::Tensor& tensor) {
* @return A new TensorPtr that manages a Tensor with the same properties as the
* original but with copied data.
*/
-TensorPtr clone_tensor_ptr(const exec_aten::Tensor& tensor);
+TensorPtr clone_tensor_ptr(const executorch::aten::Tensor& tensor);

/**
* Creates a new TensorPtr by cloning the given TensorPtr, copying the
@@ -363,7 +371,7 @@ inline TensorPtr clone_tensor_ptr(const TensorPtr& tensor) {
ET_NODISCARD
runtime::Error resize_tensor_ptr(
TensorPtr& tensor,
-const std::vector<exec_aten::SizesType>& sizes);
+const std::vector<executorch::aten::SizesType>& sizes);

} // namespace extension
} // namespace executorch
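Putting the tensor_ptr.h declarations above together, a short usage sketch under the new namespace; the overloads and the executorch::extension namespace come from the header shown above, while the include path is an assumption:

#include <vector>

#include <executorch/extension/tensor/tensor_ptr.h> // assumed include path

int main() {
  using executorch::extension::make_tensor_ptr;
  using executorch::extension::resize_tensor_ptr;

  // 2x3 float tensor that owns its data; the scalar type is deduced from T.
  auto owned = make_tensor_ptr({2, 3}, std::vector<float>{1, 2, 3, 4, 5, 6});

  // 1-D int64 tensor wrapping caller-owned memory; no deleter is passed,
  // so `values` must outlive the TensorPtr.
  std::vector<int64_t> values = {10, 20, 30};
  auto wrapped =
      make_tensor_ptr({3}, values.data(), executorch::aten::ScalarType::Long);

  // Resizing is allowed within the default DYNAMIC_BOUND dynamism.
  auto err = resize_tensor_ptr(owned, {3, 2});
  (void)err;
  return 0;
}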
