Skip to content

Commit

Permalink
Fix the build of examples/xtensa (#2642)
Browse files Browse the repository at this point in the history
Summary:
Pull Request resolved: #2642

Fixes to the aot side and the build side following the new docs.

bypass-github-pytorch-ci-checks

Reviewed By: tarun292, cccclai

Differential Revision: D55300249

fbshipit-source-id: e3f4b456ed4fada037fc9c0f074601a043240872
  • Loading branch information
mcremon-meta authored and facebook-github-bot committed Mar 25, 2024
1 parent 02050de commit 551aaf0
Show file tree
Hide file tree
Showing 7 changed files with 42 additions and 18 deletions.
9 changes: 8 additions & 1 deletion docs/source/build-run-xtensa.md
Original file line number Diff line number Diff line change
Expand Up @@ -136,15 +136,22 @@ cd executorch
rm -rf cmake-out
# prebuild and install executorch library
cmake -DBUCK2=buck2 \
-DCMAKE_TOOLCHAIN_FILE=<path_to_executorch>/examples/xtensa/xtensa.cmake \
-DCMAKE_INSTALL_PREFIX=cmake-out \
-DCMAKE_BUILD_TYPE=Debug \
-DEXECUTORCH_BUILD_HOST_TARGETS=ON \
-DEXECUTORCH_BUILD_EXECUTOR_RUNNER=OFF \
-DEXECUTORCH_BUILD_FLATC=OFF \
-DFLATC_EXECUTABLE="$(which flatc)" \
-DEXECUTORCH_BUILD_EXTENSION_RUNNER_UTIL=ON \
-DPYTHON_EXECUTABLE=python3 \
-Bcmake-out .

cmake --build cmake-out -j8 --target install --config Debug
# build xtensa runner
cmake -DCMAKE_BUILD_TYPE=Debug \
-DCMAKE_TOOLCHAIN_FILE=../examples/xtensa/xtensa.cmake \
-DCMAKE_TOOLCHAIN_FILE=<path_to_executorch>/examples/xtensa/xtensa.cmake \
-DCMAKE_PREFIX_PATH=<path_to_executorch>/cmake-out \
-DMODEL_PATH=<path_to_program_file_generated_in_previous_step> \
-DNXP_SDK_ROOT_DIR=<path_to_nxp_sdk_root> -DEXECUTORCH_BUILD_FLATC=0 \
-DFLATC_EXECUTABLE="$(which flatc)" \
Expand Down
17 changes: 15 additions & 2 deletions examples/xtensa/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,10 @@ if(NOT CMAKE_CXX_STANDARD)
set(CMAKE_CXX_STANDARD 17)
endif()

if(NOT PYTHON_EXECUTABLE)
set(PYTHON_EXECUTABLE python3)
endif()

# Set the project name.
project(xtensa_executorch_example)

Expand All @@ -19,6 +23,9 @@ if(NOT EXECUTORCH_ROOT)
set(EXECUTORCH_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../..)
endif()

# Let files say "include <executorch/path/to/header.h>".
set(_common_include_directories ${EXECUTORCH_ROOT}/..)

# Find prebuilt executorch lib
find_package(executorch CONFIG REQUIRED)

Expand Down Expand Up @@ -74,6 +81,10 @@ target_include_directories(
${NXP_SDK_ROOT_DIR}/devices/MIMXRT685S/utilities/str
${NXP_SDK_ROOT_DIR}/boards/evkmimxrt685/dsp_examples/mu_polling/dsp)

add_library(extension_runner_util STATIC IMPORTED)
set_property(TARGET extension_runner_util PROPERTY IMPORTED_LOCATION
"${CMAKE_CURRENT_LIST_DIR}/../../cmake-out/extension/runner_util/libextension_runner_util.a")

add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/ops)
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/kernels)

Expand All @@ -90,13 +101,15 @@ add_custom_target(gen_model_header DEPENDS ${CMAKE_BINARY_DIR}/model_pte.h)
add_executable(xtensa_executorch_example executor_runner.cpp)
add_dependencies(xtensa_executorch_example gen_model_header)

# lint_cmake: -linelength
target_include_directories(xtensa_executorch_example PUBLIC ${ROOT_DIR}/..
${CMAKE_BINARY_DIR})
${CMAKE_BINARY_DIR}
${_common_include_directories})

target_link_options(xtensa_executorch_example PRIVATE
-mlsp=${NXP_SDK_ROOT_DIR}/devices/MIMXRT685S/xtensa/min-rt)
target_link_libraries(xtensa_executorch_example dsp_mu_polling_libs
xtensa_ops_lib executorch)
xtensa_ops_lib extension_runner_util executorch)

add_custom_command(
TARGET xtensa_executorch_example
Expand Down
9 changes: 5 additions & 4 deletions examples/xtensa/aot/export_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@
from .meta_registrations import * # noqa

import torch
from executorch.exir import EdgeCompileConfig, ExecutorchBackendConfig
from executorch.exir import EdgeCompileConfig
from torch._export import capture_pre_autograd_graph
from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e

Expand Down Expand Up @@ -77,14 +77,15 @@ def forward(self, x: torch.Tensor):
export_to_edge(
converted_model_exp,
example_inputs,
EdgeCompileConfig(
edge_compile_config=EdgeCompileConfig(
_check_ir_validity=False,
),
)
.transform(
[ReplacePT2QuantWithXtensaQuant(), ReplacePT2DequantWithXtensaDequant()]
[ReplacePT2QuantWithXtensaQuant(), ReplacePT2DequantWithXtensaDequant()],
check_ir_validity=False,
)
.to_executorch(config=ExecutorchBackendConfig(extract_constant_segment=False))
.to_executorch()
)

logging.info(f"Final exported graph:\n{exec_prog.exported_program().graph}")
Expand Down
4 changes: 2 additions & 2 deletions examples/xtensa/executor_runner.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -27,12 +27,12 @@
#include <vector>

#include <executorch/extension/data_loader/buffer_data_loader.h>
#include <executorch/extension/runner_util/inputs.h>
#include <executorch/runtime/executor/method.h>
#include <executorch/runtime/executor/program.h>
#include <executorch/runtime/platform/log.h>
#include <executorch/runtime/platform/profiler.h>
#include <executorch/runtime/platform/runtime.h>
#include <executorch/util/util.h>

static uint8_t method_allocator_pool[18 * 1024U]; // 18 kB

Expand Down Expand Up @@ -177,7 +177,7 @@ int main(int argc, char** argv) {
}

ET_LOG(Info, "Method loaded.");
torch::executor::util::PrepareInputTensors(*method);
torch::executor::util::prepare_input_tensors(*method);
ET_LOG(Info, "Starting the model execution...");

Error status = method->execute();
Expand Down
11 changes: 11 additions & 0 deletions examples/xtensa/ops/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -32,10 +32,21 @@ set(_aten_ops__srcs
add_library(aten_ops_xtensa ${_aten_ops__srcs})
target_link_libraries(aten_ops_xtensa PUBLIC executorch)

# Let files say "include <executorch/path/to/header.h>".
set(_common_include_directories ${EXECUTORCH_ROOT}/..)

target_include_directories(aten_ops_xtensa PUBLIC ${ROOT_DIR}/..
${CMAKE_BINARY_DIR}
${_common_include_directories})

# Custom ops that are needed to run the test model.
add_library(
custom_ops "quantized_linear_out.cpp" "quantize_per_tensor.cpp"
"dequantize_per_tensor.cpp")
target_include_directories(custom_ops PUBLIC ${ROOT_DIR}/..
${CMAKE_BINARY_DIR}
${_common_include_directories})

target_link_libraries(custom_ops PUBLIC executorch)
target_link_libraries(custom_ops PRIVATE xtensa_kernels)

Expand Down
8 changes: 0 additions & 8 deletions examples/xtensa/ops/dequantize_per_tensor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -25,15 +25,7 @@ void dequantize_per_tensor_out(
int64_t quant_min,
int64_t quant_max,
ScalarType dtype,
exec_aten::optional<ScalarType>& out_dtype,
Tensor& out) {
if (out_dtype.has_value()) {
ET_CHECK_MSG(
out_dtype.value() == ScalarType::Float,
"Expected out dtype to be Float but got %hhd",
out_dtype.value());
}

float* out_data = out.mutable_data_ptr<float>();
size_t numel = out.numel();

Expand Down
2 changes: 1 addition & 1 deletion examples/xtensa/ops/functions.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@
- arg_meta: null
kernel_name: impl::HiFi::quantized_linear_pt2_out

- func: xtensa::dequantize_per_tensor.out(Tensor input, float scale, int zero_point, int quant_min, int quant_max, ScalarType dtype, *, ScalarType? out_dtype=None, Tensor(a!) out) -> Tensor(a!)
- func: xtensa::dequantize_per_tensor.out(Tensor input, float scale, int zero_point, int quant_min, int quant_max, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
variants: function
kernels:
- arg_meta: null
Expand Down

0 comments on commit 551aaf0

Please sign in to comment.