[IntelFPGA] Init support for Intel FPGA #5742

Merged · 28 commits · Mar 31, 2021

Commits
- c5b3c1d: test=develop (xbeu, Mar 17, 2021)
- 99756c6: test=develop (xbeu, Mar 17, 2021)
- 680a348: test=develop (xbeu, Mar 17, 2021)
- a14cbfe: test=develop (xbeu, Mar 18, 2021)
- 4c5cd31: Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle-Lite… (xbeu, Mar 18, 2021)
- db4d225: Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle-Lite… (xbeu, Mar 18, 2021)
- 9f0def8: test=develop (xbeu, Mar 18, 2021)
- a9685c4: test=develop (xbeu, Mar 22, 2021)
- ec249f0: Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle-Lite… (xbeu, Mar 22, 2021)
- 4c3196e: test=develop (xbeu, Mar 23, 2021)
- e5400c9: Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle-Lite… (xbeu, Mar 23, 2021)
- 1428da3: test=develop (xbeu, Mar 24, 2021)
- 4f11189: test=develop (xbeu, Mar 24, 2021)
- 91ea17d: Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle-Lite… (xbeu, Mar 24, 2021)
- d716f59: test=develop (xbeu, Mar 24, 2021)
- 51533df: test=develop (xbeu, Mar 24, 2021)
- aaf4dfd: test=develop (xbeu, Mar 24, 2021)
- ebc91d0: Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle-Lite… (xbeu, Mar 24, 2021)
- 063e302: test=develop (xbeu, Mar 25, 2021)
- 71568a2: test=develop (xbeu, Mar 25, 2021)
- de84566: test=develop (xbeu, Mar 26, 2021)
- 9327d84: Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle-Lite… (xbeu, Mar 26, 2021)
- e39781d: test=develop (xbeu, Mar 29, 2021)
- f5a0046: test=develop (xbeu, Mar 29, 2021)
- a6da448: Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle-Lite… (xbeu, Mar 29, 2021)
- c22d58b: test=develop (xbeu, Mar 30, 2021)
- e80d96c: Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle-Lite… (xbeu, Mar 30, 2021)
- 7097964: Update intel_fpga.md (xbeu, Mar 30, 2021)

Files changed
1 change: 1 addition & 0 deletions CMakeLists.txt
@@ -99,6 +99,7 @@ lite_option(LITE_WITH_TRAIN "Enable training operators and kernels in lite"
lite_option(LITE_WITH_OPENMP "Enable OpenMP in lite framework" ON)
lite_option(LITE_WITH_OPENCL "Enable OpenCL support in lite" OFF)
lite_option(LITE_WITH_FPGA "Enable FPGA support in lite" OFF)
lite_option(LITE_WITH_INTEL_FPGA "Enable Intel FPGA support in lite" OFF)
lite_option(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK "Enable light-weight framework" OFF)
lite_option(LITE_WITH_PROFILE "Enable profile mode in lite framework" OFF)
lite_option(LITE_WITH_PRECISION_PROFILE "Enable precision profile in profile mode ON in lite" OFF)
4 changes: 4 additions & 0 deletions cmake/configure.cmake
@@ -173,6 +173,10 @@ if (LITE_WITH_FPGA)
add_definitions("-DLITE_WITH_FPGA")
endif()

if (LITE_WITH_INTEL_FPGA)
add_definitions("-DLITE_WITH_INTEL_FPGA")
endif()

if (LITE_WITH_BM)
add_definitions("-DLITE_WITH_BM")
endif()
33 changes: 27 additions & 6 deletions cmake/lite.cmake
@@ -22,7 +22,7 @@ endfunction()
function (lite_deps TARGET)
set(options "")
set(oneValueArgs "")
set(multiValueArgs DEPS X86_DEPS CUDA_DEPS ARM_DEPS PROFILE_DEPS LIGHT_DEPS HVY_DEPS CL_DEPS FPGA_DEPS BM_DEPS RKNPU_DEPS NPU_DEPS XPU_DEPS MLU_DEPS HUAWEI_ASCEND_NPU_DEPS IMAGINATION_NNA_DEPS APU_DEPS CV_DEPS ARGS)
set(multiValueArgs DEPS X86_DEPS CUDA_DEPS ARM_DEPS PROFILE_DEPS LIGHT_DEPS HVY_DEPS CL_DEPS FPGA_DEPS INTEL_FPGA_DEPS BM_DEPS RKNPU_DEPS NPU_DEPS XPU_DEPS MLU_DEPS HUAWEI_ASCEND_NPU_DEPS IMAGINATION_NNA_DEPS APU_DEPS CV_DEPS ARGS)
cmake_parse_arguments(lite_deps "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})

set(deps ${lite_deps_DEPS})
@@ -81,6 +81,12 @@ function (lite_deps TARGET)
set(deps ${deps} ${var})
endforeach(var)
endif()

if (LITE_WITH_INTEL_FPGA)
foreach(var ${lite_deps_INTEL_FPGA_DEPS})
set(deps ${deps} ${var})
endforeach(var)
endif()

if (LITE_WITH_NPU)
foreach(var ${lite_deps_NPU_DEPS})
@@ -155,7 +161,7 @@ file(WRITE ${offline_lib_registry_file} "") # clean
function(lite_cc_library TARGET)
set(options SHARED shared STATIC static MODULE module)
set(oneValueArgs "")
set(multiValueArgs SRCS DEPS X86_DEPS CUDA_DEPS CL_DEPS ARM_DEPS FPGA_DEPS BM_DEPS IMAGINATION_NNA_DEPS RKNPU_DEPS NPU_DEPS XPU_DEPS MLU_DEPS HUAWEI_ASCEND_NPU_DEPS APU_DEPS CV_DEPS PROFILE_DEPS LIGHT_DEPS
set(multiValueArgs SRCS DEPS X86_DEPS CUDA_DEPS CL_DEPS ARM_DEPS FPGA_DEPS INTEL_FPGA_DEPS BM_DEPS IMAGINATION_NNA_DEPS RKNPU_DEPS NPU_DEPS XPU_DEPS MLU_DEPS HUAWEI_ASCEND_NPU_DEPS APU_DEPS CV_DEPS PROFILE_DEPS LIGHT_DEPS
HVY_DEPS EXCLUDE_COMPILE_DEPS ARGS)
cmake_parse_arguments(args "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})

@@ -171,6 +177,7 @@ function(lite_cc_library TARGET)
ARM_DEPS ${args_ARM_DEPS}
CV_DEPS ${args_CV_DEPS}
FPGA_DEPS ${args_FPGA_DEPS}
INTEL_FPGA_DEPS ${args_INTEL_FPGA_DEPS}
NPU_DEPS ${args_NPU_DEPS}
APU_DEPS ${args_APU_DEPS}
XPU_DEPS ${args_XPU_DEPS}
@@ -207,7 +214,7 @@ function(lite_cc_binary TARGET)
set(options " -g ")
endif()
set(oneValueArgs "")
set(multiValueArgs SRCS DEPS X86_DEPS CUDA_DEPS CL_DEPS ARM_DEPS FPGA_DEPS BM_DEPS IMAGINATION_NNA_DEPS RKNPU NPU_DEPS XPU_DEPS MLU_DEPS HUAWEI_ASCEND_NPU_DEPS APU_DEPS PROFILE_DEPS
set(multiValueArgs SRCS DEPS X86_DEPS CUDA_DEPS CL_DEPS ARM_DEPS FPGA_DEPS INTEL_FPGA_DEPS BM_DEPS IMAGINATION_NNA_DEPS RKNPU NPU_DEPS XPU_DEPS MLU_DEPS HUAWEI_ASCEND_NPU_DEPS APU_DEPS PROFILE_DEPS
LIGHT_DEPS HVY_DEPS EXCLUDE_COMPILE_DEPS CV_DEPS ARGS)
cmake_parse_arguments(args "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})

@@ -219,6 +226,7 @@ function(lite_cc_binary TARGET)
CL_DEPS ${args_CL_DEPS}
ARM_DEPS ${args_ARM_DEPS}
FPGA_DEPS ${args_FPGA_DEPS}
INTEL_FPGA_DEPS ${args_INTEL_FPGA_DEPS}
NPU_DEPS ${args_NPU_DEPS}
APU_DEPS ${args_APU_DEPS}
XPU_DEPS ${args_XPU_DEPS}
@@ -262,7 +270,7 @@ function(lite_cc_test TARGET)
endif()
set(options "")
set(oneValueArgs "")
set(multiValueArgs SRCS DEPS X86_DEPS CUDA_DEPS CL_DEPS ARM_DEPS FPGA_DEPS BM_DEPS IMAGINATION_NNA_DEPS RKNPU_DEPS NPU_DEPS XPU_DEPS MLU_DEPS HUAWEI_ASCEND_NPU_DEPS APU_DEPS PROFILE_DEPS
set(multiValueArgs SRCS DEPS X86_DEPS CUDA_DEPS CL_DEPS ARM_DEPS FPGA_DEPS INTEL_FPGA_DEPS BM_DEPS IMAGINATION_NNA_DEPS RKNPU_DEPS NPU_DEPS XPU_DEPS MLU_DEPS HUAWEI_ASCEND_NPU_DEPS APU_DEPS PROFILE_DEPS
LIGHT_DEPS HVY_DEPS EXCLUDE_COMPILE_DEPS CV_DEPS
ARGS
COMPILE_LEVEL # (basic|extra)
@@ -282,6 +290,7 @@ function(lite_cc_test TARGET)
CL_DEPS ${args_CL_DEPS}
ARM_DEPS ${args_ARM_DEPS}
FPGA_DEPS ${args_FPGA_DEPS}
INTEL_FPGA_DEPS ${args_INTEL_FPGA_DEPS}
NPU_DEPS ${args_NPU_DEPS}
APU_DEPS ${args_APU_DEPS}
XPU_DEPS ${args_XPU_DEPS}
@@ -318,6 +327,7 @@ set(arm_kernels CACHE INTERNAL "arm kernels")
set(x86_kernels CACHE INTERNAL "x86 kernels")
set(cuda_kernels CACHE INTERNAL "cuda kernels")
set(fpga_kernels CACHE INTERNAL "fpga kernels")
set(intel_fpga_kernels CACHE INTERNAL "intel_fpga kernels")
set(npu_kernels CACHE INTERNAL "npu kernels")
set(apu_kernels CACHE INTERNAL "apu kernels")
set(xpu_kernels CACHE INTERNAL "xpu kernels")
@@ -346,7 +356,7 @@ endif()
function(add_kernel TARGET device level)
set(options "")
set(oneValueArgs "")
set(multiValueArgs SRCS DEPS X86_DEPS CUDA_DEPS CL_DEPS ARM_DEPS FPGA_DEPS BM_DEPS IMAGINATION_NNA_DEPS RKNPU_DEPS NPU_DEPS XPU_DEPS MLU_DEPS HUAWEI_ASCEND_NPU_DEPS APU_DEPS PROFILE_DEPS
set(multiValueArgs SRCS DEPS X86_DEPS CUDA_DEPS CL_DEPS ARM_DEPS FPGA_DEPS INTEL_FPGA_DEPS BM_DEPS IMAGINATION_NNA_DEPS RKNPU_DEPS NPU_DEPS XPU_DEPS MLU_DEPS HUAWEI_ASCEND_NPU_DEPS APU_DEPS PROFILE_DEPS
LIGHT_DEPS HVY_DEPS EXCLUDE_COMPILE_DEPS
ARGS)
cmake_parse_arguments(args "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
@@ -431,6 +441,15 @@ function(add_kernel TARGET device level)
endif()
set(fpga_kernels "${fpga_kernels};${TARGET}" CACHE INTERNAL "")
endif()
if ("${device}" STREQUAL "INTEL_FPGA")
if (NOT LITE_WITH_INTEL_FPGA)
foreach(src ${args_SRCS})
file(APPEND ${fake_kernels_src_list} "${CMAKE_CURRENT_SOURCE_DIR}/${src}\n")
endforeach()
return()
endif()
set(intel_fpga_kernels "${intel_fpga_kernels};${TARGET}" CACHE INTERNAL "")
endif()
if ("${device}" STREQUAL "BM")
if (NOT LITE_WITH_BM)
foreach(src ${args_SRCS})
@@ -514,6 +533,7 @@ function(add_kernel TARGET device level)
CL_DEPS ${args_CL_DEPS}
ARM_DEPS ${args_ARM_DEPS}
FPGA_DEPS ${args_FPGA_DEPS}
INTEL_FPGA_DEPS ${args_INTEL_FPGA_DEPS}
NPU_DEPS ${args_NPU_DEPS}
APU_DEPS ${args_APU_DEPS}
XPU_DEPS ${args_XPU_DEPS}
@@ -540,7 +560,7 @@ endif()
function(add_operator TARGET level)
set(options "")
set(oneValueArgs "")
set(multiValueArgs SRCS DEPS X86_DEPS CUDA_DEPS CL_DEPS ARM_DEPS FPGA_DEPS BM_DEPS IMAGINATION_NNA_DEPS NPU_DEPS XPU_DEPS MLU_DEPS HUAWEI_ASCEND_NPU_DEPS APU_DEPS PROFILE_DEPS
set(multiValueArgs SRCS DEPS X86_DEPS CUDA_DEPS CL_DEPS ARM_DEPS FPGA_DEPS INTEL_FPGA_DEPS BM_DEPS IMAGINATION_NNA_DEPS NPU_DEPS XPU_DEPS MLU_DEPS HUAWEI_ASCEND_NPU_DEPS APU_DEPS PROFILE_DEPS
LIGHT_DEPS HVY_DEPS EXCLUDE_COMPILE_DEPS
ARGS)
cmake_parse_arguments(args "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
@@ -572,6 +592,7 @@ function(add_operator TARGET level)
CL_DEPS ${args_CL_DEPS}
ARM_DEPS ${args_ARM_DEPS}
FPGA_DEPS ${args_FPGA_DEPS}
INTEL_FPGA_DEPS ${args_INTEL_FPGA_DEPS}
NPU_DEPS ${args_NPU_DEPS}
APU_DEPS ${args_APU_DEPS}
XPU_DEPS ${args_XPU_DEPS}
107 changes: 107 additions & 0 deletions docs/demo_guides/intel_fpga.md
@@ -0,0 +1,107 @@
# Deploying Paddle Lite inference with IntelFPGA

Paddle Lite supports model inference on the ARM-based IntelFPGA C5 and provides armv7hf cross-compilation.

Paddle Lite schedules the FPGA hardware, and provides the corresponding API, by calling the underlying driver.

## Overview of IntelFPGA support in Lite

Lite supports IntelFPGA as a backend for model inference; its main characteristics are:

- All IntelFPGA kernels in Lite use FP32 precision and the NCHW layout for both inputs and outputs

- Kernels not yet supported on IntelFPGA fall back to ARM, enabling hybrid ARM+FPGA deployment

## Supported chips
- [Cyclone V](https://www.intel.cn/content/dam/altera-www/global/en_US/pdfs/literature/hb/cyclone-v/cv_51002.pdf)

### Supported (or partially supported) Paddle operators

- relu/relu6/leakyrelu
- conv2d
- depthwise_conv2d

### Supported Paddle models

- [SSD_MobileNet_V1](https://paddlemodels.bj.bcebos.com/object_detection/ssd_mobilenet_v1_coco_pretrained.tar)

## Compilation

Prepare in advance an IntelFPGA development board (C5MB/C5TB) with the intelfpgadrv.ko driver module, plus the Lite source code.

CMake build options:

- Set `LITE_WITH_INTEL_FPGA=ON` and `LITE_WITH_ARM=ON`

The remaining build options are the same as for an ARM build; see ["Compiling Paddle Lite for ARM in Docker"](../source_compile/compile_linux).

An example configuration:
```shell
cmake .. \
-DWITH_GPU=OFF \
-DWITH_MKL=OFF \
-DWITH_LITE=ON \
-DLITE_WITH_CUDA=OFF \
-DLITE_WITH_X86=OFF \
-DLITE_WITH_ARM=ON \
-DLITE_WITH_OPENMP=ON \
-DLITE_WITH_LIGHT_WEIGHT_FRAMEWORK=ON \
-DWITH_TESTING=OFF \
-DLITE_WITH_INTEL_FPGA=ON \
-DARM_TARGET_OS=armlinux
make publish_inference -j2
```
Lite also provides an IntelFPGA build script at `lite/tools/build_intel_fpga.sh`; run it with the `full_publish` argument from the Lite root directory to build in one step.
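A minimal invocation sketch (only the script path and the `full_publish` argument are taken from the line above; other arguments are not covered here):
```shell
# run from the Paddle-Lite root directory
./lite/tools/build_intel_fpga.sh full_publish
```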

## Running the demo

- **Preparing the runtime files**

The following uses the SSD model as an example of how to run a model on a C5MB/C5TB development board.

```bash
# Open a serial terminal (e.g. PuTTY or SecureCRT), select the corresponding debug serial port,
# and set the port parameters: baud rate 115200, data bits 8, stop bits 1, parity none [on the host]
# Power on the C5MB board and log in through the serial terminal
awcloud login: root
Password: # password: Awcloud@123
# Change to the /opt directory [on the board]
cd /opt
# Load the FPGA driver before running any model [on the board]
insmod driver/intelfpgadrv.ko
```

- **Running inference with IntelFPGA**

```bash
# All of the commands below run on the board; the input image, model, driver, and test program are already deployed there
# Run the SSD test program: the input image is /opt/images/dog.jpg and the output image is /opt/dog_result.jpg
./run_ssd.sh
```

## Using IntelFPGA in code

Using IntelFPGA in Lite is similar to using ARM; the specific differences are:

- Since IntelFPGA runs with FP32 precision and the NCHW layout, the `valid_places` list needs to be set accordingly

Code example:
```cpp
lite::Predictor predictor;
std::vector<Place> valid_places(
    {Place{TARGET(kIntelFPGA), PRECISION(kFloat), DATALAYOUT(kNCHW)}, Place{TARGET(kARM)}});

predictor.Build(model_dir, "", "", valid_places);

auto* input_tensor = predictor.GetInput(0);
input_tensor->Resize(DDim(std::vector<DDim::value_type>({1, 3, 224, 224})));
auto* data = input_tensor->mutable_data<float>();
auto item_size = input_tensor->dims().production();
// for this example, fill the input with all ones
for (int i = 0; i < item_size; i++) {
  data[i] = 1;
}

predictor.Run();
auto* out = predictor.GetOutput(0);
```
22 changes: 20 additions & 2 deletions lite/api/CMakeLists.txt
@@ -177,7 +177,10 @@ if(LITE_WITH_FPGA)
set(light_api_deps ${light_api_deps} ${fpga_deps})
set(cxx_api_deps ${cxx_api_deps} ${fpga_deps})
endif()

if(LITE_WITH_INTEL_FPGA)
set(light_api_deps ${light_api_deps} ${intel_fpga_deps})
set(cxx_api_deps ${cxx_api_deps} ${intel_fpga_deps})
endif()
if(LITE_WITH_BM)
set(light_api_deps ${light_api_deps} ${bm_deps})
set(cxx_api_deps ${cxx_api_deps} ${bm_deps})
@@ -209,6 +212,7 @@ list(LENGTH apu_kernels num_apu_kernels)
list(LENGTH xpu_kernels num_xpu_kernels)
list(LENGTH rknpu_kernels num_rknpu_kernels)
list(LENGTH fpga_kernels num_fpga_kernels)
list(LENGTH intel_fpga_kernels num_intel_fpga_kernels)
list(LENGTH bm_kernels num_bm_kernels)
list(LENGTH mlu_kernels num_mlu_kernels)
list(LENGTH huawei_ascend_npu_kernels num_huawei_ascend_npu_kernels)
@@ -225,6 +229,7 @@ message(STATUS "Collected ${num_apu_kernels} APU kernels")
message(STATUS "Collected ${num_xpu_kernels} XPU kernels")
message(STATUS "Collected ${num_rknpu_kernels} RKNPU kernels")
message(STATUS "Collected ${num_fpga_kernels} FPGA kernels")
message(STATUS "Collected ${num_intel_fpga_kernels} INTEL_FPGA kernels")
message(STATUS "Collected ${num_bm_kernels} BM kernels")
message(STATUS "Collected ${num_mlu_kernels} MLU kernels")
message(STATUS "Collected ${num_huawei_ascend_npu_kernels} HUAWEI_ASCEND_NPU kernels")
@@ -249,6 +254,7 @@ if (NOT LITE_ON_TINY_PUBLISH)
IMAGINATION_NNA_DEPS ${imagination_nna_kernels}
CL_DEPS ${opencl_kernels}
FPGA_DEPS ${fpga_kernels}
INTEL_FPGA_DEPS ${intel_fpga_kernels}
HUAWEI_ASCEND_NPU_DEPS ${huawei_ascend_npu_kernels})
endif()

@@ -272,6 +278,7 @@ lite_cc_library(light_api SRCS light_api.cc
RKNPU_DEPS ${rknpu_kernels}
CL_DEPS ${opencl_kernels}
FPGA_DEPS ${fpga_kernels}
INTEL_FPGA_DEPS ${intel_fpga_kernels}
BM_DEPS ${bm_kernels}
IMAGINATION_NNA_DEPS ${imagination_nna_kernels}
MLU_DEPS ${mlu_kernels}
@@ -296,6 +303,7 @@ if(WITH_TESTING)
RKNPU_DEPS ${rknpu_kernels}
CL_DEPS ${opencl_kernels}
FPGA_DEPS ${fpga_kernels}
INTEL_FPGA_DEPS ${intel_fpga_kernels}
BM_DEPS ${bm_kernels}
MLU_DEPS ${mlu_kernels}
IMAGINATION_NNA_DEPS ${imagination_nna_kernels}
@@ -352,7 +360,7 @@ if(WITH_TESTING)
endif()

if(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK AND WITH_TESTING)
set(lite_model_test_DEPS cxx_api mir_passes ${ops} ${host_kernels} ${arm_kernels} ${npu_kernels} ${apu_kernels} ${fpga_kernels})
set(lite_model_test_DEPS cxx_api mir_passes ${ops} ${host_kernels} ${arm_kernels} ${npu_kernels} ${apu_kernels} ${fpga_kernels} ${intel_fpga_kernels})

lite_cc_test(test_mobilenetv1_int8 SRCS mobilenetv1_int8_test.cc
DEPS ${lite_model_test_DEPS}
@@ -451,6 +459,7 @@ if (NOT LITE_ON_TINY_PUBLISH)
APU_DEPS ${apu_kernels}
CL_DEPS ${opencl_kernels}
FPGA_DEPS ${fpga_kernels}
INTEL_FPGA_DEPS ${intel_fpga_kernels}
IMAGINATION_NNA_DEPS ${imagination_nna_kernels}
BM_DEPS ${bm_kernels}
HUAWEI_ASCEND_NPU_DEPS ${huawei_ascend_npu_kernels})
@@ -470,6 +479,7 @@ if(NOT WITH_COVERAGE)
DEPS light_api program mir_passes paddle_api_light
CL_DEPS ${opencl_kernels}
FPGA_DEPS ${fpga_kernels}
INTEL_FPGA_DEPS ${intel_fpga_kernels}
RKNPU_DEPS ${rknpu_kernels}
BM_DEPS ${bm_kernels}
ARGS --optimized_model=${LITE_MODEL_DIR}/lite_naive_model_opt SERIAL)
@@ -480,6 +490,7 @@ if(NOT WITH_COVERAGE)
X86_DEPS ${x86_kernels}
XPU_DEPS ${xpu_kernels}
FPGA_DEPS ${fpga_kernels}
INTEL_FPGA_DEPS ${intel_fpga_kernels}
RKNPU_DEPS ${rknpu_kernels}
BM_DEPS ${bm_kernels}
MLU_DEPS ${mlu_kernels}
@@ -524,6 +535,7 @@ if(NOT WITH_COVERAGE)
CL_DEPS ${opencl_kernels}
X86_DEPS ${x86_kernels}
FPGA_DEPS ${fpga_kernels}
INTEL_FPGA_DEPS ${intel_fpga_kernels}
BM_DEPS ${bm_kernels}
MLU_DEPS ${mlu_kernels}
IMAGINATION_NNA_DEPS ${imagination_nna_kernels}
@@ -549,6 +561,7 @@ if(NOT IOS)
RKNPU_DEPS ${rknpu_kernels}
IMAGINATION_NNA_DEPS ${imagination_nna_kernels}
FPGA_DEPS ${fpga_kernels}
INTEL_FPGA_DEPS ${intel_fpga_kernels}
X86_DEPS ${x86_kernels}
CUDA_DEPS ${cuda_kernels}
HUAWEI_ASCEND_NPU_DEPS ${huawei_ascend_npu_kernels})
@@ -566,6 +579,7 @@ if(NOT IOS)
RKNPU_DEPS ${rknpu_kernels}
IMAGINATION_NNA_DEPS ${imagination_nna_kernels}
FPGA_DEPS ${fpga_kernels}
INTEL_FPGA_DEPS ${intel_fpga_kernels}
X86_DEPS ${x86_kernels}
CUDA_DEPS ${cuda_kernels}
HUAWEI_ASCEND_NPU_DEPS ${huawei_ascend_npu_kernels})
@@ -583,6 +597,7 @@ if(NOT IOS)
RKNPU_DEPS ${rknpu_kernels}
IMAGINATION_NNA_DEPS ${imagination_nna_kernels}
FPGA_DEPS ${fpga_kernels}
INTEL_FPGA_DEPS ${intel_fpga_kernels}
X86_DEPS ${x86_kernels}
CUDA_DEPS ${cuda_kernels}
HUAWEI_ASCEND_NPU_DEPS ${huawei_ascend_npu_kernels})
@@ -599,6 +614,7 @@ if(NOT IOS)
APU_DEPS ${apu_kernels}
CL_DEPS ${opencl_kernels}
FPGA_DEPS ${fpga_kernels}
INTEL_FPGA_DEPS ${intel_fpga_kernels}
X86_DEPS ${x86_kernels}
CUDA_DEPS ${cuda_kernels}
HUAWEI_ASCEND_NPU_DEPS ${huawei_ascend_npu_kernels})
@@ -617,6 +633,7 @@ if(NOT IOS)
CL_DEPS ${opencl_kernels}
BM_DEPS ${bm_kernels}
FPGA_DEPS ${fpga_kernels}
INTEL_FPGA_DEPS ${intel_fpga_kernels}
X86_DEPS ${x86_kernels}
CUDA_DEPS ${cuda_kernels})

@@ -631,6 +648,7 @@ if(NOT IOS)
APU_DEPS ${apu_kernels}
CL_DEPS ${opencl_kernels}
FPGA_DEPS ${fpga_kernels}
INTEL_FPGA_DEPS ${intel_fpga_kernels}
X86_DEPS ${x86_kernels}
CUDA_DEPS ${cuda_kernels}
HUAWEI_ASCEND_NPU_DEPS ${huawei_ascend_npu_kernels})