diff --git a/.github/workflows/aarch64-linux-gnu-shared.yaml b/.github/workflows/aarch64-linux-gnu-shared.yaml
index 1f548e237..c347e56b9 100644
--- a/.github/workflows/aarch64-linux-gnu-shared.yaml
+++ b/.github/workflows/aarch64-linux-gnu-shared.yaml
@@ -34,12 +34,20 @@ concurrency:
 jobs:
   aarch64_linux_gnu_shared:
     runs-on: ${{ matrix.os }}
-    name: aarch64 shared GPU ${{ matrix.gpu }}
+    name: aarch64 shared GPU ${{ matrix.gpu }} ${{ matrix.onnxruntime_version }}
     strategy:
       fail-fast: false
       matrix:
-        os: [ubuntu-latest]
-        gpu: [ON, OFF]
+        include:
+          - os: ubuntu-latest
+            gpu: ON
+            onnxruntime_version: "1.11.0"
+          - os: ubuntu-latest
+            gpu: ON
+            onnxruntime_version: "1.16.0"
+          - os: ubuntu-latest
+            gpu: OFF
+            onnxruntime_version: ""
 
     steps:
       - uses: actions/checkout@v4
@@ -62,7 +70,7 @@ jobs:
         if: steps.cache-qemu.outputs.cache-hit != 'true'
         run: |
           sudo apt-get update
-          sudo apt-get install autoconf automake autotools-dev ninja-build
+          sudo apt-get install autoconf automake autotools-dev ninja-build libglib2.0-dev
 
       - name: checkout-qemu
         if: steps.cache-qemu.outputs.cache-hit != 'true'
@@ -159,6 +167,7 @@ jobs:
 
           export BUILD_SHARED_LIBS=ON
           export SHERPA_ONNX_ENABLE_GPU=${{ matrix.gpu }}
+          export SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION=${{ matrix.onnxruntime_version }}
 
           ./build-aarch64-linux-gnu.sh
 
@@ -199,7 +208,7 @@ jobs:
           if [[ ${{ matrix.gpu }} == OFF ]]; then
             dst=${dst}-cpu
           else
-            dst=${dst}-gpu
+            dst=${dst}-gpu-onnxruntime-${{ matrix.onnxruntime_version }}
           fi
 
           mkdir $dst
@@ -223,7 +232,7 @@ jobs:
 
       - uses: actions/upload-artifact@v4
         with:
-          name: sherpa-onnx-linux-aarch64-shared-gpu-${{ matrix.gpu }}
+          name: sherpa-onnx-linux-aarch64-shared-gpu-${{ matrix.gpu }}-onnxruntime-${{ matrix.onnxruntime_version }}
           path: sherpa-onnx-*linux-aarch64-shared*.tar.bz2
 
       # https://huggingface.co/docs/hub/spaces-github-actions
diff --git a/.github/workflows/aarch64-linux-gnu-static.yaml b/.github/workflows/aarch64-linux-gnu-static.yaml
index 6cbfc0e27..749611e56 100644
--- a/.github/workflows/aarch64-linux-gnu-static.yaml
+++ b/.github/workflows/aarch64-linux-gnu-static.yaml
@@ -61,7 +61,7 @@ jobs:
         if: steps.cache-qemu.outputs.cache-hit != 'true'
         run: |
           sudo apt-get update
-          sudo apt-get install autoconf automake autotools-dev ninja-build
+          sudo apt-get install autoconf automake autotools-dev ninja-build libglib2.0-dev
 
       - name: checkout-qemu
         if: steps.cache-qemu.outputs.cache-hit != 'true'
diff --git a/.github/workflows/arm-linux-gnueabihf.yaml b/.github/workflows/arm-linux-gnueabihf.yaml
index 6a2874910..a70f2b2f4 100644
--- a/.github/workflows/arm-linux-gnueabihf.yaml
+++ b/.github/workflows/arm-linux-gnueabihf.yaml
@@ -62,7 +62,7 @@ jobs:
         if: steps.cache-qemu.outputs.cache-hit != 'true'
         run: |
           sudo apt-get update
-          sudo apt-get install autoconf automake autotools-dev ninja-build
+          sudo apt-get install autoconf automake autotools-dev ninja-build libglib2.0-dev
 
       - name: checkout-qemu
         if: steps.cache-qemu.outputs.cache-hit != 'true'
diff --git a/CMakeLists.txt b/CMakeLists.txt
index f45384d4d..395872c11 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -46,6 +46,9 @@ option(SHERPA_ONNX_USE_PRE_INSTALLED_ONNXRUNTIME_IF_AVAILABLE "True to use pre-i
 option(SHERPA_ONNX_ENABLE_SANITIZER "Whether to enable ubsan and asan" OFF)
 option(SHERPA_ONNX_BUILD_C_API_EXAMPLES "Whether to enable C API examples" ON)
 
+set(SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION "1.11.0" CACHE STRING "Used only for Linux ARM64 GPU. If you use Jetson Nano B01, then please set it to 1.11.0. If you use Jetson Orin NX, then set it to 1.16.0")
+
+
 set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib")
 set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib")
 set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/bin")
diff --git a/build-aarch64-linux-gnu.sh b/build-aarch64-linux-gnu.sh
index 62b359c17..cdc48e372 100755
--- a/build-aarch64-linux-gnu.sh
+++ b/build-aarch64-linux-gnu.sh
@@ -1,4 +1,25 @@
 #!/usr/bin/env bash
+#
+# Usage of this file
+#
+# (1) Build CPU version of sherpa-onnx
+#     ./build-aarch64-linux-gnu.sh
+#
+# (2) Build GPU version of sherpa-onnx
+#
+#     (a) Make sure your board has NVIDIA GPU(s)
+#
+#     (b) For Jetson Nano B01 (using CUDA 10.2)
+#
+#         export SHERPA_ONNX_ENABLE_GPU=ON
+#         export SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION=1.11.0
+#         ./build-aarch64-linux-gnu.sh
+#
+#     (c) For Jetson Orin NX (using CUDA 11.4)
+#
+#         export SHERPA_ONNX_ENABLE_GPU=ON
+#         export SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION=1.16.0
+#         ./build-aarch64-linux-gnu.sh
 
 if command -v aarch64-none-linux-gnu-gcc &> /dev/null; then
   ln -svf $(which aarch64-none-linux-gnu-gcc) ./aarch64-linux-gnu-gcc
@@ -47,11 +68,6 @@ fi
 if [[ x"$SHERPA_ONNX_ENABLE_GPU" == x"" ]]; then
   # By default, use CPU
   SHERPA_ONNX_ENABLE_GPU=OFF
-
-  # If you use GPU, then please make sure you have NVIDIA GPUs on your board.
-  # It uses onnxruntime 1.11.0.
-  #
-  # Tested on Jetson Nano B01
 fi
 
 if [[ x"$SHERPA_ONNX_ENABLE_GPU" == x"ON" ]]; then
@@ -59,6 +75,11 @@ if [[ x"$SHERPA_ONNX_ENABLE_GPU" == x"ON" ]]; then
   BUILD_SHARED_LIBS=ON
 fi
 
+if [[ x"$SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION" == x"" ]]; then
+  # Used only when SHERPA_ONNX_ENABLE_GPU is ON
+  SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION="1.11.0"
+fi
+
 cmake \
   -DBUILD_PIPER_PHONMIZE_EXE=OFF \
   -DBUILD_PIPER_PHONMIZE_TESTS=OFF \
@@ -75,6 +96,7 @@ cmake \
   -DSHERPA_ONNX_ENABLE_JNI=OFF \
   -DSHERPA_ONNX_ENABLE_C_API=ON \
   -DSHERPA_ONNX_ENABLE_WEBSOCKET=ON \
+  -DSHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION=$SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION \
   -DCMAKE_TOOLCHAIN_FILE=../toolchains/aarch64-linux-gnu.toolchain.cmake \
   ..
 
diff --git a/cmake/onnxruntime-linux-aarch64-gpu.cmake b/cmake/onnxruntime-linux-aarch64-gpu.cmake
index 64db9c22b..5df32c996 100644
--- a/cmake/onnxruntime-linux-aarch64-gpu.cmake
+++ b/cmake/onnxruntime-linux-aarch64-gpu.cmake
@@ -18,19 +18,37 @@ if(NOT SHERPA_ONNX_ENABLE_GPU)
   message(FATAL_ERROR "This file is for NVIDIA GPU only. Given SHERPA_ONNX_ENABLE_GPU: ${SHERPA_ONNX_ENABLE_GPU}")
 endif()
 
-set(onnxruntime_URL "https://github.com/csukuangfj/onnxruntime-libs/releases/download/v1.11.0/onnxruntime-linux-aarch64-gpu-1.11.0.tar.bz2")
-set(onnxruntime_URL2 "https://hf-mirror.com/csukuangfj/onnxruntime-libs/resolve/main/onnxruntime-linux-aarch64-gpu-1.11.0.tar.bz2")
-set(onnxruntime_HASH "SHA256=36eded935551e23aead09d4173bdf0bd1e7b01fdec15d77f97d6e34029aa60d7")
+message(WARNING "\
+SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION: ${SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION}
+If you use Jetson Nano B01, then please pass
+  -DSHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION=1.11.0
+to cmake (you need to make sure CUDA 10.2 is available on your board).
+
+If you use Jetson Orin NX, then please pass
+  -DSHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION=1.16.0
+to cmake (you need to make sure CUDA 11.4 is available on your board).
+")
+
+set(v ${SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION})
+
+set(onnxruntime_URL "https://github.com/csukuangfj/onnxruntime-libs/releases/download/v${v}/onnxruntime-linux-aarch64-gpu-${v}.tar.bz2")
+set(onnxruntime_URL2 "https://hf-mirror.com/csukuangfj/onnxruntime-libs/resolve/main/onnxruntime-linux-aarch64-gpu-${v}.tar.bz2")
+
+if(v STREQUAL "1.11.0")
+  set(onnxruntime_HASH "SHA256=36eded935551e23aead09d4173bdf0bd1e7b01fdec15d77f97d6e34029aa60d7")
+else()
+  set(onnxruntime_HASH "SHA256=4c09d5acf2c2682b4eab1dc2f1ad98fc1fde5f5f1960063e337983ba59379a4b")
+endif()
 
 # If you don't have access to the Internet,
 # please download onnxruntime to one of the following locations.
 # You can add more if you want.
 set(possible_file_locations
-  $ENV{HOME}/Downloads/onnxruntime-linux-aarch64-gpu-1.11.0.tar.bz2
-  ${CMAKE_SOURCE_DIR}/onnxruntime-linux-aarch64-gpu-1.11.0.tar.bz2
-  ${CMAKE_BINARY_DIR}/onnxruntime-linux-aarch64-gpu-1.11.0.tar.bz2
-  /tmp/onnxruntime-linux-aarch64-gpu-1.11.0.tar.bz2
-  /star-fj/fangjun/download/github/onnxruntime-linux-aarch64-gpu-1.11.0.tar.bz2
+  $ENV{HOME}/Downloads/onnxruntime-linux-aarch64-gpu-${v}.tar.bz2
+  ${CMAKE_SOURCE_DIR}/onnxruntime-linux-aarch64-gpu-${v}.tar.bz2
+  ${CMAKE_BINARY_DIR}/onnxruntime-linux-aarch64-gpu-${v}.tar.bz2
+  /tmp/onnxruntime-linux-aarch64-gpu-${v}.tar.bz2
+  /star-fj/fangjun/download/github/onnxruntime-linux-aarch64-gpu-${v}.tar.bz2
 )
 
 foreach(f IN LISTS possible_file_locations)
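
Usage sketch (based on the comments added to build-aarch64-linux-gnu.sh above; the CUDA versions are the
ones assumed there for each board, not something this diff verifies):

  # Jetson Nano B01 (CUDA 10.2) -> onnxruntime 1.11.0
  export SHERPA_ONNX_ENABLE_GPU=ON
  export SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION=1.11.0
  ./build-aarch64-linux-gnu.sh

  # Jetson Orin NX (CUDA 11.4) -> onnxruntime 1.16.0
  export SHERPA_ONNX_ENABLE_GPU=ON
  export SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION=1.16.0
  ./build-aarch64-linux-gnu.sh

With neither variable exported, the script keeps its CPU-only default. Note that
cmake/onnxruntime-linux-aarch64-gpu.cmake only carries archive hashes for 1.11.0 and 1.16.0: any other
version falls into the else() branch and is checked against the 1.16.0 hash, so only these two versions
are expected to pass the hash check.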