diff --git a/.github/workflows/helpers/prebuild_legion.sh b/.github/workflows/helpers/prebuild_legion.sh
new file mode 100755
index 0000000000..ccaa58383e
--- /dev/null
+++ b/.github/workflows/helpers/prebuild_legion.sh
@@ -0,0 +1,77 @@
+#! /usr/bin/env bash
+set -euo pipefail
+
+# Parse input params
+python_version=${python_version:-"empty"}
+gpu_backend=${gpu_backend:-"empty"}
+gpu_backend_version=${gpu_backend_version:-"empty"}
+
+if [[ "${gpu_backend}" != @(cuda|hip_cuda|hip_rocm|intel) ]]; then
+  echo "Error, value of gpu_backend (${gpu_backend}) is invalid. Pick among 'cuda', 'hip_cuda', 'hip_rocm' or 'intel'."
+  exit 1
+else
+  echo "Pre-building Legion with GPU backend: ${gpu_backend}"
+fi
+
+if [[ "${gpu_backend}" == "cuda" || "${gpu_backend}" == "hip_cuda" ]]; then
+  # Check that the CUDA version is supported. Versions above 12.0 are not supported because we don't publish Docker images for them yet.
+  if [[ "$gpu_backend_version" != @(11.1|11.2|11.3|11.4|11.5|11.6|11.7|11.8|12.0) ]]; then
+    echo "cuda_version is not supported, please choose among {11.1, 11.2, 11.3, 11.4, 11.5, 11.6, 11.7, 11.8, 12.0}"
+    exit 1
+  fi
+  export cuda_version="$gpu_backend_version"
+elif [[ "${gpu_backend}" == "hip_rocm" ]]; then
+  # Check that the HIP version is supported
+  if [[ "$gpu_backend_version" != @(5.3|5.4|5.5|5.6) ]]; then
+    echo "hip_version is not supported, please choose among {5.3, 5.4, 5.5, 5.6}"
+    exit 1
+  fi
+  export hip_version="$gpu_backend_version"
+else
+  echo "gpu backend: ${gpu_backend} and gpu_backend_version: ${gpu_backend_version} not yet supported."
+  exit 1
+fi
+
+# Cd into the directory holding this script
+cd "${BASH_SOURCE[0]%/*}"
+
+export FF_GPU_BACKEND="${gpu_backend}"
+export FF_CUDA_ARCH=all
+export FF_HIP_ARCH=all
+export BUILD_LEGION_ONLY=ON
+export INSTALL_DIR="/usr/legion"
+export python_version="${python_version}"
+
+# Build the FlexFlow Docker container
+echo "Building Docker image..."
+../../../docker/build.sh flexflow
+
+# Clean up any existing container with the same name
+docker rm prelegion || true
+
+# Create a container so that we can copy data out of the image
+docker create --name prelegion flexflow-"${gpu_backend}"-"${gpu_backend_version}":latest
+
+# Copy the Legion libraries to the host
+echo "Extracting Legion library assets"
+mkdir -p ../../../prebuilt_legion_assets
+rm -rf ../../../prebuilt_legion_assets/tmp || true
+docker cp prelegion:$INSTALL_DIR ../../../prebuilt_legion_assets/tmp
+
+
+# Create the tarball file
+cd ../../../prebuilt_legion_assets/tmp
+export LEGION_TARBALL="legion_ubuntu-20.04_${gpu_backend}-${gpu_backend_version}_py${python_version}.tar.gz"
+
+echo "Creating archive $LEGION_TARBALL"
+tar -zcvf "../$LEGION_TARBALL" ./
+cd ..
+echo "Checking the size of the Legion tarball..."
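+# Surfacing the size here is a sanity check: GitHub caps release assets at
+# 2 GiB per file (at the time of writing), so an oversized tarball would fail the release step later.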
+du -h "$LEGION_TARBALL"
+
+
+# Cleanup
+rm -rf tmp/*
+docker rm prelegion
diff --git a/.github/workflows/prebuild-legion.yml b/.github/workflows/prebuild-legion.yml
new file mode 100644
index 0000000000..00e7e78a77
--- /dev/null
+++ b/.github/workflows/prebuild-legion.yml
@@ -0,0 +1,86 @@
+name: "prebuild-legion"
+on:
+  push:
+    branches:
+      - "inference"
+    paths:
+      - "cmake/**"
+      - "config/**"
+      - "deps/legion/**"
+      - ".github/workflows/helpers/install_dependencies.sh"
+  workflow_dispatch:
+concurrency:
+  group: prebuild-legion-${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true
+
+jobs:
+  prebuild-legion:
+    name: Prebuild Legion with CMake
+    runs-on: ubuntu-20.04
+    defaults:
+      run:
+        shell: bash -l {0} # required to use an activated conda environment
+    strategy:
+      matrix:
+        gpu_backend: ["cuda", "hip_rocm"]
+        gpu_backend_version: ["11.8", "5.6"]
+        python_version: ["3.11"]
+        exclude:
+          - gpu_backend: "cuda"
+            gpu_backend_version: "5.6"
+          - gpu_backend: "hip_rocm"
+            gpu_backend_version: "11.8"
+      fail-fast: false
+    steps:
+      - name: Checkout Git Repository
+        uses: actions/checkout@v3
+        with:
+          submodules: recursive
+
+      - name: Free additional space on runner
+        run: .github/workflows/helpers/free_space_on_runner.sh
+
+      - name: Build Legion
+        env:
+          gpu_backend: ${{ matrix.gpu_backend }}
+          gpu_backend_version: ${{ matrix.gpu_backend_version }}
+          python_version: ${{ matrix.python_version }}
+        run: .github/workflows/helpers/prebuild_legion.sh
+
+      - name: Archive compiled Legion library
+        uses: actions/upload-artifact@v3
+        with:
+          name: legion_ubuntu-20.04_${{ matrix.gpu_backend }}-${{ matrix.gpu_backend_version }}_py${{ matrix.python_version }}
+          path: prebuilt_legion_assets/legion_ubuntu-20.04_${{ matrix.gpu_backend }}-${{ matrix.gpu_backend_version }}_py${{ matrix.python_version }}.tar.gz
+
+  create-release:
+    name: Create new release
+    runs-on: ubuntu-20.04
+    needs: prebuild-legion
+    steps:
+      - name: Checkout Git Repository
+        uses: actions/checkout@v3
+      - name: Free additional space on runner
+        run: .github/workflows/helpers/free_space_on_runner.sh
+      - name: Create folder for artifacts
+        run: mkdir artifacts unwrapped_artifacts
+      - name: Download artifacts
+        uses: actions/download-artifact@v3
+        with:
+          path: ./artifacts
+      - name: Display structure of downloaded files
+        working-directory: ./artifacts
+        run: ls -R
+      - name: Unwrap all artifacts
+        working-directory: ./artifacts
+        run: find . -maxdepth 2 -mindepth 2 -type f -name "*.tar.gz" -exec mv {} ../unwrapped_artifacts/ \;
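+      # Each prebuild run publishes a fresh release: the current datetime is used
+      # as both the release name and its tag, so successive runs never collide.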
+      - name: Get datetime
+        run: echo "RELEASE_DATETIME=$(date '+%Y-%m-%dT%H-%M-%S')" >> $GITHUB_ENV
+      - name: Release
+        env:
+          NAME: ${{ env.RELEASE_DATETIME }}
+          TAG_NAME: ${{ env.RELEASE_DATETIME }}
+          GITHUB_TOKEN: ${{ secrets.FLEXFLOW_TOKEN }}
+        run: gh release create $TAG_NAME ./unwrapped_artifacts/*.tar.gz --repo flexflow/flexflow-third-party
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 32399ed4d8..648b46b49e 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -48,6 +48,9 @@ option(BUILD_SHARED_LIBS "Build shared libraries instead of static ones" ON)
 # option for using Python
 option(FF_USE_PYTHON "Enable Python" ON)
 
+# option for building Legion only
+option(BUILD_LEGION_ONLY "Build Legion only" OFF)
+
 # option to download pre-compiled NCCL/Legion libraries
 option(FF_USE_PREBUILT_NCCL "Enable use of NCCL pre-compiled library, if available" ON)
 option(FF_USE_PREBUILT_LEGION "Enable use of Legion pre-compiled library, if available" ON)
@@ -235,266 +238,271 @@ if (FF_GPU_BACKEND STREQUAL "cuda" OR FF_GPU_BACKEND STREQUAL "hip_cuda")
   include(cudnn)
 endif()
 
-# NCCL
-if(FF_USE_NCCL)
-  if(FF_GPU_BACKEND STREQUAL "hip_cuda" OR FF_GPU_BACKEND STREQUAL "cuda")
-    include(nccl)
-  endif()
-  list(APPEND FF_CC_FLAGS
-    -DFF_USE_NCCL)
-  list(APPEND FF_NVCC_FLAGS
-    -DFF_USE_NCCL)
-endif()
-
-# Inference tests
-if(INFERENCE_TESTS)
-  list(APPEND FF_CC_FLAGS
-    -DINFERENCE_TESTS)
-  list(APPEND FF_NVCC_FLAGS
-    -DINFERENCE_TESTS)
-endif()
-
 # Legion
 include(legion)
 
-# json
-include(json)
-
-# variant
-include(variant)
-
-# optional
-include(optional)
-
-if (FF_GPU_BACKEND STREQUAL "cuda")
-  list(APPEND FF_CC_FLAGS
-    -DFF_USE_CUDA)
-  list(APPEND FF_NVCC_FLAGS
-    -DFF_USE_CUDA)
-elseif (FF_GPU_BACKEND STREQUAL "hip_cuda")
-  list(APPEND FF_CC_FLAGS
-    -DFF_USE_HIP_CUDA)
-  list(APPEND FF_HIPCC_FLAGS
-    -DFF_USE_HIP_CUDA)
-elseif (FF_GPU_BACKEND STREQUAL "hip_rocm")
-  list(APPEND FF_CC_FLAGS
-    -DFF_USE_HIP_ROCM)
-  list(APPEND FF_HIPCC_FLAGS
-    -DFF_USE_HIP_ROCM)
-else()
-endif()
+# Do not build FlexFlow itself if BUILD_LEGION_ONLY is ON (used to prebuild Legion alone)
+if(NOT BUILD_LEGION_ONLY)
+  # NCCL
+  if(FF_USE_NCCL)
+    if(FF_GPU_BACKEND STREQUAL "hip_cuda" OR FF_GPU_BACKEND STREQUAL "cuda")
+      include(nccl)
+    endif()
+    list(APPEND FF_CC_FLAGS
+      -DFF_USE_NCCL)
+    list(APPEND FF_NVCC_FLAGS
+      -DFF_USE_NCCL)
+  endif()
 
-# Start build FlexFlow
-if (CMAKE_BUILD_TYPE STREQUAL "Debug")
+  # Inference tests
+  if(INFERENCE_TESTS)
   list(APPEND FF_CC_FLAGS
-    -DFF_DEBUG)
+      -DINFERENCE_TESTS)
   list(APPEND FF_NVCC_FLAGS
-    -DFF_DEBUG)
-endif()
+      -DINFERENCE_TESTS)
+  endif()
+
+  # json
+  include(json)
+
+  # variant
+  include(variant)
+
+  # optional
+  include(optional)
+
+  if (FF_GPU_BACKEND STREQUAL "cuda")
+    list(APPEND FF_CC_FLAGS
+      -DFF_USE_CUDA)
+    list(APPEND FF_NVCC_FLAGS
+      -DFF_USE_CUDA)
+  elseif (FF_GPU_BACKEND STREQUAL "hip_cuda")
+    list(APPEND FF_CC_FLAGS
+      -DFF_USE_HIP_CUDA)
+    list(APPEND FF_HIPCC_FLAGS
+      -DFF_USE_HIP_CUDA)
+  elseif (FF_GPU_BACKEND STREQUAL "hip_rocm")
+    list(APPEND FF_CC_FLAGS
+      -DFF_USE_HIP_ROCM)
+    list(APPEND FF_HIPCC_FLAGS
+      -DFF_USE_HIP_ROCM)
+  else()
+  endif()
 
-message(STATUS "FlexFlow MAX_DIM: ${FF_MAX_DIM}")
-message(STATUS "LEGION_MAX_RETURN_SIZE: ${LEGION_MAX_RETURN_SIZE}")
+  # Start building FlexFlow
+  if (CMAKE_BUILD_TYPE STREQUAL "Debug")
+    list(APPEND FF_CC_FLAGS
+      -DFF_DEBUG)
+    list(APPEND FF_NVCC_FLAGS
+      -DFF_DEBUG)
+  endif()
 
-list(APPEND FF_CC_FLAGS
-  -DMAX_TENSOR_DIM=${FF_MAX_DIM}
-  -DLEGION_MAX_RETURN_SIZE=${LEGION_MAX_RETURN_SIZE})
+ 
message(STATUS "FlexFlow MAX_DIM: ${FF_MAX_DIM}") + message(STATUS "LEGION_MAX_RETURN_SIZE: ${LEGION_MAX_RETURN_SIZE}") -if(FF_USE_AVX2) list(APPEND FF_CC_FLAGS - -DFF_USE_AVX2 - -mavx2) -endif() - -list(APPEND FF_NVCC_FLAGS - -Wno-deprecated-gpu-targets - -DMAX_TENSOR_DIM=${FF_MAX_DIM} - -DLEGION_MAX_RETURN_SIZE=${LEGION_MAX_RETURN_SIZE}) - -list(APPEND FF_LD_FLAGS - -lrt - -ldl - -rdynamic - -lstdc++fs) - -# Set FF FLAGS -add_compile_options(${FF_CC_FLAGS}) -set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} ${FF_NVCC_FLAGS} -UNDEBUG) -link_libraries(${FF_LD_FLAGS}) - -list(APPEND FLEXFLOW_INCLUDE_DIRS - ${FLEXFLOW_ROOT}/include - ${FLEXFLOW_ROOT}) - -file(GLOB_RECURSE FLEXFLOW_HDR - LIST_DIRECTORIES False - ${FLEXFLOW_ROOT}/include/*.h) - list(APPEND FLEXFLOW_HDR ${FLEXFLOW_ROOT}/inference/file_loader.h) - -file(GLOB_RECURSE FLEXFLOW_SRC - LIST_DIRECTORIES False - ${FLEXFLOW_ROOT}/src/*.cc) -list(REMOVE_ITEM FLEXFLOW_SRC "${FLEXFLOW_ROOT}/src/runtime/cpp_driver.cc") -list(APPEND FLEXFLOW_SRC ${FLEXFLOW_ROOT}/inference/file_loader.cc) - -set(FLEXFLOW_CPP_DRV_SRC - ${FLEXFLOW_ROOT}/src/runtime/cpp_driver.cc) - -add_library(substitution_loader SHARED - ${FLEXFLOW_ROOT}/src/runtime/substitution_loader.cc) -target_include_directories(substitution_loader PRIVATE ${FLEXFLOW_INCLUDE_DIRS}) -target_link_libraries(substitution_loader nlohmann_json::nlohmann_json) + -DMAX_TENSOR_DIM=${FF_MAX_DIM} + -DLEGION_MAX_RETURN_SIZE=${LEGION_MAX_RETURN_SIZE}) + if(FF_USE_AVX2) + list(APPEND FF_CC_FLAGS + -DFF_USE_AVX2 + -mavx2) + endif() -#message("FLEXFLOW_INCLUDE_DIRS: ${FLEXFLOW_INCLUDE_DIRS}") + list(APPEND FF_NVCC_FLAGS + -Wno-deprecated-gpu-targets + -DMAX_TENSOR_DIM=${FF_MAX_DIM} + -DLEGION_MAX_RETURN_SIZE=${LEGION_MAX_RETURN_SIZE}) + + list(APPEND FF_LD_FLAGS + -lrt + -ldl + -rdynamic + -lstdc++fs) + + # Set FF FLAGS + add_compile_options(${FF_CC_FLAGS}) + set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} ${FF_NVCC_FLAGS} -UNDEBUG) + link_libraries(${FF_LD_FLAGS}) + + list(APPEND FLEXFLOW_INCLUDE_DIRS + ${FLEXFLOW_ROOT}/include + ${FLEXFLOW_ROOT}) + + file(GLOB_RECURSE FLEXFLOW_HDR + LIST_DIRECTORIES False + ${FLEXFLOW_ROOT}/include/*.h) + + list(APPEND FLEXFLOW_HDR ${FLEXFLOW_ROOT}/inference/file_loader.h) -# compile flexflow lib -if (FF_GPU_BACKEND STREQUAL "cuda") - file(GLOB_RECURSE FLEXFLOW_GPU_SRC + file(GLOB_RECURSE FLEXFLOW_SRC LIST_DIRECTORIES False - ${FLEXFLOW_ROOT}/src/*.cu) + ${FLEXFLOW_ROOT}/src/*.cc) + + list(REMOVE_ITEM FLEXFLOW_SRC "${FLEXFLOW_ROOT}/src/runtime/cpp_driver.cc") + list(APPEND FLEXFLOW_SRC ${FLEXFLOW_ROOT}/inference/file_loader.cc) - add_compile_definitions(FF_USE_CUDA) + set(FLEXFLOW_CPP_DRV_SRC + ${FLEXFLOW_ROOT}/src/runtime/cpp_driver.cc) - if(BUILD_SHARED_LIBS) - cuda_add_library(flexflow SHARED ${FLEXFLOW_GPU_SRC} ${FLEXFLOW_SRC} OPTIONS ${CUDA_GENCODE}) - else() - cuda_add_library(flexflow STATIC ${FLEXFLOW_GPU_SRC} ${FLEXFLOW_SRC} OPTIONS ${CUDA_GENCODE}) - endif() -elseif(FF_GPU_BACKEND STREQUAL "hip_cuda" OR FF_GPU_BACKEND STREQUAL "hip_rocm") - file(GLOB_RECURSE FLEXFLOW_GPU_SRC - LIST_DIRECTORIES False - ${FLEXFLOW_ROOT}/src/*.cpp) + add_library(substitution_loader SHARED + ${FLEXFLOW_ROOT}/src/runtime/substitution_loader.cc) + target_include_directories(substitution_loader PRIVATE ${FLEXFLOW_INCLUDE_DIRS}) + target_link_libraries(substitution_loader nlohmann_json::nlohmann_json) - if(BUILD_SHARED_LIBS) - add_library(flexflow SHARED ${FLEXFLOW_GPU_SRC} ${FLEXFLOW_SRC}) - else() - add_library(flexflow STATIC ${FLEXFLOW_GPU_SRC} ${FLEXFLOW_SRC}) - endif() - list(APPEND 
CMAKE_PREFIX_PATH ${ROCM_PATH}/hip ${ROCM_PATH}) + #message("FLEXFLOW_INCLUDE_DIRS: ${FLEXFLOW_INCLUDE_DIRS}") - find_package(hip REQUIRED) + # compile flexflow lib + if (FF_GPU_BACKEND STREQUAL "cuda") + file(GLOB_RECURSE FLEXFLOW_GPU_SRC + LIST_DIRECTORIES False + ${FLEXFLOW_ROOT}/src/*.cu) - if (FF_GPU_BACKEND STREQUAL "hip_cuda") - # The targets defined by the hip cmake config only target amd devices. - # For targeting nvidia devices, we'll make our own interface target, - # hip_device_nvidia, that includes the rocm and hip headers. - add_library(hip_device_nvidia INTERFACE) + add_compile_definitions(FF_USE_CUDA) - if (NOT FF_CUDA_ARCH STREQUAL "") - target_compile_options(hip_device_nvidia INTERFACE -arch=compute_${FF_CUDA_ARCH}) + if(BUILD_SHARED_LIBS) + cuda_add_library(flexflow SHARED ${FLEXFLOW_GPU_SRC} ${FLEXFLOW_SRC} OPTIONS ${CUDA_GENCODE}) + else() + cuda_add_library(flexflow STATIC ${FLEXFLOW_GPU_SRC} ${FLEXFLOW_SRC} OPTIONS ${CUDA_GENCODE}) endif() + elseif(FF_GPU_BACKEND STREQUAL "hip_cuda" OR FF_GPU_BACKEND STREQUAL "hip_rocm") + file(GLOB_RECURSE FLEXFLOW_GPU_SRC + LIST_DIRECTORIES False + ${FLEXFLOW_ROOT}/src/*.cpp) - target_include_directories(hip_device_nvidia SYSTEM INTERFACE ${HIP_INCLUDE_DIRS} ${ROCM_PATH}/include) - target_include_directories(hip_device_nvidia INTERFACE ${HIP_INCLUDE_DIRS} ${ROCM_PATH}/include) - - add_compile_definitions(FF_USE_HIP_CUDA) - - # Linking cuda: - # We do not explicitly link cuda. hipcc when targeting nvidia will - # use nvcc under the hood. nvcc when used for linking will handle - # linking cuda dependencies - target_link_libraries(flexflow hip_device_nvidia) - elseif(FF_GPU_BACKEND STREQUAL "hip_rocm") - find_package(hipblas REQUIRED) - find_package(miopen REQUIRED) - if(FF_USE_NCCL) - find_package(rccl REQUIRED) + if(BUILD_SHARED_LIBS) + add_library(flexflow SHARED ${FLEXFLOW_GPU_SRC} ${FLEXFLOW_SRC}) + else() + add_library(flexflow STATIC ${FLEXFLOW_GPU_SRC} ${FLEXFLOW_SRC}) endif() - # find_package(rocrand REQUIRED) - find_library(HIP_RAND_LIBRARY hiprand REQUIRED) - add_compile_definitions(FF_USE_HIP_ROCM) - - if (FF_HIP_ARCH STREQUAL "") - message(FATAL_ERROR "FF_HIP_ARCH is undefined") - endif() - set_property(TARGET flexflow PROPERTY HIP_ARCHITECTURES "${HIP_ARCH_LIST}") - - message(STATUS "FF_GPU_BACKEND: ${FF_GPU_BACKEND}") - message(STATUS "FF_HIP_ARCH: ${FF_HIP_ARCH}") - message(STATUS "HIP_ARCH_LIST: ${HIP_ARCH_LIST}") - get_property(CHECK_HIP_ARCHS TARGET flexflow PROPERTY HIP_ARCHITECTURES) - message(STATUS "CHECK_HIP_ARCHS: ${CHECK_HIP_ARCHS}") - message(STATUS "HIP_CLANG_PATH: ${HIP_CLANG_PATH}") - - # The hip cmake config module defines three targets, - # hip::amdhip64, hip::host, and hip::device. - # - # hip::host and hip::device are interface targets. hip::amdhip64 is an - # imported target for libamdhip. - # - # You do not directly link to hip::amdhip64. hip::host links to hip::amdhip64 - # and hip::device links to hip::host. Link to hip::host to just use hip without - # compiling any GPU code. Link to hip::device to compile the GPU device code. - # - # Docs (outdated): - # https://rocmdocs.amd.com/en/latest/Installation_Guide/Using-CMake-with-AMD-ROCm.html - target_link_libraries(flexflow hip::device roc::hipblas MIOpen ${HIP_RAND_LIBRARY}) - if(FF_USE_NCCL) + list(APPEND CMAKE_PREFIX_PATH ${ROCM_PATH}/hip ${ROCM_PATH}) + + find_package(hip REQUIRED) + + if (FF_GPU_BACKEND STREQUAL "hip_cuda") + # The targets defined by the hip cmake config only target amd devices. 
+ # For targeting nvidia devices, we'll make our own interface target, + # hip_device_nvidia, that includes the rocm and hip headers. + add_library(hip_device_nvidia INTERFACE) + + if (NOT FF_CUDA_ARCH STREQUAL "") + target_compile_options(hip_device_nvidia INTERFACE -arch=compute_${FF_CUDA_ARCH}) + endif() + + target_include_directories(hip_device_nvidia SYSTEM INTERFACE ${HIP_INCLUDE_DIRS} ${ROCM_PATH}/include) + target_include_directories(hip_device_nvidia INTERFACE ${HIP_INCLUDE_DIRS} ${ROCM_PATH}/include) + + add_compile_definitions(FF_USE_HIP_CUDA) + + # Linking cuda: + # We do not explicitly link cuda. hipcc when targeting nvidia will + # use nvcc under the hood. nvcc when used for linking will handle + # linking cuda dependencies + target_link_libraries(flexflow hip_device_nvidia) + elseif(FF_GPU_BACKEND STREQUAL "hip_rocm") + find_package(hipblas REQUIRED) + find_package(miopen REQUIRED) + if(FF_USE_NCCL) + find_package(rccl REQUIRED) + endif() + # find_package(rocrand REQUIRED) + find_library(HIP_RAND_LIBRARY hiprand REQUIRED) + + add_compile_definitions(FF_USE_HIP_ROCM) + + if (FF_HIP_ARCH STREQUAL "") + message(FATAL_ERROR "FF_HIP_ARCH is undefined") + endif() + set_property(TARGET flexflow PROPERTY HIP_ARCHITECTURES "${HIP_ARCH_LIST}") + + message(STATUS "FF_GPU_BACKEND: ${FF_GPU_BACKEND}") + message(STATUS "FF_HIP_ARCH: ${FF_HIP_ARCH}") + message(STATUS "HIP_ARCH_LIST: ${HIP_ARCH_LIST}") + get_property(CHECK_HIP_ARCHS TARGET flexflow PROPERTY HIP_ARCHITECTURES) + message(STATUS "CHECK_HIP_ARCHS: ${CHECK_HIP_ARCHS}") + message(STATUS "HIP_CLANG_PATH: ${HIP_CLANG_PATH}") + + # The hip cmake config module defines three targets, + # hip::amdhip64, hip::host, and hip::device. + # + # hip::host and hip::device are interface targets. hip::amdhip64 is an + # imported target for libamdhip. + # + # You do not directly link to hip::amdhip64. hip::host links to hip::amdhip64 + # and hip::device links to hip::host. Link to hip::host to just use hip without + # compiling any GPU code. Link to hip::device to compile the GPU device code. + # + # Docs (outdated): + # https://rocmdocs.amd.com/en/latest/Installation_Guide/Using-CMake-with-AMD-ROCm.html + target_link_libraries(flexflow hip::device roc::hipblas MIOpen ${HIP_RAND_LIBRARY}) + if(FF_USE_NCCL) target_link_libraries(flexflow rccl) + endif() endif() + else() + message(FATAL_ERROR "Unsupported FF_GPU_BACKEND for cmake: ${FF_GPU_BACKEND}") endif() -else() - message(FATAL_ERROR "Unsupported FF_GPU_BACKEND for cmake: ${FF_GPU_BACKEND}") -endif() -if(FF_USE_NCCL AND (FF_GPU_BACKEND STREQUAL "hip_cuda" OR FF_GPU_BACKEND STREQUAL "cuda")) - add_dependencies(flexflow ${NCCL_NAME}) -endif() + if(FF_USE_NCCL AND (FF_GPU_BACKEND STREQUAL "hip_cuda" OR FF_GPU_BACKEND STREQUAL "cuda")) + add_dependencies(flexflow ${NCCL_NAME}) + endif() -target_include_directories(flexflow PUBLIC ${FLEXFLOW_INCLUDE_DIRS}) -# LEGION_URL is defined if we found a precompiled Legion library to download -if(LEGION_URL) - # Legion builds produce two library files: one for the Legion runtime and one for the Realm runtime. - # When linking FlexFlow to a precompiled version of Legion, we need to manually link to both library files. - target_link_libraries(flexflow ${LEGION_LIBRARY} ${REALM_LIBRARY} ${FLEXFLOW_EXT_LIBRARIES} nlohmann_json::nlohmann_json mpark_variant optional) - add_dependencies(flexflow ${LEGION_NAME}) -else() - # When building Legion from source, we do so by calling add_subdirectory(), and obtain a library with both the - # Legion and Realm runtimes. 
The library's name is saved into the LEGION_LIBRARY variable. Hence, we only need - # to link FlexFlow to ${LEGION_LIBRARY} - target_link_libraries(flexflow ${LEGION_LIBRARY} ${FLEXFLOW_EXT_LIBRARIES} nlohmann_json::nlohmann_json mpark_variant optional) -endif() + target_include_directories(flexflow PUBLIC ${FLEXFLOW_INCLUDE_DIRS}) + # LEGION_URL is defined if we found a precompiled Legion library to download + if(LEGION_URL) + # Legion builds produce two library files: one for the Legion runtime and one for the Realm runtime. + # When linking FlexFlow to a precompiled version of Legion, we need to manually link to both library files. + target_link_libraries(flexflow ${LEGION_LIBRARY} ${REALM_LIBRARY} ${FLEXFLOW_EXT_LIBRARIES} nlohmann_json::nlohmann_json mpark_variant optional) + add_dependencies(flexflow ${LEGION_NAME}) + else() + # When building Legion from source, we do so by calling add_subdirectory(), and obtain a library with both the + # Legion and Realm runtimes. The library's name is saved into the LEGION_LIBRARY variable. Hence, we only need + # to link FlexFlow to ${LEGION_LIBRARY} + target_link_libraries(flexflow ${LEGION_LIBRARY} ${FLEXFLOW_EXT_LIBRARIES} nlohmann_json::nlohmann_json mpark_variant optional) + endif() -#library api version, bump from time to time -set(SOVERSION 1) - -set_target_properties(flexflow PROPERTIES POSITION_INDEPENDENT_CODE ON) -set_target_properties(flexflow PROPERTIES OUTPUT_NAME "flexflow${INSTALL_SUFFIX}") -set_target_properties(flexflow PROPERTIES SOVERSION ${SOVERSION}) -if (CMAKE_SYSTEM_NAME STREQUAL "Linux") - set_target_properties(flexflow PROPERTIES BUILD_RPATH "\$ORIGIN") - set_target_properties(flexflow PROPERTIES INSTALL_RPATH "\$ORIGIN") -elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin") - set_target_properties(flexflow PROPERTIES BUILD_RPATH "@loader_path") - set_target_properties(flexflow PROPERTIES INSTALL_RPATH "@loader_path") -endif() + #library api version, bump from time to time + set(SOVERSION 1) + + set_target_properties(flexflow PROPERTIES POSITION_INDEPENDENT_CODE ON) + set_target_properties(flexflow PROPERTIES OUTPUT_NAME "flexflow${INSTALL_SUFFIX}") + set_target_properties(flexflow PROPERTIES SOVERSION ${SOVERSION}) + if (CMAKE_SYSTEM_NAME STREQUAL "Linux") + set_target_properties(flexflow PROPERTIES BUILD_RPATH "\$ORIGIN") + set_target_properties(flexflow PROPERTIES INSTALL_RPATH "\$ORIGIN") + elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin") + set_target_properties(flexflow PROPERTIES BUILD_RPATH "@loader_path") + set_target_properties(flexflow PROPERTIES INSTALL_RPATH "@loader_path") + endif() -# python related -if (FF_USE_PYTHON) - # create flexflow_cffi_header.py - add_custom_command(TARGET flexflow - PRE_BUILD - COMMAND ${FLEXFLOW_ROOT}/python/flexflow_cffi_build.py --ffhome-dir ${FLEXFLOW_ROOT} --output-dir ${FLEXFLOW_ROOT}/python/flexflow/core - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} - COMMENT "Creating flexflow_cffi_header.py..." - ) - if (NOT FF_BUILD_FROM_PYPI) - # generate the Legion Python bindings library. 
When building from pip, we need to do this post-install to prevent Legion from overwriting the path to the Legion shared library - add_custom_command(TARGET flexflow - POST_BUILD - COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/deps/legion/bindings/python/setup.py build --cmake-build-dir ${Legion_BINARY_DIR}/runtime --prefix ${Legion_BINARY_DIR} --build-lib=${Legion_BINARY_DIR}/bindings/python ${Legion_PYTHON_EXTRA_INSTALL_ARGS} - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/deps/legion/bindings/python - ) - # create flexflow_python interpreter. When building from pip, we install the FF_HOME/python/flexflow_python script instead. + # python related + if (FF_USE_PYTHON) + # create flexflow_cffi_header.py add_custom_command(TARGET flexflow PRE_BUILD - COMMAND ${PYTHON_EXECUTABLE} ${FLEXFLOW_ROOT}/python/flexflow_python_build.py --build-dir ${CMAKE_BINARY_DIR} + COMMAND ${FLEXFLOW_ROOT}/python/flexflow_cffi_build.py --ffhome-dir ${FLEXFLOW_ROOT} --output-dir ${FLEXFLOW_ROOT}/python/flexflow/core WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} - COMMENT "Creating flexflow_python interpreter..." + COMMENT "Creating flexflow_cffi_header.py..." ) - install(PROGRAMS ${CMAKE_BINARY_DIR}/flexflow_python DESTINATION "bin") + if (NOT FF_BUILD_FROM_PYPI) + # generate the Legion Python bindings library. When building from pip, we need to do this post-install to prevent Legion from overwriting the path to the Legion shared library + add_custom_command(TARGET flexflow + POST_BUILD + COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/deps/legion/bindings/python/setup.py build --cmake-build-dir ${Legion_BINARY_DIR}/runtime --prefix ${Legion_BINARY_DIR} --build-lib=${Legion_BINARY_DIR}/bindings/python ${Legion_PYTHON_EXTRA_INSTALL_ARGS} + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/deps/legion/bindings/python + ) + # create flexflow_python interpreter. When building from pip, we install the FF_HOME/python/flexflow_python script instead. + add_custom_command(TARGET flexflow + PRE_BUILD + COMMAND ${PYTHON_EXECUTABLE} ${FLEXFLOW_ROOT}/python/flexflow_python_build.py --build-dir ${CMAKE_BINARY_DIR} + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + COMMENT "Creating flexflow_python interpreter..." 
+ ) + install(PROGRAMS ${CMAKE_BINARY_DIR}/flexflow_python DESTINATION "bin") + endif() endif() endif() @@ -531,13 +539,13 @@ if(FF_BUILD_UNIT_TESTS) add_subdirectory(tests/unit) endif() -if(FF_BUILD_SUBSTITUTION_TOOL) - add_subdirectory(tools/protobuf_to_json) -endif() + if(FF_BUILD_SUBSTITUTION_TOOL) + add_subdirectory(tools/protobuf_to_json) + endif() -if(FF_BUILD_VISUALIZATION_TOOL) - add_subdirectory(tools/substitutions_to_dot) -endif() + if(FF_BUILD_VISUALIZATION_TOOL) + add_subdirectory(tools/substitutions_to_dot) + endif() if(FF_BUILD_ALL_INFERENCE_EXAMPLES OR FF_BUILD_TOKENIZER) if (FF_GPU_BACKEND STREQUAL "hip_rocm") diff --git a/cmake/cuda.cmake b/cmake/cuda.cmake index f4111d8ea6..d7f52543a1 100644 --- a/cmake/cuda.cmake +++ b/cmake/cuda.cmake @@ -54,7 +54,7 @@ if(CUDA_FOUND) set(FF_CUDA_ARCH ${DETECTED_CUDA_ARCH}) # Set FF_CUDA_ARCH to the list of all GPU architectures compatible with FlexFlow elseif("${FF_CUDA_ARCH}" STREQUAL "all") - set(FF_CUDA_ARCH 60,61,62,70,72,75,80,86) + set(FF_CUDA_ARCH 60,61,62,70,72,75,80,86,90) endif() # create CUDA_GENCODE list based on FF_CUDA_ARCH diff --git a/config/config.inc b/config/config.inc index eb1ad21fc0..7f1f0ffcf4 100644 --- a/config/config.inc +++ b/config/config.inc @@ -67,6 +67,15 @@ if [ -n "$CUDNN_DIR" ]; then SET_CUDNN="-DCUDNN_PATH=${CUDNN_DIR}" fi +# build legion only +if [ "$BUILD_LEGION_ONLY" = "ON" ]; then + SET_BUILD_LEGION_ONLY="-DBUILD_LEGION_ONLY=ON" +elif [ "$BUILD_LEGION_ONLY" = "OFF" ]; then + SET_BUILD_LEGION_ONLY="-DBUILD_LEGION_ONLY=OFF" +else + SET_BUILD_LEGION_ONLY="-DBUILD_LEGION_ONLY=OFF" +fi + # enable Python if [ "$FF_USE_PYTHON" = "ON" ]; then SET_PYTHON="-DFF_USE_PYTHON=ON" @@ -218,7 +227,7 @@ if [ -n "$FF_GPU_BACKEND" ]; then fi fi -CMAKE_FLAGS="-DCUDA_USE_STATIC_CUDA_RUNTIME=OFF -DLegion_HIJACK_CUDART=OFF ${SET_CC} ${SET_CXX} ${SET_INSTALL_DIR} ${SET_INFERENCE_TESTS} ${SET_LIBTORCH_PATH} ${SET_BUILD} ${SET_CUDA_ARCH} ${SET_CUDA} ${SET_CUDNN} ${SET_HIP_ARCH} ${SET_PYTHON} ${SET_NCCL} ${SET_NCCL_DIR} ${SET_LEGION_NETWORKS} ${SET_EXAMPLES} ${SET_INFERENCE_EXAMPLES} ${SET_USE_PREBUILT_LEGION} ${SET_USE_PREBUILT_NCCL} ${SET_USE_ALL_PREBUILT_LIBRARIES} ${SET_BUILD_UNIT_TESTS} ${SET_AVX2} ${SET_MAX_DIM} ${SET_LEGION_MAX_RETURN_SIZE} ${SET_ROCM_PATH} ${SET_FF_GPU_BACKEND}" +CMAKE_FLAGS="-DCUDA_USE_STATIC_CUDA_RUNTIME=OFF -DLegion_HIJACK_CUDART=OFF ${SET_CC} ${SET_CXX} ${SET_INSTALL_DIR} ${SET_INFERENCE_TESTS} ${SET_LIBTORCH_PATH} ${SET_BUILD} ${SET_CUDA_ARCH} ${SET_CUDA} ${SET_CUDNN} ${SET_HIP_ARCH} ${SET_PYTHON} ${SET_BUILD_LEGION_ONLY} ${SET_NCCL} ${SET_NCCL_DIR} ${SET_LEGION_NETWORKS} ${SET_EXAMPLES} ${SET_INFERENCE_EXAMPLES} ${SET_USE_PREBUILT_LEGION} ${SET_USE_PREBUILT_NCCL} ${SET_USE_ALL_PREBUILT_LIBRARIES} ${SET_BUILD_UNIT_TESTS} ${SET_AVX2} ${SET_MAX_DIM} ${SET_LEGION_MAX_RETURN_SIZE} ${SET_ROCM_PATH} ${SET_FF_GPU_BACKEND}" function run_cmake() { SRC_LOCATION=${SRC_LOCATION:=`dirname $0`/../} diff --git a/config/config.linux b/config/config.linux index 3686237538..5f15090a02 100755 --- a/config/config.linux +++ b/config/config.linux @@ -77,6 +77,9 @@ FF_USE_AVX2=${FF_USE_AVX2:-OFF} # set MAX_DIM FF_MAX_DIM=${FF_MAX_DIM:-5} +# set BUILD_LEGION_ONLY +BUILD_LEGION_ONLY=${BUILD_LEGION_ONLY:-OFF} + # set LEGION_MAX_RETURN_SIZE LEGION_MAX_RETURN_SIZE=${LEGION_MAX_RETURN_SIZE:-262144} @@ -97,7 +100,7 @@ fi function get_build_configs() { # Create a string with the values of the variables set in this script - BUILD_CONFIGS="FF_CUDA_ARCH=${FF_CUDA_ARCH} FF_HIP_ARCH=${FF_HIP_ARCH} CUDNN_DIR=${CUDNN_DIR} 
CUDA_DIR=${CUDA_DIR} NCCL_DIR=${NCCL_DIR} FF_USE_PYTHON=${FF_USE_PYTHON} FF_GASNET_CONDUIT=${FF_GASNET_CONDUIT} FF_UCX_URL=${FF_UCX_URL} FF_LEGION_NETWORKS=${FF_LEGION_NETWORKS} FF_BUILD_ALL_EXAMPLES=${FF_BUILD_ALL_EXAMPLES} FF_BUILD_ALL_INFERENCE_EXAMPLES=${FF_BUILD_ALL_INFERENCE_EXAMPLES} FF_BUILD_UNIT_TESTS=${FF_BUILD_UNIT_TESTS} FF_USE_PREBUILT_NCCL=${FF_USE_PREBUILT_NCCL} FF_USE_PREBUILT_LEGION=${FF_USE_PREBUILT_LEGION} FF_USE_ALL_PREBUILT_LIBRARIES=${FF_USE_ALL_PREBUILT_LIBRARIES} FF_USE_AVX2=${FF_USE_AVX2} FF_MAX_DIM=${FF_MAX_DIM} ROCM_PATH=${ROCM_PATH} FF_GPU_BACKEND=${FF_GPU_BACKEND}"
+  BUILD_CONFIGS="FF_CUDA_ARCH=${FF_CUDA_ARCH} FF_HIP_ARCH=${FF_HIP_ARCH} CUDNN_DIR=${CUDNN_DIR} CUDA_DIR=${CUDA_DIR} NCCL_DIR=${NCCL_DIR} FF_USE_PYTHON=${FF_USE_PYTHON} BUILD_LEGION_ONLY=${BUILD_LEGION_ONLY} FF_GASNET_CONDUIT=${FF_GASNET_CONDUIT} FF_UCX_URL=${FF_UCX_URL} FF_LEGION_NETWORKS=${FF_LEGION_NETWORKS} FF_BUILD_ALL_EXAMPLES=${FF_BUILD_ALL_EXAMPLES} FF_BUILD_ALL_INFERENCE_EXAMPLES=${FF_BUILD_ALL_INFERENCE_EXAMPLES} FF_BUILD_UNIT_TESTS=${FF_BUILD_UNIT_TESTS} FF_USE_PREBUILT_NCCL=${FF_USE_PREBUILT_NCCL} FF_USE_PREBUILT_LEGION=${FF_USE_PREBUILT_LEGION} FF_USE_ALL_PREBUILT_LIBRARIES=${FF_USE_ALL_PREBUILT_LIBRARIES} FF_USE_AVX2=${FF_USE_AVX2} FF_MAX_DIM=${FF_MAX_DIM} ROCM_PATH=${ROCM_PATH} FF_GPU_BACKEND=${FF_GPU_BACKEND}"
 }
 
 if [[ -n "$1" && ( "$1" == "CMAKE_FLAGS" || "$1" == "CUDA_PATH" ) ]]; then
diff --git a/docker/build.sh b/docker/build.sh
index e72c23fcd8..6603d919f5 100755
--- a/docker/build.sh
+++ b/docker/build.sh
@@ -12,6 +12,7 @@ image=${1:-flexflow}
 FF_GPU_BACKEND=${FF_GPU_BACKEND:-cuda}
 cuda_version=${cuda_version:-"empty"}
 hip_version=${hip_version:-"empty"}
+python_version=${python_version:-latest}
 
 # Check docker image name
 if [[ "$image" != @(flexflow-environment|flexflow) ]]; then
@@ -96,7 +97,15 @@ fi
 
 cores_available=$(nproc --all)
 n_build_cores=$(( cores_available -1 ))
 
-docker build --build-arg "ff_environment_base_image=${ff_environment_base_image}" --build-arg "N_BUILD_CORES=${n_build_cores}" --build-arg "FF_GPU_BACKEND=${FF_GPU_BACKEND}" --build-arg "hip_version=${hip_version}" -t "flexflow-environment-${FF_GPU_BACKEND}${gpu_backend_version}" -f docker/flexflow-environment/Dockerfile .
+# check python_version
+if [[ "$python_version" != @(3.8|3.9|3.10|3.11|latest) ]]; then
+  echo "python_version '${python_version}' is not supported, please choose among {3.8, 3.9, 3.10, 3.11, latest}"
+  exit 1
+fi
+
+docker build --build-arg "ff_environment_base_image=${ff_environment_base_image}" --build-arg "N_BUILD_CORES=${n_build_cores}" --build-arg "FF_GPU_BACKEND=${FF_GPU_BACKEND}" --build-arg "hip_version=${hip_version}" --build-arg "python_version=${python_version}" -t "flexflow-environment-${FF_GPU_BACKEND}${gpu_backend_version}" -f docker/flexflow-environment/Dockerfile .
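+# Note: python_version is forwarded to the Dockerfile as a build arg; when unset,
+# it defaults to "latest" (see python_version=${python_version:-latest} above).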
 
 # If the user only wants to build the environment image, we are done
 if [[ "$image" == "flexflow-environment" ]]; then
diff --git a/docker/flexflow-environment/Dockerfile b/docker/flexflow-environment/Dockerfile
index a12f31c738..0e9a3cda82 100644
--- a/docker/flexflow-environment/Dockerfile
+++ b/docker/flexflow-environment/Dockerfile
@@ -16,14 +16,29 @@ RUN apt-get update && apt-get install -y --no-install-recommends wget sudo binut
     apt-get upgrade -y libstdc++6
 
 # Install Python3 with Miniconda
-RUN wget -c -q https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh && \
-    mv Miniconda3-latest-Linux-x86_64.sh ~/Miniconda3-latest-Linux-x86_64.sh && \
-    chmod +x ~/Miniconda3-latest-Linux-x86_64.sh && \
-    bash ~/Miniconda3-latest-Linux-x86_64.sh -b -p /opt/conda && \
-    rm ~/Miniconda3-latest-Linux-x86_64.sh && \
-    /opt/conda/bin/conda upgrade --all && \
-    /opt/conda/bin/conda install conda-build conda-verify && \
-    /opt/conda/bin/conda clean -ya
+ARG python_version=latest
+RUN MINICONDA_SCRIPT_NAME=Miniconda3-latest-Linux-x86_64.sh; \
+    if [ "$python_version" != "3.8" ] && [ "$python_version" != "3.9" ] && [ "$python_version" != "3.10" ] && [ "$python_version" != "3.11" ] && [ "$python_version" != "latest" ]; then \
+        echo "python_version '${python_version}' is not supported, please choose among {3.8, 3.9, 3.10, 3.11 or latest (default)}"; \
+        exit 1; \
+    fi; \
+    if [ "${python_version}" = "3.8" ]; then \
+        MINICONDA_SCRIPT_NAME=Miniconda3-py38_23.5.2-0-Linux-x86_64.sh; \
+    elif [ "${python_version}" = "3.9" ]; then \
+        MINICONDA_SCRIPT_NAME=Miniconda3-py39_23.5.2-0-Linux-x86_64.sh; \
+    elif [ "${python_version}" = "3.10" ]; then \
+        MINICONDA_SCRIPT_NAME=Miniconda3-py310_23.5.2-0-Linux-x86_64.sh; \
+    elif [ "${python_version}" = "3.11" ]; then \
+        MINICONDA_SCRIPT_NAME=Miniconda3-py311_23.5.2-0-Linux-x86_64.sh; \
+    fi; \
+    wget -c -q https://repo.continuum.io/miniconda/${MINICONDA_SCRIPT_NAME} && \
+    mv ./${MINICONDA_SCRIPT_NAME} ~/${MINICONDA_SCRIPT_NAME} && \
+    chmod +x ~/${MINICONDA_SCRIPT_NAME} && \
+    bash ~/${MINICONDA_SCRIPT_NAME} -b -p /opt/conda && \
+    rm ~/${MINICONDA_SCRIPT_NAME} && \
+    /opt/conda/bin/conda upgrade --all && \
+    /opt/conda/bin/conda install conda-build conda-verify && \
+    /opt/conda/bin/conda clean -ya
 
 # Optionally install HIP dependencies
 # Note that amd's docs say to also install the `hip-runtime-nvidia` package. This
diff --git a/docker/flexflow/Dockerfile b/docker/flexflow/Dockerfile
index a7d540bc71..60f9d4d653 100644
--- a/docker/flexflow/Dockerfile
+++ b/docker/flexflow/Dockerfile
@@ -15,6 +15,15 @@ COPY . .
 ARG BUILD_CONFIGS
 ARG N_BUILD_CORES
 
+# Create install directory if needed
+RUN for pair in $BUILD_CONFIGS; do \
+      key=${pair%%=*}; \
+      value=${pair#*=}; \
+      if [ "$key" = "INSTALL_DIR" ] && [ -n "$value" ]; then \
+        mkdir -p "$value"; \
+      fi; \
+    done
+
 # Build and install C++ and Python versions of FlexFlow
 RUN mkdir -p build && cd build && \
     eval "$BUILD_CONFIGS" ../config/config.linux && \