diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index cf73e1d2d27..de9caa0fabe 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -30,7 +30,6 @@ jobs: secrets: inherit uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-build.yaml@branch-23.08 with: - matrix_filter: map(select(.CUDA_VER | startswith("11"))) build_type: ${{ inputs.build_type || 'branch' }} branch: ${{ inputs.branch }} date: ${{ inputs.date }} @@ -40,7 +39,6 @@ jobs: secrets: inherit uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-build.yaml@branch-23.08 with: - matrix_filter: map(select(.CUDA_VER | startswith("11"))) build_type: ${{ inputs.build_type || 'branch' }} branch: ${{ inputs.branch }} date: ${{ inputs.date }} @@ -63,7 +61,7 @@ jobs: arch: "amd64" branch: ${{ inputs.branch }} build_type: ${{ inputs.build_type || 'branch' }} - container_image: "rapidsai/ci:cuda11.8.0-ubuntu22.04-py3.10" + container_image: "rapidsai/ci:latest" date: ${{ inputs.date }} node_type: "gpu-v100-latest-1" run_script: "ci/build_docs.sh" diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml index d9029ea37a1..4d52cd26de4 100644 --- a/.github/workflows/pr.yaml +++ b/.github/workflows/pr.yaml @@ -35,7 +35,6 @@ jobs: secrets: inherit uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-build.yaml@branch-23.08 with: - matrix_filter: map(select(.CUDA_VER | startswith("11"))) build_type: pull-request node_type: cpu16 conda-cpp-tests: @@ -43,21 +42,18 @@ jobs: secrets: inherit uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-tests.yaml@branch-23.08 with: - matrix_filter: map(select(.CUDA_VER | startswith("11"))) build_type: pull-request conda-python-build: needs: conda-cpp-build secrets: inherit uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-build.yaml@branch-23.08 with: - matrix_filter: map(select(.CUDA_VER | startswith("11"))) build_type: pull-request 
conda-python-tests: needs: conda-python-build secrets: inherit uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-tests.yaml@branch-23.08 with: - matrix_filter: map(select(.CUDA_VER | startswith("11"))) build_type: pull-request conda-notebook-tests: needs: conda-python-build @@ -67,7 +63,7 @@ jobs: build_type: pull-request node_type: "gpu-v100-latest-1" arch: "amd64" - container_image: "rapidsai/ci:cuda11.8.0-ubuntu22.04-py3.10" + container_image: "rapidsai/ci:latest" run_script: "ci/test_notebooks.sh" docs-build: needs: conda-python-build @@ -77,7 +73,7 @@ jobs: build_type: pull-request node_type: "gpu-v100-latest-1" arch: "amd64" - container_image: "rapidsai/ci:cuda11.8.0-ubuntu22.04-py3.10" + container_image: "rapidsai/ci:latest" run_script: "ci/build_docs.sh" wheel-build-pylibcugraph: needs: checks diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 1b8cfaf25b7..d697b8f1649 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -18,7 +18,6 @@ jobs: secrets: inherit uses: rapidsai/shared-action-workflows/.github/workflows/conda-cpp-tests.yaml@branch-23.08 with: - matrix_filter: map(select(.CUDA_VER | startswith("11"))) build_type: nightly branch: ${{ inputs.branch }} date: ${{ inputs.date }} @@ -27,7 +26,6 @@ jobs: secrets: inherit uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-tests.yaml@branch-23.08 with: - matrix_filter: map(select(.CUDA_VER | startswith("11"))) build_type: nightly branch: ${{ inputs.branch }} date: ${{ inputs.date }} diff --git a/build.sh b/build.sh index a8e97d924c6..18359229822 100755 --- a/build.sh +++ b/build.sh @@ -60,7 +60,7 @@ HELP="$0 [ ...] [ ...] 
and is: -v - verbose build mode -g - build for debug - -n - do not install after a successful build + -n - do not install after a successful build (does not affect Python packages) --pydevelop - use setup.py develop instead of install --allgpuarch - build for all supported GPU architectures --skip_cpp_tests - do not build the SG test binaries as part of the libcugraph and libcugraph_etl targets @@ -104,7 +104,7 @@ BUILD_CPP_MG_TESTS=OFF BUILD_ALL_GPU_ARCH=0 BUILD_WITH_CUGRAPHOPS=ON CMAKE_GENERATOR_OPTION="-G Ninja" -PYTHON_ARGS_FOR_INSTALL="-m pip install --no-build-isolation --no-deps ." +PYTHON_ARGS_FOR_INSTALL="-m pip install --no-build-isolation --no-deps" # Set defaults for vars that may not have been defined externally # FIXME: if PREFIX is not set, check CONDA_PREFIX, but there is no fallback @@ -178,6 +178,12 @@ if hasArg --pydevelop; then PYTHON_ARGS_FOR_INSTALL="-m pip install --no-build-isolation --no-deps -e ." fi +# Append `-DFIND_CUGRAPH_CPP=ON` to EXTRA_CMAKE_ARGS unless a user specified the option. +SKBUILD_EXTRA_CMAKE_ARGS="${EXTRA_CMAKE_ARGS}" + if [[ "${EXTRA_CMAKE_ARGS}" != *"DFIND_CUGRAPH_CPP"* ]]; then + SKBUILD_EXTRA_CMAKE_ARGS="${SKBUILD_EXTRA_CMAKE_ARGS} -DFIND_CUGRAPH_CPP=ON" + fi + # If clean or uninstall targets given, run them prior to any other steps if hasArg uninstall; then if [[ "$INSTALL_PREFIX" != "" ]]; then @@ -296,21 +302,9 @@ if buildAll || hasArg pylibcugraph; then if hasArg --clean; then cleanPythonDir ${REPODIR}/python/pylibcugraph else - cd ${REPODIR}/python/pylibcugraph - # setup.py references an env var CUGRAPH_BUILD_PATH to find the libcugraph - # build.
If not set by the user, set it to LIBCUGRAPH_BUILD_DIR - CUGRAPH_BUILD_PATH=${CUGRAPH_BUILD_PATH:=${LIBCUGRAPH_BUILD_DIR}} - python setup.py build_ext \ - --inplace \ - -- \ - -DFIND_CUGRAPH_CPP=ON \ - -DUSE_CUGRAPH_OPS=${BUILD_WITH_CUGRAPHOPS} \ - -Dcugraph_ROOT=${LIBCUGRAPH_BUILD_DIR} \ - -- \ - -j${PARALLEL_LEVEL:-1} - if [[ ${INSTALL_TARGET} != "" ]]; then - env CUGRAPH_BUILD_PATH=${CUGRAPH_BUILD_PATH} python ${PYTHON_ARGS_FOR_INSTALL} - fi + SKBUILD_CONFIGURE_OPTIONS="${SKBUILD_EXTRA_CMAKE_ARGS} -DUSE_CUGRAPH_OPS=${BUILD_WITH_CUGRAPHOPS}" \ + SKBUILD_BUILD_OPTIONS="-j${PARALLEL_LEVEL:-1}" \ + python ${PYTHON_ARGS_FOR_INSTALL} ${REPODIR}/python/pylibcugraph fi fi @@ -319,22 +313,9 @@ if buildAll || hasArg cugraph; then if hasArg --clean; then cleanPythonDir ${REPODIR}/python/cugraph else - cd ${REPODIR}/python/cugraph - # FIXME: this needs to eventually reference the pylibcugraph build - # setup.py references an env var CUGRAPH_BUILD_PATH to find the libcugraph - # build. If not set by the user, set it to LIBCUGRAPH_BUILD_DIR - CUGRAPH_BUILD_PATH=${CUGRAPH_BUILD_PATH:=${LIBCUGRAPH_BUILD_DIR}} - python setup.py build_ext \ - --inplace \ - -- \ - -DFIND_CUGRAPH_CPP=ON \ - -DUSE_CUGRAPH_OPS=${BUILD_WITH_CUGRAPHOPS} \ - -Dcugraph_ROOT=${LIBCUGRAPH_BUILD_DIR} \ - -- \ - -j${PARALLEL_LEVEL:-1} - if [[ ${INSTALL_TARGET} != "" ]]; then - env CUGRAPH_BUILD_PATH=${CUGRAPH_BUILD_PATH} python ${PYTHON_ARGS_FOR_INSTALL} - fi + SKBUILD_CONFIGURE_OPTIONS="${SKBUILD_EXTRA_CMAKE_ARGS} -DUSE_CUGRAPH_OPS=${BUILD_WITH_CUGRAPHOPS}" \ + SKBUILD_BUILD_OPTIONS="-j${PARALLEL_LEVEL:-1}" \ + python ${PYTHON_ARGS_FOR_INSTALL} ${REPODIR}/python/cugraph fi fi @@ -343,12 +324,8 @@ if hasArg cugraph-service; then if hasArg --clean; then cleanPythonDir ${REPODIR}/python/cugraph-service else - if [[ ${INSTALL_TARGET} != "" ]]; then - cd ${REPODIR}/python/cugraph-service/client - python ${PYTHON_ARGS_FOR_INSTALL} - cd ${REPODIR}/python/cugraph-service/server - python ${PYTHON_ARGS_FOR_INSTALL} - fi + 
python ${PYTHON_ARGS_FOR_INSTALL} ${REPODIR}/python/cugraph-service/client + python ${PYTHON_ARGS_FOR_INSTALL} ${REPODIR}/python/cugraph-service/server fi fi @@ -357,10 +334,7 @@ if hasArg cugraph-pyg; then if hasArg --clean; then cleanPythonDir ${REPODIR}/python/cugraph-pyg else - if [[ ${INSTALL_TARGET} != "" ]]; then - cd ${REPODIR}/python/cugraph-pyg - python ${PYTHON_ARGS_FOR_INSTALL} - fi + python ${PYTHON_ARGS_FOR_INSTALL} ${REPODIR}/python/cugraph-pyg fi fi @@ -369,10 +343,7 @@ if hasArg cugraph-dgl; then if hasArg --clean; then cleanPythonDir ${REPODIR}/python/cugraph-dgl else - if [[ ${INSTALL_TARGET} != "" ]]; then - cd ${REPODIR}/python/cugraph-dgl - python ${PYTHON_ARGS_FOR_INSTALL} - fi + python ${PYTHON_ARGS_FOR_INSTALL} ${REPODIR}/python/cugraph-dgl fi fi diff --git a/ci/build_python.sh b/ci/build_python.sh index 517dda726ee..5125e86d53a 100755 --- a/ci/build_python.sh +++ b/ci/build_python.sh @@ -26,28 +26,36 @@ rapids-mamba-retry mambabuild \ --channel "${RAPIDS_CONDA_BLD_OUTPUT_DIR}" \ conda/recipes/cugraph -rapids-mamba-retry mambabuild \ - --no-test \ - --channel "${CPP_CHANNEL}" \ - --channel "${RAPIDS_CONDA_BLD_OUTPUT_DIR}" \ - conda/recipes/cugraph-service - -rapids-mamba-retry mambabuild \ - --no-test \ - --channel "${CPP_CHANNEL}" \ - --channel "${RAPIDS_CONDA_BLD_OUTPUT_DIR}" \ - --channel pyg \ - --channel pytorch \ - --channel pytorch-nightly \ - conda/recipes/cugraph-pyg - -rapids-mamba-retry mambabuild \ - --no-test \ - --channel "${CPP_CHANNEL}" \ - --channel "${RAPIDS_CONDA_BLD_OUTPUT_DIR}" \ - --channel dglteam \ - --channel pytorch \ - --channel pytorch-nightly \ - conda/recipes/cugraph-dgl +RAPIDS_CUDA_MAJOR="${RAPIDS_CUDA_VERSION%%.*}" + +if [[ ${RAPIDS_CUDA_MAJOR} == "11" ]]; then + # Only one CUDA configuration is needed, so we choose CUDA 11 arbitrarily. + # Nothing in the cugraph-service packages is CUDA-specific. 
+ rapids-mamba-retry mambabuild \ + --no-test \ + --channel "${CPP_CHANNEL}" \ + --channel "${RAPIDS_CONDA_BLD_OUTPUT_DIR}" \ + conda/recipes/cugraph-service + + # Only CUDA 11 is supported right now due to PyTorch requirement. + rapids-mamba-retry mambabuild \ + --no-test \ + --channel "${CPP_CHANNEL}" \ + --channel "${RAPIDS_CONDA_BLD_OUTPUT_DIR}" \ + --channel pyg \ + --channel pytorch \ + --channel pytorch-nightly \ + conda/recipes/cugraph-pyg + + # Only CUDA 11 is supported right now due to PyTorch requirement. + rapids-mamba-retry mambabuild \ + --no-test \ + --channel "${CPP_CHANNEL}" \ + --channel "${RAPIDS_CONDA_BLD_OUTPUT_DIR}" \ + --channel dglteam \ + --channel pytorch \ + --channel pytorch-nightly \ + conda/recipes/cugraph-dgl +fi rapids-upload-conda-to-s3 python diff --git a/conda/environments/all_cuda-118_arch-x86_64.yaml b/conda/environments/all_cuda-118_arch-x86_64.yaml index 9c428ef9d07..16a4d4f0dbc 100644 --- a/conda/environments/all_cuda-118_arch-x86_64.yaml +++ b/conda/environments/all_cuda-118_arch-x86_64.yaml @@ -4,15 +4,16 @@ channels: - rapidsai - rapidsai-nightly - dask/label/dev -- conda-forge -- nvidia - pytorch - dglteam/label/cu118 +- conda-forge +- nvidia dependencies: - aiohttp - c-compiler - cmake>=3.26.4 -- cudatoolkit=11.8 +- cuda-version=11.8 +- cudatoolkit - cudf==23.8.* - cupy>=12.0.0 - cxx-compiler diff --git a/conda/recipes/cugraph/conda_build_config.yaml b/conda/recipes/cugraph/conda_build_config.yaml index e3e18d4620b..4530a4c942d 100644 --- a/conda/recipes/cugraph/conda_build_config.yaml +++ b/conda/recipes/cugraph/conda_build_config.yaml @@ -5,6 +5,9 @@ cxx_compiler_version: - 11 cuda_compiler: + - cuda-nvcc + +cuda11_compiler: - nvcc cmake_version: @@ -15,18 +18,3 @@ sysroot_version: ucx_py_version: - "0.33.*" - -# The CTK libraries below are missing from the conda-forge::cudatoolkit -# package. The "*_host_*" version specifiers correspond to `11.8` packages. 
- -libcublas_host_version: - - "=11.11.3.6" - -libcurand_host_version: - - "=10.3.0.86" - -libcusolver_host_version: - - "=11.4.1.48" - -libcusparse_host_version: - - "=11.7.5.86" diff --git a/conda/recipes/cugraph/meta.yaml b/conda/recipes/cugraph/meta.yaml index 7f2dc542538..e2b9d38c181 100644 --- a/conda/recipes/cugraph/meta.yaml +++ b/conda/recipes/cugraph/meta.yaml @@ -39,36 +39,38 @@ build: requirements: build: - {{ compiler('c') }} - - {{ compiler('cuda') }} {{ cuda_version }} - {{ compiler('cxx') }} + {% if cuda_major == "11" %} + - {{ compiler('cuda11') }} {{ cuda_version }} + {% else %} + - {{ compiler('cuda') }} + {% endif %} + - cuda-version ={{ cuda_version }} - cmake {{ cmake_version }} - ninja - sysroot_{{ target_platform }} {{ sysroot_version }} host: - - cudatoolkit ={{ cuda_version }} + - cuda-version ={{ cuda_version }} + {% if cuda_major == "11" %} + - cudatoolkit + {% endif %} - cudf ={{ minor_version }} - cython >=0.29,<0.30 - - libcublas {{ libcublas_host_version }} - - libcublas-dev {{ libcublas_host_version }} - libcugraph ={{ version }} - - libcurand {{ libcurand_host_version }} - - libcurand-dev {{ libcurand_host_version }} - - libcusolver {{ libcusolver_host_version }} - - libcusolver-dev {{ libcusolver_host_version }} - - libcusparse {{ libcusparse_host_version }} - - libcusparse-dev {{ libcusparse_host_version }} - - libraft ={{ minor_version }} - - libraft-headers ={{ minor_version }} - - pylibraft ={{ minor_version}} + - pylibraft ={{ minor_version }} - python - raft-dask ={{ minor_version }} + - rmm ={{ minor_version }} - scikit-build >=0.13.1 - setuptools - - ucx-proc=*=gpu - - ucx-py {{ ucx_py_version }} run: - - {{ pin_compatible('cudatoolkit', max_pin='x', min_pin='x') }} - - cuda-python >=11.7.1,<12.0 + - {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }} + {% if cuda_major == "11" %} + - cudatoolkit + - cuda-python >=11.7.1,<12.0a0 + {% else %} + - cuda-python >=12.0,<13.0a0 + {% endif %} - cudf ={{ minor_version 
}} - cupy >=12.0.0 - dask-cuda ={{ minor_version }} @@ -77,8 +79,6 @@ requirements: - dask-core >=2023.5.1 - distributed >=2023.5.1 - libcugraph ={{ version }} - - libraft ={{ minor_version }} - - libraft-headers ={{ minor_version }} - pylibcugraph ={{ version }} - pylibraft ={{ minor_version }} - python @@ -88,7 +88,7 @@ requirements: tests: requirements: - - cudatoolkit ={{ cuda_version }} + - cuda-version ={{ cuda_version }} imports: - cugraph diff --git a/conda/recipes/libcugraph/conda_build_config.yaml b/conda/recipes/libcugraph/conda_build_config.yaml index 287d1d391da..3acd0166a67 100644 --- a/conda/recipes/libcugraph/conda_build_config.yaml +++ b/conda/recipes/libcugraph/conda_build_config.yaml @@ -5,6 +5,9 @@ cxx_compiler_version: - 11 cuda_compiler: + - cuda-nvcc + +cuda11_compiler: - nvcc cmake_version: @@ -19,23 +22,43 @@ nccl_version: gtest_version: - ">=1.13.0" -cuda_profiler_api_version: - - ">=11.8.86,<12" - sysroot_version: - "2.17" # The CTK libraries below are missing from the conda-forge::cudatoolkit -# package. The "*_host_*" version specifiers correspond to `11.8` packages. +# package. The "*_host_*" version specifiers correspond to `11.8` packages +# and the "*_run_*" version specifiers correspond to `11.x` packages. -libcublas_host_version: +cuda11_libcublas_host_version: - "=11.11.3.6" -libcurand_host_version: +cuda11_libcublas_run_version: + - ">=11.5.2.43,<12.0.0" + +cuda11_libcurand_host_version: - "=10.3.0.86" -libcusolver_host_version: +cuda11_libcurand_run_version: + - ">=10.2.5.43,<10.3.1" + +cuda11_libcusolver_host_version: - "=11.4.1.48" -libcusparse_host_version: +cuda11_libcusolver_run_version: + - ">=11.2.0.43,<11.4.2" + +cuda11_libcusparse_host_version: - "=11.7.5.86" + +cuda11_libcusparse_run_version: + - ">=11.6.0.43,<12.0.0" + +# `cuda-profiler-api` only has `11.8.0` and `12.0.0` packages for all +# architectures. 
The "*_host_*" version specifiers correspond to `11.8` packages and the +# "*_run_*" version specifiers correspond to `11.x` packages. + +cuda11_cuda_profiler_api_host_version: + - "=11.8.86" + +cuda11_cuda_profiler_api_run_version: + - ">=11.4.240,<12" diff --git a/conda/recipes/libcugraph/meta.yaml b/conda/recipes/libcugraph/meta.yaml index f843aabba92..d52d81366d7 100644 --- a/conda/recipes/libcugraph/meta.yaml +++ b/conda/recipes/libcugraph/meta.yaml @@ -4,7 +4,6 @@ {% set minor_version = version.split('.')[0] + '.' + version.split('.')[1] %} {% set cuda_version = '.'.join(environ['RAPIDS_CUDA_VERSION'].split('.')[:2]) %} {% set cuda_major = cuda_version.split('.')[0] %} -{% set cuda_spec = ">=" + cuda_major ~ ",<" + (cuda_major | int + 1) ~ ".0a0" %} # i.e. >=11,<12.0a0 {% set date_string = environ['RAPIDS_DATE_STRING'] %} package: @@ -34,29 +33,45 @@ build: requirements: build: - {{ compiler('c') }} - - {{ compiler('cuda') }} {{ cuda_version }} + {% if cuda_major == "11" %} + - {{ compiler('cuda11') }} {{ cuda_version }} + {% else %} + - {{ compiler('cuda') }} + {% endif %} + - cuda-version ={{ cuda_version }} - {{ compiler('cxx') }} - cmake {{ cmake_version }} - ninja - openmpi # Required for building cpp-mgtests (multi-GPU tests) - sysroot_{{ target_platform }} {{ sysroot_version }} host: + {% if cuda_major == "11" %} + - cudatoolkit - cuda-nvtx ={{ cuda_version }} - - cuda-profiler-api {{ cuda_profiler_api_version }} - - cudatoolkit ={{ cuda_version }} + - cuda-profiler-api {{ cuda11_cuda_profiler_api_host_version }} + - libcublas {{ cuda11_libcublas_host_version }} + - libcublas-dev {{ cuda11_libcublas_host_version }} + - libcurand {{ cuda11_libcurand_host_version }} + - libcurand-dev {{ cuda11_libcurand_host_version }} + - libcusolver {{ cuda11_libcusolver_host_version }} + - libcusolver-dev {{ cuda11_libcusolver_host_version }} + - libcusparse {{ cuda11_libcusparse_host_version }} + - libcusparse-dev {{ cuda11_libcusparse_host_version }} + {% else %} + 
- cuda-nvtx-dev + - cuda-profiler-api + - cuda-cudart-dev + - libcublas-dev + - libcurand-dev + - libcusolver-dev + - libcusparse-dev + {% endif %} + - cuda-version ={{ cuda_version }} - doxygen {{ doxygen_version }} - gmock {{ gtest_version }} - gtest {{ gtest_version }} - - libcublas {{ libcublas_host_version }} - - libcublas-dev {{ libcublas_host_version }} - libcudf ={{ minor_version }} - libcugraphops ={{ minor_version }} - - libcurand {{ libcurand_host_version }} - - libcurand-dev {{ libcurand_host_version }} - - libcusolver {{ libcusolver_host_version }} - - libcusolver-dev {{ libcusolver_host_version }} - - libcusparse {{ libcusparse_host_version }} - - libcusparse-dev {{ libcusparse_host_version }} - libraft ={{ minor_version }} - libraft-headers ={{ minor_version }} - librmm ={{ minor_version }} @@ -71,12 +86,30 @@ outputs: number: {{ GIT_DESCRIBE_NUMBER }} string: cuda{{ cuda_major }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }} ignore_run_exports_from: - - {{ compiler('cuda') }} + {% if cuda_major == "11" %} + - {{ compiler('cuda11') }} + {% endif %} requirements: build: - cmake {{ cmake_version }} + host: + - cuda-version ={{ cuda_version }} run: - - cudatoolkit {{ cuda_spec }} + - {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }} + {% if cuda_major == "11" %} + - cuda-profiler-api {{ cuda11_cuda_profiler_api_run_version }} + - cudatoolkit + - libcublas {{ cuda11_libcublas_run_version }} + - libcurand {{ cuda11_libcurand_run_version }} + - libcusolver {{ cuda11_libcusolver_run_version }} + - libcusparse {{ cuda11_libcusparse_run_version }} + {% else %} + - cuda-profiler-api + - libcublas + - libcurand + - libcusolver + - libcusparse + {% endif %} - libcugraphops ={{ minor_version }} - libraft ={{ minor_version }} - libraft-headers ={{ minor_version }} @@ -95,13 +128,20 @@ outputs: number: {{ GIT_DESCRIBE_NUMBER }} string: cuda{{ cuda_major }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }} 
ignore_run_exports_from: - - {{ compiler('cuda') }} + {% if cuda_major == "11" %} + - {{ compiler('cuda11') }} + {% endif %} requirements: build: - cmake {{ cmake_version }} + host: + - cuda-version ={{ cuda_version }} run: + - {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }} - {{ pin_subpackage('libcugraph', exact=True) }} - - cudatoolkit {{ cuda_spec }} + {% if cuda_major == "11" %} + - cudatoolkit + {% endif %} - libcudf ={{ minor_version }} - librmm ={{ minor_version }} about: @@ -116,14 +156,21 @@ outputs: number: {{ GIT_DESCRIBE_NUMBER }} string: cuda{{ cuda_major }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }} ignore_run_exports_from: - - {{ compiler('cuda') }} + {% if cuda_major == "11" %} + - {{ compiler('cuda11') }} + {% endif %} requirements: build: - cmake {{ cmake_version }} + host: + - cuda-version ={{ cuda_version }} run: + - {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }} - {{ pin_subpackage('libcugraph_etl', exact=True) }} - {{ pin_subpackage('libcugraph', exact=True) }} - - cudatoolkit {{ cuda_spec }} + {% if cuda_major == "11" %} + - cudatoolkit + {% endif %} - gmock {{ gtest_version }} - gtest {{ gtest_version }} about: diff --git a/conda/recipes/pylibcugraph/conda_build_config.yaml b/conda/recipes/pylibcugraph/conda_build_config.yaml index e3e18d4620b..4530a4c942d 100644 --- a/conda/recipes/pylibcugraph/conda_build_config.yaml +++ b/conda/recipes/pylibcugraph/conda_build_config.yaml @@ -5,6 +5,9 @@ cxx_compiler_version: - 11 cuda_compiler: + - cuda-nvcc + +cuda11_compiler: - nvcc cmake_version: @@ -15,18 +18,3 @@ sysroot_version: ucx_py_version: - "0.33.*" - -# The CTK libraries below are missing from the conda-forge::cudatoolkit -# package. The "*_host_*" version specifiers correspond to `11.8` packages. 
- -libcublas_host_version: - - "=11.11.3.6" - -libcurand_host_version: - - "=10.3.0.86" - -libcusolver_host_version: - - "=11.4.1.48" - -libcusparse_host_version: - - "=11.7.5.86" diff --git a/conda/recipes/pylibcugraph/meta.yaml b/conda/recipes/pylibcugraph/meta.yaml index de031a6fe94..aa82c20ad44 100644 --- a/conda/recipes/pylibcugraph/meta.yaml +++ b/conda/recipes/pylibcugraph/meta.yaml @@ -39,41 +39,38 @@ build: requirements: build: - {{ compiler('c') }} - - {{ compiler('cuda') }} {{ cuda_version }} - {{ compiler('cxx') }} + {% if cuda_major == "11" %} + - {{ compiler('cuda11') }} {{ cuda_version }} + {% else %} + - {{ compiler('cuda') }} + {% endif %} + - cuda-version ={{ cuda_version }} - cmake {{ cmake_version }} - ninja - sysroot_{{ target_platform }} {{ sysroot_version }} host: - - cudatoolkit ={{ cuda_version }} - - cudf ={{ minor_version }} + - cuda-version ={{ cuda_version }} + {% if cuda_major == "11" %} + - cudatoolkit + {% endif %} - cython >=0.29,<0.30 - - libcublas {{ libcublas_host_version }} - - libcublas-dev {{ libcublas_host_version }} - libcugraph ={{ version }} - - libcurand {{ libcurand_host_version }} - - libcurand-dev {{ libcurand_host_version }} - - libcusolver {{ libcusolver_host_version }} - - libcusolver-dev {{ libcusolver_host_version }} - - libcusparse {{ libcusparse_host_version }} - - libcusparse-dev {{ libcusparse_host_version }} - - libraft ={{ minor_version }} - - libraft-headers ={{ minor_version }} - - pylibraft ={{ minor_version}} + - pylibraft ={{ minor_version }} - python - - rmm ={{ minor_version }} - scikit-build >=0.13.1 - setuptools - - ucx-proc=*=gpu - - ucx-py {{ ucx_py_version }} run: - - {{ pin_compatible('cudatoolkit', max_pin='x', min_pin='x') }} + - {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }} + {% if cuda_major == "11" %} + - cudatoolkit + {% endif %} - libcugraph ={{ version }} - python tests: requirements: - - cudatoolkit ={{ cuda_version }} + - cuda-version ={{ cuda_version }} imports: - 
pylibcugraph diff --git a/cpp/src/community/legacy/spectral_clustering.cu b/cpp/src/community/legacy/spectral_clustering.cu index 5ab71e7d280..84e9891b7ce 100644 --- a/cpp/src/community/legacy/spectral_clustering.cu +++ b/cpp/src/community/legacy/spectral_clustering.cu @@ -24,9 +24,6 @@ #include #include -#if defined RAFT_COMPILED -#include -#endif #include #include diff --git a/cpp/src/prims/transform_e.cuh b/cpp/src/prims/transform_e.cuh index 9be12262574..7950df58a3e 100644 --- a/cpp/src/prims/transform_e.cuh +++ b/cpp/src/prims/transform_e.cuh @@ -291,23 +291,16 @@ void transform_e(raft::handle_t const& handle, auto major = thrust::get<0>(edge); auto minor = thrust::get<1>(edge); - vertex_t major_offset{}; - vertex_t major_idx{}; auto major_hypersparse_first = edge_partition.major_hypersparse_first(); - if (major_hypersparse_first) { - if (major < *major_hypersparse_first) { - major_offset = edge_partition.major_offset_from_major_nocheck(major); - major_idx = major_offset; - } else { - auto major_hypersparse_idx = - edge_partition.major_hypersparse_idx_from_major_nocheck(major); - assert(major_hypersparse_idx); - major_idx = edge_partition.major_offset_from_major_nocheck(*major_hypersparse_first) + - *major_hypersparse_idx; - } - } else { - major_offset = edge_partition.major_offset_from_major_nocheck(major); - major_idx = major_offset; + auto major_offset = edge_partition.major_offset_from_major_nocheck(major); + vertex_t major_idx{major_offset}; + + if ((major_hypersparse_first) && (major >= *major_hypersparse_first)) { + auto major_hypersparse_idx = + edge_partition.major_hypersparse_idx_from_major_nocheck(major); + assert(major_hypersparse_idx); + major_idx = edge_partition.major_offset_from_major_nocheck(*major_hypersparse_first) + + *major_hypersparse_idx; } auto minor_offset = edge_partition.minor_offset_from_minor_nocheck(minor); diff --git a/cpp/tests/sampling/mg_uniform_neighbor_sampling.cu b/cpp/tests/sampling/mg_uniform_neighbor_sampling.cu index 
82fb2430ca1..57f85a212b1 100644 --- a/cpp/tests/sampling/mg_uniform_neighbor_sampling.cu +++ b/cpp/tests/sampling/mg_uniform_neighbor_sampling.cu @@ -163,7 +163,6 @@ class Tests_MGUniform_Neighbor_Sampling EXPECT_THROW( cugraph::uniform_neighbor_sample( *handle_, - handle, mg_graph_view, mg_edge_weight_view, std::optional>{std::nullopt}, diff --git a/cpp/tests/sampling/sg_uniform_neighbor_sampling.cu b/cpp/tests/sampling/sg_uniform_neighbor_sampling.cu index a59ea7feb8f..f795c11437f 100644 --- a/cpp/tests/sampling/sg_uniform_neighbor_sampling.cu +++ b/cpp/tests/sampling/sg_uniform_neighbor_sampling.cu @@ -128,25 +128,29 @@ class Tests_Uniform_Neighbor_Sampling random_sources.size(), handle.get_stream()); -#ifdef NO_CUGRAPH_OPS - EXPECT_THROW(cugraph::uniform_neighbor_sample( - handle, - graph_view, - edge_weight_view, - std::nullopt, - std::nullopt, - std::move(random_sources_copy), - std::move(batch_number), - raft::host_span(uniform_neighbor_sampling_usecase.fanout.data(), - uniform_neighbor_sampling_usecase.fanout.size()), - rng_state, - true, - uniform_neighbor_sampling_usecase.flag_replacement), - std::exception); -#else std::optional, raft::device_span>> label_to_output_comm_rank_mapping{std::nullopt}; +#ifdef NO_CUGRAPH_OPS + EXPECT_THROW( + cugraph::uniform_neighbor_sample( + handle, + graph_view, + edge_weight_view, + std::optional>{std::nullopt}, + std::optional>{std::nullopt}, + raft::device_span{random_sources_copy.data(), random_sources.size()}, + batch_number ? 
std::make_optional(raft::device_span{batch_number->data(), + batch_number->size()}) + : std::nullopt, + label_to_output_comm_rank_mapping, + raft::host_span(uniform_neighbor_sampling_usecase.fanout.data(), + uniform_neighbor_sampling_usecase.fanout.size()), + rng_state, + true, + uniform_neighbor_sampling_usecase.flag_replacement), + std::exception); +#else auto&& [src_out, dst_out, wgt_out, edge_id, edge_type, hop, labels, offsets] = cugraph::uniform_neighbor_sample( handle, diff --git a/dependencies.yaml b/dependencies.yaml index 9b858999743..572638069dc 100644 --- a/dependencies.yaml +++ b/dependencies.yaml @@ -3,7 +3,7 @@ files: all: output: [conda] matrix: - cuda: ["11.8"] + cuda: ["11.8", "12.0"] arch: [x86_64] includes: - checks @@ -181,10 +181,10 @@ channels: - rapidsai - rapidsai-nightly - dask/label/dev - - conda-forge - - nvidia - pytorch - dglteam/label/cu118 + - conda-forge + - nvidia dependencies: checks: common: @@ -195,22 +195,30 @@ dependencies: specific: - output_types: [conda] matrices: + - matrix: + cuda: "12.0" + packages: + - cuda-version=12.0 - matrix: cuda: "11.8" packages: - - cudatoolkit=11.8 + - cuda-version=11.8 + - cudatoolkit - matrix: cuda: "11.5" packages: - - cudatoolkit=11.5 + - cuda-version=11.5 + - cudatoolkit - matrix: cuda: "11.4" packages: - - cudatoolkit=11.4 + - cuda-version=11.4 + - cudatoolkit - matrix: cuda: "11.2" packages: - - cudatoolkit=11.2 + - cuda-version=11.2 + - cudatoolkit common_build: common: - output_types: [conda, pyproject] @@ -253,6 +261,11 @@ dependencies: cuda: "11.8" packages: - nvcc_linux-aarch64=11.8 + - matrix: + cuda: "12.0" + packages: + - cuda-version=12.0 + - cuda-nvcc docs: common: - output_types: [conda] diff --git a/notebooks/algorithms/layout/Force-Atlas2.ipynb b/notebooks/algorithms/layout/Force-Atlas2.ipynb index a0dafbef511..eaab502f180 100644 --- a/notebooks/algorithms/layout/Force-Atlas2.ipynb +++ b/notebooks/algorithms/layout/Force-Atlas2.ipynb @@ -1,6 +1,7 @@ { "cells": [ { + 
"attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -9,6 +10,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -19,19 +21,14 @@ "| Author Credit | Date | Update | cuGraph Version | Test Hardware |\n", "| -----------------|------------|------------------|-----------------|----------------|\n", "| Hugo Linsenmaier | 11/16/2020 | created | 0.17 | GV100, CUDA 11.0\n", - "| Brad Rees | 01/11/2022 | tested / updated | 22.02 nightly | RTX A6000 CUDA 11.5\n", - "| Ralph Liu | 06/22/2022 | updated/tested | 22.08 | TV100, CUDA 11.5\n", - "| Don Acosta | 08/01/2022 | tested / updated | 22.08 nightly | DGX Tesla A100 CUDA 11.5 " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### This notebook will not currently run because there is a conflict between the version of CuPy required by cugraph (11.0) and the version supported in cuxfilter (7.8 to 10.0). Notebook will be updated when cuxfilter supports CuPy 11." + "| Brad Rees | 01/11/2022 | tested / updated | 22.02 nightly | RTX A6000 48GB CUDA 11.5\n", + "| Ralph Liu | 06/22/2022 | updated/tested | 22.08 nightly | V100, CUDA 11.5\n", + "| Don Acosta | 08/01/2022 | tested / updated | 22.08 nightly | DGX Tesla A100 CUDA 11.5 \n", + "| Don Acosta | 07/17/2023 | tested / updated | 23.08 nightly |RTX A6000 48GB CUDA 11.7 " ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -44,45 +41,20 @@ "See https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0098679 for more details.\n", "\n", "\n", - "Please refer to the [documentation](https://docs.rapids.ai/api/cugraph/stable/api_docs/api/cugraph.force_atlas2.html) on how to use the different parameters.\n", - "\n", - "This library has some additional dependencies that need to be installed prior to running:\n", - "* Holoviews - provides " + "Please refer to the [documentation](https://docs.rapids.ai/api/cugraph/stable/api_docs/api/cugraph.force_atlas2.html) on 
how to use the different parameters.\n" ] }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Import RAPIDS libraries\n", - "import cudf\n", "import cugraph\n", "import time" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Viz libraries\n", - "\n", - "from cuxfilter.charts.datashader.custom_extensions.graph_assets import calc_connected_edges\n", - "\n", - "import holoviews as hv\n", - "\n", - "from colorcet import fire\n", - "from datashader.bundling import directly_connect_edges, hammer_bundle\n", - "\n", - "from holoviews.operation.datashader import datashade, dynspread\n", - "from holoviews.operation import decimate\n", - "\n", - "from dask.distributed import Client" - ] - }, { "cell_type": "code", "execution_count": null, @@ -106,22 +78,7 @@ ] }, { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Setup Viz\n", - "client = Client()\n", - "hv.notebook_extension('bokeh','matplotlib')\n", - "decimate.max_samples=20000\n", - "dynspread.threshold=0.01\n", - "datashade.cmap=fire[40:]\n", - "sz = dict(width=150,height=150)\n", - "%opts RGB [xaxis=None yaxis=None show_grid=False bgcolor=\"black\"]" - ] - }, - { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -134,11 +91,12 @@ "metadata": {}, "outputs": [], "source": [ - "G = netscience.get_graph()\n", + "G = netscience.get_graph(fetch=True)\n", "G.number_of_nodes(), G.number_of_edges()" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -167,14 +125,31 @@ " verbose=False,\n", " callback=None)\n", "elapsed = time.time() - start\n", - "print(\"Cugraph time : \" + str(elapsed))" + "print(\"Cugraph time : \" + str(elapsed))\n", + "pos_gdf.head(5)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Visualize the graph\n", + "\n", + "The following 
section creates a visualization of the network using the locations generated by the Force Atlas algorithms. However, the following section is dependent on having the **cuxfilter** package installed. \n", + "\n", + "See the cuxfilter GitHub page for installation: https://github.com/rapidsai/cuxfilter. \n", + "\n", + "Alternatively, the package comes installed in the RAPIDS development Docker container. \n", + "See: https://hub.docker.com/r/rapidsai/rapidsai-dev/ \n", + "\n", + "
" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Convert a graph into paths suitable for datashading" + "Set up the visualization" ] }, { @@ -183,20 +158,54 @@ "metadata": {}, "outputs": [], "source": [ - "edges_gdf = netscience.get_edgelist()\n", + "import importlib.util\n", + "cux_spec = importlib.util.find_spec(\"cuxfilter\")\n", + "if cux_spec is None:\n", + " print(\"Visualization package is not available.\")\n", + "else:\n", + " from cuxfilter.charts.datashader.custom_extensions.graph_assets import calc_connected_edges\n", + " # Viz libraries\n", + " import holoviews as hv\n", + "\n", + " from colorcet import fire\n", + " from datashader.bundling import directly_connect_edges, hammer_bundle\n", + "\n", + " from holoviews.operation.datashader import datashade, dynspread\n", + " from holoviews.operation import decimate\n", "\n", - "connected = calc_connected_edges(pos_gdf,\n", - " edges_gdf,\n", - " node_x=\"x\",\n", - " node_y=\"y\",\n", - " node_x_dtype=\"float32\",\n", - " node_y_dtype=\"float32\",\n", - " node_id=\"vertex\",\n", - " edge_source=\"src\",\n", - " edge_target=\"dst\",\n", - " edge_aggregate_col=None,\n", - " edge_render_type=\"direct\",\n", - " )" + " from dask.distributed import Client\n", + "\n", + " # Define the parameters \n", + " ITERATIONS=500\n", + " THETA=1.0\n", + " OPTIMIZE=True\n", + "\n", + " # Import a built-in dataset\n", + " from cugraph.experimental.datasets import netscience\n", + "\n", + " # Setup Viz\n", + " client = Client()\n", + " hv.notebook_extension('bokeh','matplotlib')\n", + " decimate.max_samples=20000\n", + " dynspread.threshold=0.01\n", + " datashade.cmap=fire[40:]\n", + " sz = dict(width=150,height=150)\n", + " %opts RGB [xaxis=None yaxis=None show_grid=False bgcolor=\"black\"]\n", + "\n", + " edges_gdf = netscience.get_edgelist()\n", + "\n", + " connected = calc_connected_edges(pos_gdf,\n", + " edges_gdf,\n", + " node_x=\"x\",\n", + " node_y=\"y\",\n", + " node_x_dtype=\"float32\",\n", + " 
node_y_dtype=\"float32\",\n", + " node_id=\"vertex\",\n", + " edge_source=\"src\",\n", + " edge_target=\"dst\",\n", + " edge_aggregate_col=None,\n", + " edge_render_type=\"direct\",\n", + " )\n" ] }, { @@ -206,23 +215,33 @@ "### Output" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "%%opts RGB [tools=[\"hover\"] width=800 height=800]\n", - "\n", - "r_direct = hv.Curve(connected, label=\"Direct\")\n", - "datashade(r_direct)" + "r_direct = None\n", + "if cux_spec is not None:\n", + " %opts RGB [tools=[\"hover\"] width=800 height=800]\n", + " r_direct = hv.Curve(connected, label=\"Direct\")\n", + " \n", + "r_direct" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "Copyright (c) 2020 - 2022, NVIDIA CORPORATION.\n", + "Copyright (c) 2020 - 2023, NVIDIA CORPORATION.\n", "\n", "Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n", "\n", @@ -246,7 +265,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.13" + "version": "3.10.12" }, "vscode": { "interpreter": { diff --git a/python/cugraph-dgl/conda/cugraph_dgl_dev_cuda-118.yaml b/python/cugraph-dgl/conda/cugraph_dgl_dev_cuda-118.yaml index a252d5e0c78..6961a485742 100644 --- a/python/cugraph-dgl/conda/cugraph_dgl_dev_cuda-118.yaml +++ b/python/cugraph-dgl/conda/cugraph_dgl_dev_cuda-118.yaml @@ -4,10 +4,10 @@ channels: - rapidsai - rapidsai-nightly - dask/label/dev -- conda-forge -- nvidia - pytorch - dglteam/label/cu118 +- conda-forge +- nvidia dependencies: - cugraph==23.8.* - dgl>=1.1.0.cu* diff --git a/python/cugraph-dgl/cugraph_dgl/dataloading/utils/sampling_helpers.py b/python/cugraph-dgl/cugraph_dgl/dataloading/utils/sampling_helpers.py index 02052c9841d..051464f08bb 100644 --- a/python/cugraph-dgl/cugraph_dgl/dataloading/utils/sampling_helpers.py +++ b/python/cugraph-dgl/cugraph_dgl/dataloading/utils/sampling_helpers.py @@ -52,9 +52,11 @@ def _get_tensor_ls_from_sampled_df(df): batch_indices = torch.searchsorted(batch_id_tensor, batch_indices) split_d = {} + for column in ["sources", "destinations", "edge_id", "hop_id"]: - tensor = cast_to_tensor(df[column]) - split_d[column] = torch.tensor_split(tensor, batch_indices.cpu()) + if column in df.columns: + tensor = cast_to_tensor(df[column]) + split_d[column] = torch.tensor_split(tensor, batch_indices.cpu()) result_tensor_ls = [] for i, hop_id_tensor in enumerate(split_d["hop_id"]): @@ -66,7 +68,11 @@ def _get_tensor_ls_from_sampled_df(df): hop_indices = torch.searchsorted(hop_id_tensor, hop_indices) s = torch.tensor_split(split_d["sources"][i], hop_indices.cpu()) d = torch.tensor_split(split_d["destinations"][i], hop_indices.cpu()) - eid = torch.tensor_split(split_d["edge_id"][i], hop_indices.cpu()) + if "edge_id" in split_d: + eid = 
torch.tensor_split(split_d["edge_id"][i], hop_indices.cpu()) + else: + eid = [None] * len(s) + result_tensor_ls.append((x, y, z) for x, y, z in zip(s, d, eid)) return result_tensor_ls @@ -125,7 +131,7 @@ def _create_homogeneous_sampled_graphs_from_tensors_perhop( def create_homogeneous_dgl_block_from_tensors_ls( src_ids: torch.Tensor, dst_ids: torch.Tensor, - edge_ids: torch.Tensor, + edge_ids: Optional[torch.Tensor], seed_nodes: Optional[torch.Tensor], total_number_of_nodes: int, ): @@ -133,7 +139,8 @@ def create_homogeneous_dgl_block_from_tensors_ls( (src_ids, dst_ids), num_nodes=total_number_of_nodes, ) - sampled_graph.edata[dgl.EID] = edge_ids + if edge_ids is not None: + sampled_graph.edata[dgl.EID] = edge_ids # TODO: Check if unique is needed if seed_nodes is None: seed_nodes = dst_ids.unique() @@ -144,7 +151,8 @@ def create_homogeneous_dgl_block_from_tensors_ls( src_nodes=src_ids.unique(), include_dst_in_src=True, ) - block.edata[dgl.EID] = sampled_graph.edata[dgl.EID] + if edge_ids is not None: + block.edata[dgl.EID] = sampled_graph.edata[dgl.EID] return block diff --git a/python/cugraph/cugraph/gnn/data_loading/bulk_sampler_io.py b/python/cugraph/cugraph/gnn/data_loading/bulk_sampler_io.py index 44c1185bbf1..002b214e783 100644 --- a/python/cugraph/cugraph/gnn/data_loading/bulk_sampler_io.py +++ b/python/cugraph/cugraph/gnn/data_loading/bulk_sampler_io.py @@ -51,6 +51,8 @@ def _write_samples_to_parquet( raise ValueError("Invalid value of partition_info") max_batch_id = offsets.batch_id.max() + results.dropna(axis=1, how="all", inplace=True) + results["hop_id"] = results["hop_id"].astype("uint8") for p in range(0, len(offsets), batches_per_partition): offsets_p = offsets.iloc[p : p + batches_per_partition]