
Remove libcugraphops dependency and add CUDA 12 CI support #41

Merged (8 commits) on Jul 25, 2023
1 change: 0 additions & 1 deletion ci/release/update-version.sh
@@ -66,7 +66,6 @@ NEXT_SHORT_TAG_PEP440=$(python -c "from setuptools.extern import packaging; prin

DEPENDENCIES=(

libcugraphops
libraft
libraft-headers
librmm
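Context for this change: DEPENDENCIES in ci/release/update-version.sh lists the packages whose version pins the release script rewrites, so dropping libcugraphops here stops the script from trying to bump a pin that no longer appears anywhere in the repo. A minimal, hypothetical sketch of that pattern (the real script's sed expressions and file list are not shown in this diff):

#!/usr/bin/env bash
# Hypothetical illustration only: rewrite each listed dependency's pin to the
# next release tag in the dependency files.
NEXT_SHORT_TAG="23.08"
DEPENDENCIES=(
  libraft
  libraft-headers
  librmm
)
for DEP in "${DEPENDENCIES[@]}"; do
  for FILE in dependencies.yaml conda/environments/*.yaml; do
    [ -f "${FILE}" ] || continue
    # Turn pins like "libraft==23.06.*" into "libraft==23.08.*".
    sed -i -E "s/(${DEP})==[0-9]+\.[0-9]+/\1==${NEXT_SHORT_TAG}/g" "${FILE}"
  done
done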
7 changes: 6 additions & 1 deletion ci/test_python.sh
@@ -6,12 +6,18 @@ set -euo pipefail
. /opt/conda/etc/profile.d/conda.sh

ARCH=$(arch)
EXITCODE=0

if [ "${ARCH}" = "aarch64" ]; then
rapids-logger "Exiting aarch64 due to no pytorch-cuda"
exit ${EXITCODE}
fi

if [ "${RAPIDS_CUDA_VERSION:0:2}" == "12" ]; then
rapids-logger "Exiting CUDA 12 due to no pytorch stable yet"
exit ${EXITCODE}
fi

rapids-logger "Generate Python testing dependencies"
rapids-dependency-file-generator \
--output conda \
@@ -44,7 +50,6 @@ rapids-mamba-retry install \

rapids-logger "Check GPU usage"
nvidia-smi
EXITCODE=0
trap "EXITCODE=1" ERR
set +e

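The two early-exit guards above gate the whole Python test script: aarch64 is skipped because there is no pytorch-cuda package for it, and CUDA 12 is skipped until a stable pytorch build for CUDA 12 exists. A standalone sketch of the CUDA-major check, assuming RAPIDS_CUDA_VERSION is exported by the CI image (e.g. "11.8.0" or "12.0.1"):

#!/usr/bin/env bash
set -euo pipefail

# Sketch only: default the variable so the snippet also runs outside CI.
RAPIDS_CUDA_VERSION="${RAPIDS_CUDA_VERSION:-12.0.1}"

# ${VAR:0:2} takes the first two characters, i.e. the CUDA major version.
if [ "${RAPIDS_CUDA_VERSION:0:2}" == "12" ]; then
  echo "CUDA 12 detected: skipping Python tests (no stable pytorch for CUDA 12 yet)"
  exit 0
fi

echo "CUDA ${RAPIDS_CUDA_VERSION}: running Python tests"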
10 changes: 2 additions & 8 deletions conda/environments/all_cuda-118_arch-x86_64.yaml
@@ -21,14 +21,10 @@ dependencies:
- gcc_linux-64=11.*
- gitpython
- graphviz
- gtest>=1.13.0
- gmock>=1.13.0
- ipykernel
- ipython
- libcugraphops=23.8.*
- libraft-headers=23.8.*
- libraft=23.8.*
- librmm=23.8.*
- libraft-headers==23.08.*
- librmm==23.08.*
- nanobind>=0.2.0
- nbsphinx
- nccl
@@ -51,6 +47,4 @@ dependencies:
- sphinx<6
- sphinxcontrib-websupport
- sysroot_linux-64=2.17
- unzip
- wget
name: all_cuda-118_arch-x86_64
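The generated environment files are consumed as ordinary conda environment specs; for example, a sketch of creating a local dev environment from the CUDA 11.8 file (assumes mamba or conda is installed; the environment name is arbitrary):

# Sketch only: build a development environment from the generated spec.
mamba env create --name wholegraph-dev \
  --file conda/environments/all_cuda-118_arch-x86_64.yaml
conda activate wholegraph-dev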
49 changes: 49 additions & 0 deletions conda/environments/all_cuda-120_arch-x86_64.yaml
@@ -0,0 +1,49 @@
# This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- rapidsai-nightly
- pytorch
- conda-forge
- nvidia
dependencies:
- breathe
- c-compiler
- clang-tools=16.0.0
- clangxx=16.0.0
- cmake>=3.23.1,!=3.25.0
- cuda-nvcc
- cuda-nvtx
- cuda-version=12.0
- cudatoolkit
- cudnn=8.4
- cxx-compiler
- cython
- doxygen=1.8.20
- gcc_linux-64=11.*
- gitpython
- graphviz
- ipykernel
- ipython
- libraft-headers==23.08.*
- librmm==23.08.*
- nanobind>=0.2.0
- nbsphinx
- nccl
- ninja
- numpy>=1.17
- numpydoc
- pre-commit
- pydata-sphinx-theme
- pytest
- pytest-forked
- pytest-xdist
- python>=3.9,<3.11
- recommonmark
- scikit-build
- sphinx-copybutton
- sphinx-markdown-tables
- sphinx<6
- sphinxcontrib-websupport
- sysroot_linux-64=2.17
name: all_cuda-120_arch-x86_64
5 changes: 4 additions & 1 deletion conda/recipes/libwholegraph/conda_build_config.yaml
@@ -5,10 +5,13 @@ cxx_compiler_version:
- 11

cuda_compiler:
- cuda-nvcc

cuda11_compiler:
- nvcc

cmake_version:
- ">=3.23.1,!=3.25.0"
- ">=3.26.4"

doxygen_version:
- ">=1.8.11"
49 changes: 42 additions & 7 deletions conda/recipes/libwholegraph/meta.yaml
@@ -41,19 +41,28 @@ build:
requirements:
build:
- {{ compiler('c') }}
- {{ compiler('cuda') }} {{ cuda_version }}
- {{ compiler('cxx') }}
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} ={{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- cuda-version ={{ cuda_version }}
{% if cuda_major == "11" %}
- cudatoolkit
{% else %}
- cuda-cudart-dev
- cuda-driver-dev
{% endif %}
- cmake {{ cmake_version }}
- ninja
- sysroot_{{ target_platform }} {{ sysroot_version }}
host:
- cmake {{ cmake_version }}
- cuda-nvtx ={{ cuda_version }}
- cudatoolkit ={{ cuda_version }}
- cuda-version ={{ cuda_version }}
- doxygen {{ doxygen_version }}
- gmock {{ gtest_version }}
- gtest {{ gtest_version }}
- libcugraphops ={{ minor_version }}
- libraft ={{ minor_version }}
- libraft-headers ={{ minor_version }}
- librmm ={{ minor_version }}
@@ -72,9 +81,19 @@ outputs:
requirements:
build:
- cmake {{ cmake_version }}
host:
- cuda-version ={{ cuda_version }}
{% if cuda_major == "11" %}
- cudatoolkit
{% else %}
- cuda-cudart-dev
- cuda-driver-dev
{% endif %}
run:
- cudatoolkit {{ cuda_spec }}
- libcugraphops ={{ minor_version }}
- cuda-version ={{ cuda_version }}
{% if cuda_major == "11" %}
- cudatoolkit
{% endif %}
- libraft ={{ minor_version }}
- libraft-headers ={{ minor_version }}
- librmm ={{ minor_version }}
@@ -91,13 +110,29 @@ outputs:
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} ={{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- cuda-version ={{ cuda_version }}
requirements:
build:
- cmake {{ cmake_version }}
host:
- cuda-version ={{ cuda_version }}
{% if cuda_major == "11" %}
- cudatoolkit
{% else %}
- cuda-cudart-dev
- cuda-driver-dev
{% endif %}
run:
- {{ pin_subpackage('libwholegraph', exact=True) }}
- cudatoolkit {{ cuda_spec }}
- cuda-version ={{ cuda_version }}
{% if cuda_major == "11" %}
- cudatoolkit
{% endif %}
- gmock {{ gtest_version }}
- gtest {{ gtest_version }}
about:
5 changes: 4 additions & 1 deletion conda/recipes/pylibwholegraph/conda_build_config.yaml
@@ -5,10 +5,13 @@ cxx_compiler_version:
- 11

cuda_compiler:
- cuda-nvcc

cuda11_compiler:
- nvcc

cmake_version:
- ">=3.23.1,!=3.25.0"
- ">=3.26.4"

scikit_build_version:
- ">=0.13.1"
22 changes: 18 additions & 4 deletions conda/recipes/pylibwholegraph/meta.yaml
@@ -35,13 +35,23 @@ build:
- SCCACHE_S3_USE_SSL
- SCCACHE_S3_NO_CREDENTIALS
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} ={{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- cuda-version ={{ cuda_version }}

requirements:
build:
- {{ compiler('c') }}
- {{ compiler('cuda') }} {{ cuda_version }}
- {{ compiler('cxx') }}
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} ={{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- cuda-version ={{ cuda_version }}
- cmake {{ cmake_version }}
- ninja
- nccl
@@ -51,18 +61,22 @@ requirements:
- doxygen =1.8.20
- sysroot_{{ target_platform }} {{ sysroot_version }}
host:
- cudatoolkit ={{ cuda_version }}
- cuda-version ={{ cuda_version }}
- libwholegraph ={{ version }}
- nanobind >=0.2.0
- python
- scikit-build {{ scikit_build_version }}
run:
- {{ compiler('c') }}
- {{ compiler('cuda') }} {{ cuda_version }}
- {{ compiler('cxx') }}
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} ={{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- cuda-version ={{ cuda_version }}
- cmake {{ cmake_version }}
- ninja
- {{ pin_compatible('cudatoolkit', max_pin='x', min_pin='x') }}
- libwholegraph ={{ version }}
- python

12 changes: 11 additions & 1 deletion dependencies.yaml
@@ -3,7 +3,7 @@ files:
all:
output: [conda]
matrix:
cuda: ["11.5", "11.8"]
cuda: ["11.8", "12.0"]
arch: [x86_64]
includes:
- checks
@@ -98,6 +98,11 @@ dependencies:
cuda: "11.8"
packages:
- nvcc_linux-aarch64=11.8
- matrix:
cuda: "12.0"
packages:
- cuda-version=12.0
- cuda-nvcc
cudatoolkit:
specific:
- output_types: conda
@@ -122,6 +127,11 @@
packages:
- cudatoolkit=11.8
- cuda-nvtx=11.8
- matrix:
cuda: "12.0"
packages:
- cuda-version=12.0
- cuda-nvtx
checks:
common:
- output_types: [conda, requirements]
@@ -351,13 +351,13 @@ cdef class GlobalContextWrapper:
if output_global_context:
self.output_global_context = <PyObject *> output_global_context
Py_INCREF(self.output_global_context)
self.env_func.temporary_fns.create_memory_context_fn = &python_cb_wrapper_temp_create_context
self.env_func.temporary_fns.destroy_memory_context_fn = &python_cb_wrapper_temp_destroy_context
self.env_func.temporary_fns.malloc_fn = &python_cb_wrapper_temp_malloc
self.env_func.temporary_fns.free_fn = &python_cb_wrapper_temp_free
self.env_func.temporary_fns.create_memory_context_fn = <wholememory_create_memory_context_func_t>&python_cb_wrapper_temp_create_context
self.env_func.temporary_fns.destroy_memory_context_fn = <wholememory_destroy_memory_context_func_t>&python_cb_wrapper_temp_destroy_context
self.env_func.temporary_fns.malloc_fn = <wholememory_malloc_func_t>&python_cb_wrapper_temp_malloc
self.env_func.temporary_fns.free_fn = <wholememory_free_func_t>&python_cb_wrapper_temp_free
self.env_func.temporary_fns.global_context = <PyObject *> self
self.env_func.output_fns.malloc_fn = &python_cb_wrapper_output_malloc
self.env_func.output_fns.free_fn = &python_cb_wrapper_output_free
self.env_func.output_fns.malloc_fn = <wholememory_malloc_func_t>&python_cb_wrapper_output_malloc
self.env_func.output_fns.free_fn = <wholememory_free_func_t>&python_cb_wrapper_output_free
self.env_func.output_fns.global_context = <PyObject *> self

cpdef int64_t get_env_fns(self):
@@ -952,7 +952,7 @@ cdef class PyWholeMemoryUniqueID:
dlm_tensor.manager_ctx = <void *> self
cpython.Py_INCREF(self)
dlm_tensor.deleter = deleter
return cpython.PyCapsule_New(dlm_tensor, 'dltensor', pycapsule_deleter)
return cpython.PyCapsule_New(dlm_tensor, 'dltensor', <cpython.PyCapsule_Destructor>&pycapsule_deleter)

def __dlpack_device__(self):
return (kDLCPU, 0)
@@ -1173,7 +1173,7 @@ cdef class PyWholeMemoryFlattenDlpack:
dlm_tensor.manager_ctx = <void *> self
cpython.Py_INCREF(self)
dlm_tensor.deleter = deleter
return cpython.PyCapsule_New(dlm_tensor, 'dltensor', pycapsule_deleter)
return cpython.PyCapsule_New(dlm_tensor, 'dltensor', <cpython.PyCapsule_Destructor> &pycapsule_deleter)

def __dlpack_device__(self):
if self.device_type == MlHost: