Remove official support for CUDA 11.0 and 11.1
This is to support using newer CCCL (which does not support CUDA 11.0) and to simplify the pyflamegpu distribution matrix (dropping 11.1).

11.0 currently builds and passes tests on Linux, but does not build on Windows.
11.1 currently builds and passes tests on both.

Workarounds and warnings specific to these versions are not being removed, and CMake will only warn rather than error if these versions are used, since they currently work and 11.2+ may not be available everywhere.

Also fixes some typos as and when encountered
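
A condensed CMake sketch of the gating described above may help: two version floors, a hard error below the usable floor, and a warning rather than an error for the now-unsupported 11.0/11.1 range. The variable names follow the diffs below, but this block is illustrative rather than the project's verbatim logic (for example, the top-level CMakeLists.txt switches to a docs-only build instead of erroring).

```cmake
# Illustrative sketch of the version gate introduced by this commit (not verbatim project code).
# Usable floor: CUDA 11.0 on Linux, 11.1 on Windows; officially supported: CUDA 11.2+.
set(MINIMUM_SUPPORTED_CUDA_VERSION 11.2)
set(MINIMUM_CUDA_VERSION 11.0)
if(WIN32)
    set(MINIMUM_CUDA_VERSION 11.1)
endif()

if(CMAKE_CUDA_COMPILER_VERSION VERSION_LESS ${MINIMUM_CUDA_VERSION})
    # Below the usable floor: cmake/common.cmake errors out
    # (the top-level CMakeLists.txt falls back to a documentation-only build instead).
    message(FATAL_ERROR "CUDA version must be at least ${MINIMUM_CUDA_VERSION}")
elseif(CMAKE_CUDA_COMPILER_VERSION VERSION_LESS ${MINIMUM_SUPPORTED_CUDA_VERSION})
    # Usable but unsupported (11.0 on Linux, 11.1 on Windows): warn, but do not error.
    message(WARNING "CUDA versions >= ${MINIMUM_CUDA_VERSION} && < ${MINIMUM_SUPPORTED_CUDA_VERSION} are unsupported but may work on some platforms.")
endif()
```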
ptheywood committed Dec 1, 2023
1 parent 3801865 commit d31e54a
Showing 11 changed files with 36 additions and 30 deletions.
2 changes: 1 addition & 1 deletion .github/ISSUE_TEMPLATE/bug_report.yml
@@ -48,7 +48,7 @@ body:
attributes:
label: CUDA Versions
description:
placeholder: e.g. CUDA 11.0, CUDA 12.2
placeholder: e.g. CUDA 11.2, CUDA 12.2
validations:
required: false
- type: input
2 changes: 1 addition & 1 deletion .github/workflows/CMake.yml
@@ -26,7 +26,7 @@ jobs:
# Multiplicative build matrix
matrix:
cudacxx:
- cuda: "11.0"
- cuda: "11.2"
cuda_arch: "35"
hostcxx: gcc-8
os: ubuntu-20.04
4 changes: 2 additions & 2 deletions .github/workflows/Draft-Release.yml
@@ -49,7 +49,7 @@ jobs:
cuda_arch: "35-real;90-real;90-virtual"
hostcxx: gcc-9
os: ubuntu-20.04
- cuda: "11.0"
- cuda: "11.2"
cuda_arch: "35-real;80-real;80-virtual"
hostcxx: gcc-8
os: ubuntu-20.04
@@ -202,7 +202,7 @@ jobs:
cuda_arch: "35-real;90-real;90-virtual"
hostcxx: "Visual Studio 16 2019"
os: windows-2019
- cuda: "11.0.3"
- cuda: "11.2.2"
cuda_arch: "35-real;80-real;80-virtual"
hostcxx: "Visual Studio 16 2019"
os: windows-2019
6 changes: 3 additions & 3 deletions .github/workflows/Ubuntu.yml
@@ -37,7 +37,7 @@ jobs:
cuda_arch: "35"
hostcxx: gcc-11
os: ubuntu-22.04
- cuda: "11.0"
- cuda: "11.2"
cuda_arch: "35"
hostcxx: gcc-8
os: ubuntu-20.04
@@ -56,15 +56,15 @@
exclude:
# Exclude VIS=ON for oldest cuda.
- cudacxx:
cuda: "11.0"
cuda: "11.2"
VISUALISATION: "ON"
# Exclude beltsoff builds for old cuda's
- cudacxx:
cuda: "11.8"
config:
name: "Beltsoff"
- cudacxx:
cuda: "11.0"
cuda: "11.2"
config:
name: "Beltsoff"
# Exclude beltsoff vis builds to keep the matrix lighter.
2 changes: 1 addition & 1 deletion .github/workflows/Windows-Tests.yml
@@ -31,7 +31,7 @@ jobs:
cuda_arch: "35"
hostcxx: "Visual Studio 17 2022"
os: windows-2022
- cuda: "11.0.3"
- cuda: "11.2.2"
cuda_arch: "35"
hostcxx: "Visual Studio 16 2019"
os: windows-2019
6 changes: 3 additions & 3 deletions .github/workflows/Windows.yml
@@ -37,7 +37,7 @@ jobs:
cuda_arch: "35"
hostcxx: "Visual Studio 17 2022"
os: windows-2022
- cuda: "11.0.3"
- cuda: "11.2.2"
cuda_arch: "35"
hostcxx: "Visual Studio 16 2019"
os: windows-2019
@@ -56,15 +56,15 @@
exclude:
# Exclude VIS=ON for oldest cuda.
- cudacxx:
cuda: "11.0.3"
cuda: "11.2.2"
VISUALISATION: "ON"
# Exclude beltsoff builds for old cuda's
- cudacxx:
cuda: "11.8.0"
config:
name: "Beltsoff"
- cudacxx:
cuda: "11.0.3"
cuda: "11.2.2"
config:
name: "Beltsoff"
# Exclude beltsoff vis builds to keep the matrix lighter.
17 changes: 11 additions & 6 deletions CMakeLists.txt
@@ -49,20 +49,25 @@ if(CMAKE_CUDA_COMPILER)
flamegpu_set_cuda_architectures()
endif()

# Set the minimum supported version of CUDA for FLAME GPU, currently 11.0
set(MINIMUM_SUPPORTED_CUDA_VERSION 11.0)
# Set the minimum, usable, but deprecated CUDA version. Currently there are no deprecated versions
# Set the minimum supported version of CUDA for FLAME GPU, currently 11.2
set(MINIMUM_SUPPORTED_CUDA_VERSION 11.2)
# Set the minimum, potentially usable, but unsupported CUDA version.
# Currently 11.0 on linux and 11.1 on windows (due to CCCL support).
# CUDA 11.1 is not supported to simplify python releases
set(MINIMUM_CUDA_VERSION 11.0)
if(WIN32)
set(MINIMUM_CUDA_VERSION 11.1)
endif()

# If the CUDA compiler is too old, trigger a docs only build.
if(CMAKE_CUDA_COMPILER_VERSION VERSION_LESS ${MINIMUM_CUDA_VERSION})
set(DOCUMENTATION_ONLY_BUILD ON)
message(STATUS "Documentation-only build: CUDA ${MINIMUM_SUPPORTED_CUDA_VERSION} or greater is required for compilation.")
endif()

# If the CUDA compiler is atleast the minimum deprecated version, but less than the minimum actually supported version, issue a dev warning.
# If the CUDA compiler is at least the minimum (unsupported) version, but less than the minimum actually supported version, issue a warning.
if(CMAKE_CUDA_COMPILER_VERSION VERSION_GREATER_EQUAL ${MINIMUM_CUDA_VERSION} AND CMAKE_CUDA_COMPILER_VERSION VERSION_LESS ${MINIMUM_SUPPORTED_CUDA_VERSION})
message(DEPRECATION "Support for CUDA verisons <= ${MINIMUM_SUPPORTED_CUDA_VERSION} is deprecated and will be removed in a future release.")
message(WARNING "CUDA versions >= ${MINIMUM_CUDA_VERSION} && < ${MINIMUM_SUPPORTED_CUDA_VERSION} are unsupported but may work on some platforms.")
endif()

# If CUDA is not available, or the minimum version is too low only build the docs.
@@ -78,7 +83,7 @@ endif()
# include for dependent modules
include(CMakeDependentOption)

# Option to enable building all examples, defaults to ON if FLAMEPGU is the top level cmake, else OFF
# Option to enable building all examples, defaults to ON if FLAMEGPU is the top level cmake, else OFF
cmake_dependent_option(FLAMEGPU_BUILD_ALL_EXAMPLES "Enable building all FLAMEGPU examples" ON "FLAMEGPU_PROJECT_IS_TOP_LEVEL" OFF)

# Options to enable building individual examples, if FLAMEGPU_BUILD_ALL_EXAMPLES is off.
7 changes: 2 additions & 5 deletions README.md
@@ -63,7 +63,7 @@ Building FLAME GPU has the following requirements. There are also optional depen

+ [CMake](https://cmake.org/download/) `>= 3.18`
+ `>= 3.20` if building python bindings using a multi-config generator (Visual Studio, Eclipse or Ninja Multi-Config)
+ [CUDA](https://developer.nvidia.com/cuda-downloads) `>= 11.0` and a [Compute Capability](https://developer.nvidia.com/cuda-gpus) `>= 3.5` NVIDIA GPU.
+ [CUDA](https://developer.nvidia.com/cuda-downloads) `>= 11.2` and a [Compute Capability](https://developer.nvidia.com/cuda-gpus) `>= 3.5` NVIDIA GPU.
+ C++17 capable C++ compiler (host), compatible with the installed CUDA version
+ [Microsoft Visual Studio 2019 or 2022](https://visualstudio.microsoft.com/) (Windows)
+ *Note:* Visual Studio must be installed before the CUDA toolkit is installed. See the [CUDA installation guide for Windows](https://docs.nvidia.com/cuda/cuda-installation-guide-microsoft-windows/index.html) for more information.
@@ -242,7 +242,7 @@ Several environmental variables are used or required by FLAME GPU 2.

| Environment Variable | Description |
|--------------------------------------|-------------|
| `CUDA_PATH` | Required when using RunTime Compilation (RTC), pointing to the root of the CUDA Toolkit where NVRTC resides. <br /> i.e. `/usr/local/cuda-11.0/` or `C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.0`. <br /> Alternatively `CUDA_HOME` may be used if `CUDA_PATH` was not set. |
| `CUDA_PATH` | Required when using RunTime Compilation (RTC), pointing to the root of the CUDA Toolkit where NVRTC resides. <br /> i.e. `/usr/local/cuda-11.2/` or `C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.2`. <br /> Alternatively `CUDA_HOME` may be used if `CUDA_PATH` was not set. |
| `FLAMEGPU_INC_DIR` | When RTC compilation is required, if the location of the `include` directory cannot be found it must be specified using the `FLAMEGPU_INC_DIR` environment variable. |
| `FLAMEGPU_TMP_DIR` | FLAME GPU may cache some files to a temporary directory on the system, using the temporary directory returned by [`std::filesystem::temp_directory_path`](https://en.cppreference.com/w/cpp/filesystem/temp_directory_path). The location can optionally be overridden using the `FLAMEGPU_TMP_DIR` environment variable. |
| `FLAMEGPU_RTC_INCLUDE_DIRS` | A list of include directories that should be provided to the RTC compiler, these should be separated using `;` (Windows) or `:` (Linux). If this variable is not found, the working directory will be used as a default. |
@@ -362,6 +362,3 @@ For a full list of known issues pleases see the [Issue Tracker](https://github.c
+ Warnings and a loss of performance due to hash collisions in device code ([#356](https://github.com/FLAMEGPU/FLAMEGPU2/issues/356))
+ Multiple known areas where performance can be improved (e.g. [#449](https://github.com/FLAMEGPU/FLAMEGPU2/issues/449), [#402](https://github.com/FLAMEGPU/FLAMEGPU2/issues/402))
+ Windows/MSVC builds using CUDA 11.0 may encounter errors when performing incremental builds if the static library has been recompiled. If this presents itself, re-save any `.cu` file in your executable producing project and re-trigger the build.
+ Debug builds under linux with CUDA 11.0 may encounter cuda errors during `validateIDCollisions`. Consider using an alternate CUDA version if this is required ([#569](https://github.com/FLAMEGPU/FLAMEGPU2/issues/569)).
+ CUDA 11.0 with GCC 9 may encounter a segmentation fault during compilation of the test suite. Consider using GCC 8 with CUDA 11.0.
2 changes: 1 addition & 1 deletion cmake/CUDAArchitectures.cmake
@@ -215,7 +215,7 @@ function(flamegpu_set_cuda_architectures)
endif()
message(AUTHOR_WARNING
" ${CMAKE_CURRENT_FUNCTION} failed to parse NVCC --help output for default architecture generation\n"
" Using ${default_archs} based on CUDA 11.0 to 11.8."
" Using ${default_archs} based on CUDA 11.2 to 11.8."
)
endif()
# We actually want real for each arch, then virtual for the final, but only for library-provided values, to only embed one arch worth of ptx.
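
For context on the AUTHOR_WARNING above: flamegpu_set_cuda_architectures() derives its default architecture list by parsing `nvcc --help`, and the hard-coded list (now described as "based on CUDA 11.2 to 11.8") is only a fallback for when that parse fails. Below is a minimal sketch of the parse-then-fallback pattern; the regex and fallback list are assumptions for illustration, since nvcc's help text is not a stable interface and the project's actual implementation is not reproduced here.

```cmake
# Illustrative parse-then-fallback sketch (assumed regex and fallback list, not the project's code).
execute_process(
    COMMAND ${CMAKE_CUDA_COMPILER} --help
    OUTPUT_VARIABLE nvcc_help
    RESULT_VARIABLE nvcc_help_result
    ERROR_QUIET
)
set(default_archs "")
if(nvcc_help_result EQUAL 0)
    # Collect every compute_XY token mentioned in the help text.
    string(REGEX MATCHALL "compute_([0-9]+)" compute_tokens "${nvcc_help}")
    foreach(token IN LISTS compute_tokens)
        string(REPLACE "compute_" "" arch "${token}")
        list(APPEND default_archs "${arch}")
    endforeach()
    list(REMOVE_DUPLICATES default_archs)
endif()
if(NOT default_archs)
    # Hypothetical fallback covering architectures common to CUDA 11.2-11.8 toolkits.
    set(default_archs 35 50 52 60 61 70 75 80 86)
    message(AUTHOR_WARNING
        " Failed to parse NVCC --help output for default architecture generation\n"
        " Using ${default_archs} based on CUDA 11.2 to 11.8."
    )
endif()
```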
14 changes: 9 additions & 5 deletions cmake/common.cmake
@@ -134,22 +134,26 @@ if(FLAMEGPU_ENABLE_NVTX)
endif()
endif(FLAMEGPU_ENABLE_NVTX)

# Set the minimum supported cuda version, if not already set. Currently duplicated due to docs only build logic.
# CUDA 11.0 is current minimum cuda version, and the minimum supported
# Set the minimum unsupported and minimum supported cuda version, if not already set.
# Currently duplicated due to docs only build logic.
# CUDA 11.0/11.1 is current minimum (unsupported but usable) cuda version
if(NOT DEFINED MINIMUM_CUDA_VERSION)
set(MINIMUM_CUDA_VERSION 11.0)
if(WIN32)
set(MINIMUM_CUDA_VERSION 11.1)
endif()
# Require a minimum cuda version
if(CMAKE_CUDA_COMPILER_VERSION VERSION_LESS ${MINIMUM_CUDA_VERSION})
message(FATAL_ERROR "CUDA version must be at least ${MINIMUM_CUDA_VERSION}")
endif()
endif()
# CUDA 11.0 is the current minimum supported version.
# CUDA 11.2 is the current minimum supported version.
if(NOT DEFINED MINIMUM_SUPPORTED_CUDA_VERSION)
set(MINIMUM_SUPPORTED_CUDA_VERSION 11.0)
set(MINIMUM_SUPPORTED_CUDA_VERSION 11.2)
# Warn on deprecated cuda version.
# If the CUDA compiler is atleast the minimum deprecated version, but less than the minimum actually supported version, issue a dev warning.
if(CMAKE_CUDA_COMPILER_VERSION VERSION_GREATER_EQUAL ${MINIMUM_CUDA_VERSION} AND CMAKE_CUDA_COMPILER_VERSION VERSION_LESS ${MINIMUM_SUPPORTED_CUDA_VERSION})
message(DEPRECATION "Support for CUDA verisons <= ${MINIMUM_SUPPORTED_CUDA_VERSION} is deprecated and will be removed in a future release.")
message(WARNING "CUDA versions >= ${MINIMUM_CUDA_VERSION} && < ${MINIMUM_SUPPORTED_CUDA_VERSION} are unsupported but may work on some platforms.")
endif()
endif()

4 changes: 2 additions & 2 deletions src/flamegpu/detail/compute_capability.cu
@@ -71,8 +71,8 @@ std::vector<int> compute_capability::getNVRTCSupportedComputeCapabilties() {
}
// If any of the above functions failed, we have no idea what arch's are supported, so assume none are?
return {};
// Older CUDA's do not support this, but this is simple to hard-code for CUDA 11.0/11.1 (and our deprected CUDA 10.x).
// CUDA 11.1 suports 35 to 86
// Older CUDA's do not support this, but this is simple to hard-code for CUDA 11.0/11.1 (and our CUDA 10.x).
// CUDA 11.1 supports 35 to 86
#elif (__CUDACC_VER_MAJOR__ == 11) && __CUDACC_VER_MINOR__ == 1
return {35, 37, 50, 52, 53, 60, 61, 62, 70, 72, 75, 80, 86};
// CUDA 11.0 supports 35 to 80
