* Upgrade presets for DNNL 2.5.2, CUDA 11.6.0, DepthAI 2.14.1
saudet committed Jan 14, 2022
1 parent 2880891 commit fb6afbd
Showing 90 changed files with 1,709 additions and 200 deletions.
7 changes: 4 additions & 3 deletions .github/actions/deploy-centos/action.yml
@@ -62,13 +62,13 @@ runs:
if [[ "$CI_DEPLOY_PLATFORM" == "linux-x86_64" ]] && [[ -n ${CI_DEPLOY_NEED_CUDA:-} ]]; then
echo Installing CUDA, cuDNN, etc
curl -LO https://developer.download.nvidia.com/compute/cuda/11.5.1/local_installers/cuda-repo-rhel7-11-5-local-11.5.1_495.29.05-1.x86_64.rpm
curl -LO https://developer.download.nvidia.com/compute/cuda/11.6.0/local_installers/cuda-repo-rhel7-11-6-local-11.6.0_510.39.01-1.x86_64.rpm
curl -LO https://developer.download.nvidia.com/compute/redist/cudnn/v8.3.2/local_installers/11.5/cudnn-linux-x86_64-8.3.2.44_cuda11.5-archive.tar.xz
curl -LO https://developer.download.nvidia.com/compute/cuda/repos/rhel7/x86_64/libnccl-2.11.4-1+cuda11.5.x86_64.rpm
curl -LO https://developer.download.nvidia.com/compute/cuda/repos/rhel7/x86_64/libnccl-devel-2.11.4-1+cuda11.5.x86_64.rpm
rpm -i --force --ignorearch --nodeps cuda-repo-rhel7-11-5-local-11.5.1_495.29.05-1.x86_64.rpm libnccl*.rpm
pushd /var/cuda-repo-rhel7-11-5-local/; rpm -i --force --ignorearch --nodeps cuda*.rpm libc*.rpm libn*.rpm; rm *.rpm; popd
rpm -i --force --ignorearch --nodeps cuda-repo-rhel7-11-6-local-11.6.0_510.39.01-1.x86_64.rpm libnccl*.rpm
pushd /var/cuda-repo-rhel7-11-6-local/; rpm -i --force --ignorearch --nodeps cuda*.rpm libc*.rpm libn*.rpm; rm *.rpm; popd
ln -sf /usr/local/cuda/lib64/stubs/libcuda.so /usr/local/cuda/lib64/libcuda.so
ln -sf /usr/local/cuda/lib64/stubs/libnvidia-ml.so /usr/local/cuda/lib64/libnvidia-ml.so
tar -hxvf cudnn-linux-x86_64-8.3.2.44_cuda11.5-archive.tar.xz --strip-components=1 -C /usr/local/cuda/
@@ -85,6 +85,7 @@ runs:
ln -s libcudart.so.11.0 /usr/local/cuda/lib64/libcudart.so.11.3
ln -s libcudart.so.11.0 /usr/local/cuda/lib64/libcudart.so.11.4
ln -s libcudart.so.11.0 /usr/local/cuda/lib64/libcudart.so.11.5
ln -s libcudart.so.11.0 /usr/local/cuda/lib64/libcudart.so.11.6
cp /usr/local/cuda/lib64/stubs/libcuda.so /usr/lib64/libcuda.so
cp /usr/local/cuda/lib64/stubs/libcuda.so /usr/lib64/libcuda.so.1
cp /usr/local/cuda/lib64/stubs/libnvidia-ml.so /usr/lib64/libnvidia-ml.so
11 changes: 6 additions & 5 deletions .github/actions/deploy-ubuntu/action.yml
@@ -14,21 +14,21 @@ runs:
export ARCH=arm64
export ARCH2=sbsa
export PREFIX=aarch64-linux-gnu
export CUDA=cuda-repo-rhel8-11-5-local-11.5.1_495.29.05-1.aarch64.rpm
export CUDA=cuda-repo-rhel8-11-6-local-11.6.0_510.39.01-1.aarch64.rpm
export CUDNN=cudnn-linux-sbsa-8.3.2.44_cuda11.5-archive.tar.xz
export NCCL=2.11.4-1+cuda11.5.aarch64
elif [[ "$CI_DEPLOY_PLATFORM" == "linux-ppc64le" ]]; then
export ARCH=ppc64el
export ARCH2=ppc64le
export PREFIX=powerpc64le-linux-gnu
export CUDA=cuda-repo-rhel8-11-5-local-11.5.1_495.29.05-1.ppc64le.rpm
export CUDA=cuda-repo-rhel8-11-6-local-11.6.0_510.39.01-1.ppc64le.rpm
export CUDNN=cudnn-linux-ppc64le-8.3.2.44_cuda11.5-archive.tar.xz
export NCCL=2.11.4-1+cuda11.5.ppc64le
elif [[ "$CI_DEPLOY_PLATFORM" == "linux-x86_64" ]]; then
export ARCH=amd64
export ARCH2=x86_64
export PREFIX=x86_64-linux-gnu
export CUDA=cuda-repo-rhel8-11-5-local-11.5.1_495.29.05-1.x86_64.rpm
export CUDA=cuda-repo-rhel8-11-6-local-11.6.0_510.39.01-1.x86_64.rpm
export CUDNN=cudnn-linux-x86_64-8.3.2.44_cuda11.5-archive.tar.xz
export NCCL=2.11.4-1+cuda11.5.x86_64
fi
@@ -110,13 +110,13 @@ runs:
if [[ "$CI_DEPLOY_PLATFORM" == "linux-arm64" ]] || [[ "$CI_DEPLOY_PLATFORM" == "linux-ppc64le" ]] && [[ -n ${CI_DEPLOY_NEED_CUDA:-} ]]; then
echo Installing CUDA, cuDNN, etc
curl -LO https://developer.download.nvidia.com/compute/cuda/11.5.1/local_installers/$CUDA
curl -LO https://developer.download.nvidia.com/compute/cuda/11.6.0/local_installers/$CUDA
curl -LO https://developer.download.nvidia.com/compute/redist/cudnn/v8.3.2/local_installers/11.5/$CUDNN
curl -LO https://developer.download.nvidia.com/compute/cuda/repos/rhel8/$ARCH2/libnccl-$NCCL.rpm
curl -LO https://developer.download.nvidia.com/compute/cuda/repos/rhel8/$ARCH2/libnccl-devel-$NCCL.rpm
rpm -i --force --ignorearch --nodeps $CUDA libnccl*.rpm
pushd /var/cuda-repo-rhel8-11-5-local/; rpm -i --force --ignorearch --nodeps cuda*.rpm libc*.rpm libn*.rpm; rm *.rpm; popd
pushd /var/cuda-repo-rhel8-11-6-local/; rpm -i --force --ignorearch --nodeps cuda*.rpm libc*.rpm libn*.rpm; rm *.rpm; popd
ln -sf /usr/local/cuda/lib64/stubs/libcuda.so /usr/local/cuda/lib64/libcuda.so
ln -sf /usr/local/cuda/lib64/stubs/libnvidia-ml.so /usr/local/cuda/lib64/libnvidia-ml.so
tar -hxvf $CUDNN --strip-components=1 -C /usr/local/cuda/
@@ -133,6 +133,7 @@ runs:
ln -s libcudart.so.11.0 /usr/local/cuda/lib64/libcudart.so.11.3
ln -s libcudart.so.11.0 /usr/local/cuda/lib64/libcudart.so.11.4
ln -s libcudart.so.11.0 /usr/local/cuda/lib64/libcudart.so.11.5
ln -s libcudart.so.11.0 /usr/local/cuda/lib64/libcudart.so.11.6
cp /usr/local/cuda/lib64/stubs/libcuda.so /usr/lib64/libcuda.so
cp /usr/local/cuda/lib64/stubs/libcuda.so /usr/lib64/libcuda.so.1
cp /usr/local/cuda/lib64/stubs/libnvidia-ml.so /usr/lib64/libnvidia-ml.so
34 changes: 24 additions & 10 deletions .github/actions/deploy-windows/action.yml
@@ -8,7 +8,8 @@ runs:
cd /d %USERPROFILE%
echo Installing MSYS2
C:\msys64\usr\bin\bash -lc "pacman -S --needed --noconfirm base-devel git tar pkg-config unzip p7zip zip autoconf autoconf-archive automake make patch gnupg"
C:\msys64\usr\bin\bash -lc "pacman -S --needed --noconfirm pkg-config"
C:\msys64\usr\bin\bash -lc "pacman -S --needed --noconfirm base-devel git tar unzip p7zip zip autoconf autoconf-archive automake make patch gnupg"
C:\msys64\usr\bin\bash -lc "pacman -S --needed --noconfirm mingw-w64-x86_64-nasm mingw-w64-x86_64-toolchain mingw-w64-x86_64-libtool mingw-w64-x86_64-gcc mingw-w64-i686-gcc mingw-w64-x86_64-gcc-fortran mingw-w64-i686-gcc-fortran mingw-w64-x86_64-libwinpthread-git mingw-w64-i686-libwinpthread-git mingw-w64-x86_64-SDL2 mingw-w64-i686-SDL2 mingw-w64-x86_64-ragel"
set "PATH=C:\msys64\usr\bin;%PATH%"
@@ -71,17 +72,18 @@ runs:
if "%CI_DEPLOY_PLATFORM%"=="windows-x86_64" if not "%CI_DEPLOY_NEED_CUDA%"=="" (
echo Installing CUDA, cuDNN, etc
curl -LO https://developer.download.nvidia.com/compute/cuda/11.5.1/local_installers/cuda_11.5.1_496.13_windows.exe
curl -LO https://developer.download.nvidia.com/compute/cuda/11.6.0/local_installers/cuda_11.6.0_511.23_windows.exe
curl -LO https://developer.download.nvidia.com/compute/redist/cudnn/v8.3.2/local_installers/11.5/cudnn-windows-x86_64-8.3.2.44_cuda11.5-archive.zip
cuda_11.5.1_496.13_windows.exe -s
cuda_11.6.0_511.23_windows.exe -s
unzip cudnn-windows-x86_64-8.3.2.44_cuda11.5-archive.zip
move cudnn-windows-x86_64-8.3.2.44_cuda11.5-archive\bin\*.dll "%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v11.5\bin"
move cudnn-windows-x86_64-8.3.2.44_cuda11.5-archive\include\*.h "%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v11.5\include"
move cudnn-windows-x86_64-8.3.2.44_cuda11.5-archive\lib\*.lib "%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v11.5\lib\x64"
move cudnn-windows-x86_64-8.3.2.44_cuda11.5-archive\bin\*.dll "%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v11.6\bin"
move cudnn-windows-x86_64-8.3.2.44_cuda11.5-archive\include\*.h "%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v11.6\include"
move cudnn-windows-x86_64-8.3.2.44_cuda11.5-archive\lib\*.lib "%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v11.6\lib\x64"
echo Applying hotfix to Visual Studio 2019 for CUDA
curl -LO https://raw.githubusercontent.com/microsoft/STL/main/stl/inc/cmath
bash -c "find 'C:/Program Files (x86)/Microsoft Visual Studio/2019/Enterprise/VC/' -name cmath -exec cp -v cmath {} \;"
bash -c "sed -i '/device_segmented_sort.cuh/d' 'C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.6/include/cub/cub.cuh'"
)
if "%CI_DEPLOY_MODULE%"=="nvcodec" (
@@ -178,13 +180,25 @@ runs:
C:/msys64/mingw64/bin/bazel.exe version
)
if exist "%ProgramFiles%\NVIDIA GPU Computing Toolkit" (
set "CUDA_PATH=%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v11.5"
set "CUDA_PATH_V11_5=%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v11.5"
set "PATH=%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v11.5\bin;%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v11.5\libnvvp;%PATH%"
echo CUDA Version 11.5.1>"%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v11.5\version.txt"
set "CUDA_PATH=%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v11.6"
set "CUDA_PATH_V11_6=%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v11.6"
set "PATH=%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v11.6\bin;%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v11.6\libnvvp;%PATH%"
echo CUDA Version 11.6.0>"%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v11.6\version.txt"
)
set "PATH=C:\msys64\%MSYSTEM%\bin;C:\msys64\usr\bin;%PATH%"
where bash
where git
where cl
where gcc
where cmake
where gradle
where mvn
where gpg
where python
where python3
where clang-cl
bash --version
git --version
cl
2 changes: 1 addition & 1 deletion CHANGELOG.md
@@ -10,7 +10,7 @@
* Map C++ JIT classes and functions of TorchScript in presets for PyTorch ([issue #1068](https://github.com/bytedeco/javacpp-presets/issues/1068))
* Synchronize `cachePackage()` and prevent repeated package caching in all presets ([pull #1071](https://github.com/bytedeco/javacpp-presets/pull/1071))
* Build FFmpeg with VA-API enabled and bundle its libraries to avoid loading issues ([issue bytedeco/javacv#1188](https://github.com/bytedeco/javacv/issues/1188))
* Upgrade presets for OpenCV 4.5.5, FFmpeg 4.4.1, librealsense2 2.50.0, Arrow 6.0.1, MKL 2022.0, DNNL 2.5.1, OpenBLAS 0.3.19, FFTW 3.3.10, CPython 3.10.1, NumPy 1.22.0, SciPy 1.8.0, Gym 0.21.0, LLVM 13.0.0, libpostal 1.1, Leptonica 1.82.0, Tesseract 5.0.1, CUDA 11.5.1, cuDNN 8.3.2, NCCL 2.11.4, MXNet 1.9.0, PyTorch 1.10.1, TensorFlow Lite 2.7.0, TensorRT 8.2.2.1, ALE 0.7.3, DepthAI 2.14.0, ONNX 1.10.2, ONNX Runtime 1.10.0, TVM 0.8.0, ModSecurity 3.0.6, and their dependencies
* Upgrade presets for OpenCV 4.5.5, FFmpeg 4.4.1, librealsense2 2.50.0, Arrow 6.0.1, MKL 2022.0, DNNL 2.5.2, OpenBLAS 0.3.19, FFTW 3.3.10, CPython 3.10.1, NumPy 1.22.0, SciPy 1.8.0, Gym 0.21.0, LLVM 13.0.0, libpostal 1.1, Leptonica 1.82.0, Tesseract 5.0.1, CUDA 11.6.0, cuDNN 8.3.2, NCCL 2.11.4, MXNet 1.9.0, PyTorch 1.10.1, TensorFlow Lite 2.7.0, TensorRT 8.2.2.1, ALE 0.7.3, DepthAI 2.14.1, ONNX 1.10.2, ONNX Runtime 1.10.0, TVM 0.8.0, ModSecurity 3.0.6, and their dependencies

### August 2, 2021 version 1.5.6
* Change `opencv_core.Mat` constructors to create column vectors out of arrays for consistency ([issue #1064](https://github.com/bytedeco/javacpp-presets/issues/1064))
2 changes: 1 addition & 1 deletion README.md
@@ -207,7 +207,7 @@ Each child module in turn relies by default on the included [`cppbuild.sh` scrip
* Tesseract 5.0.x https://github.com/tesseract-ocr/tesseract
* Caffe 1.0 https://github.com/BVLC/caffe
* OpenPose 1.7.0 https://github.com/CMU-Perceptual-Computing-Lab/openpose
* CUDA 11.5.x https://developer.nvidia.com/cuda-downloads
* CUDA 11.6.x https://developer.nvidia.com/cuda-downloads
* cuDNN 8.3.x https://developer.nvidia.com/cudnn
* NCCL 2.11.x https://developer.nvidia.com/nccl
* NVIDIA Video Codec SDK 11.1.x https://developer.nvidia.com/nvidia-video-codec-sdk
2 changes: 1 addition & 1 deletion caffe/README.md
@@ -60,7 +60,7 @@ We can use [Maven 3](http://maven.apache.org/) to download and install automatic
<dependency>
<groupId>org.bytedeco</groupId>
<artifactId>cuda-platform-redist</artifactId>
<version>11.5-8.3-1.5.7-SNAPSHOT</version>
<version>11.6-8.3-1.5.7-SNAPSHOT</version>
</dependency>

</dependencies>
2 changes: 1 addition & 1 deletion caffe/samples/pom.xml
@@ -26,7 +26,7 @@
<dependency>
<groupId>org.bytedeco</groupId>
<artifactId>cuda-platform-redist</artifactId>
<version>11.5-8.3-1.5.7-SNAPSHOT</version>
<version>11.6-8.3-1.5.7-SNAPSHOT</version>
</dependency>

</dependencies>
6 changes: 3 additions & 3 deletions cuda/README.md
@@ -23,7 +23,7 @@ Introduction
------------
This directory contains the JavaCPP Presets module for:

* CUDA 11.5.1 https://developer.nvidia.com/cuda-zone
* CUDA 11.6.0 https://developer.nvidia.com/cuda-zone
* cuDNN 8.3.2 https://developer.nvidia.com/cudnn
* NCCL 2.11.4 https://developer.nvidia.com/nccl

@@ -64,14 +64,14 @@ We can use [Maven 3](http://maven.apache.org/) to download and install automatic
<dependency>
<groupId>org.bytedeco</groupId>
<artifactId>cuda-platform</artifactId>
<version>11.5-8.3-1.5.7-SNAPSHOT</version>
<version>11.6-8.3-1.5.7-SNAPSHOT</version>
</dependency>

<!-- Additional dependencies to use bundled CUDA, cuDNN, and NCCL -->
<dependency>
<groupId>org.bytedeco</groupId>
<artifactId>cuda-platform-redist</artifactId>
<version>11.5-8.3-1.5.7-SNAPSHOT</version>
<version>11.6-8.3-1.5.7-SNAPSHOT</version>
</dependency>

</dependencies>
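
For context, a minimal, hedged sketch of exercising these Maven artifacts from Java once they are on the classpath. The `cudaRuntimeGetVersion`/`cudaGetDeviceCount` bindings and the `cudaSuccess` constant come from the generated `org.bytedeco.cuda.global.cudart` class; the `int[]` overloads are the usual JavaCPP-generated variants and are assumed here rather than taken from this diff:

import org.bytedeco.javacpp.Loader;
import static org.bytedeco.cuda.global.cudart.*;

public class CudaSmokeTest {
    public static void main(String[] args) {
        // Extracts and loads the bundled CUDA libraries (cuda-platform-redist) if present
        Loader.load(org.bytedeco.cuda.global.cudart.class);

        int[] runtimeVersion = new int[1];
        int[] deviceCount = new int[1];
        check(cudaRuntimeGetVersion(runtimeVersion)); // e.g. 11060 for CUDA 11.6
        check(cudaGetDeviceCount(deviceCount));
        System.out.println("CUDA runtime " + runtimeVersion[0] + ", " + deviceCount[0] + " device(s)");
    }

    static void check(int status) {
        if (status != cudaSuccess) {
            throw new RuntimeException("CUDA error " + status);
        }
    }
}

With only `cuda-platform` on the classpath (no redist artifact), a locally installed CUDA 11.6 toolkit is expected instead of the bundled libraries.
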
2 changes: 1 addition & 1 deletion cuda/platform/pom.xml
@@ -12,7 +12,7 @@

<groupId>org.bytedeco</groupId>
<artifactId>cuda-platform</artifactId>
<version>11.5-8.3-${project.parent.version}</version>
<version>11.6-8.3-${project.parent.version}</version>
<name>JavaCPP Presets Platform for CUDA</name>

<properties>
2 changes: 1 addition & 1 deletion cuda/platform/redist/pom.xml
@@ -12,7 +12,7 @@

<groupId>org.bytedeco</groupId>
<artifactId>cuda-platform-redist</artifactId>
<version>11.5-8.3-${project.parent.version}</version>
<version>11.6-8.3-${project.parent.version}</version>
<name>JavaCPP Presets Platform Redist for CUDA</name>

<properties>
2 changes: 1 addition & 1 deletion cuda/pom.xml
@@ -11,7 +11,7 @@

<groupId>org.bytedeco</groupId>
<artifactId>cuda</artifactId>
<version>11.5-8.3-${project.parent.version}</version>
<version>11.6-8.3-${project.parent.version}</version>
<name>JavaCPP Presets for CUDA</name>

<dependencies>
4 changes: 2 additions & 2 deletions cuda/samples/pom.xml
@@ -12,14 +12,14 @@
<dependency>
<groupId>org.bytedeco</groupId>
<artifactId>cuda-platform</artifactId>
<version>11.5-8.3-1.5.7-SNAPSHOT</version>
<version>11.6-8.3-1.5.7-SNAPSHOT</version>
</dependency>

<!-- Additional dependencies to use bundled CUDA, cuDNN, and NCCL -->
<dependency>
<groupId>org.bytedeco</groupId>
<artifactId>cuda-platform-redist</artifactId>
<version>11.5-8.3-1.5.7-SNAPSHOT</version>
<version>11.6-8.3-1.5.7-SNAPSHOT</version>
</dependency>

</dependencies>
42 changes: 42 additions & 0 deletions cuda/src/gen/java/org/bytedeco/cuda/cudart/CUDA_ARRAY_MEMORY_REQUIREMENTS_v1.java
@@ -0,0 +1,42 @@
// Targeted by JavaCPP version 1.5.7-SNAPSHOT: DO NOT EDIT THIS FILE

package org.bytedeco.cuda.cudart;

import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;

import static org.bytedeco.javacpp.presets.javacpp.*;

import static org.bytedeco.cuda.global.cudart.*;



/**
* CUDA array memory requirements
*/
@Properties(inherit = org.bytedeco.cuda.presets.cudart.class)
public class CUDA_ARRAY_MEMORY_REQUIREMENTS_v1 extends Pointer {
static { Loader.load(); }
/** Default native constructor. */
public CUDA_ARRAY_MEMORY_REQUIREMENTS_v1() { super((Pointer)null); allocate(); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public CUDA_ARRAY_MEMORY_REQUIREMENTS_v1(long size) { super((Pointer)null); allocateArray(size); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public CUDA_ARRAY_MEMORY_REQUIREMENTS_v1(Pointer p) { super(p); }
private native void allocate();
private native void allocateArray(long size);
@Override public CUDA_ARRAY_MEMORY_REQUIREMENTS_v1 position(long position) {
return (CUDA_ARRAY_MEMORY_REQUIREMENTS_v1)super.position(position);
}
@Override public CUDA_ARRAY_MEMORY_REQUIREMENTS_v1 getPointer(long i) {
return new CUDA_ARRAY_MEMORY_REQUIREMENTS_v1((Pointer)this).offsetAddress(i);
}

/** Total required memory size */
public native @Cast("size_t") long size(); public native CUDA_ARRAY_MEMORY_REQUIREMENTS_v1 size(long setter);
/** alignment requirement */
public native @Cast("size_t") long alignment(); public native CUDA_ARRAY_MEMORY_REQUIREMENTS_v1 alignment(long setter);
public native @Cast("unsigned int") int reserved(int i); public native CUDA_ARRAY_MEMORY_REQUIREMENTS_v1 reserved(int i, int setter);
@MemberGetter public native @Cast("unsigned int*") IntPointer reserved();
}
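
This new struct backs the `cuArrayGetMemoryRequirements` driver-API query introduced in CUDA 11.6. A minimal sketch of how it might be used follows; the exact generated signature in `org.bytedeco.cuda.global.cudart` (struct first, then a `CUarray` handle and the device ordinal as `int`) is an assumption, not something shown in this diff:

import org.bytedeco.cuda.cudart.CUDA_ARRAY_MEMORY_REQUIREMENTS_v1;
import org.bytedeco.cuda.cudart.CUarray;
import static org.bytedeco.cuda.global.cudart.*;

public class ArrayRequirementsQuery {
    /** Prints size/alignment needed to map an existing CUarray on the given device ordinal. */
    public static void printRequirements(CUarray array, int device) {
        CUDA_ARRAY_MEMORY_REQUIREMENTS_v1 req = new CUDA_ARRAY_MEMORY_REQUIREMENTS_v1();
        try {
            int status = cuArrayGetMemoryRequirements(req, array, device); // assumed binding signature
            if (status != CUDA_SUCCESS) {
                throw new RuntimeException("cuArrayGetMemoryRequirements failed: " + status);
            }
            // Accessors generated from the struct fields above
            System.out.println("size = " + req.size() + " bytes, alignment = " + req.alignment());
        } finally {
            req.deallocate();
        }
    }
}
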
@@ -11,6 +11,7 @@
import static org.bytedeco.cuda.global.cudart.*;



/**
* CUDA Resource descriptor
*/
11 changes: 11 additions & 0 deletions cuda/src/gen/java/org/bytedeco/cuda/cudart/CUstreamCallback.java
@@ -11,6 +11,17 @@
import static org.bytedeco.cuda.global.cudart.*;













/**
* CUDA stream callback
* @param hStream The stream the callback was added to, as passed to ::cuStreamAddCallback. May be NULL.
42 changes: 42 additions & 0 deletions cuda/src/gen/java/org/bytedeco/cuda/cudart/cudaArrayMemoryRequirements.java
@@ -0,0 +1,42 @@
// Targeted by JavaCPP version 1.5.7-SNAPSHOT: DO NOT EDIT THIS FILE

package org.bytedeco.cuda.cudart;

import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;

import static org.bytedeco.javacpp.presets.javacpp.*;

import static org.bytedeco.cuda.global.cudart.*;



/**
* CUDA array and CUDA mipmapped array memory requirements
*/
@Properties(inherit = org.bytedeco.cuda.presets.cudart.class)
public class cudaArrayMemoryRequirements extends Pointer {
static { Loader.load(); }
/** Default native constructor. */
public cudaArrayMemoryRequirements() { super((Pointer)null); allocate(); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public cudaArrayMemoryRequirements(long size) { super((Pointer)null); allocateArray(size); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public cudaArrayMemoryRequirements(Pointer p) { super(p); }
private native void allocate();
private native void allocateArray(long size);
@Override public cudaArrayMemoryRequirements position(long position) {
return (cudaArrayMemoryRequirements)super.position(position);
}
@Override public cudaArrayMemoryRequirements getPointer(long i) {
return new cudaArrayMemoryRequirements((Pointer)this).offsetAddress(i);
}

/** Total size of the array. */
public native @Cast("size_t") long size(); public native cudaArrayMemoryRequirements size(long setter);
/** Alignment necessary for mapping the array. */
public native @Cast("size_t") long alignment(); public native cudaArrayMemoryRequirements alignment(long setter);
public native @Cast("unsigned int") int reserved(int i); public native cudaArrayMemoryRequirements reserved(int i, int setter);
@MemberGetter public native @Cast("unsigned int*") IntPointer reserved();
}
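
As with the driver-API struct above, this class pairs with the runtime-API query `cudaArrayGetMemoryRequirements` added in CUDA 11.6; a short sketch under the assumption that the generated binding takes the struct, a `cudaArray`, and a device ordinal:

import org.bytedeco.cuda.cudart.cudaArray;
import org.bytedeco.cuda.cudart.cudaArrayMemoryRequirements;
import static org.bytedeco.cuda.global.cudart.*;

public class RuntimeArrayRequirements {
    /** Returns {size, alignment} for an existing cudaArray on the given device ordinal. */
    public static long[] sizeAndAlignment(cudaArray array, int device) {
        cudaArrayMemoryRequirements req = new cudaArrayMemoryRequirements();
        try {
            int status = cudaArrayGetMemoryRequirements(req, array, device); // assumed binding signature
            if (status != cudaSuccess) {
                throw new RuntimeException("cudaArrayGetMemoryRequirements failed: " + status);
            }
            return new long[] { req.size(), req.alignment() };
        } finally {
            req.deallocate();
        }
    }
}
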
@@ -210,7 +210,4 @@ public class cudaDeviceProp extends Pointer {
public native int accessPolicyMaxWindowSize(); public native cudaDeviceProp accessPolicyMaxWindowSize(int setter);
/** Shared memory reserved by CUDA driver per block in bytes */
public native @Cast("size_t") long reservedSharedMemPerBlock(); public native cudaDeviceProp reservedSharedMemPerBlock(long setter);



}
@@ -82,4 +82,8 @@ public class cudaTextureDesc extends Pointer {
* Disable any trilinear filtering optimizations.
*/
public native int disableTrilinearOptimization(); public native cudaTextureDesc disableTrilinearOptimization(int setter);
/**
* Enable seamless cube map filtering.
*/
public native int seamlessCubemap(); public native cudaTextureDesc seamlessCubemap(int setter);
}
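
For reference, a brief sketch of the new field in use; it relies only on the two setters visible in this hunk, and the resulting descriptor would then be handed to a texture-object creation call (e.g. `cudaCreateTextureObject`, not shown here):

import org.bytedeco.cuda.cudart.cudaTextureDesc;

public class SeamlessCubemapDesc {
    /** Builds a texture descriptor with seamless cube map filtering enabled. */
    public static cudaTextureDesc build() {
        cudaTextureDesc desc = new cudaTextureDesc();
        desc.seamlessCubemap(1);               // new in CUDA 11.6: filter seamlessly across cube map faces
        desc.disableTrilinearOptimization(0);  // keep trilinear filtering optimizations enabled
        return desc;
    }
}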