
Fix build issue with USE_CUDNN=0 #11470

Merged 23 commits on Jul 12, 2018
Jenkinsfile: 25 additions & 0 deletions
@@ -281,6 +281,17 @@ try {
       }
     }
   },
+  'GPU: MKLDNN_CUDNNOFF': {
+    node('mxnetlinux-cpu') {
+      ws('workspace/build-mkldnn-gpu-nocudnn') {
+        timeout(time: max_time, unit: 'MINUTES') {
+          init_git()
+          docker_run('ubuntu_build_cuda', 'build_ubuntu_gpu_mkldnn_nocudnn', false)
+          pack_lib('mkldnn_gpu_nocudnn', mx_mkldnn_lib)
+        }
+      }
+    }
+  },
   'GPU: CUDA9.1+cuDNN7': {
     node('mxnetlinux-cpu') {
       ws('workspace/build-gpu') {
@@ -645,6 +656,20 @@ try {
       }
     }
   },
+  'Python3: MKLDNN-GPU-NOCUDNN': {
Contributor: Is it needed to test python2 and python3, or is one enough?

Member Author: One is enough; I will remove python2.
+    node('mxnetlinux-gpu') {
+      ws('workspace/ut-python3-mkldnn-gpu-nocudnn') {
+        try {
+          init_git()
+          unpack_lib('mkldnn_gpu_nocudnn', mx_mkldnn_lib)
+          python3_gpu_ut('ubuntu_gpu')
+          publish_test_coverage()
+        } finally {
+          collect_test_results_unix('nosetests_gpu.xml', 'nosetests_python3_mkldnn_gpu_nocudnn.xml')
+        }
+      }
+    }
+  },
   'Python3: CentOS 7 CPU': {
     node('mxnetlinux-cpu') {
       ws('workspace/build-centos7-cpu') {
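Both new stages dispatch through docker_run to helpers defined in ci/docker/runtime_functions.sh (shown next). For local debugging, here is a minimal sketch of the equivalent invocation, assuming MXNet's ci/build.py wrapper and its convention of mounting the repository at /work inside the container; the exact command line is hypothetical, check ci/README.md for the real interface:

    # Hypothetical local reproduction of the 'GPU: MKLDNN_CUDNNOFF' build stage.
    ci/build.py --platform ubuntu_build_cuda /work/runtime_functions.sh \
        build_ubuntu_gpu_mkldnn_nocudnn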
ci/docker/runtime_functions.sh: 18 additions & 0 deletions
@@ -457,6 +457,24 @@ build_ubuntu_gpu_mkldnn() {
     report_ccache_usage
 }

+build_ubuntu_gpu_mkldnn_nocudnn() {
+    set -ex
+
+    build_ccache_wrappers
+
+    make \
+        DEV=1 \
+        USE_CPP_PACKAGE=1 \
Contributor: No need for the cpp package here, since we don't run the cpp tests.

Member Author: Not sure about this. Shouldn't we test that it builds fine with the CPP package, just in case the cpp package depends on the flag for cuDNN-specific functionality (unlikely)?

Contributor: The CPP package just generates wrappers around the C-API and should not rely on backend-specific things like CUDA or cuDNN. Thus, I'd say that we should keep it separate and only build the CPP package if we're actually going to run it (which we do in another test).

Member Author: Okay, removed!

+        USE_BLAS=openblas \
+        USE_MKLDNN=1 \
+        USE_CUDA=1 \
+        USE_CUDA_PATH=/usr/local/cuda \
+        USE_CUDNN=0 \
+        -j$(nproc)
+
+    report_ccache_usage
+}

 build_ubuntu_gpu_cuda91_cudnn7() {
     set -ex
     # unfortunately this build has problems in 3rdparty dependencies with ccache and make
src/operator/nn/convolution.cu: 6 additions & 6 deletions
@@ -90,9 +90,9 @@ void ConvolutionCompute<gpu>(const nnvm::NodeAttrs& attrs,
   int dtype = inputs[conv::kData].type_flag_;

 #if CUDNN_MAJOR < 5
-  if (param_.layout.value() != kNCW &&
-      param_.layout.value() != kNCHW &&
-      param_.layout.value() != kNCDHW) {
+  if (param.layout.value() != kNCW &&
+      param.layout.value() != kNCHW &&
+      param.layout.value() != kNCDHW) {
     // Need CuDNN > 5.0 for layout support. use MXNet implementation
     MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
       ConvolutionOp<gpu, DType> op;
@@ -168,9 +168,9 @@ void ConvolutionGradCompute<gpu>(const nnvm::NodeAttrs& attrs,
   int dtype = out_grad.type_flag_;

 #if CUDNN_MAJOR < 5
-  if (param_.layout.value() != kNCW &&
-      param_.layout.value() != kNCHW &&
-      param_.layout.value() != kNCDHW) {
+  if (param.layout.value() != kNCW &&
+      param.layout.value() != kNCHW &&
+      param.layout.value() != kNCDHW) {
     // Need CuDNN > 5.0 for layout support. use MXNet implementation
     MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
       ConvolutionOp<gpu, DType> op;
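The rename above is the whole fix: these free functions define a local named param, while param_ exists only in the class-based cuDNN operator, so the old code could not compile whenever this branch was active. The branch is active precisely when cuDNN is absent, because the preprocessor replaces an undefined CUDNN_MAJOR with 0 in the #if expression, making "CUDNN_MAJOR < 5" true. A standalone shell sketch of that pitfall (illustrative only, not MXNet code):

    # In a preprocessor #if, an identifier with no macro definition is treated
    # as 0, so the "old cuDNN" fallback compiles exactly when cudnn.h is absent.
    cat > /tmp/undef_macro.cc <<'EOF'
    void Compute() {
      int param = 42;            // the name actually in scope, as in this fix
    #if CUDNN_MAJOR < 5          // undefined CUDNN_MAJOR -> 0, so 0 < 5 is true
      int x = param_;            // typo reaches the compiler only in this branch
    #endif
      (void)param;
    }
    int main() {}
    EOF
    g++ -c /tmp/undef_macro.cc                     # fails: 'param_' was not declared
    g++ -c -DCUDNN_MAJOR=7 /tmp/undef_macro.cc     # succeeds: branch is skipped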