This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Add test pipeline for USE_TVM_OP=OFF on Unix #16450

Merged (7 commits) on Oct 19, 2019
71 changes: 71 additions & 0 deletions ci/docker/runtime_functions.sh
@@ -522,6 +522,29 @@ build_ubuntu_cpu_cmake_debug() {
popd
}

build_ubuntu_cpu_cmake_no_tvm_op() {
    set -ex
    pushd .
    cd /work/build
    build_ccache_wrappers
    cmake \
        -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
        -DCMAKE_C_COMPILER_LAUNCHER=ccache \
        -DUSE_CUDA=OFF \
        -DUSE_TVM_OP=OFF \
        -DPython3_EXECUTABLE=/usr/bin/python3 \
        -DUSE_MKL_IF_AVAILABLE=OFF \
        -DUSE_OPENMP=OFF \
        -DUSE_OPENCV=ON \
        -DUSE_SIGNAL_HANDLER=ON \
        -DCMAKE_BUILD_TYPE=Release \
        -G Ninja \
        /work/mxnet

    ninja -v
    popd
}

build_ubuntu_cpu_cmake_asan() {
set -ex

@@ -793,6 +816,27 @@ build_ubuntu_gpu_cuda101_cudnn7() {
make cython PYTHON=python3
}

build_ubuntu_gpu_cuda101_cudnn7_no_tvm_op() {
    set -ex
    build_ccache_wrappers
    make \
        DEV=1 \
        USE_BLAS=openblas \
        USE_MKLDNN=0 \
        USE_CUDA=1 \
        USE_CUDA_PATH=/usr/local/cuda \
        USE_CUDNN=1 \
        USE_TVM_OP=0 \
        USE_CPP_PACKAGE=1 \
        USE_DIST_KVSTORE=1 \
        CUDA_ARCH="$CI_CUDA_COMPUTE_CAPABILITIES" \
        USE_SIGNAL_HANDLER=1 \
        -j$(nproc)

    make cython PYTHON=python2
    make cython PYTHON=python3
}

build_ubuntu_amalgamation() {
set -ex
# Amalgamation can not be run with -j nproc
@@ -865,6 +909,33 @@ build_ubuntu_gpu_cmake() {
ninja -v
}

build_ubuntu_gpu_cmake_no_tvm_op() {
    set -ex
    cd /work/build
    build_ccache_wrappers
    cmake \
        -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
        -DCMAKE_C_COMPILER_LAUNCHER=ccache \
        -DCMAKE_CUDA_COMPILER_LAUNCHER=ccache \
        -DUSE_SIGNAL_HANDLER=ON \
        -DUSE_CUDA=ON \
        -DUSE_CUDNN=ON \
        -DUSE_TVM_OP=OFF \
        -DPython3_EXECUTABLE=/usr/bin/python3 \
        -DUSE_MKL_IF_AVAILABLE=OFF \
        -DUSE_MKLML_MKL=OFF \
        -DUSE_MKLDNN=OFF \
        -DUSE_DIST_KVSTORE=ON \
        -DCMAKE_BUILD_TYPE=Release \
        -DCUDA_ARCH_NAME=Manual \
        -DCUDA_ARCH_BIN=$CI_CMAKE_CUDA_ARCH_BIN \
        -DBUILD_CYTHON_MODULES=1 \
        -G Ninja \
        /work/mxnet

    ninja -v
}

build_ubuntu_cpu_large_tensor() {
set -ex
cd /work/build
75 changes: 75 additions & 0 deletions ci/jenkins/Jenkins_steps.groovy
@@ -31,13 +31,15 @@ mx_pip = 'build/*.whl'

// mxnet cmake libraries, in cmake builds we do not produce a libnvvm static library by default.
mx_cmake_lib = 'build/libmxnet.so, build/libmxnet.a, build/3rdparty/tvm/libtvm_runtime.so, build/libtvmop.so, build/3rdparty/dmlc-core/libdmlc.a, build/tests/mxnet_unit_tests, build/3rdparty/openmp/runtime/src/libomp.so'
mx_cmake_lib_no_tvm_op = 'build/libmxnet.so, build/libmxnet.a, build/libsample_lib.so, build/3rdparty/dmlc-core/libdmlc.a, build/tests/mxnet_unit_tests, build/3rdparty/openmp/runtime/src/libomp.so'
mx_cmake_lib_cython = 'build/libmxnet.so, build/libmxnet.a, build/3rdparty/tvm/libtvm_runtime.so, build/libtvmop.so, build/3rdparty/dmlc-core/libdmlc.a, build/tests/mxnet_unit_tests, build/3rdparty/openmp/runtime/src/libomp.so, python/mxnet/_cy2/*.so, python/mxnet/_cy3/*.so'
// mxnet cmake libraries, in cmake builds we do not produce a libnvvm static library by default.
mx_cmake_lib_debug = 'build/libmxnet.so, build/libmxnet.a, build/3rdparty/tvm/libtvm_runtime.so, build/libtvmop.so, build/libsample_lib.so, build/3rdparty/dmlc-core/libdmlc.a, build/tests/mxnet_unit_tests'
mx_cmake_mkldnn_lib = 'build/libmxnet.so, build/libmxnet.a, build/3rdparty/tvm/libtvm_runtime.so, build/libtvmop.so, build/3rdparty/dmlc-core/libdmlc.a, build/tests/mxnet_unit_tests, build/3rdparty/openmp/runtime/src/libomp.so, build/3rdparty/mkldnn/src/libmkldnn.so.0'
mx_mkldnn_lib = 'lib/libmxnet.so, lib/libmxnet.a, lib/libtvm_runtime.so, lib/libtvmop.so, libsample_lib.so, lib/libiomp5.so, lib/libmkldnn.so.0, lib/libmklml_intel.so, 3rdparty/dmlc-core/libdmlc.a, 3rdparty/tvm/nnvm/lib/libnnvm.a'
mx_tensorrt_lib = 'build/libmxnet.so, build/3rdparty/tvm/libtvm_runtime.so, build/libtvmop.so, lib/libnvonnxparser_runtime.so.0, lib/libnvonnxparser.so.0, lib/libonnx_proto.so, lib/libonnx.so'
mx_lib_cpp_examples = 'lib/libmxnet.so, lib/libmxnet.a, lib/libtvm_runtime.so, lib/libtvmop.so, libsample_lib.so, 3rdparty/dmlc-core/libdmlc.a, 3rdparty/tvm/nnvm/lib/libnnvm.a, 3rdparty/ps-lite/build/libps.a, deps/lib/libprotobuf-lite.a, deps/lib/libzmq.a, build/cpp-package/example/*, python/mxnet/_cy2/*.so, python/mxnet/_cy3/*.so'
mx_lib_cpp_examples_no_tvm_op = 'lib/libmxnet.so, lib/libmxnet.a, libsample_lib.so, 3rdparty/dmlc-core/libdmlc.a, 3rdparty/tvm/nnvm/lib/libnnvm.a, 3rdparty/ps-lite/build/libps.a, deps/lib/libprotobuf-lite.a, deps/lib/libzmq.a, build/cpp-package/example/*, python/mxnet/_cy2/*.so, python/mxnet/_cy3/*.so'
mx_lib_cpp_examples_cpu = 'build/libmxnet.so, build/3rdparty/tvm/libtvm_runtime.so, build/libtvmop.so, build/cpp-package/example/*'
Contributor:
@mseth10 Can you take a look at all the libsample_lib.so's? Some of them have a path like "build/libsample_lib.so" and others are just "libsample_lib.so".

mseth10 (Contributor) commented on Oct 15, 2019:
I took a look and let @reminisce know offline. mx_cmake_lib_no_tvm_op is the binary set corresponding to a cmake build, and hence we need to put it in a build/ directory.
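
For context on that answer: the make-based builds in runtime_functions.sh leave libraries under lib/ and the repository root, while the cmake/Ninja builds write everything into the out-of-source build/ directory, which is why mx_cmake_lib_no_tvm_op carries build/ prefixes. Below is a minimal shell sketch of that layout check; the exact libsample_lib.so locations are an assumption read off the artifact lists above, not something verified in this PR.

# Hypothetical sanity check of artifact locations per build flavor (not part of the PR).
# Paths mirror the mx_* artifact strings defined in Jenkins_steps.groovy.

# make-based build (e.g. build_ubuntu_gpu_cuda101_cudnn7_no_tvm_op):
# outputs land under lib/ and the repository root.
for f in lib/libmxnet.so libsample_lib.so; do
    if [ -f "$f" ]; then echo "make artifact present: $f"; else echo "missing: $f"; fi
done

# cmake/Ninja build (e.g. build_ubuntu_cpu_cmake_no_tvm_op):
# outputs land under build/, hence the build/ prefixes in mx_cmake_lib_no_tvm_op.
for f in build/libmxnet.so build/libsample_lib.so; do
    if [ -f "$f" ]; then echo "cmake artifact present: $f"; else echo "missing: $f"; fi
done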


// Python unittest for CPU
@@ -133,6 +135,20 @@ def compile_unix_openblas_debug_cpu() {
}]
}

def compile_unix_openblas_cpu_no_tvm_op() {
  return ['CPU: Openblas, cmake, TVM_OP OFF': {
    node(NODE_LINUX_CPU) {
      ws('workspace/build-cpu-openblas-no-tvm-op') {
        timeout(time: max_time, unit: 'MINUTES') {
          utils.init_git()
          utils.docker_run('ubuntu_cpu', 'build_ubuntu_cpu_cmake_no_tvm_op', false)
          utils.pack_lib('cpu_openblas_no_tvm_op', mx_cmake_lib_no_tvm_op)
        }
      }
    }
  }]
}

def compile_unix_int64_cpu() {
return ['CPU: USE_INT64_TENSOR_SIZE': {
node(NODE_LINUX_CPU) {
@@ -245,6 +261,20 @@ def compile_unix_full_gpu() {
}]
}

def compile_unix_full_gpu_no_tvm_op() {
  return ['GPU: CUDA10.1+cuDNN7 TVM_OP OFF': {
    node(NODE_LINUX_CPU) {
      ws('workspace/build-gpu-no-tvm-op') {
        timeout(time: max_time, unit: 'MINUTES') {
          utils.init_git()
          utils.docker_run('ubuntu_build_cuda', 'build_ubuntu_gpu_cuda101_cudnn7_no_tvm_op', false)
          utils.pack_lib('gpu_no_tvm_op', mx_lib_cpp_examples_no_tvm_op)
        }
      }
    }
  }]
}

def compile_unix_cmake_mkldnn_gpu() {
return ['GPU: CMake MKLDNN': {
node(NODE_LINUX_CPU) {
@@ -273,6 +303,19 @@ def compile_unix_cmake_gpu() {
}]
}

def compile_unix_cmake_gpu_no_tvm_op() {
  return ['GPU: CMake TVM_OP OFF': {
    node(NODE_LINUX_CPU) {
      ws('workspace/build-cmake-gpu-no-tvm-op') {
        timeout(time: max_time, unit: 'MINUTES') {
          utils.init_git()
          utils.docker_run('ubuntu_gpu_cu101', 'build_ubuntu_gpu_cmake_no_tvm_op', false)
        }
      }
    }
  }]
}

def compile_unix_tensorrt_gpu() {
return ['TensorRT': {
node(NODE_LINUX_CPU) {
@@ -756,6 +799,22 @@ def test_unix_python3_gpu() {
}]
}

def test_unix_python3_gpu_no_tvm_op() {
  return ['Python3: GPU TVM_OP OFF': {
    node(NODE_LINUX_GPU) {
      ws('workspace/ut-python3-gpu-no-tvm-op') {
        try {
          utils.unpack_and_init('gpu_no_tvm_op', mx_lib_cpp_examples_no_tvm_op)
          python3_gpu_ut_cython('ubuntu_gpu_cu101')
          utils.publish_test_coverage()
        } finally {
          utils.collect_test_results_unix('nosetests_gpu.xml', 'nosetests_python3_gpu.xml')
        }
      }
    }
  }]
}

def test_unix_python3_quantize_gpu() {
return ['Python3: Quantize GPU': {
node(NODE_LINUX_GPU_P3) {
@@ -790,6 +849,22 @@ def test_unix_python3_debug_cpu() {
}]
}

def test_unix_python3_cpu_no_tvm_op() {
  return ['Python3: CPU TVM_OP OFF': {
    node(NODE_LINUX_CPU) {
      ws('workspace/ut-python3-cpu-no-tvm-op') {
        try {
          utils.unpack_and_init('cpu_openblas_no_tvm_op', mx_cmake_lib_no_tvm_op)
          python3_ut('ubuntu_cpu')
        } finally {
          utils.collect_test_results_unix('nosetests_unittest.xml', 'nosetests_python3_cpu_no_tvm_op_unittest.xml')
          utils.collect_test_results_unix('nosetests_quantization.xml', 'nosetests_python3_cpu_no_tvm_op_quantization.xml')
        }
      }
    }
  }]
}

def test_unix_python2_mkldnn_cpu() {
return ['Python2: MKLDNN-CPU': {
node(NODE_LINUX_CPU) {
4 changes: 3 additions & 1 deletion ci/jenkins/Jenkinsfile_unix_cpu
@@ -39,7 +39,8 @@ core_logic: {
custom_steps.compile_unix_mkl_cpu(),
custom_steps.compile_unix_mkldnn_cpu(),
custom_steps.compile_unix_mkldnn_mkl_cpu(),
custom_steps.compile_unix_int64_cpu()
custom_steps.compile_unix_int64_cpu(),
custom_steps.compile_unix_openblas_cpu_no_tvm_op(),
])

utils.parallel_stage('Tests', [
@@ -68,6 +69,7 @@ core_logic: {
* https://github.com/apache/incubator-mxnet/issues/11801
custom_steps.test_unix_distributed_kvstore_cpu()
*/
custom_steps.test_unix_python3_cpu_no_tvm_op(),
])
}
,
9 changes: 6 additions & 3 deletions ci/jenkins/Jenkinsfile_unix_gpu
@@ -40,8 +40,10 @@ core_logic: {
custom_steps.compile_unix_cmake_mkldnn_gpu(),
custom_steps.compile_unix_cmake_gpu(),
custom_steps.compile_unix_tensorrt_gpu(),
custom_steps.compile_unix_int64_gpu()
])
custom_steps.compile_unix_int64_gpu(),
custom_steps.compile_unix_full_gpu_no_tvm_op(),
custom_steps.compile_unix_cmake_gpu_no_tvm_op(),
])

utils.parallel_stage('Tests', [
custom_steps.test_unix_python2_gpu(),
@@ -60,7 +62,8 @@
custom_steps.test_unix_cpp_package_gpu(),
custom_steps.test_unix_scala_gpu(),
custom_steps.test_unix_distributed_kvstore_gpu(),
custom_steps.test_static_python_gpu()
custom_steps.test_static_python_gpu(),
custom_steps.test_unix_python3_gpu_no_tvm_op(),

// Disabled due to: https://github.com/apache/incubator-mxnet/issues/11407
//custom_steps.test_unix_caffe_gpu()