Skip to content

Commit

Permalink
[TEST] Checkin test docker and scripts (#48)
Browse files Browse the repository at this point in the history
  • Loading branch information
tqchen committed May 29, 2018
1 parent 3b47db7 commit e04779b
Show file tree
Hide file tree
Showing 29 changed files with 524 additions and 20 deletions.
126 changes: 126 additions & 0 deletions nnvm/Jenkinsfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,126 @@
#!groovy
// -*- mode: groovy -*-
// Jenkins pipeline for NNVM CI.
// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/

// nnvm libraries: comma-separated list of build artifacts that the Build
// stage stashes and the test stages unstash (see pack_lib/unpack_lib below)
nnvm_lib = "tvm/lib/libtvm.so, tvm/lib/libtvm_runtime.so, lib/libnnvm_top.so, config.mk"

// command to start a docker container; first argument selects the image
// (e.g. lint, gpu), remaining arguments are the command run inside it
docker_run = 'tests/ci_build/ci_build.sh'
// per-stage timeout in minutes
max_time = 60

// initialize source codes
// Check out the repository and its submodules on a Linux node.
// Submodule fetches can flake on transient network errors, so each
// attempt is bounded by a timeout and retried a few times.
def init_git() {
  checkout scm
  def submodule_cmd = 'git submodule update --init --recursive'
  retry(5) {
    timeout(time: 2, unit: 'MINUTES') {
      sh submodule_cmd
    }
  }
}

// Windows variant of init_git: identical checkout logic, but submodules
// are fetched through `bat` instead of `sh`.
def init_git_win() {
  checkout scm
  def submodule_cmd = 'git submodule update --init --recursive'
  retry(5) {
    timeout(time: 2, unit: 'MINUTES') {
      bat submodule_cmd
    }
  }
}

// Fast lint pass that gates the heavier build/test stages.
stage('Sanity Check') {
  timeout(time: max_time, unit: 'MINUTES') {
    node('linux') {
      ws('workspace/tvm/sanity') {
        init_git()
        // Run the lint task inside the lint docker image.
        sh "${docker_run} lint ./tests/scripts/task_lint.sh"
      }
    }
  }
}

// Run make inside a CI docker container. First try an incremental make from
// a previous workspace in the hope of accelerating compilation. If anything
// goes wrong, clean the workspace and build from scratch.
//
// docker_type: tag passed to ci_build.sh selecting the docker image (e.g. 'gpu')
// make_flag:   extra flags forwarded to the build task (e.g. '-j2')
def make(docker_type, make_flag) {
  timeout(time: max_time, unit: 'MINUTES') {
    try {
      // Task scripts live under tests/scripts/ (same path used by the other
      // stages and the README); the previous tests/script/ path was a typo.
      sh "${docker_run} ${docker_type} ./tests/scripts/task_build.sh ${make_flag}"
    } catch (exc) {
      echo 'Incremental compilation failed. Fall back to build from scratch'
      sh "${docker_run} ${docker_type} ./tests/scripts/task_clean.sh"
      sh "${docker_run} ${docker_type} ./tests/scripts/task_build.sh ${make_flag}"
    }
  }
}

// Pack build artifacts so later stages can reuse them without rebuilding.
// name: stash identifier to store the artifacts under
// libs: comma-separated list of file paths to stash
// The md5sum of each file is printed first so that a corrupted or stale
// stash is easy to spot by comparing against the log of unpack_lib.
def pack_lib(name, libs) {
sh """
echo "Packing ${libs} into ${name}"
echo ${libs} | sed -e 's/,/ /g' | xargs md5sum
"""
stash includes: libs, name: name
}


// Unpack artifacts previously saved by pack_lib.
// name: stash identifier the artifacts were stored under
// libs: comma-separated list of file paths expected in the stash
// Prints md5sums after unstashing so they can be compared against the
// sums printed by pack_lib in the producing stage.
def unpack_lib(name, libs) {
unstash name
sh """
echo "Unpacked ${libs} from ${name}"
echo ${libs} | sed -e 's/,/ /g' | xargs md5sum
"""
}

// Build the GPU variant of the libraries and stash them for the test stages.
stage('Build') {
  timeout(time: max_time, unit: 'MINUTES') {
    // A node label expression must be one string: in Groovy the expression
    // 'GPU' && 'linux' evaluates to the boolean true, not a label, so the
    // build would not be pinned to GPU-capable Linux agents.
    node('GPU && linux') {
      ws('workspace/nnvm/build-gpu') {
        init_git()
        make('gpu', '-j2')
        pack_lib('gpu', nnvm_lib)
      }
    }
  }
}

// Run the Python unit/frontend tests and the docs build in parallel,
// both reusing the libraries stashed by the Build stage.
stage('Tests') {
  parallel 'python': {
    // Single label-expression string; 'GPU' && 'linux' would evaluate to
    // the Groovy boolean true instead of a node label.
    node('GPU && linux') {
      ws('workspace/nnvm/it-python-gpu') {
        init_git()
        unpack_lib('gpu', nnvm_lib)
        timeout(time: max_time, unit: 'MINUTES') {
          sh "${docker_run} gpu ./tests/scripts/task_python_test.sh"
          sh "${docker_run} gpu ./tests/scripts/task_frontend_test.sh"
        }
      }
    }
  },
  'docs': {
    node('GPU && linux') {
      ws('workspace/nnvm/docs-python-gpu') {
        init_git()
        unpack_lib('gpu', nnvm_lib)
        timeout(time: max_time, unit: 'MINUTES') {
          sh "${docker_run} gpu ./tests/scripts/task_python_docs.sh"
        }
        // Stash the generated docs tarball for the Deploy stage.
        pack_lib('mydocs', 'docs.tgz')
      }
    }
  }
}

// Publish the generated documentation, only for builds of master.
stage('Deploy') {
  // Single label-expression string; 'docker' && 'doc' would evaluate to
  // the Groovy boolean true instead of a node label.
  node('docker && doc') {
    ws('workspace/nnvm/deploy-docs') {
      if (env.BRANCH_NAME == "master") {
        unpack_lib('mydocs', 'docs.tgz')
        sh "tar xf docs.tgz -C /var/docs"
      }
    }
  }
}
6 changes: 4 additions & 2 deletions nnvm/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ PLUGIN_OBJ =
include $(NNVM_PLUGINS)

# specify tensor path
.PHONY: clean all test lint pylint doc cython cython3 cyclean
.PHONY: clean all test lint cpplint pylint doc cython cython3 cyclean

UNAME_S := $(shell uname -s)

Expand Down Expand Up @@ -87,7 +87,9 @@ cython3:
cyclean:
rm -rf python/nnvm/*/*.so python/nnvm/*/*.dylib python/nnvm/*/*.cpp

lint: pylint
lint: pylint cpplint

cpplint:
python dmlc-core/scripts/lint.py nnvm cpp include src

pylint:
Expand Down
2 changes: 1 addition & 1 deletion nnvm/include/nnvm/compiler/op_attr_types.h
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
/*!
* Copyright (c) 2017 by Contributors
* \file op_attr_types.h
* \file nnvm/compiler/op_attr_types.h
* \brief The Expr and related elements in DataFlow construction.
*/
#ifndef NNVM_COMPILER_OP_ATTR_TYPES_H_
Expand Down
2 changes: 1 addition & 1 deletion nnvm/include/nnvm/op_attr_types.h
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
/*!
* Copyright (c) 2016 by Contributors
* \file op_attr_types.h
* \file nnvm/op_attr_types.h
* \brief Data structures that can appear in operator attributes.
*/
#ifndef NNVM_OP_ATTR_TYPES_H_
Expand Down
4 changes: 3 additions & 1 deletion nnvm/python/nnvm/_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
""" ctypes library of nnvm and helper functions """
from __future__ import absolute_import

import os
import sys
import ctypes
import numpy as np
Expand Down Expand Up @@ -44,7 +45,8 @@ def _load_lib():
__version__ = libinfo.__version__
# library instance of nnvm
_LIB = _load_lib()

# The FFI mode of TVM
_FFI_MODE = os.environ.get("TVM_FFI", "auto")

# type definitions
nn_uint = ctypes.c_uint
Expand Down
13 changes: 8 additions & 5 deletions nnvm/python/nnvm/symbol.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,21 +11,24 @@

from numbers import Number as _Number
from . import _base
from ._base import _LIB, check_call as _check_call
from ._base import _LIB, check_call as _check_call, _FFI_MODE
from .attribute import AttrScope
from . import _symbol_internal as _internal

# Use different verison of SymbolBase
# When possible, use cython to speedup part of computation.

IMPORT_EXCEPT = RuntimeError if _FFI_MODE == "cython" else ImportError

try:
if int(_os.environ.get("MXNET_ENABLE_CYTHON", True)) == 0:
from ._ctypes.symbol import SymbolBase, _init_symbol_module
elif _sys.version_info >= (3, 0):
if _FFI_MODE == "ctypes":
raise ImportError()
if _sys.version_info >= (3, 0):
from ._cy3.symbol import SymbolBase, _init_symbol_module
else:
from ._cy2.symbol import SymbolBase, _init_symbol_module
except ImportError:
except IMPORT_EXCEPT:
# pylint: disable=wrong-import-position
from ._ctypes.symbol import SymbolBase, _init_symbol_module


Expand Down
3 changes: 3 additions & 0 deletions nnvm/python/setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,9 @@
from distutils.core import setup

def config_cython():
# temporary disable cython for now
# as NNVM uses local DLL build
return []
try:
from Cython.Build import cythonize
from distutils.extension import Extension
Expand Down
44 changes: 44 additions & 0 deletions nnvm/tests/ci_build/Dockerfile.gpu
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
FROM nvidia/cuda:8.0-cudnn7-devel

# Base scripts
RUN apt-get update --fix-missing

COPY install/ubuntu_install_core.sh /install/ubuntu_install_core.sh
RUN bash /install/ubuntu_install_core.sh

COPY install/ubuntu_install_python.sh /install/ubuntu_install_python.sh
RUN bash /install/ubuntu_install_python.sh

COPY install/ubuntu_install_llvm.sh /install/ubuntu_install_llvm.sh
RUN bash /install/ubuntu_install_llvm.sh

COPY install/ubuntu_install_opencl.sh /install/ubuntu_install_opencl.sh
RUN bash /install/ubuntu_install_opencl.sh

COPY install/ubuntu_install_python_package.sh /install/ubuntu_install_python_package.sh
RUN bash /install/ubuntu_install_python_package.sh

COPY install/ubuntu_install_sphinx.sh /install/ubuntu_install_sphinx.sh
RUN bash /install/ubuntu_install_sphinx.sh

# Fix recommonmark to latest version
# Use && so the install does not run in the wrong directory if the
# clone failed (";" would keep going and execute setup.py from /).
RUN git clone https://github.com/rtfd/recommonmark
RUN cd recommonmark && python setup.py install

# Enable doxygen for c++ doc build
RUN apt-get update && apt-get install -y doxygen graphviz libprotobuf-dev protobuf-compiler

# DL Frameworks
COPY install/ubuntu_install_mxnet.sh /install/ubuntu_install_mxnet.sh
RUN bash /install/ubuntu_install_mxnet.sh

COPY install/ubuntu_install_onnx.sh /install/ubuntu_install_onnx.sh
RUN bash /install/ubuntu_install_onnx.sh

# Environment variables
ENV PATH=/usr/local/nvidia/bin:${PATH}
ENV PATH=/usr/local/cuda/bin:${PATH}
ENV CPLUS_INCLUDE_PATH=/usr/local/cuda/include:${CPLUS_INCLUDE_PATH}
ENV C_INCLUDE_PATH=/usr/local/cuda/include:${C_INCLUDE_PATH}
ENV LIBRARY_PATH=/usr/local/cuda/lib64:/usr/local/nvidia/lib64:${LIBRARY_PATH}
ENV LD_LIBRARY_PATH=/usr/local/cuda/lib64:/usr/local/nvidia/lib64:${LD_LIBRARY_PATH}
6 changes: 6 additions & 0 deletions nnvm/tests/ci_build/Dockerfile.lint
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
# Image for the lint stage: provides cpplint/pylint for the code checks
# and doxygen/graphviz for the C++ documentation checks (see
# tests/scripts/task_lint.sh, run via ci_build.sh lint).
FROM ubuntu:16.04

RUN apt-get update && apt-get install -y python-pip sudo
RUN apt-get install -y doxygen graphviz
RUN pip install cpplint pylint
36 changes: 36 additions & 0 deletions nnvm/tests/ci_build/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
# CI Build Scripts

This directory contains the files and setup instructions to run all tests.

## Run locally

To run locally, we need to first install
[docker](https://docs.docker.com/engine/installation/) and
[nvidia-docker](https://github.com/NVIDIA/nvidia-docker/wiki).

Then we can run the tasks defined in the [Jenkinsfile](../../Jenkinsfile) by
using [`ci_build.sh`](./ci_build.sh). For example:

- lint the Python code

```bash
./ci_build.sh lint make pylint
```

- build the code with CUDA support

```bash
./ci_build.sh gpu tests/scripts/task_build.sh
```

- run the Python unit tests

```bash
./ci_build.sh gpu tests/scripts/task_python_test.sh
```

- build the documentation; the results will be available at `docs/_build/html`

```bash
tests/ci_build/ci_build.sh gpu tests/scripts/task_python_docs.sh
```
Loading

0 comments on commit e04779b

Please sign in to comment.