Skip to content

Commit

Permalink
Merge pull request #1137 from pytorch/circleci-editor/245/circleci-project-setup
Browse files Browse the repository at this point in the history

CI/CD setup
  • Loading branch information
Wei authored Jun 22, 2022
2 parents 52e686e + d2375fc commit 30cd67c
Show file tree
Hide file tree
Showing 3 changed files with 282 additions and 12 deletions.
96 changes: 96 additions & 0 deletions .circleci/config.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,96 @@
# Use the latest 2.1 version of CircleCI pipeline process engine.
# See: https://circleci.com/docs/2.0/configuration-reference
version: 2.1

# Define a job to be invoked later in a workflow.
# See: https://circleci.com/docs/2.0/configuration-reference/#jobs
jobs:
  build:
    machine:
      # Primary container image where all steps run.
      # image: nvcr.io/nvidia/tensorrt:22.01-py3 # does not work with customized image
      # https://circleci.com/docs/2.0/configuration-reference#available-linux-gpu-images
      image: ubuntu-2004-cuda-11.4:202110-01
    resource_class: gpu.nvidia.large
    steps:
      - checkout
      - run:
          name: install cudnn + tensorrt + bazel
          command: |
            cd ~
            OS=ubuntu2004
            # NOTE(review): cuDNN pin targets cuda11.3 while TensorRT targets
            # cuda11.4 — these are the apt package names as published; confirm
            # both when bumping versions.
            CUDNN_VERSION=8.2.1.*-1+cuda11.3
            TRT_VERSION=8.2.4-1+cuda11.4
            BAZEL_VERSION=5.1.1
            wget https://developer.download.nvidia.com/compute/cuda/repos/${OS}/x86_64/cuda-${OS}.pin
            sudo mv cuda-${OS}.pin /etc/apt/preferences.d/cuda-repository-pin-600
            sudo apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/${OS}/x86_64/7fa2af80.pub
            sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 536F8F1DE80F6A35
            sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC
            sudo add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/${OS}/x86_64/ /"
            sudo apt-get update
            sudo apt-get install libcudnn8=${CUDNN_VERSION}
            sudo apt-get install libcudnn8-dev=${CUDNN_VERSION}
            # BUGFIX: URL previously used {OS} (missing "$"), embedding the
            # literal text "{OS}" in the key URL so the fetch could never
            # resolve; now expands the shell variable like every other use.
            sudo apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/${OS}/x86_64/3bf863cc.pub
            sudo add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/${OS}/x86_64/ /"
            sudo apt-get update
            sudo apt-get install libnvinfer8=${TRT_VERSION} libnvonnxparsers8=${TRT_VERSION} libnvparsers8=${TRT_VERSION} libnvinfer-plugin8=${TRT_VERSION} libnvinfer-dev=${TRT_VERSION} libnvonnxparsers-dev=${TRT_VERSION} libnvparsers-dev=${TRT_VERSION} libnvinfer-plugin-dev=${TRT_VERSION} python3-libnvinfer=${TRT_VERSION}
            # check available version, apt list libnvinfer8 -a
            sudo wget -q https://github.com/bazelbuild/bazel/releases/download/${BAZEL_VERSION}/bazel-${BAZEL_VERSION}-linux-x86_64 -O /usr/bin/bazel
            sudo chmod a+x /usr/bin/bazel
      - run:
          name: set up python environment
          command: |
            pip3 install nvidia-pyindex
            pip3 install nvidia-tensorrt==8.2.4.2
            pip3 install --pre torch==1.13.0.dev20220618 torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/cu113
            pip3 install pytest parameterized expecttest
            # install torch_tensorrt
            mv WORKSPACE.ci WORKSPACE
            cd py
            python3 setup.py install
            # install fx2trt
            # cd py/torch_tensorrt/fx/setup
            # python3 setup.py install
      - run:
          name: run fx2trt tests
          command: |
            # one fix pending to enable below
            # cd py/torch_tensorrt/fx/test
            # pytest $(find . -name '*.py' | grep -v test_dispatch* | grep -v test_setitem*)
            cd py/torch_tensorrt/fx/test
            pushd converters/acc_op
            pytest
            popd
            pushd passes
            list_passes=$(ls | grep -v test_setitem*)
            pytest $list_passes
            popd
            pushd core
            pytest
            popd
            # pushd quant
            # pytest
            # popd
            pushd tools
            pytest
            popd
            pushd trt_lower
            pytest
            popd
            pushd tracer
            list_tracer=$(ls | grep -v test_dispatch_*)
            pytest $list_tracer
            popd

# Invoke jobs via workflows
# See: https://circleci.com/docs/2.0/configuration-reference/#workflows
workflows:
  build_run:
    jobs:
      - build
147 changes: 147 additions & 0 deletions WORKSPACE.ci
Original file line number Diff line number Diff line change
@@ -0,0 +1,147 @@
workspace(name = "Torch-TensorRT")

load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")

http_archive(
    name = "rules_python",
    sha256 = "778197e26c5fbeb07ac2a2c5ae405b30f6cb7ad1f5510ea6fdac03bded96cc6f",
    url = "https://github.com/bazelbuild/rules_python/releases/download/0.2.0/rules_python-0.2.0.tar.gz",
)

load("@rules_python//python:pip.bzl", "pip_install")

http_archive(
    name = "rules_pkg",
    sha256 = "038f1caa773a7e35b3663865ffb003169c6a71dc995e39bf4815792f385d837d",
    urls = [
        "https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.4.0/rules_pkg-0.4.0.tar.gz",
        "https://github.com/bazelbuild/rules_pkg/releases/download/0.4.0/rules_pkg-0.4.0.tar.gz",
    ],
)

load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")

rules_pkg_dependencies()

git_repository(
    name = "googletest",
    commit = "703bd9caab50b139428cea1aaff9974ebee5742e",
    remote = "https://github.com/google/googletest",
    shallow_since = "1570114335 -0400",
)

# External dependency for torch_tensorrt if you already have precompiled binaries.
local_repository(
    name = "torch_tensorrt",
    path = "/opt/conda/lib/python3.8/site-packages/torch_tensorrt",
)

# CUDA should be installed on the system locally
new_local_repository(
    name = "cuda",
    build_file = "@//third_party/cuda:BUILD",
    path = "/usr/local/cuda/",
)

new_local_repository(
    name = "cublas",
    build_file = "@//third_party/cublas:BUILD",
    path = "/usr",
)

#############################################################################################################
# Tarballs and fetched dependencies (default - use in cases when building from precompiled bin and tarballs)
#############################################################################################################

#http_archive(
#    name = "libtorch",
#    build_file = "@//third_party/libtorch:BUILD",
#    sha256 = "8d9e829ce9478db4f35bdb7943308cf02e8a2f58cf9bb10f742462c1d57bf287",
#    strip_prefix = "libtorch",
#    urls = ["https://download.pytorch.org/libtorch/cu113/libtorch-cxx11-abi-shared-with-deps-1.11.0%2Bcu113.zip"],
#)
#
#http_archive(
#    name = "libtorch_pre_cxx11_abi",
#    build_file = "@//third_party/libtorch:BUILD",
#    sha256 = "90159ecce3ff451f3ef3f657493b6c7c96759c3b74bbd70c1695f2ea2f81e1ad",
#    strip_prefix = "libtorch",
#    urls = ["https://download.pytorch.org/libtorch/cu113/libtorch-shared-with-deps-1.11.0%2Bcu113.zip"],
#)

# Download these tarballs manually from the NVIDIA website
# Either place them in the distdir directory in third_party and use the --distdir flag
# or modify the urls to "file:///<PATH TO TARBALL>/<TARBALL NAME>.tar.gz

#http_archive(
#    name = "cudnn",
#    build_file = "@//third_party/cudnn/archive:BUILD",
#    sha256 = "0e5d2df890b9967efa6619da421310d97323565a79f05a1a8cb9b7165baad0d7",
#    strip_prefix = "cuda",
#    urls = [
#        "https://developer.nvidia.com/compute/machine-learning/cudnn/secure/8.2.4/11.4_20210831/cudnn-11.4-linux-x64-v8.2.4.15.tgz",
#    ],
#)
#
#http_archive(
#    name = "tensorrt",
#    build_file = "@//third_party/tensorrt/archive:BUILD",
#    sha256 = "826180eaaecdf9a7e76116855b9f1f3400ea9b06e66b06a3f6a0747ba6f863ad",
#    strip_prefix = "TensorRT-8.2.4.2",
#    urls = [
#        "https://developer.nvidia.com/compute/machine-learning/tensorrt/secure/8.2.4/tars/tensorrt-8.2.4.2.linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz",
#    ],
#)

####################################################################################
# Locally installed dependencies (use in cases of custom dependencies or aarch64)
####################################################################################

# NOTE: In the case you are using just the pre-cxx11-abi path or just the cxx11 abi path
# with your local libtorch, just point deps at the same path to satisfy bazel.

# NOTE: NVIDIA's aarch64 PyTorch (python) wheel file uses the CXX11 ABI unlike PyTorch's standard
# x86_64 python distribution. If using NVIDIA's version just point to the root of the package
# for both versions here and do not use --config=pre-cxx11-abi

new_local_repository(
    name = "libtorch",
    build_file = "third_party/libtorch/BUILD",
    path = "/opt/circleci/.pyenv/versions/3.9.4/lib/python3.9/site-packages/torch",
)

new_local_repository(
    name = "libtorch_pre_cxx11_abi",
    build_file = "third_party/libtorch/BUILD",
    path = "/opt/circleci/.pyenv/versions/3.9.4/lib/python3.9/site-packages/torch",
)

new_local_repository(
    name = "cudnn",
    build_file = "@//third_party/cudnn/local:BUILD",
    path = "/usr/",
)

new_local_repository(
    name = "tensorrt",
    build_file = "@//third_party/tensorrt/local:BUILD",
    path = "/usr/",
)

# #########################################################################
# # Testing Dependencies (optional - comment out on aarch64)
# #########################################################################
# pip_install(
#     name = "torch_tensorrt_py_deps",
#     requirements = "//py:requirements.txt",
# )

# pip_install(
#     name = "py_test_deps",
#     requirements = "//tests/py:requirements.txt",
# )

pip_install(
    name = "pylinter_deps",
    requirements = "//tools/linter:requirements.txt",
)
51 changes: 39 additions & 12 deletions py/setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,11 +23,14 @@
JETPACK_VERSION = None

__version__ = '1.2.0a0'

FX_ONLY = False

def get_git_revision_short_hash() -> str:
return subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).decode('ascii').strip()

if "--fx-only" in sys.argv:
FX_ONLY = True
sys.argv.remove("--fx-only")

if "--release" not in sys.argv:
__version__ = __version__ + "+" + get_git_revision_short_hash()
Expand Down Expand Up @@ -138,11 +141,14 @@ def finalize_options(self):
develop.finalize_options(self)

def run(self):
global CXX11_ABI
build_libtorchtrt_pre_cxx11_abi(develop=True, cxx11_abi=CXX11_ABI)
gen_version_file()
copy_libtorchtrt()
develop.run(self)
if FX_ONLY:
develop.run(self)
else:
global CXX11_ABI
build_libtorchtrt_pre_cxx11_abi(develop=True, cxx11_abi=CXX11_ABI)
gen_version_file()
copy_libtorchtrt()
develop.run(self)


class InstallCommand(install):
Expand All @@ -155,11 +161,14 @@ def finalize_options(self):
install.finalize_options(self)

def run(self):
global CXX11_ABI
build_libtorchtrt_pre_cxx11_abi(develop=False, cxx11_abi=CXX11_ABI)
gen_version_file()
copy_libtorchtrt()
install.run(self)
if FX_ONLY:
install.run(self)
else:
global CXX11_ABI
build_libtorchtrt_pre_cxx11_abi(develop=False, cxx11_abi=CXX11_ABI)
gen_version_file()
copy_libtorchtrt()
install.run(self)


class BdistCommand(bdist_wheel):
Expand Down Expand Up @@ -254,6 +263,23 @@ def run(self):
] + (["-D_GLIBCXX_USE_CXX11_ABI=1"] if CXX11_ABI else ["-D_GLIBCXX_USE_CXX11_ABI=0"]),
undef_macros=["NDEBUG"])
]
if FX_ONLY:
ext_modules=None
packages=[
"torch_tensorrt.fx",
"torch_tensorrt.fx.converters",
"torch_tensorrt.fx.passes",
"torch_tensorrt.fx.tools",
"torch_tensorrt.fx.tracer.acc_tracer",
]
package_dir={
"torch_tensorrt.fx": "torch_tensorrt/fx",
"torch_tensorrt.fx.converters": "torch_tensorrt/fx/converters",
"torch_tensorrt.fx.passes": "torch_tensorrt/fx/passes",
"torch_tensorrt.fx.tools": "torch_tensorrt/fx/tools",
"torch_tensorrt.fx.tracer.acc_tracer": "torch_tensorrt/fx/tracer/acc_tracer",
}


with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
Expand Down Expand Up @@ -282,7 +308,8 @@ def run(self):
},
zip_safe=False,
license="BSD",
packages=find_packages(),
packages=packages if FX_ONLY else find_packages(),
package_dir=package_dir if FX_ONLY else {},
classifiers=[
"Development Status :: 5 - Stable", "Environment :: GPU :: NVIDIA CUDA",
"License :: OSI Approved :: BSD License", "Intended Audience :: Developers",
Expand Down

0 comments on commit 30cd67c

Please sign in to comment.