Merge pull request #1329 from pytorch/python_tests
refactor(//tests) : Refactor the test suite
peri044 authored Sep 8, 2022
2 parents 99db0cd + af20761 commit 10b9ecd
Showing 35 changed files with 713 additions and 335 deletions.
1 change: 1 addition & 0 deletions .circleci/config.yml
@@ -435,6 +435,7 @@ commands:
mkdir -p /tmp/artifacts/test_results
cd tests/py
pytest --junitxml=/tmp/artifacts/test_results/api/api_test_results.xml api/
pytest --junitxml=/tmp/artifacts/test_results/models/models_test_results.xml models/
pytest --junitxml=/tmp/artifacts/test_results/integrations/integrations_test_results.xml integrations/
cd ~/project
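For reference, the new models/ suite can also be driven through pytest's Python entry point; the following is a minimal sketch mirroring the CI step above (the report path is illustrative):

import pytest

# Run the models suite and emit a JUnit XML report, as the CI job does.
exit_code = pytest.main(
    ["--junitxml=/tmp/artifacts/test_results/models/models_test_results.xml", "models/"]
)
raise SystemExit(int(exit_code))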
2 changes: 1 addition & 1 deletion .github/workflows/docgen.yml
@@ -31,7 +31,7 @@ jobs:
- name: Set up Python 3.9.4
uses: actions/setup-python@v2
with:
python-version: 3.9.4
python-version: 3.9.4
- uses: actions/checkout@v2
with:
ref: ${{github.head_ref}}
2 changes: 1 addition & 1 deletion .github/workflows/linter.yml
@@ -39,7 +39,7 @@ jobs:
pip3 install -r $GITHUB_WORKSPACE/.github/scripts/requirements.txt
pip3 install -r $GITHUB_WORKSPACE/requirements-dev.txt
- name: Lint C++
run: |
run: |
cd $GITHUB_WORKSPACE
python3 $GITHUB_WORKSPACE/.github/scripts/run_cpp_linter.py
env:
96 changes: 39 additions & 57 deletions noxfile.py
@@ -30,6 +30,9 @@
if USE_HOST_DEPS:
print("Using dependencies from host python")

# Set epochs to train VGG model for accuracy tests
EPOCHS = 25

SUPPORTED_PYTHON_VERSIONS = ["3.7", "3.8", "3.9", "3.10"]

nox.options.sessions = [
@@ -63,31 +66,6 @@ def install_torch_trt(session):
session.run("python", "setup.py", "develop")


def download_datasets(session):
print(
"Downloading dataset to path",
os.path.join(TOP_DIR, "examples/int8/training/vgg16"),
)
session.chdir(os.path.join(TOP_DIR, "examples/int8/training/vgg16"))
session.run_always(
"wget", "https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz", external=True
)
session.run_always("tar", "-xvzf", "cifar-10-binary.tar.gz", external=True)
session.run_always(
"mkdir",
"-p",
os.path.join(TOP_DIR, "tests/accuracy/datasets/data"),
external=True,
)
session.run_always(
"cp",
"-rpf",
os.path.join(TOP_DIR, "examples/int8/training/vgg16/cifar-10-batches-bin"),
os.path.join(TOP_DIR, "tests/accuracy/datasets/data/cidar-10-batches-bin"),
external=True,
)


def train_model(session):
session.chdir(os.path.join(TOP_DIR, "examples/int8/training/vgg16"))
session.install("-r", "requirements.txt")
@@ -107,14 +85,14 @@ def train_model(session):
"--ckpt-dir",
"vgg16_ckpts",
"--epochs",
"25",
str(EPOCHS),
env={"PYTHONPATH": PYT_PATH},
)

session.run_always(
"python",
"export_ckpt.py",
"vgg16_ckpts/ckpt_epoch25.pth",
"vgg16_ckpts/ckpt_epoch" + str(EPOCHS) + ".pth",
env={"PYTHONPATH": PYT_PATH},
)
else:
@@ -130,10 +108,12 @@ def train_model(session):
"--ckpt-dir",
"vgg16_ckpts",
"--epochs",
"25",
str(EPOCHS),
)

session.run_always("python", "export_ckpt.py", "vgg16_ckpts/ckpt_epoch25.pth")
session.run_always(
"python", "export_ckpt.py", "vgg16_ckpts/ckpt_epoch" + str(EPOCHS) + ".pth"
)


def finetune_model(session):
@@ -156,17 +136,17 @@ def finetune_model(session):
"--ckpt-dir",
"vgg16_ckpts",
"--start-from",
"25",
str(EPOCHS),
"--epochs",
"26",
str(EPOCHS + 1),
env={"PYTHONPATH": PYT_PATH},
)

# Export model
session.run_always(
"python",
"export_qat.py",
"vgg16_ckpts/ckpt_epoch26.pth",
"vgg16_ckpts/ckpt_epoch" + str(EPOCHS + 1) + ".pth",
env={"PYTHONPATH": PYT_PATH},
)
else:
@@ -182,13 +162,17 @@ def finetune_model(session):
"--ckpt-dir",
"vgg16_ckpts",
"--start-from",
"25",
str(EPOCHS),
"--epochs",
"26",
str(EPOCHS + 1),
)

# Export model
session.run_always("python", "export_qat.py", "vgg16_ckpts/ckpt_epoch26.pth")
session.run_always(
"python",
"export_qat.py",
"vgg16_ckpts/ckpt_epoch" + str(EPOCHS + 1) + ".pth",
)


def cleanup(session):
@@ -219,6 +203,19 @@ def run_base_tests(session):
session.run_always("pytest", test)


def run_model_tests(session):
print("Running model tests")
session.chdir(os.path.join(TOP_DIR, "tests/py"))
tests = [
"models",
]
for test in tests:
if USE_HOST_DEPS:
session.run_always("pytest", test, env={"PYTHONPATH": PYT_PATH})
else:
session.run_always("pytest", test)


def run_accuracy_tests(session):
print("Running accuracy tests")
session.chdir(os.path.join(TOP_DIR, "tests/py"))
@@ -282,7 +279,7 @@ def run_dla_tests(session):
print("Running DLA tests")
session.chdir(os.path.join(TOP_DIR, "tests/py"))
tests = [
"test_api_dla.py",
"hw/test_api_dla.py",
]
for test in tests:
if USE_HOST_DEPS:
@@ -322,21 +319,19 @@ def run_l0_dla_tests(session):
cleanup(session)


def run_l1_accuracy_tests(session):
def run_l1_model_tests(session):
if not USE_HOST_DEPS:
install_deps(session)
install_torch_trt(session)
download_datasets(session)
train_model(session)
run_accuracy_tests(session)
download_models(session)
run_model_tests(session)
cleanup(session)


def run_l1_int8_accuracy_tests(session):
if not USE_HOST_DEPS:
install_deps(session)
install_torch_trt(session)
download_datasets(session)
train_model(session)
finetune_model(session)
run_int8_accuracy_tests(session)
@@ -347,9 +342,6 @@ def run_l2_trt_compatibility_tests(session):
if not USE_HOST_DEPS:
install_deps(session)
install_torch_trt(session)
download_models(session)
download_datasets(session)
train_model(session)
run_trt_compatibility_tests(session)
cleanup(session)

@@ -376,9 +368,9 @@ def l0_dla_tests(session):


@nox.session(python=SUPPORTED_PYTHON_VERSIONS, reuse_venv=True)
def l1_accuracy_tests(session):
"""Checking accuracy performance on various usecases"""
run_l1_accuracy_tests(session)
def l1_model_tests(session):
"""When a user needs to test the functionality of standard models compilation and results"""
run_l1_model_tests(session)


@nox.session(python=SUPPORTED_PYTHON_VERSIONS, reuse_venv=True)
@@ -397,13 +389,3 @@ def l2_trt_compatibility_tests(session):
def l2_multi_gpu_tests(session):
"""Makes sure that Torch-TensorRT can operate on multi-gpu systems"""
run_l2_multi_gpu_tests(session)


@nox.session(python=SUPPORTED_PYTHON_VERSIONS, reuse_venv=True)
def download_test_models(session):
"""Grab all the models needed for testing"""
try:
import torch
except ModuleNotFoundError:
install_deps(session)
download_models(session)
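In this refactor the l1_accuracy_tests session becomes l1_model_tests, which downloads pretrained models instead of training VGG16, and the standalone download_test_models session is removed. A condensed, illustrative sketch of the pattern these nox sessions follow (the real helpers above also install Torch-TensorRT and clean up afterwards; the body here is simplified):

import nox

@nox.session(python=["3.8", "3.9", "3.10"], reuse_venv=True)
def l1_model_tests(session):
    """Run the standard-model compilation tests under tests/py/models."""
    session.install("pytest")  # the real session installs the full test dependencies
    session.chdir("tests/py")
    session.run("pytest", "models")

# Invoked from the repository root with: nox -s l1_model_tests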
18 changes: 14 additions & 4 deletions py/torch_tensorrt/ptq.py
@@ -56,6 +56,13 @@ def write_calibration_cache(self, cache):
return b""


# deepcopy (which involves pickling) is performed on the compile_spec internally during compilation.
# We register this __reduce__ function for the pickler to identify the calibrator object returned by DataLoaderCalibrator during deepcopy.
# The return value should be the object's local name relative to the module: https://docs.python.org/3/library/pickle.html#object.__reduce__
def __reduce__(self):
return self.__class__.__name__


class DataLoaderCalibrator(object):
"""
Constructs a calibrator class in TensorRT and uses pytorch dataloader to load/preproces
@@ -114,24 +121,27 @@ def __new__(cls, *args, **kwargs):
"get_batch": get_cache_mode_batch if use_cache else get_batch,
"read_calibration_cache": read_calibration_cache,
"write_calibration_cache": write_calibration_cache,
"__reduce__": __reduce__, # used when you deepcopy the DataLoaderCalibrator object
}

# Using type metaclass to construct calibrator class based on algorithm type
if algo_type == CalibrationAlgo.ENTROPY_CALIBRATION:
return type(
"DataLoaderCalibrator", (_C.IInt8EntropyCalibrator,), attribute_mapping
"Int8EntropyCalibrator", (_C.IInt8EntropyCalibrator,), attribute_mapping
)()
elif algo_type == CalibrationAlgo.ENTROPY_CALIBRATION_2:
return type(
"DataLoaderCalibrator", (_C.IInt8MinMaxCalibrator,), attribute_mapping
"Int8EntropyCalibrator2",
(_C.IInt8EntropyCalibrator2,),
attribute_mapping,
)()
elif algo_type == CalibrationAlgo.LEGACY_CALIBRATION:
return type(
"DataLoaderCalibrator", (_C.IInt8LegacyCalibrator,), attribute_mapping
"Int8LegacyCalibrator", (_C.IInt8LegacyCalibrator,), attribute_mapping
)()
elif algo_type == CalibrationAlgo.MINMAX_CALIBRATION:
return type(
"DataLoaderCalibrator", (_C.IInt8MinMaxCalibrator,), attribute_mapping
"Int8MinMaxCalibrator", (_C.IInt8MinMaxCalibrator,), attribute_mapping
)()
else:
log(
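Two Python mechanisms are at work in this change: the calibrator class is constructed dynamically with type(name, bases, namespace), and the registered __reduce__ makes deepcopy treat the instance as an opaque named object instead of trying to pickle the TensorRT base class. A self-contained sketch of both, with no TensorRT dependency (class and attribute names are illustrative):

import copy

def _reduce(self):
    # Returning a plain string tells pickle (and therefore deepcopy) to treat
    # the object as a named global rather than serializing its state.
    return self.__class__.__name__

attribute_mapping = {
    "algo_name": "entropy",   # stand-in for the calibrator attributes above
    "__reduce__": _reduce,
}

# Mirrors how the Int8*Calibrator classes are constructed from attribute_mapping.
DemoCalibrator = type("DemoCalibrator", (object,), attribute_mapping)

calibrator = DemoCalibrator()
compile_spec = {"calibrator": calibrator}

copied = copy.deepcopy(compile_spec)
print(copied["calibrator"] is calibrator)  # True: carried over by reference, not re-pickled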
4 changes: 2 additions & 2 deletions py/torch_tensorrt/ts/_compile_spec.py
@@ -225,7 +225,7 @@ def _parse_input_signature(input_signature: Any):


def _parse_compile_spec(compile_spec_: Dict[str, Any]) -> _ts_C.CompileSpec:
# TODO: Remove deep copy once collections does not need partial compilation
# TODO: Use deepcopy to support partial compilation of collections
compile_spec = deepcopy(compile_spec_)
info = _ts_C.CompileSpec()

@@ -301,7 +301,7 @@ def _parse_compile_spec(compile_spec_: Dict[str, Any]) -> _ts_C.CompileSpec:
compile_spec["enabled_precisions"]
)

if "calibrator" in compile_spec:
if "calibrator" in compile_spec and compile_spec["calibrator"]:
info.ptq_calibrator = compile_spec["calibrator"]

if "sparse_weights" in compile_spec:
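The added truthiness check guards against a "calibrator" key that is present but unset; a tiny illustration (the dictionary below is hypothetical):

compile_spec = {"calibrator": None}

print("calibrator" in compile_spec)                                       # True
print(bool("calibrator" in compile_spec and compile_spec["calibrator"]))  # False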
2 changes: 1 addition & 1 deletion tests/core/lowering/test_module_fallback_passes.cpp
@@ -124,5 +124,5 @@ TEST(Lowering, LowerAndPartitionSimpleModuleFallbackCorrectly) {
}

auto trt_results = trt_mod.forward(trt_inputs_ivalues).toTensor();
ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results, trt_results, 2e-6));
ASSERT_TRUE(torch_tensorrt::tests::util::cosineSimEqual(jit_results, trt_results, 0.99));
}
4 changes: 2 additions & 2 deletions tests/core/partitioning/test_fallback_graph_output.cpp
@@ -34,7 +34,7 @@ TEST(Partitioning, ComputeResNet50FallbackGraphCorrectly) {
auto jit_results = mod.forward(jit_inputs_ivalues).toTensor();
auto trt_mod = torch_tensorrt::core::CompileGraph(mod, cfg);
auto trt_results = trt_mod.forward(trt_inputs_ivalues).toTensor();
ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results, trt_results, 2e-6));
ASSERT_TRUE(torch_tensorrt::tests::util::cosineSimEqual(jit_results, trt_results, 0.99));
}

TEST(Partitioning, ComputeMobileNetFallbackGraphCorrectly) {
@@ -64,6 +64,6 @@ TEST(Partitioning, ComputeMobileNetFallbackGraphCorrectly) {
auto jit_results = mod.forward(jit_inputs_ivalues).toTensor();
auto trt_mod = torch_tensorrt::core::CompileGraph(mod, cfg);
auto trt_results = trt_mod.forward(trt_inputs_ivalues).toTensor();
ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results, trt_results, 2e-6));
ASSERT_TRUE(torch_tensorrt::tests::util::cosineSimEqual(jit_results, trt_results, 0.99));
}
#endif
8 changes: 4 additions & 4 deletions tests/cpp/test_collections.cpp
@@ -42,7 +42,7 @@ TEST(CppAPITests, TestCollectionStandardTensorInput) {
auto trt_mod = torch_tensorrt::torchscript::compile(mod, compile_settings);
auto trt_out = trt_mod.forward(inputs_);

ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(out.toTensor(), trt_out.toTensor(), 1e-5));
ASSERT_TRUE(torch_tensorrt::tests::util::cosineSimEqual(out.toTensor(), trt_out.toTensor(), 0.99));
}

TEST(CppAPITests, TestCollectionTupleInput) {
@@ -85,7 +85,7 @@ TEST(CppAPITests, TestCollectionTupleInput) {
auto trt_mod = torch_tensorrt::torchscript::compile(mod, compile_settings);
auto trt_out = trt_mod.forward(complex_inputs);

ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(out.toTensor(), trt_out.toTensor(), 1e-5));
ASSERT_TRUE(torch_tensorrt::tests::util::cosineSimEqual(out.toTensor(), trt_out.toTensor(), 0.99));
}

TEST(CppAPITests, TestCollectionListInput) {
@@ -144,7 +144,7 @@ TEST(CppAPITests, TestCollectionListInput) {
LOG_DEBUG("Finish compile");
auto trt_out = trt_mod.forward(complex_inputs);

ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(out.toTensor(), trt_out.toTensor(), 1e-5));
ASSERT_TRUE(torch_tensorrt::tests::util::cosineSimEqual(out.toTensor(), trt_out.toTensor(), 0.99));
}

TEST(CppAPITests, TestCollectionTupleInputOutput) {
@@ -317,4 +317,4 @@ TEST(CppAPITests, TestCollectionComplexModel) {
out.toTuple()->elements()[0].toTensor(), trt_out.toTuple()->elements()[0].toTensor(), 1e-5));
ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(
out.toTuple()->elements()[1].toTensor(), trt_out.toTuple()->elements()[1].toTensor(), 1e-5));
}
}
6 changes: 1 addition & 5 deletions tests/cpp/test_compiled_modules.cpp
@@ -42,7 +42,7 @@ TEST_P(CppAPITests, CompiledModuleIsClose) {

for (size_t i = 0; i < trt_results.size(); i++) {
ASSERT_TRUE(
torch_tensorrt::tests::util::almostEqual(jit_results[i], trt_results[i].reshape_as(jit_results[i]), threshold));
torch_tensorrt::tests::util::cosineSimEqual(jit_results[i], trt_results[i].reshape_as(jit_results[i]), 0.99));
}
}

@@ -52,11 +52,7 @@ INSTANTIATE_TEST_SUITE_P(
CompiledModuleForwardIsCloseSuite,
CppAPITests,
testing::Values(
PathAndInput({"tests/modules/resnet18_traced.jit.pt", {{1, 3, 224, 224}}, {at::kFloat}, 2e-5}),
PathAndInput({"tests/modules/resnet50_traced.jit.pt", {{1, 3, 224, 224}}, {at::kFloat}, 2e-5}),
PathAndInput({"tests/modules/mobilenet_v2_traced.jit.pt", {{1, 3, 224, 224}}, {at::kFloat}, 2e-5}),
PathAndInput({"tests/modules/resnet18_scripted.jit.pt", {{1, 3, 224, 224}}, {at::kFloat}, 2e-5}),
PathAndInput({"tests/modules/resnet50_scripted.jit.pt", {{1, 3, 224, 224}}, {at::kFloat}, 2e-5}),
PathAndInput({"tests/modules/mobilenet_v2_scripted.jit.pt", {{1, 3, 224, 224}}, {at::kFloat}, 2e-5}),
PathAndInput({"tests/modules/efficientnet_b0_scripted.jit.pt", {{1, 3, 224, 224}}, {at::kFloat}, 8e-3}),
PathAndInput({"tests/modules/bert_base_uncased_traced.jit.pt", {{1, 14}, {1, 14}}, {at::kInt, at::kInt}, 8e-2}),
4 changes: 2 additions & 2 deletions tests/cpp/test_module_fallback.cpp
@@ -30,7 +30,7 @@ TEST(CppAPITest, ResNetModuleFallbacksCorrectly) {
auto jit_results = mod.forward(jit_inputs_ivalues).toTensor();
auto trt_mod = torch_tensorrt::ts::compile(mod, cfg);
auto trt_results = trt_mod.forward(trt_inputs_ivalues).toTensor();
ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results, trt_results, 2e-6));
ASSERT_TRUE(torch_tensorrt::tests::util::cosineSimEqual(jit_results, trt_results, 0.99));
}

TEST(CppAPITest, MobileNetModuleFallbacksCorrectlyWithOneEngine) {
@@ -69,6 +69,6 @@ TEST(CppAPITest, MobileNetModuleFallbacksCorrectlyWithOneEngine) {
ASSERT_TRUE(trt_count == 1);

auto trt_results = trt_mod.forward(trt_inputs_ivalues).toTensor();
ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results, trt_results, 2e-6));
ASSERT_TRUE(torch_tensorrt::tests::util::cosineSimEqual(jit_results, trt_results, 0.99));
}
#endif
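The C++ test changes above replace the fixed absolute-tolerance check (almostEqual) with a cosine-similarity check at a 0.99 threshold, which tolerates small element-wise drift in large model outputs. cosineSimEqual is a C++ test utility in this repository; the following PyTorch snippet is only an assumed approximation of the comparison it performs:

import torch
import torch.nn.functional as F

def cosine_sim_equal(a: torch.Tensor, b: torch.Tensor, threshold: float = 0.99) -> bool:
    # Compare the direction of the flattened outputs instead of their
    # element-wise differences; 1.0 means the tensors are perfectly aligned.
    similarity = F.cosine_similarity(a.flatten(), b.flatten(), dim=0)
    return bool(similarity >= threshold)

jit_out = torch.randn(1, 1000)
trt_out = jit_out + 1e-3 * torch.randn_like(jit_out)  # small numerical drift
assert cosine_sim_equal(jit_out, trt_out, 0.99)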
