enable trt test check and fix trt ut error(3/3) (PaddlePaddle#36581)
jiweibo authored and piotrekobi committed Nov 3, 2021
1 parent d8b343b commit fde2b70
Showing 13 changed files with 87 additions and 21 deletions.
4 changes: 4 additions & 0 deletions paddle/fluid/framework/ir/graph_viz_pass.cc
@@ -62,10 +62,14 @@ void GraphVizPass::ApplyImpl(ir::Graph* graph) const {
}
}
}
const std::string& optim_cache_dir = Get<std::string>("optim_cache_dir");
std::string program_bytes = program_desc.Proto()->SerializeAsString();
// rename from "17_ir_fc_fuse_pass.dot" to "fc_fuse_pass.pdmodel"
program_path =
graph_viz_path.substr(found1 + 4, found2 - found1 - 4) + ".pdmodel";
if (!optim_cache_dir.empty()) {
program_path = optim_cache_dir + "/" + program_path;
}
std::ofstream file(program_path.c_str(), std::ios::binary);
file.write(program_bytes.c_str(), program_bytes.size());
file.close();
14 changes: 11 additions & 3 deletions paddle/fluid/inference/analysis/ir_pass_manager.cc
@@ -56,10 +56,18 @@ void IRPassManager::CreatePasses(Argument *argument,
auto pass = framework::ir::PassRegistry::Instance().Get(pass_name);

if (pass_name == "graph_viz_pass") {
std::string dot_file_path = std::to_string(pass_num) + "_ir_" +
(pre_pass.empty() ? "origin" : pre_pass) +
".dot";
std::string optim_cache_dir = argument->optim_cache_dir();
std::string dot_file_path;
if (optim_cache_dir.empty()) {
dot_file_path = std::to_string(pass_num) + "_ir_" +
(pre_pass.empty() ? "origin" : pre_pass) + ".dot";
} else {
dot_file_path = optim_cache_dir + "/" + std::to_string(pass_num) +
"_ir_" + (pre_pass.empty() ? "origin" : pre_pass) +
".dot";
}
pass->Set("graph_viz_path", new std::string(std::move(dot_file_path)));
pass->Set("optim_cache_dir", new std::string(std::move(optim_cache_dir)));
pass_num++;
} else if (pass_name == "mkldnn_placement_pass") {
pass->Set("mkldnn_enabled_op_types",
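The two hunks above route graph_viz_pass artifacts (the per-pass `.dot` graphs named `<pass_num>_ir_<pre_pass>.dot` and the `.pdmodel` program dumps derived from them) into the configured optimization cache directory instead of the current working directory. A minimal sketch of driving this from the Python inference API, assuming the `set_optim_cache_dir` and `switch_ir_debug` bindings behave like their C++ counterparts; the model path is hypothetical:

```python
import paddle.inference as paddle_infer

# Hypothetical model files; substitute a real exported inference model.
config = paddle_infer.Config("./model/inference.pdmodel",
                             "./model/inference.pdiparams")
# Route pass artifacts (<pass_num>_ir_<pre_pass>.dot and the renamed
# <pass_name>.pdmodel dumps) into this directory.
config.set_optim_cache_dir("./optim_cache")
# IR debug inserts graph_viz_pass between optimization passes.
config.switch_ir_debug(True)
predictor = paddle_infer.create_predictor(config)
```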
45 changes: 34 additions & 11 deletions paddle/fluid/inference/api/analysis_config.cc
@@ -12,14 +12,20 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#include <sstream>
#include <string>
#include <tuple>
#include "paddle/fluid/inference/api/paddle_analysis_config.h"
#include "paddle/fluid/inference/api/paddle_pass_builder.h"
#include "paddle/fluid/inference/utils/table_printer.h"
#include "paddle/fluid/platform/cpu_info.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/gpu_info.h"

#ifdef PADDLE_WITH_TENSORRT
#include "paddle/fluid/inference/tensorrt/helper.h"
#endif

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
DECLARE_uint64(initial_gpu_memory_in_mb);
#endif
@@ -758,17 +764,6 @@ std::string AnalysisConfig::Summary() {
{"mkldnn_cache_capacity", std::to_string(mkldnn_cache_capacity_)});
os.InsetDivider();

auto Precision2String =
[](paddle::AnalysisConfig::Precision prec) -> std::string {
if (prec == Precision::kFloat32)
return "fp32";
else if (prec == Precision::kHalf)
return "fp16";
else if (prec == Precision::kInt8)
return "int8";
else
return "None";
};
// gpu info
os.InsertRow({"use_gpu", use_gpu_ ? "true" : "false"});
if (use_gpu_) {
@@ -780,6 +775,33 @@

os.InsertRow({"use_tensorrt", use_tensorrt_ ? "true" : "false"});
if (use_tensorrt_) {
#ifdef PADDLE_WITH_TENSORRT
auto Precision2String =
[](paddle::AnalysisConfig::Precision prec) -> std::string {
if (prec == Precision::kFloat32)
return "fp32";
else if (prec == Precision::kHalf)
return "fp16";
else if (prec == Precision::kInt8)
return "int8";
else
return "None";
};
auto version2string =
[](const std::tuple<int, int, int> &ver) -> std::string {
std::ostringstream os;
int major = std::get<0>(ver);
int minor = std::get<1>(ver);
int patch = std::get<2>(ver);
os << major << "." << minor << "." << patch;
return os.str();
};
os.InsertRow(
{"trt_compile_version",
version2string(inference::tensorrt::GetTrtCompileVersion())});
os.InsertRow(
{"trt_runtime_version",
version2string(inference::tensorrt::GetTrtRuntimeVersion())});
os.InsertRow({"tensorrt_precision_mode",
Precision2String(tensorrt_precision_mode_)});
os.InsertRow({"tensorrt_workspace_size",
@@ -805,6 +827,7 @@ std::string AnalysisConfig::Summary() {
if (trt_use_dla_) {
os.InsertRow({"tensorrt_dla_core", std::to_string(trt_dla_core_)});
}
#endif
}
}
os.InsetDivider();
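These `Summary()` additions report both the TensorRT version Paddle was compiled against and the one it is running with, and they only make sense in a TensorRT build, which is why the `Precision2String` lambda moves inside the new `#ifdef PADDLE_WITH_TENSORRT` guard. A rough sketch of inspecting the table from Python, assuming the `summary`, `enable_tensorrt_engine`, and `get_trt_*_version` bindings exist in the build at hand; the model path is hypothetical:

```python
import paddle.inference as paddle_infer

config = paddle_infer.Config("./model/inference.pdmodel",
                             "./model/inference.pdiparams")
config.enable_use_gpu(100, 0)  # 100 MB initial pool on GPU 0
config.enable_tensorrt_engine(
    workspace_size=1 << 30,
    max_batch_size=1,
    min_subgraph_size=3,
    precision_mode=paddle_infer.PrecisionType.Half,
    use_static=False,
    use_calib_mode=False)

# The table now includes trt_compile_version / trt_runtime_version rows.
print(config.summary())
# The same version tuples are also exposed directly, e.g. (8, 0, 3).
print(paddle_infer.get_trt_compile_version())
print(paddle_infer.get_trt_runtime_version())
```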
23 changes: 23 additions & 0 deletions paddle/scripts/paddle_build.sh
@@ -2388,6 +2388,25 @@ function find_temporary_files() {
fi
}

function trt_convert_test() {
set +e
cd ${PADDLE_ROOT}
result_num=0
export PYTHONPATH=$PYTHONPATH:${PADDLE_ROOT}/build/python
for file_name in `find python/ -name 'test_trt_convert*'`;do
echo "----- test trt ut: $file_name -----"
python $file_name
res=$?
if [ "$res" != "0" ];then
echo "$file_name convert test failed " >&2
result_num=11
fi
done
if [ "$result_num" != "0" ];then
exit 11
fi
}
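As the `main()` dispatch below shows, the new check runs standalone via `bash paddle/scripts/paddle_build.sh trt_convert_test`: it executes every `test_trt_convert*` file found under `python/` and exits with code 11 if any of them fails.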

function build_pr_and_develop() {
cmake_gen_and_build ${PYTHON_ABI:-""} ${parallel_number}
mkdir ${PADDLE_ROOT}/build/pr_whl && cp ${PADDLE_ROOT}/build/python/dist/*.whl ${PADDLE_ROOT}/build/pr_whl
@@ -2656,6 +2675,10 @@ function main() {
test_model_benchmark)
test_model_benchmark
;;
trt_convert_test)
# only test trt convert.
trt_convert_test
;;
*)
print_usage
exit 1
@@ -15,6 +15,7 @@
from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons
from program_config import TensorConfig, ProgramConfig
import numpy as np
import unittest
import paddle.inference as paddle_infer
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
@@ -18,6 +18,7 @@
import paddle.inference as paddle_infer
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest


class TrtConvertConv2dFusionTest(TrtLayerAutoScanTest):
@@ -15,6 +15,7 @@
from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons
from program_config import TensorConfig, ProgramConfig
import numpy as np
import unittest
import paddle.inference as paddle_infer
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
@@ -18,6 +18,7 @@
import paddle.inference as paddle_infer
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest


class TrtConvertDepthwiseConv2dTest(TrtLayerAutoScanTest):
@@ -18,6 +18,7 @@
import paddle.inference as paddle_infer
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest


class TrtConvertDepthwiseConv2dTransposeTest(TrtLayerAutoScanTest):
@@ -18,6 +18,7 @@
import paddle.inference as paddle_infer
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest


class TrtConvertNearestInterpV2Test(TrtLayerAutoScanTest):
@@ -120,7 +120,7 @@ def generate_trt_nodes_num(attrs, dynamic_shape):
attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), (1e-5, 1e-5)
attrs, False), (1e-4, 1e-4)

# for dynamic_shape
generate_dynamic_shape(attrs)
@@ -129,7 +129,7 @@ def generate_trt_nodes_num(attrs, dynamic_shape):
True), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), (1e-5, 1e-5)
attrs, True), (1e-4, 1e-4)

pass

@@ -120,7 +120,7 @@ def generate_trt_nodes_num(attrs, dynamic_shape):
attrs, False), (1e-5, 1e-5)
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), (1e-5, 1e-5)
attrs, False), (1e-4, 1e-4)

# for dynamic_shape
generate_dynamic_shape(attrs)
@@ -129,7 +129,7 @@ def generate_trt_nodes_num(attrs, dynamic_shape):
attrs, True), (1e-5, 1e-5)
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), (1e-5, 1e-5)
attrs, True), (1e-4, 1e-4)

pass

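In both convert tests above, the tolerance for the `PrecisionType.Half` runs is loosened from 1e-5 to 1e-4, presumably because FP16 cannot guarantee five decimal digits of agreement with the FP32 reference. A quick numpy check of float16 machine epsilon illustrates the bound:

```python
import numpy as np

# float16 has a 10-bit mantissa, so its machine epsilon is 2**-10,
# roughly 9.8e-4 -- already coarser than a 1e-5 tolerance.
print(np.finfo(np.float16).eps)  # ~0.000977
```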
@@ -122,7 +122,8 @@ def assert_tensors_near(self,
"Output has diff between GPU and TensorRT. ")

def assert_op_size(self, trt_engine_num, paddle_op_num):
last_passed_program = 'transpose_flatten_concat_fuse_pass.pdmodel'
last_passed_program = os.path.join(
self.trt_cache_dir, 'transpose_flatten_concat_fuse_pass.pdmodel')
model_bytes = paddle.static.load_from_file(last_passed_program)
pg = paddle.static.deserialize_program(model_bytes)
main_block = pg.desc.block(0)
@@ -179,7 +180,8 @@ def inference_config_str(self, config: paddle_infer.Config):

def run_test(self, quant=False):
status = True
np.random.seed(int(1000 * time.time()) % 2**32)
# Choose different tests by week
np.random.seed(int(time.strftime("%W")))
run_flags = []
for prog_config in self.sample_program_configs():
# In CI, only run 30% cases
@@ -283,4 +285,4 @@ def run_test(self, quant=False):
self.success_log('RUN ' + str(prog_config) + ' vs ' +
self.inference_config_str(pred_config))

# self.assertTrue(status)
self.assertTrue(status)
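Three harness fixes land in this file: `assert_op_size` now looks for the serialized `transpose_flatten_concat_fuse_pass.pdmodel` under `self.trt_cache_dir` (matching the `optim_cache_dir` routing above), the previously commented-out `assertTrue(status)` is restored so failures actually fail the run, and the random seed is derived from the calendar week rather than wall-clock time, so the 30% CI sample stays stable within a week but rotates across weeks. A small illustration of the seeding scheme, with the 30% rate taken from the surrounding code and the sampling call itself a hypothetical stand-in:

```python
import time
import numpy as np

# time.strftime("%W") is the zero-padded week of the year (00-53), so
# every run within the same calendar week seeds numpy identically and
# therefore selects the same subset of test cases.
np.random.seed(int(time.strftime("%W")))
# In CI only ~30% of program configs are run; same-week runs pick the
# same ones, while the subset changes from week to week.
run_case = np.random.choice([False, True], size=10, p=[0.7, 0.3])
print(run_case)
```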
