diff --git a/python/tvm/driver/tvmc/compiler.py b/python/tvm/driver/tvmc/compiler.py index 8f24dd4d7536..b29aede95891 100644 --- a/python/tvm/driver/tvmc/compiler.py +++ b/python/tvm/driver/tvmc/compiler.py @@ -137,6 +137,11 @@ def add_compile_parser(subparsers, _): type=parse_pass_list_str, default="", ) + parser.add_argument( + "--module-name", + default="default", + help="The output module name. Defaults to 'default'.", + ) def drive_compile(args): @@ -179,6 +184,7 @@ def drive_compile(args): disabled_pass=args.disabled_pass, pass_context_configs=args.pass_config, additional_target_options=reconstruct_target_args(args), + mod_name=args.module_name, ) return 0 @@ -202,6 +208,7 @@ def compile_model( pass_context_configs: Optional[List[str]] = None, additional_target_options: Optional[Dict[str, Dict[str, Any]]] = None, use_vm: bool = False, + mod_name: Optional[str] = "default", ): """Compile a model from a supported framework into a TVM module. @@ -251,6 +258,8 @@ def compile_model( Additional target options in a dictionary to combine with initial Target arguments use_vm: bool Whether to use the VM to compile the model as opposed to the graph executor + mod_name: str, optional + The module name Returns ------- @@ -275,7 +284,7 @@ def compile_model( if codegen["config_key"] is not None: config[codegen["config_key"]] = codegen_from_cli["opts"] with tvm.transform.PassContext(config=config): - mod = partition_function(mod, params, **codegen_from_cli["opts"]) + mod = partition_function(mod, params, mod_name=mod_name, **codegen_from_cli["opts"]) if tuning_records and os.path.exists(tuning_records): logger.debug("tuning records file provided: %s", tuning_records) @@ -300,6 +309,7 @@ def compile_model( runtime=runtime, params=params, use_vm=use_vm, + mod_name=mod_name, ) else: with autotvm.apply_history_best(tuning_records): @@ -314,6 +324,7 @@ def compile_model( runtime=runtime, params=params, use_vm=use_vm, + mod_name=mod_name, ) else: with tvm.transform.PassContext( @@ 
-327,6 +338,7 @@ def compile_model( runtime=runtime, params=params, use_vm=use_vm, + mod_name=mod_name, ) # Generate output dump files with sources @@ -364,6 +376,7 @@ def build( runtime: Runtime, params: Dict[str, tvm.nd.NDArray], use_vm: bool, + mod_name: str, ): """ Builds the model with the provided executor. @@ -383,13 +396,17 @@ def build( A parameter dictionary for the model. use_vm: bool Whether to use the VM to compile the model as opposed to the graph executor + mod_name: str + The module name """ if use_vm: logger.debug("building with vm compile") return relay.vm.compile(mod, target=tvm_target, params=params) logger.debug("building with relay build") - return relay.build(mod, target=tvm_target, executor=executor, runtime=runtime, params=params) + return relay.build( + mod, target=tvm_target, executor=executor, runtime=runtime, params=params, mod_name=mod_name + ) def save_dumps(module_name: str, dumps: Dict[str, str], dump_root: str = "."): diff --git a/python/tvm/driver/tvmc/model.py b/python/tvm/driver/tvmc/model.py index 93ca27c60947..04946ec9c6d0 100644 --- a/python/tvm/driver/tvmc/model.py +++ b/python/tvm/driver/tvmc/model.py @@ -393,7 +393,7 @@ def import_package(self, package_path: str): has_graph_executor = "graph" in metadata["executors"] graph = temp.relpath("executor-config/graph/graph.json") if has_graph_executor else None - params = temp.relpath("parameters/default.params") + params = temp.relpath(f'parameters/{metadata["model_name"]}.params') self.type = "mlf" else: diff --git a/python/tvm/relay/op/contrib/cmsisnn.py b/python/tvm/relay/op/contrib/cmsisnn.py index e8e583537fc9..e39fa034c571 100644 --- a/python/tvm/relay/op/contrib/cmsisnn.py +++ b/python/tvm/relay/op/contrib/cmsisnn.py @@ -31,7 +31,7 @@ def enabled(): return "cmsis-nn" in Target.list_kinds() -def partition_for_cmsisnn(mod, params=None, **opts): +def partition_for_cmsisnn(mod, params=None, mod_name="default", **opts): """Partition the graph greedily offloading supported 
operators on Cortex-M using CMSIS-NN @@ -41,6 +41,8 @@ def partition_for_cmsisnn(mod, params=None, **opts): The module to run passes on. params : Optional[Dict[str, NDArray]] Constant input parameters. + mod_name: str, optional + The module name Returns ------- @@ -55,7 +57,7 @@ def partition_for_cmsisnn(mod, params=None, **opts): transform.InferType(), transform.MergeComposite(pattern_table()), transform.AnnotateTarget("cmsis-nn"), - transform.PartitionGraph(), + transform.PartitionGraph(mod_name=mod_name), GenerateCMSISNNConstants(), ScalarToTensorConstants(), ExtractConstantsFromPartitionedFunction(), diff --git a/python/tvm/relay/op/contrib/ethosu.py b/python/tvm/relay/op/contrib/ethosu.py index 0893be4bb84a..f9ae836debc9 100644 --- a/python/tvm/relay/op/contrib/ethosu.py +++ b/python/tvm/relay/op/contrib/ethosu.py @@ -1767,7 +1767,10 @@ def pattern_table() -> List[Tuple[str, tvm.relay.dataflow_pattern.DFPattern, Cal # pylint: disable=unused-argument @requires_vela def partition_for_ethosu( - mod: tvm.ir.IRModule, params: Optional[Dict[str, tvm.runtime.NDArray]] = None, **opts + mod: tvm.ir.IRModule, + params: Optional[Dict[str, tvm.runtime.NDArray]] = None, + mod_name: str = "default", + **opts, ): """This helper function partition the relay graph as produced by the relay frontend for a given model into external functions @@ -1779,6 +1782,8 @@ def partition_for_ethosu( The IRModule that gets generated from a relay frontend params : Optional[Dict[str, tvm.runtime.NDArray]] Constant input parameters. 
+ mod_name: str, optional + The module name Returns ------- @@ -1796,7 +1801,7 @@ def partition_for_ethosu( mod = relay.transform.AnnotateTarget("ethos-u")(mod) mod = relay.transform.MergeCompilerRegions()(mod) mod = relay.transform.InferType()(mod) - mod = relay.transform.PartitionGraph()(mod) + mod = relay.transform.PartitionGraph(mod_name=mod_name)(mod) mod = relay.transform.InferType()(mod) mod = preprocess.preprocess_ext_io()(mod) return mod diff --git a/tests/python/driver/tvmc/test_compiler.py b/tests/python/driver/tvmc/test_compiler.py index bc836de7d554..365dbdb6bf23 100644 --- a/tests/python/driver/tvmc/test_compiler.py +++ b/tests/python/driver/tvmc/test_compiler.py @@ -552,6 +552,154 @@ def test_compile_check_configs_composite_target(mock_pkg, mock_pc, mock_fe, mock ) + +def test_compile_tflite_module_with_mod_name(tmpdir_factory, tflite_cnn_s_quantized): + pytest.importorskip("tflite") + + output_dir = tmpdir_factory.mktemp("mlf") + tvmc_model = tvmc.load(tflite_cnn_s_quantized) + + output_file_name = f"{output_dir}/file.tar" + + tvmc.compiler.compile_model( + tvmc_model, + target="c -mcpu=cortex-m55", + runtime=Runtime("crt", {"system-lib": True}), + executor=Executor("aot"), + output_format="mlf", + package_path=output_file_name, + pass_context_configs=["tir.disable_vectorize=true"], + mod_name="classify", + ) + + # check that an MLF package was created + assert os.path.exists(output_file_name) + + with tarfile.open(output_file_name) as mlf_package: + # check that the C source files have been named classify_lib*.c + c_source_files = [ + name + for name in mlf_package.getnames() + if re.match(r"\./codegen/host/src/classify_lib\d+\.c", name) + ] + assert len(c_source_files) > 0 + + # check that "default" doesn't occur in any of the C source files + # check that function names are of the form "tvmgen_classify_*" + for file_name in c_source_files: + with mlf_package.extractfile(file_name) as f: + content = f.read() + assert b"default" not in content + assert
b"tvmgen_classify_" in content + + # check that tvmgen_classify_run() function exists + with mlf_package.extractfile("./codegen/host/src/classify_lib0.c") as f: + content = f.read() + assert b"tvmgen_classify_run(" in content + + +@tvm.testing.requires_cmsisnn +def test_compile_tflite_module_with_mod_name_and_cmsisnn(tmpdir_factory, tflite_cnn_s_quantized): + pytest.importorskip("tflite") + + output_dir = tmpdir_factory.mktemp("mlf") + tvmc_model = tvmc.load(tflite_cnn_s_quantized) + + output_file_name = f"{output_dir}/file.tar" + + tvmc.compiler.compile_model( + tvmc_model, + target="cmsis-nn, c -mcpu=cortex-m55", + runtime=Runtime("crt", {"system-lib": True}), + executor=Executor("aot"), + output_format="mlf", + package_path=output_file_name, + pass_context_configs=["tir.disable_vectorize=true"], + mod_name="classify", + ) + + # check that an MLF package was created + assert os.path.exists(output_file_name) + + with tarfile.open(output_file_name) as mlf_package: + # check that the C source files have been named classify_lib*.c + c_source_files = [ + name + for name in mlf_package.getnames() + if re.match(r"\./codegen/host/src/classify_lib\d+\.c", name) + ] + assert len(c_source_files) > 0 + + # check that "default" doesn't occur in any of the C source files + # check that function names are of the form "tvmgen_classify_*" + for file_name in c_source_files: + with mlf_package.extractfile(file_name) as f: + content = f.read() + assert b"default" not in content + assert b"tvmgen_classify_" in content + + # check that tvmgen_classify_run() function exists + with mlf_package.extractfile("./codegen/host/src/classify_lib0.c") as f: + content = f.read() + assert b"tvmgen_classify_run(" in content + + # check that CMSIS-NN function names are of the form "tvmgen_classify_cmsis_nn_main_*" + with mlf_package.extractfile("./codegen/host/src/classify_lib2.c") as f: + content = f.read() + assert b"tvmgen_classify_cmsis_nn_main_" in content + + +def
test_compile_tflite_module_with_mod_name_and_ethosu( + tmpdir_factory, tflite_mobilenet_v1_1_quant +): + pytest.importorskip("tflite") + pytest.importorskip("ethosu.vela") + + output_dir = tmpdir_factory.mktemp("mlf") + tvmc_model = tvmc.load(tflite_mobilenet_v1_1_quant) + output_file_name = f"{output_dir}/file.tar" + + tvmc.compiler.compile_model( + tvmc_model, + target="ethos-u -accelerator_config=ethos-u55-256, c -mcpu=cortex-m55", + runtime=Runtime("crt"), + executor=Executor("aot", {"unpacked-api": True}), + output_format="mlf", + package_path=output_file_name, + pass_context_configs=["tir.disable_vectorize=true"], + mod_name="classify", + ) + + # check that an MLF package was created + assert os.path.exists(output_file_name) + + with tarfile.open(output_file_name) as mlf_package: + # check that the C source files have been named classify_lib*.c + c_source_files = [ + name + for name in mlf_package.getnames() + if re.match(r"\./codegen/host/src/classify_lib\d+\.c", name) + ] + assert len(c_source_files) > 0 + + # check that "default" doesn't occur in any of the C source files + # check that function names are of the form "tvmgen_classify_*" + for file_name in c_source_files: + with mlf_package.extractfile(file_name) as f: + content = f.read() + assert b"default" not in content + assert b"tvmgen_classify_" in content + + # check that tvmgen_classify_run() function exists + with mlf_package.extractfile("./codegen/host/src/classify_lib0.c") as f: + content = f.read() + assert b"tvmgen_classify_run(" in content + + # check that microNPU function names are of the form "tvmgen_classify_ethos_u_main_*" + with mlf_package.extractfile("./codegen/host/src/classify_lib2.c") as f: + content = f.read() + assert b"tvmgen_classify_ethos_u_main_" in content + + if __name__ == "__main__": import sys