From 75b00e87e8f6a50d0bb63f3f5f2c10c28c080988 Mon Sep 17 00:00:00 2001 From: Ashok Sudarsanam Date: Thu, 6 May 2021 19:11:02 +0000 Subject: [PATCH] TVM changes to support introduction and typing of new custom operations. Merged in SIM-6711 (pull request #36) Approved-by: Mikael Sevenier Approved-by: Joey Chou --- python/tvm/custom_operation_config.py | 79 +++++ python/tvm/relay/op/nn/__init__.py | 1 + python/tvm/relay/op/nn/custom_operation.py | 180 ++++++++++ src/relay/op/nn/custom_operation.cc | 181 ++++++++++ tests/python/relay/test_custom_ops.py | 381 +++++++++++++++++++++ 5 files changed, 822 insertions(+) create mode 100644 python/tvm/custom_operation_config.py create mode 100644 python/tvm/relay/op/nn/custom_operation.py create mode 100644 src/relay/op/nn/custom_operation.cc create mode 100644 tests/python/relay/test_custom_ops.py diff --git a/python/tvm/custom_operation_config.py b/python/tvm/custom_operation_config.py new file mode 100644 index 000000000000..9689d96d8f25 --- /dev/null +++ b/python/tvm/custom_operation_config.py @@ -0,0 +1,79 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=invalid-name, too-many-lines +"""Custom operation configuration interface.""" +from typing import List, Dict, Callable +from dataclasses import dataclass +from tvm.ir import TensorType +import json + + +@dataclass() +class CustomOpConfigInfo(): + """ + Dataclass that contains configuration information for a custom operation. + This dataclass contains the following fields: + 1. code: a string that contains the corresponding C code implementation. + 2. func_name: the name of the function in the C code that implements the + custom operation. + 3. datatype: a string that specifies the underlying tensor datatype that + is assumed by the C code implementation. Currently supported values are + “int8”, “float”, and “double”. + 4. type_func: a Python function that returns the type of the custom opera- + tion, based on the types of the input tensor(s) and relevant attributes. + 5. compiler_flags: a string that contains custom operation-specific flags + for the target compiler. + """ + + code: str + func_name: str + datatype: str + type_func: Callable[..., TensorType] + compiler_flags: str + + +class CustomOperationConfig: + """ + Singleton class that contains configuration information for each custom + operation that exists in an ML model. This information is used during + the construction and typing of custom operations. 
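+
+    A typical flow, sketched here purely for illustration (the operation
+    name, typing function, and argument values are hypothetical), is:
+
+        config = CustomOperationConfig.get_instance()
+        config.add_config_for_custom_op(
+            "my_op",
+            CustomOpConfigInfo(c_code_str, "my_op_func", "int8",
+                               my_op_type_func, "-O2"))
+        info = config.get_config_for_custom_op("my_op")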
+ """ + + __instance = None + config_dict: Dict[str, CustomOpConfigInfo] = dict() + + @staticmethod + def get_instance(): + if CustomOperationConfig.__instance == None: + CustomOperationConfig() + return CustomOperationConfig.__instance + + def __init__(self): + if CustomOperationConfig.__instance != None: + raise Exception("CustomOperationConfig class is a singleton.") + else: + CustomOperationConfig.__instance = self + + def add_config_for_custom_op(self, custom_op_name: str, + custom_op_config_info: CustomOpConfigInfo): + self.config_dict[custom_op_name] = custom_op_config_info + + def get_config_for_custom_op(self, custom_op_name: str) -> CustomOpConfigInfo: + return self.config_dict[custom_op_name] + + def get_custom_ops(self) -> List[str]: + return list(self.config_dict.keys()) diff --git a/python/tvm/relay/op/nn/__init__.py b/python/tvm/relay/op/nn/__init__.py index ebabbbcd9d3a..4fe4e7fce41f 100644 --- a/python/tvm/relay/op/nn/__init__.py +++ b/python/tvm/relay/op/nn/__init__.py @@ -19,3 +19,4 @@ from __future__ import absolute_import as _abs from .nn import * from . import _nn +from . import custom_operation diff --git a/python/tvm/relay/op/nn/custom_operation.py b/python/tvm/relay/op/nn/custom_operation.py new file mode 100644 index 000000000000..023223d7e2f9 --- /dev/null +++ b/python/tvm/relay/op/nn/custom_operation.py @@ -0,0 +1,180 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=invalid-name, too-many-lines +"""Neural network operations for custom ops.""" +from tvm.relay import expr +from tvm.ir import Attrs + +from . import _make +from typing import List, Tuple +from tvm.custom_operation_config import ( + CustomOpConfigInfo, CustomOperationConfig +) +import tvm._ffi +import json + + +MAX_TENSOR_INPUTS = 5 + + +def custom_op(inputs, input_types, name, code, func_name, datatype, compiler_flags): + """ + Create a Relay IR node for the custom operation. Specifically, a + CallNode around an operator nn.custom_op_{i} is returned, where {i} + denotes the total number of input tensor operands in the custom + operation. The number of input tensor operands cannot exceed 5. + + The inputs to a custom operation may also include constant values + that represent attributes of the operation. Each attribute must + be a string, an integer, a floating point value, a list of integers, + or a list of floating-point values. + + In the custom operation specification in the ML network, the tensor + operands must appear first, followed by the constant attributes. + """ + + # Partition the inputs into tensor operands and constant attributes. 
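+    # As an illustrative (hypothetical) example, inputs = [t0, t1, "mode", 7]
+    # would be split into tensor_inputs = [t0, t1] and
+    # constant_attrs = ["mode", 7].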
+ tensor_inputs = [] + constant_attrs = [] + for input in inputs: + if isinstance(input, tvm.relay.expr.ExprWithOp): + if len(constant_attrs) == 0: + tensor_inputs.append(input) + else: + raise AssertionError("Tensor operands must precede constant attributes.") + elif is_valid_attribute(input): + constant_attrs.append(input) + else: + raise AssertionError(f"Input {input} is neither a tensor nor a constant attribute.") + + # Store all attributes of the custom operation in a dictionary. + # The following string attributes are common to all custom + # operations: + # 1. Custom operation name. + # 2. C code implementation. + # 3. C code function name. + # 4. C code datatype. + # 5. Operation-specific compiler flags. + # + # A custom operation may also have constant attributes that are + # specific to it. + custom_op_attrs = { + "name": name, + "code": code, + "func_name": func_name, + "datatype": datatype, + "compiler_flags": compiler_flags, + "constant_attrs": constant_attrs + } + + custom_op_attr_str = json.dumps(custom_op_attrs) + + if len(tensor_inputs) == 1: + return _make.custom_op_1(*tensor_inputs, custom_op_attr_str) + elif len(tensor_inputs) == 2: + return _make.custom_op_2(*tensor_inputs, custom_op_attr_str) + elif len(tensor_inputs) == 3: + return _make.custom_op_3(*tensor_inputs, custom_op_attr_str) + elif len(tensor_inputs) == 4: + return _make.custom_op_4(*tensor_inputs, custom_op_attr_str) + elif len(tensor_inputs) == 5: + return _make.custom_op_5(*tensor_inputs, custom_op_attr_str) + else: + msg = "Unsupported number of input tensor arguments (%d)." % (len(tensor_inputs)) + raise AssertionError(msg) + + +def is_valid_attribute(input): + """ + Returns True if the input operand is a string, an integer, a floating + point number, a list of integers, or a list of floating-point numbers. + """ + + input_type = type(input) + if input_type == str or input_type == int or input_type == float: + return True + + if input_type == list and type(input[0]) in [int, float]: + for elem in input: + if type(elem) != type(input[0]): + return False + return True + + return False + + +@tvm._ffi.register_func("relay.op.nn.custom_op_type_func") +def custom_op_type_func(types, num_inputs, attrs): + """ + Return the type of the specified custom operation, based on the + input types and constant attribute values. This function is + invoked by the registered add_type_rel() function in the C++ code. + """ + + custom_op_attrs = json.loads(attrs.custom_op_attrs) + custom_op_name = custom_op_attrs["name"] + constant_attrs = custom_op_attrs["constant_attrs"] + + # Get the typing function associated with the custom operation. 
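+    # The typing function is the type_func callable stored in the
+    # CustomOperationConfig singleton for this operation; it is invoked
+    # below with the input tensor types followed by the constant attributes.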
+ custom_op_config = CustomOperationConfig.get_instance() + config_info = custom_op_config.get_config_for_custom_op(custom_op_name) + type_func = config_info.type_func + + msg = f"Unsupported number of input tensor arguments {num_inputs} (max = {MAX_TENSOR_INPUTS})" + assert 0 < num_inputs <= MAX_TENSOR_INPUTS, msg + + input_args = tuple([types[i] for i in range(num_inputs)]) + return type_func(*input_args, *constant_attrs) + + +@tvm._ffi.register_object("relay.attrs.CustomOpAttrs") +class CustomOpAttrs(Attrs): + """Attributes for nn custom operations""" + + +def make_custom_op(name, code, func_name, datatype, compiler_flags): + def custom_op_func(inputs, input_types): + return custom_op(inputs, input_types, name, + code, func_name, datatype, + compiler_flags) + + return custom_op_func + + +def get_convert_map_from_custom_op_config(): + """ + Construct a mapping from custom operation name to Relay IR + creation function. This mapping will get inserted into + the front-end's operator conversion map. + """ + + convert_map = {} + custom_op_config = CustomOperationConfig.get_instance() + custom_op_names = custom_op_config.get_custom_ops() + + for custom_op_name in custom_op_names: + config_info = custom_op_config.get_config_for_custom_op(custom_op_name) + code = config_info.code + func_name = config_info.func_name + datatype = config_info.datatype + compiler_flags = config_info.compiler_flags + + convert_map[custom_op_name] = make_custom_op(custom_op_name, code, + func_name, datatype, + compiler_flags) + + return convert_map diff --git a/src/relay/op/nn/custom_operation.cc b/src/relay/op/nn/custom_operation.cc new file mode 100644 index 000000000000..6823e565ce51 --- /dev/null +++ b/src/relay/op/nn/custom_operation.cc @@ -0,0 +1,181 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/*! + * \file custom_op.cc + * \brief Property def of custom operation operators. + */ + +#include "nn.h" + +#include +#include +#include +#include "../op_common.h" + + +namespace tvm { +namespace relay { + +// Custom operation attributes. +struct CustomOpAttrs : public tvm::AttrsNode { + tvm::String custom_op_attrs; + + TVM_DECLARE_ATTRS(CustomOpAttrs, "relay.attrs.CustomOpAttrs") { + TVM_ATTR_FIELD(custom_op_attrs) + .set_default("") + .describe("JSON string containing all custom operation attributes."); + } +}; + +TVM_REGISTER_NODE_TYPE(CustomOpAttrs); + +// Typing function used by all custom operation operators. +bool CustomOpRel(const Array& types, int num_inputs, const Attrs& attrs, + const TypeReporter& reporter) { + const auto* custom_op_type_func = + runtime::Registry::Get("relay.op.nn.custom_op_type_func"); + + // Call the Python function 'custom_op_type_func' to get the type + // of the custom operation instance. 
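+  // Note: types[0] through types[num_inputs - 1] hold the input tensor
+  // types, while types[num_inputs] is the slot for the output type that is
+  // assigned below from the value returned by the Python typing function.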
+ ObjectRef obj = (*custom_op_type_func)(types, num_inputs, attrs); + Type custom_op_type = Downcast(obj); + + // Assign the returned type to the array element corresponding + // to the output tensor. + reporter->Assign(types[num_inputs], custom_op_type); + + return true; +} + +ObjectPtr +MakeCustomOpAttrs(String attr_str) { + auto attrs = make_object(); + attrs->custom_op_attrs = std::move(attr_str); + + return attrs; +} + + +// nn.custom_op_1: custom operation operator with one tensor input. +Expr MakeCustomOp_1(Expr in0, String attr_str) { + ObjectPtr attrs = MakeCustomOpAttrs(attr_str); + static const Op& op = Op::Get("nn.custom_op_1"); + return Call(op, {in0}, Attrs(attrs), {}); +} + +TVM_REGISTER_GLOBAL("relay.op.nn._make.custom_op_1").set_body_typed(MakeCustomOp_1); + +RELAY_REGISTER_OP("nn.custom_op_1") + .describe(R"code(Operator that represents a custom operation with 1 input. +)code" TVM_ADD_FILELINE) + .set_attrs_type() + .set_num_inputs(1) + .add_argument("in0", "Tensor", "The input tensor.") + .set_support_level(1) + .add_type_rel("CustomOp", CustomOpRel); + + +// nn.custom_op_2: custom operation operator with two tensor inputs. +Expr MakeCustomOp_2(Expr in0, Expr in1, String attr_str) { + ObjectPtr attrs = MakeCustomOpAttrs(attr_str); + static const Op& op = Op::Get("nn.custom_op_2"); + return Call(op, {in0, in1}, Attrs(attrs), {}); +} + +TVM_REGISTER_GLOBAL("relay.op.nn._make.custom_op_2").set_body_typed(MakeCustomOp_2); + +RELAY_REGISTER_OP("nn.custom_op_2") + .describe(R"code(Operator that represents a custom operation with 2 inputs. +)code" TVM_ADD_FILELINE) + .set_attrs_type() + .set_num_inputs(2) + .add_argument("in0", "Tensor", "The first input tensor.") + .add_argument("in1", "Tensor", "The second input tensor.") + .set_support_level(1) + .add_type_rel("CustomOp", CustomOpRel); + + +// nn.custom_op_3: custom operation operator with three tensor inputs. +Expr MakeCustomOp_3(Expr in0, Expr in1, Expr in2, String attr_str) { + ObjectPtr attrs = MakeCustomOpAttrs(attr_str); + static const Op& op = Op::Get("nn.custom_op_3"); + return Call(op, {in0, in1, in2}, Attrs(attrs), {}); +} + +TVM_REGISTER_GLOBAL("relay.op.nn._make.custom_op_3").set_body_typed(MakeCustomOp_3); + +RELAY_REGISTER_OP("nn.custom_op_3") + .describe(R"code(Operator that represents a custom operation with 3 inputs. +)code" TVM_ADD_FILELINE) + .set_attrs_type() + .set_num_inputs(3) + .add_argument("in0", "Tensor", "The first input tensor.") + .add_argument("in1", "Tensor", "The second input tensor.") + .add_argument("in2", "Tensor", "The third input tensor.") + .set_support_level(1) + .add_type_rel("CustomOp", CustomOpRel); + + +// nn.custom_op_4: custom operation operator with four tensor inputs. +Expr MakeCustomOp_4(Expr in0, Expr in1, Expr in2, Expr in3, String attr_str) { + ObjectPtr attrs = MakeCustomOpAttrs(attr_str); + static const Op& op = Op::Get("nn.custom_op_4"); + return Call(op, {in0, in1, in2, in3}, Attrs(attrs), {}); +} + +TVM_REGISTER_GLOBAL("relay.op.nn._make.custom_op_4").set_body_typed(MakeCustomOp_4); + +RELAY_REGISTER_OP("nn.custom_op_4") + .describe(R"code(Operator that represents a custom operation with 4 inputs. 
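+The C code, function name, datatype, compiler flags, and any constant
+attributes of the operation are carried as a JSON string in CustomOpAttrs.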
+)code" TVM_ADD_FILELINE) + .set_attrs_type() + .set_num_inputs(4) + .add_argument("in0", "Tensor", "The first input tensor.") + .add_argument("in1", "Tensor", "The second input tensor.") + .add_argument("in2", "Tensor", "The third input tensor.") + .add_argument("in3", "Tensor", "The fourth input tensor.") + .set_support_level(1) + .add_type_rel("CustomOp", CustomOpRel); + + +// nn.custom_op_5: custom operation operator with five tensor inputs. +Expr MakeCustomOp_5(Expr in0, Expr in1, Expr in2, Expr in3, Expr in4, String attr_str) { + ObjectPtr attrs = MakeCustomOpAttrs(attr_str); + static const Op& op = Op::Get("nn.custom_op_5"); + return Call(op, {in0, in1, in2, in3, in4}, Attrs(attrs), {}); +} + +TVM_REGISTER_GLOBAL("relay.op.nn._make.custom_op_5").set_body_typed(MakeCustomOp_5); + +RELAY_REGISTER_OP("nn.custom_op_5") + .describe(R"code(Operator that represents a custom operation with 5 inputs. +)code" TVM_ADD_FILELINE) + .set_attrs_type() + .set_num_inputs(5) + .add_argument("in0", "Tensor", "The first input tensor.") + .add_argument("in1", "Tensor", "The second input tensor.") + .add_argument("in2", "Tensor", "The third input tensor.") + .add_argument("in3", "Tensor", "The fourth input tensor.") + .add_argument("in4", "Tensor", "The fifth input tensor.") + .set_support_level(1) + .add_type_rel("CustomOp", CustomOpRel); + +} // namespace relay +} // namespace tvm diff --git a/tests/python/relay/test_custom_ops.py b/tests/python/relay/test_custom_ops.py new file mode 100644 index 000000000000..771bacc38f63 --- /dev/null +++ b/tests/python/relay/test_custom_ops.py @@ -0,0 +1,381 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=unused-wildcard-import +import numpy as np +import pytest +import json +import tvm +import unittest +from tvm import relay +from tvm.relay.op.nn.custom_operation import custom_op +from tvm.relay.testing import run_infer_type +from tvm.custom_operation_config import ( + CustomOpConfigInfo, CustomOperationConfig +) + + +def my_custom_op_1_func(type0, str_attr, int_attr, float_attr): + """ + Typing function for custom operation with 1 tensor operand, + a string attribute, an integer attribute, and a floating + point attribute. + """ + + return type0 + + +def my_custom_op_2_func(type0, type1, int_list_attr, float_list_attr ): + """ + Typing function for custom operation with 2 tensor operands, + an integer list attribute, and a floating point list attribute. + """ + + if int_list_attr[0] < 5: + return type0 + else: + return type1 + + +def my_custom_op_3_func(type0, type1, type2, str_attr, int_list_attr): + """ + Typing function for custom operation with 3 tensor operands, + a string attribute, and an integer list attribute. 
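+
+    The returned type is selected by str_attr: "tensor0" yields type0,
+    "tensor1" yields type1, and any other value yields type2.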
+ """ + + if str_attr == "tensor0": + return type0 + elif str_attr == "tensor1": + return type1 + else: + return type2 + + +def my_custom_op_4_func(type0, type1, type2, type3, float_list_attr, str_attr): + """ + Typing function for custom operation with 4 tensor operands, a + floating point list attribute, and a string attribute. + """ + + if (float_list_attr[0] > 10.101): + return type0 + elif (float_list_attr[1] > 20.201): + return type1 + elif (float_list_attr[2] > 30.301): + return type2 + else: + return type3 + + +def my_custom_op_5_func(type0, type1, type2, type3, type4, int_list_attr): + """ + Typing function for custom operation with 5 tensor operands and + an integer list attribute. + """ + + if int_list_attr[2] < 50: + return type3 + else: + return type4 + + +def init_custom_op_config(): + """ + Initialize the custom operation configuration with info for five + custom operations. + """ + + custom_op_config = CustomOperationConfig.get_instance() + + name = "my_custom_op_1" + config_info = CustomOpConfigInfo("my_custom_op_1_code", + "my_custom_op_1_func", + "int8", + my_custom_op_1_func, + "-Wall") + custom_op_config.add_config_for_custom_op(name, config_info) + + name = "my_custom_op_2" + config_info = CustomOpConfigInfo("my_custom_op_2_code", + "my_custom_op_2_func", + "int8", + my_custom_op_2_func, + "-Wall -O2") + custom_op_config.add_config_for_custom_op(name, config_info) + + name = "my_custom_op_3" + config_info = CustomOpConfigInfo("my_custom_op_3_code", + "my_custom_op_3_func", + "float", + my_custom_op_3_func, + "-Wall -O3") + custom_op_config.add_config_for_custom_op(name, config_info) + + name = "my_custom_op_4" + config_info = CustomOpConfigInfo("my_custom_op_4_code", + "my_custom_op_4_func", + "float", + my_custom_op_4_func, + "-O2") + custom_op_config.add_config_for_custom_op(name, config_info) + + name = "my_custom_op_5" + config_info = CustomOpConfigInfo("my_custom_op_5_code", + "my_custom_op_5_func", + "double", + my_custom_op_5_func, + "-O3") + custom_op_config.add_config_for_custom_op(name, config_info) + + +def generate_and_verify_custom_op(custom_op_name, operator_name, input_args, + constant_attrs, expected_output_type): + """ + Generate a custom operation with the given parameters and verify + its correctness. + """ + + custom_op_config = CustomOperationConfig.get_instance() + config_info = custom_op_config.get_config_for_custom_op(custom_op_name) + + op = custom_op([*input_args, *constant_attrs], + [], + custom_op_name, + config_info.code, + config_info.func_name, + config_info.datatype, + config_info.compiler_flags) + + assert isinstance(op, tvm.relay.expr.Call) + assert op.op.name == operator_name + assert op.op.num_inputs == len(input_args) + assert len(op.args) == len(input_args) + for i, input in enumerate(input_args): + assert op.args[i] == input + + custom_op_attrs = json.loads(op.attrs.custom_op_attrs) + assert custom_op_attrs["name"] == custom_op_name + assert custom_op_attrs["code"] == config_info.code + assert custom_op_attrs["func_name"] == config_info.func_name + assert custom_op_attrs["datatype"] == config_info.datatype + assert custom_op_attrs["compiler_flags"] == config_info.compiler_flags + assert custom_op_attrs["constant_attrs"] == constant_attrs + + # Run the type inferencer on the custom operation and verify that + # it produces the correct output type. 
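+    # run_infer_type runs Relay's type inference over the expression; the
+    # resulting checked_type is produced by CustomOpRel, which in turn
+    # delegates to the Python type_func registered for this operation.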
+ typed_op = run_infer_type(op) + assert typed_op.checked_type == expected_output_type + + +class CustomOpsTestCases(unittest.TestCase): + def test_error_conditions(self): + """Test that various error conditions are appropriately captured.""" + + x = relay.var("x") + input_types = [] + name = "my_custom_op" + code = "custom op code" + func_name = "custom_op_func" + datatype = "int8" + compiler_flags = "" + + # Custom operations cannot have zero tensor operands. + with self.assertRaises(Exception): + inputs = [1, 2, 3] + op = custom_op(inputs, input_types, name, code, func_name, + datatype, compiler_flags) + + # Custom operations cannot have greater than 5 tensor operands. + with self.assertRaises(Exception): + inputs = [x, x, x, x, x, x] + op = custom_op(inputs, input_types, name, code, func_name, + datatype, compiler_flags) + + # Tensors must appear before attributes in the input list. + with self.assertRaises(Exception): + inputs = [x, 1, 3.5, "test", x] + op = custom_op(inputs, input_types, name, code, func_name, + datatype, compiler_flags) + + # Attributes cannot be dictionaries. + with self.assertRaises(Exception): + inputs = [x, 1, 3.5, {"a":"test"}] + op = custom_op(inputs, input_types, name, code, func_name, + datatype, compiler_flags) + + # Attribute lists must be all integers or all floating point numbers. + with self.assertRaises(Exception): + inputs = [x, 1, 3.5, "foo", ["bar", "baz"]] + op = custom_op(inputs, input_types, name, code, func_name, + datatype, compiler_flags) + + with self.assertRaises(Exception): + inputs = [x, 1, 3.5, "foo", [1, 3.5]] + op = custom_op(inputs, input_types, name, code, func_name, + datatype, compiler_flags) + + with self.assertRaises(Exception): + inputs = [x, 1, 3.5, "foo", [10.11, 15]] + op = custom_op(inputs, input_types, name, code, func_name, + datatype, compiler_flags) + + with self.assertRaises(Exception): + inputs = [x, 1, 3.5, "foo", [1, "bar"]] + op = custom_op(inputs, input_types, name, code, func_name, + datatype, compiler_flags) + + with self.assertRaises(Exception): + inputs = [x, 1, 3.5, "foo", [1.11, "bar"]] + op = custom_op(inputs, input_types, name, code, func_name, + datatype, compiler_flags) + + + def test_custom_op_with_1_tensor(self): + """ + Test the creation and typing of a custom operation with + 1 tensor operand, a string attribute, an integer attribute, + and a floating point attribute. + """ + + init_custom_op_config() + + custom_op_name = "my_custom_op_1" + str_attr = "str_attr" + int_attr = 100 + float_attr = 3.1415 + constant_attrs = [str_attr, int_attr, float_attr] + + input0 = relay.var("input0", shape=[3, 18, 14]) + input_args = [input0] + expected_output_type = input0.type_annotation + + generate_and_verify_custom_op(custom_op_name, + "nn.custom_op_1", + input_args, + constant_attrs, + expected_output_type) + + + def test_custom_op_with_2_tensors(self): + """ + Test the creation and typing of a custom operation with + 2 tensor operands, an integer list attribute, and a + floating point list attribute. 
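+
+    Since int_list_attr[0] is 10 (not less than 5), my_custom_op_2_func
+    returns the type of the second tensor operand, so the expected output
+    type is input1's type annotation.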
+ """ + + init_custom_op_config() + + custom_op_name = "my_custom_op_2" + int_list_attr = [10, 20, 30] + float_list_attr = [55.5, 55.8, 65.4, 32.112] + constant_attrs = [int_list_attr, float_list_attr] + + input0 = relay.var("input0", shape=[1, 3, 16]) + input1 = relay.var("input1", shape=[256, 256]) + input_args = [input0, input1] + expected_output_type = input1.type_annotation + + generate_and_verify_custom_op(custom_op_name, + "nn.custom_op_2", + input_args, + constant_attrs, + expected_output_type) + + + def test_custom_op_with_3_tensors(self): + """ + Test the creation and typing of a custom operation with + 3 tensor operands, a string attribute, and an integer + list attribute. + """ + + init_custom_op_config() + + custom_op_name = "my_custom_op_3" + str_attr = "tensor1" + int_list_attr = [100, 101] + constant_attrs = [str_attr, int_list_attr] + + input0 = relay.var("input0", shape=[1, 3, 16]) + input1 = relay.var("input1", shape=[13, 128, 256]) + input2 = relay.var("input2", shape=[256, 64]) + input_args = [input0, input1, input2] + expected_output_type = input1.type_annotation + + generate_and_verify_custom_op(custom_op_name, + "nn.custom_op_3", + input_args, + constant_attrs, + expected_output_type) + + + def test_custom_op_with_4_tensors(self): + """ + Test the creation and typing of a custom operation with + 4 tensor operands, a floating point list attribute, and + a string attribute. + """ + + init_custom_op_config() + + custom_op_name = "my_custom_op_4" + float_list_attr = [1.11, 2.22, 3.33, 4.44] + str_attr = "str_attr" + constant_attrs = [float_list_attr, str_attr] + + input0 = relay.var("input0", shape=[1, 3, 16]) + input1 = relay.var("input1", shape=[13, 128, 256]) + input2 = relay.var("input2", shape=[256, 64]) + input3 = relay.var("input3", shape=[16, 3, 2]) + input_args = [input0, input1, input2, input3] + expected_output_type = input3.type_annotation + + generate_and_verify_custom_op(custom_op_name, + "nn.custom_op_4", + input_args, + constant_attrs, + expected_output_type) + + + def test_custom_op_with_5_tensors(self): + """ + Test the creation and typing of a custom operation with + 5 tensor operands and an integer list attribute. + """ + + init_custom_op_config() + + custom_op_name = "my_custom_op_5" + int_list_attr = [10, 20, 30, 40, 50, 60, 70, 80] + constant_attrs = [int_list_attr] + + input0 = relay.var("input0", shape=[1, 3, 16]) + input1 = relay.var("input1", shape=[13, 128, 256]) + input2 = relay.var("input2", shape=[256, 64]) + input3 = relay.var("input3", shape=[1, 10]) + input4 = relay.var("input4", shape=[14, 10]) + input_args = [input0, input1, input2, input3, input4] + expected_output_type = input3.type_annotation + + generate_and_verify_custom_op(custom_op_name, + "nn.custom_op_5", + input_args, + constant_attrs, + expected_output_type) + + +if __name__ == "__main__": + unittest.main(exit=False)