From 8a4856dfd1b4f018197ca2b2c44b6012fbafaeef Mon Sep 17 00:00:00 2001
From: Siju Samuel
Date: Fri, 5 Oct 2018 15:39:38 +0530
Subject: [PATCH 1/8] [RELAY]Ops Dense, leaky_relu

---
 docs/langref/relay_op.rst            |  4 ++
 include/tvm/relay/attrs/nn.h         | 24 +++++++
 python/tvm/relay/op/nn/nn.py         | 52 +++++++++++++++
 src/relay/op/tensor/nn.cc            | 96 ++++++++++++++++++++++++++++
 tests/python/relay/test_op_level2.py | 41 ++++++++++++
 tests/python/relay/test_op_level3.py | 12 ++++
 6 files changed, 229 insertions(+)
 create mode 100644 src/relay/op/tensor/nn.cc

diff --git a/docs/langref/relay_op.rst b/docs/langref/relay_op.rst
index 47cab696a8e1..2635ca66ad48 100644
--- a/docs/langref/relay_op.rst
+++ b/docs/langref/relay_op.rst
@@ -49,6 +49,7 @@ This level enables typical convnet models.
 
    tvm.relay.nn.conv2d
    tvm.relay.nn.conv2d_transpose
+   tvm.relay.nn.dense
    tvm.relay.nn.max_pool2d
    tvm.relay.nn.avg_pool2d
    tvm.relay.nn.global_max_pool2d
@@ -68,6 +69,7 @@ This level enables additional math and transform operators.
    :nosignatures:
 
    tvm.relay.zeros
+   tvm.relay.nn.leaky_relu
    tvm.relay.zeros_like
    tvm.relay.ones
    tvm.relay.ones_like
@@ -135,6 +137,7 @@ Level 2 Definitions
 -------------------
 .. autofunction:: tvm.relay.nn.conv2d
 .. autofunction:: tvm.relay.nn.conv2d_transpose
+.. autofunction:: tvm.relay.nn.dense
 .. autofunction:: tvm.relay.nn.max_pool2d
 .. autofunction:: tvm.relay.nn.avg_pool2d
 .. autofunction:: tvm.relay.nn.global_max_pool2d
@@ -147,6 +150,7 @@ Level 2 Definitions
 
 Level 3 Definitions
 -------------------
+.. autofunction:: tvm.relay.nn.leaky_relu
 .. autofunction:: tvm.relay.floor
 .. autofunction:: tvm.relay.ceil
 .. autofunction:: tvm.relay.trunc
diff --git a/include/tvm/relay/attrs/nn.h b/include/tvm/relay/attrs/nn.h
index de0da7477a35..a6f307641826 100644
--- a/include/tvm/relay/attrs/nn.h
+++ b/include/tvm/relay/attrs/nn.h
@@ -202,6 +202,18 @@ struct GlobalPool2DAttrs : public tvm::AttrsNode<GlobalPool2DAttrs> {
   }
 };
 
+
+/*! \brief Attributes for dense operator */
+struct DenseAttrs : public tvm::AttrsNode<DenseAttrs> {
+  IndexExpr units;
+
+  TVM_DECLARE_ATTRS(DenseAttrs, "relay.attrs.DenseAttrs") {
+    TVM_ATTR_FIELD(units)
+      .describe("Number of hidden units of the dense transformation.");
+  }
+};
+
+
 /*! \brief Attributes for upsampling operator */
 struct UpSamplingAttrs : public tvm::AttrsNode<UpSamplingAttrs> {
   int scale;
@@ -237,6 +249,18 @@ struct PadAttrs : public tvm::AttrsNode<PadAttrs> {
   }
 };
 
+
+/*! \brief Attributes for leaky relu operator */
+struct LeakyReluAttrs : public tvm::AttrsNode<LeakyReluAttrs> {
+  double alpha;
+
+  TVM_DECLARE_ATTRS(LeakyReluAttrs, "relay.attrs.LeakyReluAttrs") {
+    TVM_ATTR_FIELD(alpha).set_lower_bound(0.0).set_default(0.25)
+      .describe("Slope coefficient for the negative half axis.");
+  }
+};
+
+
 /*! \brief Attributes for LRN operator */
 struct LRNAttrs : public tvm::AttrsNode<LRNAttrs> {
   IndexExpr size;
diff --git a/python/tvm/relay/op/nn/nn.py b/python/tvm/relay/op/nn/nn.py
index e95e3e9b715d..d0ffa5bc635a 100644
--- a/python/tvm/relay/op/nn/nn.py
+++ b/python/tvm/relay/op/nn/nn.py
@@ -429,6 +429,34 @@ def batch_flatten(data):
     """
     return _make.batch_flatten(data)
 
+
+def dense(data, weight, units=None):
+    """Dense operator.
+    Applies a linear transformation
+
+    .. math::
+
+        Y = X * W^T
+
+    Parameters
+    ----------
+    data : relay.Expr
+        The input data to the operator.
+
+    weight : relay.Expr
+        The weight expression.
+
+    units : int, optional
+        Number of hidden units of the dense transformation.
+
+    Returns
+    -------
+    result : relay.Expr
+        The computed result.
+ """ + return _make.dense(data, weight, units) + + def relu(data): """Rectified linear unit. @@ -448,6 +476,30 @@ def relu(data): return _make.relu(data) +def leaky_relu(data, alpha): + """This operator takes data as input and does Leaky version + of a Rectified Linear Unit. + + .. math:: + + `y = x > 0 ? x : alpha * x` + + Parameters + ---------- + data : relay.Expr + The input data to the operator. + + alpha : float + Slope coefficient for the negative half axis. + + Returns + ------- + result : relay.Expr + The computed result. + """ + return _make.leaky_relu(data, alpha) + + def pad(data, pad_width, pad_value=0.0): diff --git a/src/relay/op/tensor/nn.cc b/src/relay/op/tensor/nn.cc new file mode 100644 index 000000000000..befccb2205be --- /dev/null +++ b/src/relay/op/tensor/nn.cc @@ -0,0 +1,96 @@ +/*! + * Copyright (c) 2018 by Contributors + * \file nn.cc + * \brief Property def of nn operators. + */ +#include +#include +#include +#include "../type_relations.h" + +namespace tvm { +namespace relay { + +TVM_REGISTER_NODE_TYPE(DenseAttrs); + +bool DenseRel(const Array& types, + int num_inputs, + const Attrs& attrs, + const TypeReporter& reporter) { + CHECK_EQ(types.size(), 3); + const auto* data = types[0].as(); + if (data == nullptr) return false; + const auto* weight = types[1].as(); + if (weight == nullptr) return false; + + const DenseAttrs* param = attrs.as(); + CHECK(param != nullptr); + + CHECK(static_cast(data->shape.size()) != 0); + Array oshape = data->shape; + oshape.Set((oshape.size() - 1), make_const(Int(64), param->units)); + + // assign output type + reporter->Assign(types[2], + TensorTypeNode::make(oshape, + data->dtype)); + return true; +} + +// Positional relay function to create dense operator used by frontend FFI. +Expr MakeDense(Expr data, + Expr weight, + int units) { + auto attrs = make_node(); + attrs->units = units; + static const Op& op = Op::Get("nn.dense"); + return CallNode::make(op, {data, weight}, Attrs(attrs), {}); +} + +TVM_REGISTER_API("relay.op.nn._make.dense") +.set_body([](const TVMArgs& args, TVMRetValue* rv) { + runtime::detail::unpack_call(MakeDense, args, rv); + }); + +RELAY_REGISTER_OP("nn.dense") +.describe(R"code(Applies a linear transformation: :math:`Y = XW^T`. + +- **data**: `(x1, x2, ..., xn, input_dim)` +- **weight**: `(units, input_dim)` +- **out**: `(x1, x2, ..., xn, units)`. + +)code" TVM_ADD_FILELINE) +.set_num_inputs(2) +.add_argument("data", "nD Tensor", "Input data.") +.add_argument("weight", "2D Tensor", "Weight matrix.") +.set_support_level(2) +.add_type_rel("Dense", DenseRel); + + +// Positional relay function to create leaky relu operator used by frontend FFI. +Expr MakeLeakyRelu(Expr data, + double alpha) { + auto attrs = make_node(); + attrs->alpha = alpha; + static const Op& op = Op::Get("nn.leaky_relu"); + return CallNode::make(op, {data}, Attrs(attrs), {}); +} + +TVM_REGISTER_API("relay.op.nn._make.leaky_relu") +.set_body([](const TVMArgs& args, TVMRetValue* rv) { + runtime::detail::unpack_call(MakeLeakyRelu, args, rv); + }); + +RELAY_REGISTER_OP("nn.leaky_relu") +.describe(R"code(Leaky version of a Rectified Linear Unit. + +`y = x > 0 ? 
+
+)code" TVM_ADD_FILELINE)
+.set_num_inputs(1)
+.add_argument("data", "Tensor", "Input data.")
+.set_support_level(3)
+.add_type_rel("Identity", IdentityRel);
+
+}  // namespace relay
+}  // namespace tvm
diff --git a/tests/python/relay/test_op_level2.py b/tests/python/relay/test_op_level2.py
index d0d02aece06d..026ce019e6ab 100644
--- a/tests/python/relay/test_op_level2.py
+++ b/tests/python/relay/test_op_level2.py
@@ -219,6 +219,46 @@ def test_pad_infer_type():
     ftype = func.checked_type
     assert ftype.ret_type == relay.TensorType((n + 2, 6, 9, w + 8), "float32")
 
+def test_dense_infer_type():
+    ib = relay.ir_builder.IRBuilder()
+    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
+    x = ib.param("x", relay.ty.TensorType((n, c, h, w), "float32"))
+
+    w = ib.param("w", relay.ty.TensorType((w, 2), "float32"))
+
+    with ib.function(x, w) as func:
+        ib.ret(relay.nn.dense(x, w, units=2))
+    ib.ret(func)
+    func = relay.ir_pass.infer_type(ib.env, func.to_func())
+    ftype = func.checked_type
+    assert ftype.ret_type == relay.ty.TensorType((n, c, h, 2), "float32")
+
+    ib = relay.ir_builder.IRBuilder()
+    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), 2
+    x = ib.param("x", relay.ty.TensorType((n, c, h, w), "float32"))
+
+    wh, ww = tvm.var("wh"), tvm.var("ww")
+    w = ib.param("w", relay.ty.TensorType((wh, ww), "float32"))
+
+    with ib.function(x, w) as func:
+        ib.ret(relay.nn.dense(x, w))
+    ib.ret(func)
+    func = relay.ir_pass.infer_type(ib.env, func.to_func())
+    ftype = func.checked_type
+    assert ftype.ret_type == relay.ty.TensorType((n, c, h, ww), "float32")
+
+    ib = relay.ir_builder.IRBuilder()
+    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), 2
+    x = ib.param("x", relay.ty.TensorType((n, c, h, w), "float32"))
+
+    w = ib.param("w", relay.ty.IncompleteType())
+
+    with ib.function(x, w) as func:
+        ib.ret(relay.nn.dense(x, w, units=2))
+    ib.ret(func)
+    func = relay.ir_pass.infer_type(ib.env, func.to_func())
+    ftype = func.checked_type
+    assert ftype.ret_type == relay.ty.TensorType((n, c, h, 2), "float32")
 if __name__ == "__main__":
     test_conv2d_infer_type()
     test_pool2d_infer_type()
@@ -227,3 +267,4 @@ def test_pad_infer_type():
     test_flatten_infer_type()
     test_pad_infer_type()
     test_conv2d_transpose_infer_type()
+    test_dense_infer_type()
diff --git a/tests/python/relay/test_op_level3.py b/tests/python/relay/test_op_level3.py
index 7d949b21026b..af081470014c 100644
--- a/tests/python/relay/test_op_level3.py
+++ b/tests/python/relay/test_op_level3.py
@@ -169,6 +169,17 @@ def test_full_like():
     ftype = func.checked_type
     assert ftype.ret_type == relay.TensorType((n, c, h, w), "float32")
 
+def test_infer_type_leaky_relu():
+    ib = relay.ir_builder.IRBuilder()
+    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
+    x = ib.param("x", relay.ty.TensorType((n, c, h, w), "float32"))
+
+    with ib.function(x) as func:
+        ib.ret(relay.nn.leaky_relu(x, alpha=0.1))
+    ib.ret(func)
+    func = relay.ir_pass.infer_type(ib.env, func.to_func())
+    ftype = func.checked_type
+    assert ftype.ret_type == relay.ty.TensorType((n, c, h, w), "float32")
 
 if __name__ == "__main__":
     test_single_op()
@@ -181,3 +192,4 @@ def test_full_like():
     test_take_infer_type()
     test_full()
     test_full_like()
+    test_infer_type_leaky_relu()

From 6f44ab2b91bbde2f67185043bc6c73e51040441a Mon Sep 17 00:00:00 2001
From: Siju Samuel
Date: Sat, 6 Oct 2018 14:52:46 +0530
Subject: [PATCH 2/8] Rebasing and fixed review comments

---
 src/relay/op/tensor/nn.cc            | 16 ++++++++++++----
 tests/python/relay/test_op_level3.py |  4 ----
 2 files changed, 12 insertions(+), 8 deletions(-)
diff --git a/src/relay/op/tensor/nn.cc b/src/relay/op/tensor/nn.cc
index befccb2205be..dd3336eef23e 100644
--- a/src/relay/op/tensor/nn.cc
+++ b/src/relay/op/tensor/nn.cc
@@ -13,6 +13,7 @@ namespace relay {
 
 TVM_REGISTER_NODE_TYPE(DenseAttrs);
 
+
 bool DenseRel(const Array<Type>& types,
               int num_inputs,
               const Attrs& attrs,
@@ -27,16 +28,21 @@ bool DenseRel(const Array<Type>& types,
   CHECK(param != nullptr);
 
   CHECK(static_cast<int>(data->shape.size()) != 0);
+  Array<IndexExpr> wshape = weight->shape;
+
+  if (param->units != 0) {
+    // CHECK_EQ(param->units == wshape[wshape.size() - 1])
+  }
+
   Array<IndexExpr> oshape = data->shape;
-  oshape.Set((oshape.size() - 1), make_const(Int(64), param->units));
+  oshape.Set((oshape.size() - 1), wshape[wshape.size() - 1]);
 
   // assign output type
-  reporter->Assign(types[2],
-                   TensorTypeNode::make(oshape,
-                                        data->dtype));
+  reporter->Assign(types[2], TensorTypeNode::make(oshape, data->dtype));
   return true;
 }
 
+
 // Positional relay function to create dense operator used by frontend FFI.
 Expr MakeDense(Expr data,
                Expr weight,
@@ -47,6 +53,7 @@ Expr MakeDense(Expr data,
   return CallNode::make(op, {data, weight}, Attrs(attrs), {});
 }
 
+
 TVM_REGISTER_API("relay.op.nn._make.dense")
 .set_body([](const TVMArgs& args, TVMRetValue* rv) {
     runtime::detail::unpack_call<Expr, 3>(MakeDense, args, rv);
diff --git a/tests/python/relay/test_op_level3.py b/tests/python/relay/test_op_level3.py
index af081470014c..75c150110b1b 100644
--- a/tests/python/relay/test_op_level3.py
+++ b/tests/python/relay/test_op_level3.py
@@ -1,11 +1,7 @@
 """ Support level3 operator test cases.
 """
 import tvm
-import numpy as np
 from tvm import relay
-from tvm.relay.ir_pass import infer_type
-from tvm.relay.ir_builder import IRBuilder, func_type
-from tvm.relay.env import Environment
 
 def test_zeros_ones():
     for op in [relay.zeros, relay.ones]:

From 949552c7a18b6de950e99c4fa3c9ee5a7c6abe60 Mon Sep 17 00:00:00 2001
From: Siju Samuel
Date: Sat, 6 Oct 2018 23:40:38 +0530
Subject: [PATCH 3/8] Check added to validate units and weight shape

---
 src/relay/op/tensor/nn.cc            | 4 +++-
 tests/python/relay/test_op_level3.py | 4 ++++
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/src/relay/op/tensor/nn.cc b/src/relay/op/tensor/nn.cc
index dd3336eef23e..70892ba8bbde 100644
--- a/src/relay/op/tensor/nn.cc
+++ b/src/relay/op/tensor/nn.cc
@@ -31,7 +31,7 @@ bool DenseRel(const Array<Type>& types,
   Array<IndexExpr> wshape = weight->shape;
 
   if (param->units != 0) {
-    // CHECK_EQ(param->units == wshape[wshape.size() - 1])
+    CHECK(reporter->AssertEQ(param->units, wshape[wshape.size()-1]));
   }
 
   Array<IndexExpr> oshape = data->shape;
@@ -59,6 +59,7 @@ TVM_REGISTER_API("relay.op.nn._make.dense")
     runtime::detail::unpack_call<Expr, 3>(MakeDense, args, rv);
   });
 
+
 RELAY_REGISTER_OP("nn.dense")
 .describe(R"code(Applies a linear transformation: :math:`Y = XW^T`.
 
@@ -89,6 +90,7 @@ TVM_REGISTER_API("relay.op.nn._make.leaky_relu")
     runtime::detail::unpack_call<Expr, 2>(MakeLeakyRelu, args, rv);
   });
 
+
 RELAY_REGISTER_OP("nn.leaky_relu")
 .describe(R"code(Leaky version of a Rectified Linear Unit.
diff --git a/tests/python/relay/test_op_level3.py b/tests/python/relay/test_op_level3.py
index 75c150110b1b..af081470014c 100644
--- a/tests/python/relay/test_op_level3.py
+++ b/tests/python/relay/test_op_level3.py
@@ -1,7 +1,11 @@
 """ Support level3 operator test cases.
 """
 import tvm
+import numpy as np
 from tvm import relay
+from tvm.relay.ir_pass import infer_type
+from tvm.relay.ir_builder import IRBuilder, func_type
+from tvm.relay.env import Environment
 
 def test_zeros_ones():
     for op in [relay.zeros, relay.ones]:

From af283496c63378ab205adfc4e6f071f1b7612645 Mon Sep 17 00:00:00 2001
From: Siju Samuel
Date: Sun, 7 Oct 2018 12:57:16 +0530
Subject: [PATCH 4/8] moved to nn.cc

---
 src/relay/op/nn/nn.cc     |  92 +++++++++++++++++++++++++++
 src/relay/op/tensor/nn.cc | 106 --------------------------------------
 2 files changed, 92 insertions(+), 106 deletions(-)
 delete mode 100644 src/relay/op/tensor/nn.cc

diff --git a/src/relay/op/nn/nn.cc b/src/relay/op/nn/nn.cc
index f2439b9fb7ca..725c6c5dd162 100644
--- a/src/relay/op/nn/nn.cc
+++ b/src/relay/op/nn/nn.cc
@@ -15,6 +15,98 @@
 namespace tvm {
 namespace relay {
 
+TVM_REGISTER_NODE_TYPE(DenseAttrs);
+
+
+bool DenseRel(const Array<Type>& types,
+              int num_inputs,
+              const Attrs& attrs,
+              const TypeReporter& reporter) {
+  CHECK_EQ(types.size(), 3);
+  const auto* data = types[0].as<TensorTypeNode>();
+  if (data == nullptr) return false;
+  const auto* weight = types[1].as<TensorTypeNode>();
+  if (weight == nullptr) return false;
+
+  const DenseAttrs* param = attrs.as<DenseAttrs>();
+  CHECK(param != nullptr);
+
+  CHECK(static_cast<int>(data->shape.size()) != 0);
+  Array<IndexExpr> wshape = weight->shape;
+
+  if (param->units != 0) {
+    CHECK(reporter->AssertEQ(param->units, wshape[wshape.size()-1]));
+  }
+
+  Array<IndexExpr> oshape = data->shape;
+  oshape.Set((oshape.size() - 1), wshape[wshape.size() - 1]);
+
+  // assign output type
+  reporter->Assign(types[2], TensorTypeNode::make(oshape, data->dtype));
+  return true;
+}
+
+
+// Positional relay function to create dense operator used by frontend FFI.
+Expr MakeDense(Expr data,
+               Expr weight,
+               int units) {
+  auto attrs = make_node<DenseAttrs>();
+  attrs->units = units;
+  static const Op& op = Op::Get("nn.dense");
+  return CallNode::make(op, {data, weight}, Attrs(attrs), {});
+}
+
+
+TVM_REGISTER_API("relay.op.nn._make.dense")
+.set_body([](const TVMArgs& args, TVMRetValue* rv) {
+    runtime::detail::unpack_call<Expr, 3>(MakeDense, args, rv);
+  });
+
+
+RELAY_REGISTER_OP("nn.dense")
+.describe(R"code(Applies a linear transformation: :math:`Y = XW^T`.
+
+- **data**: `(x1, x2, ..., xn, input_dim)`
+- **weight**: `(units, input_dim)`
+- **out**: `(x1, x2, ..., xn, units)`.
+
+)code" TVM_ADD_FILELINE)
+.set_num_inputs(2)
+.add_argument("data", "nD Tensor", "Input data.")
+.add_argument("weight", "2D Tensor", "Weight matrix.")
+.set_support_level(2)
+.add_type_rel("Dense", DenseRel);
+
+
+// Positional relay function to create leaky relu operator used by frontend FFI.
+Expr MakeLeakyRelu(Expr data,
+                   double alpha) {
+  auto attrs = make_node<LeakyReluAttrs>();
+  attrs->alpha = alpha;
+  static const Op& op = Op::Get("nn.leaky_relu");
+  return CallNode::make(op, {data}, Attrs(attrs), {});
+}
+
+
+TVM_REGISTER_API("relay.op.nn._make.leaky_relu")
+.set_body([](const TVMArgs& args, TVMRetValue* rv) {
+    runtime::detail::unpack_call<Expr, 2>(MakeLeakyRelu, args, rv);
+  });
+
+
+RELAY_REGISTER_OP("nn.leaky_relu")
+.describe(R"code(Leaky version of a Rectified Linear Unit.
+
+`y = x > 0 ? x : alpha * x`
+
+)code" TVM_ADD_FILELINE)
+.set_num_inputs(1)
+.add_argument("data", "Tensor", "Input data.")
+.set_support_level(3)
+.add_type_rel("Identity", IdentityRel);
+
+
 TVM_REGISTER_API("relay.op.nn._make.softmax")
 .set_body([](const TVMArgs& args, TVMRetValue* rv) {
     auto make_func = [](Expr data, int axis) {
diff --git a/src/relay/op/tensor/nn.cc b/src/relay/op/tensor/nn.cc
deleted file mode 100644
index 70892ba8bbde..000000000000
--- a/src/relay/op/tensor/nn.cc
+++ /dev/null
@@ -1,106 +0,0 @@
-/*!
- * Copyright (c) 2018 by Contributors
- * \file nn.cc
- * \brief Property def of nn operators.
- */
-#include <tvm/relay/op.h>
-#include <tvm/relay/attrs/nn.h>
-#include <vector>
-#include "../type_relations.h"
-
-namespace tvm {
-namespace relay {
-
-TVM_REGISTER_NODE_TYPE(DenseAttrs);
-
-
-bool DenseRel(const Array<Type>& types,
-              int num_inputs,
-              const Attrs& attrs,
-              const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 3);
-  const auto* data = types[0].as<TensorTypeNode>();
-  if (data == nullptr) return false;
-  const auto* weight = types[1].as<TensorTypeNode>();
-  if (weight == nullptr) return false;
-
-  const DenseAttrs* param = attrs.as<DenseAttrs>();
-  CHECK(param != nullptr);
-
-  CHECK(static_cast<int>(data->shape.size()) != 0);
-  Array<IndexExpr> wshape = weight->shape;
-
-  if (param->units != 0) {
-    CHECK(reporter->AssertEQ(param->units, wshape[wshape.size()-1]));
-  }
-
-  Array<IndexExpr> oshape = data->shape;
-  oshape.Set((oshape.size() - 1), wshape[wshape.size() - 1]);
-
-  // assign output type
-  reporter->Assign(types[2], TensorTypeNode::make(oshape, data->dtype));
-  return true;
-}
-
-
-// Positional relay function to create dense operator used by frontend FFI.
-Expr MakeDense(Expr data,
-               Expr weight,
-               int units) {
-  auto attrs = make_node<DenseAttrs>();
-  attrs->units = units;
-  static const Op& op = Op::Get("nn.dense");
-  return CallNode::make(op, {data, weight}, Attrs(attrs), {});
-}
-
-
-TVM_REGISTER_API("relay.op.nn._make.dense")
-.set_body([](const TVMArgs& args, TVMRetValue* rv) {
-    runtime::detail::unpack_call<Expr, 3>(MakeDense, args, rv);
-  });
-
-
-RELAY_REGISTER_OP("nn.dense")
-.describe(R"code(Applies a linear transformation: :math:`Y = XW^T`.
-
-- **data**: `(x1, x2, ..., xn, input_dim)`
-- **weight**: `(units, input_dim)`
-- **out**: `(x1, x2, ..., xn, units)`.
-
-)code" TVM_ADD_FILELINE)
-.set_num_inputs(2)
-.add_argument("data", "nD Tensor", "Input data.")
-.add_argument("weight", "2D Tensor", "Weight matrix.")
-.set_support_level(2)
-.add_type_rel("Dense", DenseRel);
-
-
-// Positional relay function to create leaky relu operator used by frontend FFI.
-Expr MakeLeakyRelu(Expr data,
-                   double alpha) {
-  auto attrs = make_node<LeakyReluAttrs>();
-  attrs->alpha = alpha;
-  static const Op& op = Op::Get("nn.leaky_relu");
-  return CallNode::make(op, {data}, Attrs(attrs), {});
-}
-
-
-TVM_REGISTER_API("relay.op.nn._make.leaky_relu")
-.set_body([](const TVMArgs& args, TVMRetValue* rv) {
-    runtime::detail::unpack_call<Expr, 2>(MakeLeakyRelu, args, rv);
-  });
-
-
-RELAY_REGISTER_OP("nn.leaky_relu")
-.describe(R"code(Leaky version of a Rectified Linear Unit.
-
-`y = x > 0 ? x : alpha * x`
-
-)code" TVM_ADD_FILELINE)
-.set_num_inputs(1)
-.add_argument("data", "Tensor", "Input data.")
-.set_support_level(3)
-.add_type_rel("Identity", IdentityRel);
-
-}  // namespace relay
-}  // namespace tvm

From 658fa66e0d375e34e9a04dcce68c0ba6b77a8097 Mon Sep 17 00:00:00 2001
From: Siju Samuel
Date: Mon, 8 Oct 2018 10:08:05 +0530
Subject: [PATCH 5/8] Review comments updated

---
 src/relay/op/nn/nn.cc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/relay/op/nn/nn.cc b/src/relay/op/nn/nn.cc
index 725c6c5dd162..44a806565b8f 100644
--- a/src/relay/op/nn/nn.cc
+++ b/src/relay/op/nn/nn.cc
@@ -34,7 +34,7 @@ bool DenseRel(const Array<Type>& types,
   CHECK(static_cast<int>(data->shape.size()) != 0);
   Array<IndexExpr> wshape = weight->shape;
 
-  if (param->units != 0) {
+  if (param->units.defined()) {
     CHECK(reporter->AssertEQ(param->units, wshape[wshape.size()-1]));
   }
 
@@ -50,7 +50,7 @@ bool DenseRel(const Array<Type>& types,
 // Positional relay function to create dense operator used by frontend FFI.
 Expr MakeDense(Expr data,
                Expr weight,
-               int units) {
+               IndexExpr units) {
   auto attrs = make_node<DenseAttrs>();
   attrs->units = units;
   static const Op& op = Op::Get("nn.dense");

From bc1e6b5826cdc6ea195a485dc8ddea4a2688dfb4 Mon Sep 17 00:00:00 2001
From: Siju Samuel
Date: Tue, 9 Oct 2018 12:32:15 +0530
Subject: [PATCH 6/8] Review comments fixed and rebased

---
 src/relay/op/nn/nn.cc                | 14 +++++++-------
 tests/python/relay/test_op_level2.py |  1 +
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/src/relay/op/nn/nn.cc b/src/relay/op/nn/nn.cc
index 44a806565b8f..9f4afa280be5 100644
--- a/src/relay/op/nn/nn.cc
+++ b/src/relay/op/nn/nn.cc
@@ -25,22 +25,22 @@ bool DenseRel(const Array<Type>& types,
   CHECK_EQ(types.size(), 3);
   const auto* data = types[0].as<TensorTypeNode>();
   if (data == nullptr) return false;
-  const auto* weight = types[1].as<TensorTypeNode>();
-  if (weight == nullptr) return false;
 
   const DenseAttrs* param = attrs.as<DenseAttrs>();
   CHECK(param != nullptr);
 
   CHECK(static_cast<int>(data->shape.size()) != 0);
-  Array<IndexExpr> wshape = weight->shape;
+  Array<IndexExpr> oshape = data->shape;
 
   if (param->units.defined()) {
-    CHECK(reporter->AssertEQ(param->units, wshape[wshape.size()-1]));
+    oshape.Set((oshape.size() - 1), param->units);
+  } else {
+    const auto* weight = types[1].as<TensorTypeNode>();
+    if (weight == nullptr) return false;
+    Array<IndexExpr> wshape = weight->shape;
+    oshape.Set((oshape.size() - 1), wshape[wshape.size() - 1]);
   }
 
-  Array<IndexExpr> oshape = data->shape;
-  oshape.Set((oshape.size() - 1), wshape[wshape.size() - 1]);
-
   // assign output type
   reporter->Assign(types[2], TensorTypeNode::make(oshape, data->dtype));
   return true;
diff --git a/tests/python/relay/test_op_level2.py b/tests/python/relay/test_op_level2.py
index 026ce019e6ab..4f37d4893b66 100644
--- a/tests/python/relay/test_op_level2.py
+++ b/tests/python/relay/test_op_level2.py
@@ -260,6 +260,7 @@ def test_dense_infer_type():
     ftype = func.checked_type
     assert ftype.ret_type == relay.ty.TensorType((n, c, h, 2), "float32")
 
+
 if __name__ == "__main__":
     test_conv2d_infer_type()
     test_pool2d_infer_type()

From b92d43892a642af919937ea75369fe80aa98c326 Mon Sep 17 00:00:00 2001
From: Siju Samuel
Date: Wed, 10 Oct 2018 09:38:46 +0530
Subject: [PATCH 7/8] weight inference

---
 include/tvm/relay/attrs/nn.h | 20 ++++++++++++++++++++
 src/relay/op/nn/nn.cc        | 16 +++++++++++++++-
 2 files changed, 35 insertions(+), 1 deletion(-)

diff --git a/include/tvm/relay/attrs/nn.h b/include/tvm/relay/attrs/nn.h
index a6f307641826..0906aca7fe95 100644
--- a/include/tvm/relay/attrs/nn.h
+++ b/include/tvm/relay/attrs/nn.h
@@ -248,6 +248,26 @@ struct PadAttrs : public tvm::AttrsNode<PadAttrs> {
                 "in the format of ((before_1, after_1), ..., (before_N, after_N))");
   }
 };
 
+/*! \brief Attributes for dense operator */
+struct DenseAttrs : public tvm::AttrsNode<DenseAttrs> {
+  IndexExpr units;
+
+  TVM_DECLARE_ATTRS(DenseAttrs, "relay.attrs.DenseAttrs") {
+    TVM_ATTR_FIELD(units)
+      .describe("Number of hidden units of the dense transformation.");
+  }
+};
+
+/*! \brief Attributes for leaky relu operator */
+struct LeakyReluAttrs : public tvm::AttrsNode<LeakyReluAttrs> {
+  double alpha;
+
+  TVM_DECLARE_ATTRS(LeakyReluAttrs, "relay.attrs.LeakyReluAttrs") {
+    TVM_ATTR_FIELD(alpha).set_lower_bound(0.0).set_default(0.25)
+      .describe("Slope coefficient for the negative half axis.");
+  }
+};
+
 /*! \brief Attributes for leaky relu operator */
diff --git a/src/relay/op/nn/nn.cc b/src/relay/op/nn/nn.cc
index 9f4afa280be5..6536056a672b 100644
--- a/src/relay/op/nn/nn.cc
+++ b/src/relay/op/nn/nn.cc
@@ -24,6 +24,7 @@ bool DenseRel(const Array<Type>& types,
   const TypeReporter& reporter) {
   CHECK_EQ(types.size(), 3);
   const auto* data = types[0].as<TensorTypeNode>();
+  const auto* weight = types[1].as<TensorTypeNode>();
   if (data == nullptr) return false;
 
   const DenseAttrs* param = attrs.as<DenseAttrs>();
@@ -33,9 +34,22 @@ bool DenseRel(const Array<Type>& types,
   Array<IndexExpr> oshape = data->shape;
 
   if (param->units.defined()) {
+    Array<IndexExpr> dshape = data->shape;
+
+    // validate the weight shape is proper if defined
+    if (weight != nullptr) {
+      CHECK(reporter->AssertEQ(weight->shape[0], dshape[dshape.size() - 1]))
+          << "Dense: shape of weight is inconsistent with input data.";
+      CHECK(reporter->AssertEQ(weight->shape[1], param->units))
+          << "Dense: shape of weight is inconsistent with units.";
+    } else {
+      // Assign weight type
+      std::vector<IndexExpr> wshape({dshape[dshape.size() - 1], param->units});
+      reporter->Assign(types[1], TensorTypeNode::make(wshape, data->dtype));
+    }
+
     oshape.Set((oshape.size() - 1), param->units);
   } else {
-    const auto* weight = types[1].as<TensorTypeNode>();
     if (weight == nullptr) return false;
     Array<IndexExpr> wshape = weight->shape;
     oshape.Set((oshape.size() - 1), wshape[wshape.size() - 1]);

From 431a57d090867b7c6b9e663dd864af11740a42f7 Mon Sep 17 00:00:00 2001
From: Siju Samuel
Date: Fri, 12 Oct 2018 11:00:47 +0530
Subject: [PATCH 8/8] Review comment fix

---
 include/tvm/relay/attrs/nn.h | 20 --------------------
 src/relay/op/nn/nn.cc        | 14 +++-----------
 2 files changed, 3 insertions(+), 31 deletions(-)

diff --git a/include/tvm/relay/attrs/nn.h b/include/tvm/relay/attrs/nn.h
index 0906aca7fe95..a6f307641826 100644
--- a/include/tvm/relay/attrs/nn.h
+++ b/include/tvm/relay/attrs/nn.h
@@ -248,26 +248,6 @@ struct PadAttrs : public tvm::AttrsNode<PadAttrs> {
                 "in the format of ((before_1, after_1), ..., (before_N, after_N))");
   }
 };
 
-/*! \brief Attributes for dense operator */
-struct DenseAttrs : public tvm::AttrsNode<DenseAttrs> {
-  IndexExpr units;
-
-  TVM_DECLARE_ATTRS(DenseAttrs, "relay.attrs.DenseAttrs") {
-    TVM_ATTR_FIELD(units)
-      .describe("Number of hidden units of the dense transformation.");
-  }
-};
-
-/*! \brief Attributes for leaky relu operator */
-struct LeakyReluAttrs : public tvm::AttrsNode<LeakyReluAttrs> {
-  double alpha;
-
-  TVM_DECLARE_ATTRS(LeakyReluAttrs, "relay.attrs.LeakyReluAttrs") {
-    TVM_ATTR_FIELD(alpha).set_lower_bound(0.0).set_default(0.25)
-      .describe("Slope coefficient for the negative half axis.");
-  }
-};
-
 /*! \brief Attributes for leaky relu operator */
diff --git a/src/relay/op/nn/nn.cc b/src/relay/op/nn/nn.cc
index 6536056a672b..4d89d712bfa2 100644
--- a/src/relay/op/nn/nn.cc
+++ b/src/relay/op/nn/nn.cc
@@ -37,17 +37,9 @@ bool DenseRel(const Array<Type>& types,
     Array<IndexExpr> dshape = data->shape;
 
     // validate the weight shape is proper if defined
-    if (weight != nullptr) {
-      CHECK(reporter->AssertEQ(weight->shape[0], dshape[dshape.size() - 1]))
-          << "Dense: shape of weight is inconsistent with input data.";
-      CHECK(reporter->AssertEQ(weight->shape[1], param->units))
-          << "Dense: shape of weight is inconsistent with units.";
-    } else {
-      // Assign weight type
-      std::vector<IndexExpr> wshape({dshape[dshape.size() - 1], param->units});
-      reporter->Assign(types[1], TensorTypeNode::make(wshape, data->dtype));
-    }
-
+    // Assign weight type
+    Array<IndexExpr> wshape({dshape[dshape.size() - 1], param->units});
+    reporter->Assign(types[1], TensorTypeNode::make(wshape, data->dtype));
     oshape.Set((oshape.size() - 1), param->units);
   } else {
     if (weight == nullptr) return false;
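
For reference, a minimal NumPy sketch (illustrative only, not part of the patch series; the function and variable names are ours) of the dense semantics the registration above documents, Y = X * W^T with `weight` laid out as `(units, input_dim)`:

import numpy as np

def dense_ref(data, weight, units=None):
    # data: (..., input_dim), weight: (units, input_dim) -> out: (..., units)
    if units is not None:
        assert weight.shape == (units, data.shape[-1])
    # matmul broadcasts over the leading axes, matching the n-D data the op accepts
    return data @ weight.T

x = np.random.randn(4, 8, 16).astype("float32")
w = np.random.randn(2, 16).astype("float32")
assert dense_ref(x, w, units=2).shape == (4, 8, 2)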
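The final DenseRel keeps two paths: when `units` is defined it fixes the last output axis (and back-fills the weight type), otherwise it falls back to the weight's shape. A hypothetical Python rendering of just that output-shape branching (helper name is ours):

def dense_out_shape(data_shape, weight_shape=None, units=None):
    # Mirrors DenseRel: only the last axis of the data shape changes.
    out = list(data_shape)
    if units is not None:
        out[-1] = units              # `units` takes precedence when defined
    elif weight_shape is not None:
        out[-1] = weight_shape[-1]   # otherwise fall back to the weight's last axis
    else:
        return None                  # weight type not resolved yet
    return tuple(out)

assert dense_out_shape(("n", "c", "h", 16), units=2) == ("n", "c", "h", 2)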
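Likewise, a NumPy sketch of `y = x > 0 ? x : alpha * x`; the 0.25 default mirrors the attrs default above, and because the op is registered with the Identity type relation, the output type simply equals the input type:

import numpy as np

def leaky_relu_ref(x, alpha=0.25):
    # identity on the positive half axis, slope `alpha` on the negative half
    return np.where(x > 0, x, alpha * x)

x = np.array([-2.0, -0.5, 0.0, 1.5], dtype="float32")
print(leaky_relu_ref(x, alpha=0.1))  # -> [-0.2 -0.05 0. 1.5]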