diff --git a/docs/langref/relay_op.rst b/docs/langref/relay_op.rst
index 0b937f6636bf..d40346a9e836 100644
--- a/docs/langref/relay_op.rst
+++ b/docs/langref/relay_op.rst
@@ -51,6 +51,7 @@ This level enables typical convnet models.
 
    tvm.relay.nn.conv2d
    tvm.relay.nn.conv2d_transpose
+   tvm.relay.nn.dense
    tvm.relay.nn.max_pool2d
    tvm.relay.nn.avg_pool2d
    tvm.relay.nn.global_max_pool2d
@@ -70,6 +71,7 @@ This level enables additional math and transform operators.
    :nosignatures:
 
+   tvm.relay.nn.leaky_relu
    tvm.relay.zeros
    tvm.relay.zeros_like
    tvm.relay.ones
    tvm.relay.ones_like
@@ -137,6 +139,7 @@ Level 2 Definitions
 -------------------
 .. autofunction:: tvm.relay.nn.conv2d
 .. autofunction:: tvm.relay.nn.conv2d_transpose
+.. autofunction:: tvm.relay.nn.dense
 .. autofunction:: tvm.relay.nn.max_pool2d
 .. autofunction:: tvm.relay.nn.avg_pool2d
 .. autofunction:: tvm.relay.nn.global_max_pool2d
@@ -149,6 +152,7 @@ Level 2 Definitions
 
 Level 3 Definitions
 -------------------
+.. autofunction:: tvm.relay.nn.leaky_relu
 .. autofunction:: tvm.relay.floor
 .. autofunction:: tvm.relay.ceil
 .. autofunction:: tvm.relay.trunc
diff --git a/include/tvm/relay/attrs/nn.h b/include/tvm/relay/attrs/nn.h
index 0be85d3d1bb9..c7b8695d1da5 100644
--- a/include/tvm/relay/attrs/nn.h
+++ b/include/tvm/relay/attrs/nn.h
@@ -202,6 +202,18 @@ struct GlobalPool2DAttrs : public tvm::AttrsNode<GlobalPool2DAttrs> {
   }
 };
 
+
+/*! \brief Attributes for dense operator */
+struct DenseAttrs : public tvm::AttrsNode<DenseAttrs> {
+  IndexExpr units;
+
+  TVM_DECLARE_ATTRS(DenseAttrs, "relay.attrs.DenseAttrs") {
+    TVM_ATTR_FIELD(units)
+        .describe("Number of hidden units of the dense transformation.");
+  }
+};
+
+
 /*! \brief Attributes for upsampling operator */
 struct UpSamplingAttrs : public tvm::AttrsNode<UpSamplingAttrs> {
   int scale;
@@ -237,6 +249,18 @@ struct PadAttrs : public tvm::AttrsNode<PadAttrs> {
   }
 };
 
+
+/*! \brief Attributes for leaky relu operator */
+struct LeakyReluAttrs : public tvm::AttrsNode<LeakyReluAttrs> {
+  double alpha;
+
+  TVM_DECLARE_ATTRS(LeakyReluAttrs, "relay.attrs.LeakyReluAttrs") {
+    TVM_ATTR_FIELD(alpha).set_lower_bound(0.0).set_default(0.25)
+        .describe("Slope coefficient for the negative half axis.");
+  }
+};
+
+
 /*! \brief Attributes used in dropout operator */
 struct DropoutAttrs : public tvm::AttrsNode<DropoutAttrs> {
   double rate;
@@ -272,6 +296,7 @@ struct BatchNormAttrs : public tvm::AttrsNode<BatchNormAttrs> {
   }
 };  // struct BatchNormAttrs
 
+
 /*! \brief Attributes for LRN operator */
 struct LRNAttrs : public tvm::AttrsNode<LRNAttrs> {
   IndexExpr size;
diff --git a/python/tvm/relay/op/nn/nn.py b/python/tvm/relay/op/nn/nn.py
index 313c26da0234..51acd4bc38b6 100644
--- a/python/tvm/relay/op/nn/nn.py
+++ b/python/tvm/relay/op/nn/nn.py
@@ -430,6 +430,34 @@ def batch_flatten(data):
     """
     return _make.batch_flatten(data)
 
+
+def dense(data, weight, units=None):
+    """Dense operator.
+    Applies a linear transformation.
+
+    .. math::
+
+       Y = X * W
+
+    Parameters
+    ----------
+    data : relay.Expr
+        The input data to the operator.
+
+    weight : relay.Expr
+        The weight expression.
+
+    units : int, optional
+        Number of hidden units of the dense transformation.
+
+    Returns
+    -------
+    result : relay.Expr
+        The computed result.
+    """
+    return _make.dense(data, weight, units)
+
+
 def relu(data):
     """Rectified linear unit.
 
@@ -449,6 +477,30 @@ def relu(data):
     return _make.relu(data)
 
+
+def leaky_relu(data, alpha):
+    """This operator takes data as input and computes the leaky
+    version of a Rectified Linear Unit.
+
+    .. math::
+
+       y = x > 0 ? x : alpha * x
+
+    Parameters
+    ----------
+    data : relay.Expr
+        The input data to the operator.
+
+    alpha : float
+        Slope coefficient for the negative half axis.
+
+    Returns
+    -------
+    result : relay.Expr
+        The computed result.
+    """
+    return _make.leaky_relu(data, alpha)
+
 
 def pad(data, pad_width, pad_value=0.0):
diff --git a/src/relay/op/nn/nn.cc b/src/relay/op/nn/nn.cc
index 23dfe90eebf0..dc5ce2e567d0 100644
--- a/src/relay/op/nn/nn.cc
+++ b/src/relay/op/nn/nn.cc
@@ -15,6 +15,104 @@ namespace tvm {
 namespace relay {
 
+TVM_REGISTER_NODE_TYPE(DenseAttrs);
+
+
+bool DenseRel(const Array<Type>& types,
+              int num_inputs,
+              const Attrs& attrs,
+              const TypeReporter& reporter) {
+  CHECK_EQ(types.size(), 3);
+  const auto* data = types[0].as<TensorTypeNode>();
+  const auto* weight = types[1].as<TensorTypeNode>();
+  if (data == nullptr) return false;
+
+  const DenseAttrs* param = attrs.as<DenseAttrs>();
+  CHECK(param != nullptr);
+
+  CHECK(static_cast<int>(data->shape.size()) != 0);
+
+  Array<IndexExpr> oshape = data->shape;
+  if (param->units.defined()) {
+    Array<IndexExpr> dshape = data->shape;
+    // validate the weight shape is proper if defined
+    // Assign weight type
+    Array<IndexExpr> wshape({dshape[dshape.size() - 1], param->units});
+    reporter->Assign(types[1], TensorTypeNode::make(wshape, data->dtype));
+    oshape.Set((oshape.size() - 1), param->units);
+  } else {
+    if (weight == nullptr) return false;
+    Array<IndexExpr> wshape = weight->shape;
+    oshape.Set((oshape.size() - 1), wshape[wshape.size() - 1]);
+  }
+
+  // assign output type
+  reporter->Assign(types[2], TensorTypeNode::make(oshape, data->dtype));
+  return true;
+}
+
+
+// Positional relay function to create dense operator used by frontend FFI.
+Expr MakeDense(Expr data,
+               Expr weight,
+               IndexExpr units) {
+  auto attrs = make_node<DenseAttrs>();
+  attrs->units = units;
+  static const Op& op = Op::Get("nn.dense");
+  return CallNode::make(op, {data, weight}, Attrs(attrs), {});
+}
+
+
+TVM_REGISTER_API("relay.op.nn._make.dense")
+.set_body([](const TVMArgs& args, TVMRetValue* rv) {
+    runtime::detail::unpack_call<Expr, 3>(MakeDense, args, rv);
+  });
+
+
+RELAY_REGISTER_OP("nn.dense")
+.describe(R"code(Applies a linear transformation: :math:`Y = XW`.
+
+- **data**: `(x1, x2, ..., xn, input_dim)`
+- **weight**: `(input_dim, units)`
+- **out**: `(x1, x2, ..., xn, units)`.
+
+)code" TVM_ADD_FILELINE)
+.set_num_inputs(2)
+.add_argument("data", "nD Tensor", "Input data.")
+.add_argument("weight", "2D Tensor", "Weight matrix.")
+.set_support_level(2)
+.add_type_rel("Dense", DenseRel);
+
+
+// Positional relay function to create leaky relu operator used by frontend FFI.
+Expr MakeLeakyRelu(Expr data,
+                   double alpha) {
+  auto attrs = make_node<LeakyReluAttrs>();
+  attrs->alpha = alpha;
+  static const Op& op = Op::Get("nn.leaky_relu");
+  return CallNode::make(op, {data}, Attrs(attrs), {});
+}
+
+
+TVM_REGISTER_API("relay.op.nn._make.leaky_relu")
+.set_body([](const TVMArgs& args, TVMRetValue* rv) {
+    runtime::detail::unpack_call<Expr, 2>(MakeLeakyRelu, args, rv);
+  });
+
+
+RELAY_REGISTER_OP("nn.leaky_relu")
+.describe(R"code(Leaky version of a Rectified Linear Unit.
+
+`y = x > 0 ? x : alpha * x`
+
+)code" TVM_ADD_FILELINE)
+.set_num_inputs(1)
+.add_argument("data", "Tensor", "Input data.")
+.set_support_level(3)
+.add_type_rel("Identity", IdentityRel);
+
+
 TVM_REGISTER_API("relay.op.nn._make.softmax")
 .set_body([](const TVMArgs& args, TVMRetValue* rv) {
     auto make_func = [](Expr data, int axis) {
diff --git a/tests/python/relay/test_op_level2.py b/tests/python/relay/test_op_level2.py
index d0d02aece06d..4f37d4893b66 100644
--- a/tests/python/relay/test_op_level2.py
+++ b/tests/python/relay/test_op_level2.py
@@ -219,6 +219,47 @@ def test_pad_infer_type():
     ftype = func.checked_type
     assert ftype.ret_type == relay.TensorType((n + 2, 6, 9, w + 8), "float32")
 
+def test_dense_infer_type():
+    ib = relay.ir_builder.IRBuilder()
+    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
+    x = ib.param("x", relay.ty.TensorType((n, c, h, w), "float32"))
+
+    w = ib.param("w", relay.ty.TensorType((w, 2), "float32"))
+
+    with ib.function(x, w) as func:
+        ib.ret(relay.nn.dense(x, w, units=2))
+    ib.ret(func)
+    func = relay.ir_pass.infer_type(ib.env, func.to_func())
+    ftype = func.checked_type
+    assert ftype.ret_type == relay.ty.TensorType((n, c, h, 2), "float32")
+
+    ib = relay.ir_builder.IRBuilder()
+    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), 2
+    x = ib.param("x", relay.ty.TensorType((n, c, h, w), "float32"))
+
+    wh, ww = tvm.var("wh"), tvm.var("ww")
+    w = ib.param("w", relay.ty.TensorType((wh, ww), "float32"))
+
+    with ib.function(x, w) as func:
+        ib.ret(relay.nn.dense(x, w))
+    ib.ret(func)
+    func = relay.ir_pass.infer_type(ib.env, func.to_func())
+    ftype = func.checked_type
+    assert ftype.ret_type == relay.ty.TensorType((n, c, h, ww), "float32")
+
+    ib = relay.ir_builder.IRBuilder()
+    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), 2
+    x = ib.param("x", relay.ty.TensorType((n, c, h, w), "float32"))
+
+    w = ib.param("w", relay.ty.IncompleteType())
+
+    with ib.function(x, w) as func:
+        ib.ret(relay.nn.dense(x, w, units=2))
+    ib.ret(func)
+    func = relay.ir_pass.infer_type(ib.env, func.to_func())
+    ftype = func.checked_type
+    assert ftype.ret_type == relay.ty.TensorType((n, c, h, 2), "float32")
+
 
 if __name__ == "__main__":
     test_conv2d_infer_type()
@@ -227,3 +268,4 @@ def test_pad_infer_type():
     test_flatten_infer_type()
     test_pad_infer_type()
     test_conv2d_transpose_infer_type()
+    test_dense_infer_type()
diff --git a/tests/python/relay/test_op_level3.py b/tests/python/relay/test_op_level3.py
index 13ab483f936c..0605ac02339b 100644
--- a/tests/python/relay/test_op_level3.py
+++ b/tests/python/relay/test_op_level3.py
@@ -208,6 +208,17 @@ def test_full_like():
     ftype = func.checked_type
     assert ftype.ret_type == relay.TensorType((n, c, h, w), "float32")
 
+def test_infer_type_leaky_relu():
+    ib = relay.ir_builder.IRBuilder()
+    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
+    x = ib.param("x", relay.ty.TensorType((n, c, h, w), "float32"))
+
+    with ib.function(x) as func:
+        ib.ret(relay.nn.leaky_relu(x, alpha=0.1))
+    ib.ret(func)
+    func = relay.ir_pass.infer_type(ib.env, func.to_func())
+    ftype = func.checked_type
+    assert ftype.ret_type == relay.ty.TensorType((n, c, h, w), "float32")
 
 if __name__ == "__main__":
     test_single_op()
@@ -220,5 +231,6 @@ def test_full_like():
     test_take_infer_type()
     test_full()
     test_full_like()
+    test_infer_type_leaky_relu()
     test_squeeze_axes_infer_type()
     test_squeeze_default_axes_infer_type()
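
Reviewer note, not part of the patch: a NumPy sketch of the semantics the patch implements. The helper names dense_ref and leaky_relu_ref are illustrative only; the (input_dim, units) weight layout and the 0.25 default for alpha follow DenseRel and LeakyReluAttrs above.

import numpy as np

def dense_ref(x, w):
    # Weight is laid out as (input_dim, units), matching DenseRel and the
    # Python docstring (Y = X * W, no transpose): the last axis of x is
    # contracted against the first axis of w.
    return np.matmul(x, w)

def leaky_relu_ref(x, alpha=0.25):
    # Elementwise y = x > 0 ? x : alpha * x; alpha defaults to 0.25 as in
    # LeakyReluAttrs.
    return np.where(x > 0, x, alpha * x)

x = np.random.randn(2, 3, 4, 5).astype("float32")
w = np.random.randn(5, 2).astype("float32")
assert dense_ref(x, w).shape == (2, 3, 4, 2)
assert np.allclose(leaky_relu_ref(np.array([-2.0, 3.0]), alpha=0.1), [-0.2, 3.0])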
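
And a minimal type-inference sketch chaining the two new operators through the relay.ir_builder API used by the tests; this assumes the patch is applied and otherwise mirrors test_dense_infer_type and test_infer_type_leaky_relu rather than exercising any new behavior.

import tvm
from tvm import relay

# dense maps the last axis from w (input_dim) to 2 units; leaky_relu is
# shape-preserving, so the inferred return type is (n, c, h, 2).
ib = relay.ir_builder.IRBuilder()
n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
x = ib.param("x", relay.ty.TensorType((n, c, h, w), "float32"))
weight = ib.param("weight", relay.ty.TensorType((w, 2), "float32"))

with ib.function(x, weight) as func:
    ib.ret(relay.nn.leaky_relu(relay.nn.dense(x, weight, units=2), alpha=0.1))
ib.ret(func)

func = relay.ir_pass.infer_type(ib.env, func.to_func())
assert func.checked_type.ret_type == relay.ty.TensorType((n, c, h, 2), "float32")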