[RELAY] Ops Dense, leaky_relu #1828

Merged 10 commits on Oct 17, 2018

4 changes: 4 additions & 0 deletions docs/langref/relay_op.rst
@@ -51,6 +51,7 @@ This level enables typical convnet models.

tvm.relay.nn.conv2d
tvm.relay.nn.conv2d_transpose
tvm.relay.nn.dense
tvm.relay.nn.max_pool2d
tvm.relay.nn.avg_pool2d
tvm.relay.nn.global_max_pool2d
@@ -70,6 +71,7 @@ This level enables additional math and transform operators.
:nosignatures:

tvm.relay.zeros
tvm.relay.nn.leaky_relu
tvm.relay.zeros_like
tvm.relay.ones
tvm.relay.ones_like
@@ -137,6 +139,7 @@ Level 2 Definitions
-------------------
.. autofunction:: tvm.relay.nn.conv2d
.. autofunction:: tvm.relay.nn.conv2d_transpose
.. autofunction:: tvm.relay.nn.dense
.. autofunction:: tvm.relay.nn.max_pool2d
.. autofunction:: tvm.relay.nn.avg_pool2d
.. autofunction:: tvm.relay.nn.global_max_pool2d
@@ -149,6 +152,7 @@ Level 2 Definitions

Level 3 Definitions
-------------------
.. autofunction:: tvm.relay.nn.leaky_relu
.. autofunction:: tvm.relay.floor
.. autofunction:: tvm.relay.ceil
.. autofunction:: tvm.relay.trunc
25 changes: 25 additions & 0 deletions include/tvm/relay/attrs/nn.h
@@ -202,6 +202,18 @@ struct GlobalPool2DAttrs : public tvm::AttrsNode<GlobalPool2DAttrs> {
}
};


/*! \brief Attributes for dense operator */
struct DenseAttrs : public tvm::AttrsNode<DenseAttrs> {
IndexExpr units;

TVM_DECLARE_ATTRS(DenseAttrs, "relay.attrs.DenseAttrs") {
TVM_ATTR_FIELD(units)
.describe("Number of hidden units of the dense transformation.");
}
};


/*! \brief Attributes for upsampling operator */
struct UpSamplingAttrs : public tvm::AttrsNode<UpSamplingAttrs> {
int scale;
@@ -237,6 +249,18 @@ struct PadAttrs : public tvm::AttrsNode<PadAttrs> {
}
};


/*! \brief Attributes for leaky relu operator */
struct LeakyReluAttrs : public tvm::AttrsNode<LeakyReluAttrs> {
double alpha;

TVM_DECLARE_ATTRS(LeakyReluAttrs, "relay.attrs.LeakyReluAttrs") {
TVM_ATTR_FIELD(alpha).set_lower_bound(0.0).set_default(0.25)
.describe("Slope coefficient for the negative half axis.");
}
};


/*! \brief Attributes used in dropout operator */
struct DropoutAttrs : public tvm::AttrsNode<DropoutAttrs> {
double rate;
@@ -272,6 +296,7 @@ struct BatchNormAttrs : public tvm::AttrsNode<BatchNormAttrs> {
}
}; // struct BatchNormAttrs


/*! \brief Attributes for LRN operator */
struct LRNAttrs : public tvm::AttrsNode<LRNAttrs> {
IndexExpr size;
52 changes: 52 additions & 0 deletions python/tvm/relay/op/nn/nn.py
@@ -430,6 +430,34 @@ def batch_flatten(data):
"""
return _make.batch_flatten(data)


def dense(data, weight, units=None):
"""Dense operator.
Applies a linear transformation:

.. math::

Y = X * W

Parameters
----------
data : relay.Expr
The input data to the operator.

weight : relay.Expr
The weight expression.

units : int, optional
Number of hidden units of the dense transformation.

Returns
-------
result : relay.Expr
The computed result.
"""
return _make.dense(data, weight, units)
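
For readers skimming the patch, here is a minimal NumPy sketch of the computation `dense` performs under the `(input_dim, units)` weight layout used in this PR's type relation and tests; the helper name `dense_ref` and the concrete shapes are illustrative, not part of the patch:

```python
import numpy as np

def dense_ref(data, weight):
    """NumPy reference for relay.nn.dense as typed in this PR.

    data   : (..., input_dim)
    weight : (input_dim, units)
    out    : (..., units)
    """
    # Contract the last axis of `data` with the first axis of `weight`.
    return np.tensordot(data, weight, axes=([-1], [0]))

x = np.ones((4, 8), dtype="float32")   # (batch, input_dim)
w = np.ones((8, 2), dtype="float32")   # (input_dim, units)
assert dense_ref(x, w).shape == (4, 2)
```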


def relu(data):
"""Rectified linear unit.

Expand All @@ -449,6 +477,30 @@ def relu(data):
return _make.relu(data)


def leaky_relu(data, alpha):
"""This operator takes data as input and does Leaky version
of a Rectified Linear Unit.

.. math::

y = \begin{cases} x & \text{if } x > 0 \\ \alpha x & \text{otherwise} \end{cases}

Parameters
----------
data : relay.Expr
The input data to the operator.

alpha : float
Slope coefficient for the negative half axis.

Returns
-------
result : relay.Expr
The computed result.
"""
return _make.leaky_relu(data, alpha)
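
A quick NumPy sketch of the formula above, using the default alpha of 0.25 from LeakyReluAttrs (the name `leaky_relu_ref` is illustrative, not part of the patch):

```python
import numpy as np

def leaky_relu_ref(x, alpha):
    """NumPy reference for y = x > 0 ? x : alpha * x."""
    return np.where(x > 0, x, alpha * x)

x = np.array([-2.0, -0.5, 0.0, 1.5], dtype="float32")
print(leaky_relu_ref(x, alpha=0.25))  # [-0.5 -0.125 0. 1.5]
```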


def pad(data,
pad_width,
pad_value=0.0):
98 changes: 98 additions & 0 deletions src/relay/op/nn/nn.cc
@@ -15,6 +15,104 @@
namespace tvm {
namespace relay {

TVM_REGISTER_NODE_TYPE(DenseAttrs);


bool DenseRel(const Array<Type>& types,
int num_inputs,
const Attrs& attrs,
const TypeReporter& reporter) {
CHECK_EQ(types.size(), 3);
const auto* data = types[0].as<TensorTypeNode>();
const auto* weight = types[1].as<TensorTypeNode>();
if (data == nullptr) return false;

const DenseAttrs* param = attrs.as<DenseAttrs>();
CHECK(param != nullptr);

CHECK(static_cast<int>(data->shape.size()) != 0);

Array<tvm::Expr> oshape = data->shape;
if (param->units.defined()) {
[Review comment, Member] If units is defined, we don't need weight to be present; instead we can directly assign to the weight. See the implementation of conv2d.

[Review comment, Member] Also, we need to assign to the weight here; this will infer the shape of the weight if necessary. Please also add a testcase similar to https://github.com/dmlc/tvm/blob/master/tests/python/relay/test_op_level2.py#L14 where the weight's shape is not specified but gets inferred.
Array<tvm::Expr> dshape = data->shape;

// units is defined: derive the weight shape from units and the input's
// last dimension, then assign it so an unspecified weight can be inferred.
Array<IndexExpr> wshape({dshape[dshape.size() - 1], param->units});
reporter->Assign(types[1], TensorTypeNode::make(wshape, data->dtype));
oshape.Set((oshape.size() - 1), param->units);
} else {
if (weight == nullptr) return false;
Array<tvm::Expr> wshape = weight->shape;
oshape.Set((oshape.size() - 1), wshape[wshape.size() - 1]);
}

// assign output type
reporter->Assign(types[2], TensorTypeNode::make(oshape, data->dtype));
return true;
}
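
In plain Python, the shape rule that DenseRel implements can be sketched as follows; this is a paraphrase of the C++ above for illustration, not code from the patch:

```python
def dense_out_shape(data_shape, weight_shape=None, units=None):
    """Mirror of DenseRel: the output copies data_shape except its last axis.

    When `units` is given, the weight type is assigned as
    (data_shape[-1], units); otherwise the weight's last dimension
    supplies the output width.
    """
    out = list(data_shape)
    if units is not None:
        out[-1] = units          # weight inferred as (input_dim, units)
    else:
        if weight_shape is None:
            return None          # cannot infer the output yet
        out[-1] = weight_shape[-1]
    return tuple(out)

assert dense_out_shape((4, 8), units=2) == (4, 2)
assert dense_out_shape((4, 8), weight_shape=(8, 3)) == (4, 3)
```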


// Positional relay function to create dense operator used by frontend FFI.
Expr MakeDense(Expr data,
Expr weight,
IndexExpr units) {
auto attrs = make_node<DenseAttrs>();
attrs->units = units;
static const Op& op = Op::Get("nn.dense");
return CallNode::make(op, {data, weight}, Attrs(attrs), {});
}


TVM_REGISTER_API("relay.op.nn._make.dense")
.set_body([](const TVMArgs& args, TVMRetValue* rv) {
runtime::detail::unpack_call<Expr, 3>(MakeDense, args, rv);
});


RELAY_REGISTER_OP("nn.dense")
.describe(R"code(Applies a linear transformation: :math:`Y = XW^T`.

- **data**: `(x1, x2, ..., xn, input_dim)`
- **weight**: `(units, input_dim)`
- **out**: `(x1, x2, ..., xn, units)`.

)code" TVM_ADD_FILELINE)
.set_num_inputs(2)
.add_argument("data", "nD Tensor", "Input data.")
.add_argument("weight", "2D Tensor", "Weight matrix.")
.set_support_level(2)
.add_type_rel("Dense", DenseRel);


// Positional relay function to create leaky relu operator used by frontend FFI.
Expr MakeLeakyRelu(Expr data,
double alpha) {
auto attrs = make_node<LeakyReluAttrs>();
attrs->alpha = alpha;
static const Op& op = Op::Get("nn.leaky_relu");
return CallNode::make(op, {data}, Attrs(attrs), {});
}


TVM_REGISTER_API("relay.op.nn._make.leaky_relu")
.set_body([](const TVMArgs& args, TVMRetValue* rv) {
runtime::detail::unpack_call<Expr, 2>(MakeLeakyRelu, args, rv);
});


RELAY_REGISTER_OP("nn.leaky_relu")
.describe(R"code(Leaky version of a Rectified Linear Unit.

`y = x > 0 ? x : alpha * x`

)code" TVM_ADD_FILELINE)
.set_num_inputs(1)
.add_argument("data", "Tensor", "Input data.")
.set_support_level(3)
.add_type_rel("Identity", IdentityRel);


TVM_REGISTER_API("relay.op.nn._make.softmax")
.set_body([](const TVMArgs& args, TVMRetValue* rv) {
auto make_func = [](Expr data, int axis) {
42 changes: 42 additions & 0 deletions tests/python/relay/test_op_level2.py
@@ -219,6 +219,47 @@ def test_pad_infer_type():
ftype = func.checked_type
assert ftype.ret_type == relay.TensorType((n + 2, 6, 9, w + 8), "float32")

def test_dense_infer_type():
ib = relay.ir_builder.IRBuilder()
n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
x = ib.param("x", relay.ty.TensorType((n, c, h, w), "float32"))

w = ib.param("w", relay.ty.TensorType((w, 2), "float32"))

with ib.function(x, w) as func:
ib.ret(relay.nn.dense(x, w, units=2))
ib.ret(func)
func = relay.ir_pass.infer_type(ib.env, func.to_func())
ftype = func.checked_type
assert ftype.ret_type == relay.ty.TensorType((n, c, h, 2), "float32")

ib = relay.ir_builder.IRBuilder()
n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), 2
x = ib.param("x", relay.ty.TensorType((n, c, h, w), "float32"))

wh, ww = tvm.var("wh"), tvm.var("ww")
w = ib.param("w", relay.ty.TensorType((wh, ww), "float32"))

with ib.function(x, w) as func:
ib.ret(relay.nn.dense(x, w))
ib.ret(func)
func = relay.ir_pass.infer_type(ib.env, func.to_func())
ftype = func.checked_type
assert ftype.ret_type == relay.ty.TensorType((n, c, h, ww), "float32")

ib = relay.ir_builder.IRBuilder()
n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), 2
x = ib.param("x", relay.ty.TensorType((n, c, h, w), "float32"))

w = ib.param("w", relay.ty.IncompleteType())

with ib.function(x, w) as func:
ib.ret(relay.nn.dense(x, w, units=2))
ib.ret(func)
func = relay.ir_pass.infer_type(ib.env, func.to_func())
ftype = func.checked_type
assert ftype.ret_type == relay.ty.TensorType((n, c, h, 2), "float32")


if __name__ == "__main__":
test_conv2d_infer_type()
@@ -227,3 +268,4 @@ def test_pad_infer_type():
test_flatten_infer_type()
test_pad_infer_type()
test_conv2d_transpose_infer_type()
test_dense_infer_type()
12 changes: 12 additions & 0 deletions tests/python/relay/test_op_level3.py
@@ -208,6 +208,17 @@ def test_full_like():
ftype = func.checked_type
assert ftype.ret_type == relay.TensorType((n, c, h, w), "float32")

def test_infer_type_leaky_relu():
ib = relay.ir_builder.IRBuilder()
n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
x = ib.param("x", relay.ty.TensorType((n, c, h, w), "float32"))

with ib.function(x) as func:
ib.ret(relay.nn.leaky_relu(x, alpha=0.1))
ib.ret(func)
func = relay.ir_pass.infer_type(ib.env, func.to_func())
ftype = func.checked_type
assert ftype.ret_type == relay.ty.TensorType((n, c, h, w), "float32")

if __name__ == "__main__":
test_single_op()
@@ -220,5 +231,6 @@ def test_full_like():
test_take_infer_type()
test_full()
test_full_like()
test_infer_type_leaky_relu()
test_squeeze_axes_infer_type()
test_squeeze_default_axes_infer_type()