[RELAY][OPS]LRN and L2_Normalize (apache#1860)
siju-samuel authored and tqchen committed Oct 11, 2018
1 parent 35161c2 commit e114959
Showing 5 changed files with 206 additions and 0 deletions.
5 changes: 5 additions & 0 deletions docs/langref/relay_op.rst
@@ -39,6 +39,7 @@ This level enables fully connected multi-layer perceptron.
tvm.relay.sigmoid
tvm.relay.nn.relu


**Level 2: Convolutions**

This level enables typical convnet models.
@@ -53,6 +54,8 @@ This level enables typical convnet models.
tvm.relay.nn.global_avg_pool2d
tvm.relay.nn.upsampling
tvm.relay.nn.batch_flatten
tvm.relay.nn.lrn
tvm.relay.nn.l2_normalize


**Level 3: Additional Math And Transform Operators**
@@ -131,6 +134,8 @@ Level 2 Definitions
.. autofunction:: tvm.relay.nn.global_avg_pool2d
.. autofunction:: tvm.relay.nn.upsampling
.. autofunction:: tvm.relay.nn.batch_flatten
.. autofunction:: tvm.relay.nn.lrn
.. autofunction:: tvm.relay.nn.l2_normalize


Level 3 Definitions
38 changes: 38 additions & 0 deletions include/tvm/relay/attrs/nn.h
@@ -173,6 +173,44 @@ struct UpSamplingAttrs : public tvm::AttrsNode<UpSamplingAttrs> {
};




/*! \brief Attributes for LRN operator */
struct LRNAttrs : public tvm::AttrsNode<LRNAttrs> {
  IndexExpr size;
  IndexExpr axis;
  double bias;
  double alpha;
  double beta;

  TVM_DECLARE_ATTRS(LRNAttrs, "relay.attrs.LRNAttrs") {
    TVM_ATTR_FIELD(size).set_default(5)
        .describe("The size of the local region to be considered for normalization.");
    TVM_ATTR_FIELD(axis).set_default(1)
        .describe("The axis of the input data layout along which normalization is applied "
                  "(the channel axis).");
    TVM_ATTR_FIELD(bias).set_default(2)
        .describe("The offset parameter to avoid division by 0.");
    TVM_ATTR_FIELD(alpha).set_default(0.0001)
        .describe("The scaling parameter.");
    TVM_ATTR_FIELD(beta).set_default(0.75)
        .describe("The exponent parameter.");
  }
};


/*! \brief Attributes for L2Normalize operator */
struct L2NormalizeAttrs : public tvm::AttrsNode<L2NormalizeAttrs> {
  double eps;
  Array<IndexExpr> axis;

  TVM_DECLARE_ATTRS(L2NormalizeAttrs, "relay.attrs.L2NormalizeAttrs") {
    TVM_ATTR_FIELD(eps)
        .describe("A lower bound value for the squared norm, to avoid division by 0.");
    TVM_ATTR_FIELD(axis)
        .describe("The axis (or axes) over which normalization is applied.");
  }
};

} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_ATTRS_NN_H_
63 changes: 63 additions & 0 deletions python/tvm/relay/op/nn/nn.py
@@ -383,3 +383,66 @@ def relu(data):
The computed result.
"""
return _make.relu(data)


def lrn(data, size=5, axis=1, bias=2, alpha=.00001, beta=0.75):
    """This operator takes data as input and does local response normalization.

    Normalize the input in a local region across or within feature maps.
    Each input value is divided by (bias + (alpha * sum_data^2 / size))^beta,
    where sum_data^2 is the sum of squared values over a local region of `size`
    elements centered at that value along `axis` (zero padding is added where
    necessary).

    .. math::

        (data / (bias + (alpha * sum_data^2 / size))^beta)

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    size : int, optional
        The size of the local region to be considered for normalization.

    axis : int, optional
        Input data layout channel axis. Default value is 1 for NCHW format.

    bias : float, optional
        The offset parameter to avoid dividing by 0.

    alpha : float, optional
        The scaling parameter.

    beta : float, optional
        The exponent parameter.

    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    # The argument order matches MakeLRN in src/relay/op/nn/nn.cc.
    return _make.lrn(data, size, axis, alpha, beta, bias)
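For readers who want to check the formula numerically, the following is a minimal NumPy sketch of the normalization described in the docstring above. It is an illustration only, not part of this patch and not the TOPI kernel the operator lowers to; `lrn_ref` is a hypothetical helper name.

import numpy as np

def lrn_ref(data, size=5, axis=1, bias=2.0, alpha=.00001, beta=0.75):
    """NumPy sketch of the LRN formula documented above (illustration only)."""
    # Move the normalization axis to the front so the window slicing is simple.
    x = np.moveaxis(data, axis, 0)
    half = size // 2
    sq = x * x
    sum_sq = np.zeros_like(x)
    n = x.shape[0]
    for c in range(n):
        # Sum of squares over a window of `size` values, zero-padded at the borders.
        lo, hi = max(0, c - half), min(n, c + half + 1)
        sum_sq[c] = sq[lo:hi].sum(axis=0)
    out = x / np.power(bias + alpha * sum_sq / size, beta)
    return np.moveaxis(out, 0, axis)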

def l2_normalize(data, eps, axis=None):
    """Perform L2 normalization on the input data.

    .. math::

        y(i, j) = x(i, j) / sqrt(max(sum(x^2), eps))

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    eps : float
        A lower bound for the squared norm, to avoid division by 0.

    axis : list of int, optional
        The axis (or axes) over which the normalization is applied.

    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    return _make.l2_normalize(data, eps, axis)
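Similarly, a minimal NumPy sketch of the L2 normalization formula above, for reference only; `l2_normalize_ref` is a hypothetical name, not part of this patch.

import numpy as np

def l2_normalize_ref(data, eps, axis=None):
    """NumPy sketch of x / sqrt(max(sum(x^2), eps)) over the given axes."""
    axes = None if axis is None else tuple(axis)
    # keepdims so the reduced sum broadcasts back against the input.
    sq_sum = np.sum(data * data, axis=axes, keepdims=True)
    return data / np.sqrt(np.maximum(sq_sum, eps))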
74 changes: 74 additions & 0 deletions src/relay/op/nn/nn.cc
@@ -143,5 +143,79 @@ RELAY_REGISTER_UNARY_OP("relay.op.nn._make.", "relu")
.set_support_level(1)
.add_type_rel("Identity", IdentityRel);


// Positional relay function to create LRN operator used by frontend FFI.
Expr MakeLRN(Expr data,
             IndexExpr size,
             IndexExpr axis,
             double alpha,
             double beta,
             double bias) {
  auto attrs = make_node<LRNAttrs>();
  attrs->size = size;
  attrs->axis = axis;
  attrs->alpha = alpha;
  attrs->beta = beta;
  attrs->bias = bias;
  static const Op& op = Op::Get("nn.lrn");
  return CallNode::make(op, {data}, Attrs(attrs), {});
}

TVM_REGISTER_API("relay.op.nn._make.lrn")
.set_body([](const TVMArgs& args, TVMRetValue* rv) {
    runtime::detail::unpack_call<Expr, 6>(MakeLRN, args, rv);
  });

RELAY_REGISTER_OP("nn.lrn")
.describe(R"code(LRN layer.
Normalize the input in a local region across or within feature maps.
Each input value is divided by (1 + (\alpha/n) \sum_i x_i^2)^\beta,
where n is the size of each local region, and the sum is taken over the region
centered at that value (zero padding is added where necessary).
.. math::
data / (bias + (alpha * sum_data ^2 /size))^beta
- **data**: The input tensor.
)code" TVM_ADD_FILELINE)
.set_num_inputs(1)
.add_argument("data", "Tensor", "The input tensor.")
.set_support_level(2)
.add_type_rel("Identity", IdentityRel);


// Positional relay function to create L2Normalize operator used by frontend FFI.
Expr MakeL2Normalize(Expr data,
                     double eps,
                     Array<IndexExpr> axis) {
  auto attrs = make_node<L2NormalizeAttrs>();
  attrs->eps = eps;
  attrs->axis = std::move(axis);
  static const Op& op = Op::Get("nn.l2_normalize");
  return CallNode::make(op, {data}, Attrs(attrs), {});
}

TVM_REGISTER_API("relay.op.nn._make.l2_normalize")
.set_body([](const TVMArgs& args, TVMRetValue* rv) {
    runtime::detail::unpack_call<Expr, 3>(MakeL2Normalize, args, rv);
  });

RELAY_REGISTER_OP("nn.l2_normalize")
.describe(R"code(L2 Normalization layer.

Normalizes the input along the given axis (or axes) using an L2 norm.

.. math::

    output = x / sqrt(max(sum(x^2), epsilon))

- **data**: The input tensor.
)code" TVM_ADD_FILELINE)
.set_num_inputs(1)
.add_argument("data", "Tensor", "The input tensor.")
.set_support_level(2)
.add_type_rel("Identity", IdentityRel);

} // namespace relay
} // namespace tvm
26 changes: 26 additions & 0 deletions tests/python/relay/test_op_level1.py
@@ -168,6 +168,30 @@ def test_concatenate_infer_type():
assert ftype.ret_type == relay.ty.TensorType(
(n, t + t, 100), "float32")

def test_lrn():
    ib = relay.ir_builder.IRBuilder()
    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
    x = ib.param("x", relay.ty.TensorType((n, c, h, w), "float32"))
    with ib.function(x) as func:
        ib.ret(relay.nn.lrn(x, size=10, axis=2, bias=0.5, alpha=.00001, beta=0.75))
    ib.ret(func)

    func = relay.ir_pass.infer_type(ib.env, func.to_func())
    ftype = func.checked_type
    assert ftype.ret_type == relay.ty.TensorType((n, c, h, w), "float32")


def test_l2_normalize():
    ib = relay.ir_builder.IRBuilder()
    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
    x = ib.param("x", relay.ty.TensorType((n, c, h, w), "float32"))
    with ib.function(x) as func:
        ib.ret(relay.nn.l2_normalize(x, eps=0.001, axis=[1]))
    ib.ret(func)

    func = relay.ir_pass.infer_type(ib.env, func.to_func())
    ftype = func.checked_type
    assert ftype.ret_type == relay.ty.TensorType((n, c, h, w), "float32")
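Since both operators return a relay.Expr and register the Identity type relation, they compose directly. Below is a small sketch in the same ir_builder style as the tests above; it is an illustration only, not part of the test suite, and the function name is hypothetical.

def lrn_then_l2_normalize_example():
    ib = relay.ir_builder.IRBuilder()
    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
    x = ib.param("x", relay.ty.TensorType((n, c, h, w), "float32"))
    with ib.function(x) as func:
        # LRN over the channel axis followed by L2 normalization over the same axis.
        y = relay.nn.lrn(x, size=5, axis=1, bias=2., alpha=.00001, beta=0.75)
        ib.ret(relay.nn.l2_normalize(y, eps=0.001, axis=[1]))
    ib.ret(func)
    func = relay.ir_pass.infer_type(ib.env, func.to_func())
    # The shape is unchanged, as with each operator individually.
    assert func.checked_type.ret_type == relay.ty.TensorType((n, c, h, w), "float32")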

if __name__ == "__main__":
    test_unary_op()
@@ -178,3 +202,5 @@ def test_concatenate_infer_type():
    test_log_softmax()
    test_binary_op()
    test_binary_broadcast_op()
    test_lrn()
    test_l2_normalize()
