Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit

Permalink
update operator check
Browse files Browse the repository at this point in the history
  • Loading branch information
arcadiaphy committed May 22, 2019
1 parent ece449f commit 59e0450
Show file tree
Hide file tree
Showing 2 changed files with 28 additions and 10 deletions.
2 changes: 1 addition & 1 deletion src/operator/tensor/la_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -920,7 +920,7 @@ Examples::
.set_attr<nnvm::FListInputNames>("FListInputNames", [](const NodeAttrs& attrs)
{ return std::vector<std::string>{"A"}; } )
.set_attr<mxnet::FInferShape>("FInferShape", InverseShape)
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>)
.set_attr<nnvm::FInferType>("FInferType", InverseType)
.set_attr<nnvm::FInplaceOption>("FInplaceOption", [](const NodeAttrs& attrs)
{ return std::vector<std::pair<int, int>>{{0, 0}}; })
.set_attr<FResourceRequest>("FResourceRequest", [](const NodeAttrs& attrs)
Expand Down
36 changes: 27 additions & 9 deletions src/operator/tensor/la_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -406,10 +406,26 @@ inline bool InverseShape(const nnvm::NodeAttrs& attrs,
CHECK_EQ(in_attrs->size(), 1);
CHECK_EQ(out_attrs->size(), 1);
const mxnet::TShape& in = (*in_attrs)[0];
if (!ndim_is_known(in)) return false;
const int ndim(in.ndim());
CHECK_GE(ndim, 2) << "Input A's dimension must be >= 2";
CHECK_EQ(in[ndim-2], in[ndim-1]) << "Input A's last two dimension must be equal";
SHAPE_ASSIGN_CHECK(*out_attrs, 0, in);
return shape_is_known(in);
}

// Type inference for linalg_inverse: the inverse shares the dtype of the
// input matrix A, and only floating-point (FP32/FP64) inputs are accepted.
// Returns false when the input dtype is still unknown (-1) so inference
// can be retried later; returns true once the output dtype is assigned.
inline bool InverseType(const nnvm::NodeAttrs& attrs,
                        std::vector<int>* in_type,
                        std::vector<int>* out_type) {
  using namespace mshadow;
  CHECK_EQ(in_type->size(), 1);
  CHECK_EQ(out_type->size(), 1);
  const int in_dtype = in_type->at(0);
  if (in_dtype == -1) return false;  // dtype not yet inferred
  const bool is_fp = (in_dtype == kFloat32) || (in_dtype == kFloat64);
  CHECK(is_fp)
      << "This operation only supports 32-bit and 64-bit floating point";
  TYPE_ASSIGN_CHECK(*out_type, 0, in_dtype);
  return true;
}

Expand All @@ -421,6 +437,7 @@ inline bool DetShape(const nnvm::NodeAttrs& attrs,
CHECK_EQ(in_attrs->size(), 1);
CHECK_EQ(out_attrs->size(), onum + 2);
const mxnet::TShape& in = (*in_attrs)[0];
if (!ndim_is_known(in)) return false;
const int ndim(in.ndim());
CHECK_GE(ndim, 2) << "Input A's dimension must be >= 2";
CHECK_EQ(in[ndim-2], in[ndim-1]) << "Input A's last two dimension must be equal";
Expand All @@ -435,7 +452,7 @@ inline bool DetShape(const nnvm::NodeAttrs& attrs,
}
SHAPE_ASSIGN_CHECK(*out_attrs, onum, in); /* LU */
SHAPE_ASSIGN_CHECK(*out_attrs, onum + 1, mxnet::TShape(in.begin(), in.end() - 1)); /* pivot */
return true;
return shape_is_known(in);
}

// Type inference function for det functions in linalg
Expand All @@ -444,16 +461,17 @@ inline bool DetType(const nnvm::NodeAttrs& attrs,
std::vector<int>* in_type,
std::vector<int>* out_type) {
using namespace mshadow;
CHECK_EQ(in_type->size(), 1U);
int dtype = (*in_type)[0];
CHECK_NE(dtype, -1) << "Input must have specified type";

out_type->clear();
CHECK_EQ(in_type->size(), 1);
CHECK_EQ(out_type->size(), onum + 2);
const int dtype = (*in_type)[0];
if (dtype == -1) return false;
CHECK(dtype == kFloat32 || dtype == kFloat64)
<< "This operation only supports 32-bit and 64-bit floating point";
for (int i = 0; i < onum; ++i) {
out_type->push_back(dtype); /* sign or det or logdet */
TYPE_ASSIGN_CHECK(*out_type, i, dtype); /* sign or det or logdet */
}
out_type->push_back(dtype); /* LU */
out_type->push_back(mshadow::kInt32); /* pivot */
TYPE_ASSIGN_CHECK(*out_type, onum, dtype); /* LU */
TYPE_ASSIGN_CHECK(*out_type, onum + 1, kInt32); /* pivot */
return true;
}

Expand Down

0 comments on commit 59e0450

Please sign in to comment.