Commit dcb3da5: refine code

chengduoZH committed Oct 26, 2017
1 parent 05239b6 commit dcb3da5
Showing 4 changed files with 56 additions and 117 deletions.
4 changes: 2 additions & 2 deletions paddle/operators/math/sequence_project.h
@@ -90,8 +90,8 @@ template <typename Place, typename T>
class SequenceProjectFunctor {
public:
void operator()(const platform::DeviceContext& context,
framework::LoDTensor& in, framework::LoDTensor& padding_data,
framework::LoDTensor& col, bool padding_trainable,
framework::LoDTensor& in, framework::Tensor& padding_data,
framework::Tensor& col, bool padding_trainable,
int context_start, int context_length, int context_stride,
int up_pad, int down_pad, bool gradient, bool input_grad,
bool pad_grad) {
14 changes: 7 additions & 7 deletions paddle/operators/sequence_conv_op.cc
@@ -29,10 +29,6 @@ class SequenceConvOp : public framework::OperatorWithKernel {
"Input(Filter) of SequenceConvOp should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Out"),
"Output(Out) of SequenceConvOp should not be null.");
// PaddingData mast be not empty. Otherwise(EnforceNotMet: enforce numel() >
// 0 failed, 0 <= 0)
PADDLE_ENFORCE(ctx->HasInput("PaddingData"),
"Input(PaddingData) of SequenceConvOp should not be null.");

int context_length = ctx->Attrs().Get<int>("context_length");
bool padding_trainable = ctx->Attrs().Get<bool>("padding_trainable");
@@ -48,6 +44,9 @@ class SequenceConvOp : public framework::OperatorWithKernel {
"number_of_input_features).");

if (padding_trainable) {
PADDLE_ENFORCE(
ctx->HasInput("PaddingData"),
"Input(PaddingData) of SequenceConvOp should not be null.");
framework::DDim padding_dim = ctx->GetInputDim("PaddingData");
int up_pad = std::max(0, -context_start);
int down_pad = std::max(0, context_start + context_length - 1);
@@ -106,11 +105,12 @@ class SequenceConvOpMaker : public framework::OpProtoAndCheckerMaker {
"(A float LoDTensor) the input of SequenceConvOp, a vector of "
"2-D matrix of size (minibatch, number_of_input_features).");
AddInput("PaddingData",
"(A float LoDTensor) the input of SequenceConvOp, a vector of "
"(Tensor) the input of SequenceConvOp, a vector of "
"2-D matrix of size (up_pad + down_pad, "
"number_of_input_features). ");
"number_of_input_features). ")
.AsDispensable();
AddInput("Filter",
"(A float LoDTensor) the input of SequenceConvOp, a vector of "
"(Tensor) the input of SequenceConvOp, a vector of "
"2-D matrix of size (context_length x number_of_input_features).");
AddOutput("Out",
"(A float LoDTensor) the output of SequenceConvOp, a vector "
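
For context (not part of the diff): the InferShape hunk above sizes PaddingData from the context window. A minimal Python sketch of that up_pad/down_pad arithmetic; padding_rows is a hypothetical helper for illustration only, not part of the op.

# Sketch of the padding arithmetic used by SequenceConvOp::InferShape above.
def padding_rows(context_start, context_length):
    up_pad = max(0, -context_start)                         # rows needed above the sequence
    down_pad = max(0, context_start + context_length - 1)   # rows needed below the sequence
    return up_pad, down_pad

# A window of length 3 starting at offset -1 needs one padding row on each side,
# so PaddingData would have shape (up_pad + down_pad, number_of_input_features).
assert padding_rows(-1, 3) == (1, 1)
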
27 changes: 13 additions & 14 deletions paddle/operators/sequence_conv_op.h
@@ -36,7 +36,7 @@ class SequenceConvKernel : public framework::OpKernel<T> {
void Compute(const framework::ExecutionContext& context) const override {
auto* in = context.Input<LoDTensor>("X");
auto* out = context.Output<LoDTensor>("Out");
auto filter = *context.Input<LoDTensor>("Filter");
auto filter = *context.Input<Tensor>("Filter");

out->mutable_data<T>(context.GetPlace());
// out->set_lod(in->lod());
@@ -50,9 +50,9 @@ class SequenceConvKernel : public framework::OpKernel<T> {
PADDLE_ENFORCE_EQ(in->lod().size(), 1UL,
"Only support one level sequence now.");

const LoDTensor* padding_data = nullptr;
const Tensor* padding_data = nullptr;
if (padding_trainable) {
padding_data = context.Input<LoDTensor>("PaddingData");
padding_data = context.Input<Tensor>("PaddingData");
}

int up_pad = std::max(0, -context_start);
@@ -63,7 +63,7 @@ class SequenceConvKernel : public framework::OpKernel<T> {
// use col_shape in the im2col calculation
framework::DDim col_shape = {in->dims()[0],
sequence_width * context_length};
LoDTensor col;
Tensor col;
col.mutable_data<T>(col_shape, context.GetPlace());
// Because if padding_trainable is false, padding data should be zeros.
auto temp = framework::EigenVector<T>::Flatten(col);
@@ -73,7 +73,7 @@ class SequenceConvKernel : public framework::OpKernel<T> {
paddle::operators::math::SequenceProjectFunctor<Place, T>
seq_project_functor;
LoDTensor* input = const_cast<LoDTensor*>(in);
LoDTensor* pad_data = const_cast<LoDTensor*>(padding_data);
Tensor* pad_data = const_cast<Tensor*>(padding_data);

seq_project_functor(context.device_context(), *input, *pad_data, col,
padding_trainable, context_start, context_length,
@@ -91,12 +91,11 @@ class SequenceConvGradKernel : public framework::OpKernel<T> {
void Compute(const framework::ExecutionContext& context) const override {
auto* out_g = context.Input<LoDTensor>(framework::GradVarName("Out"));
auto* in_g = context.Output<LoDTensor>(framework::GradVarName("X"));
auto* filter_g =
context.Output<LoDTensor>(framework::GradVarName("Filter"));
auto* filter_g = context.Output<Tensor>(framework::GradVarName("Filter"));
auto* padding_data_g =
context.Output<LoDTensor>(framework::GradVarName("PaddingData"));
context.Output<Tensor>(framework::GradVarName("PaddingData"));
auto* in = context.Input<LoDTensor>("X");
auto* filter = context.Input<LoDTensor>("Filter");
auto* filter = context.Input<Tensor>("Filter");

int context_start = context.Attr<int>("context_start");
int context_length = context.Attr<int>("context_length");
@@ -115,7 +114,7 @@ class SequenceConvGradKernel : public framework::OpKernel<T> {
// use col_shape in the im2col calculation
framework::DDim col_shape = {in->dims()[0],
sequence_width * context_length};
LoDTensor col;
Tensor col;

if (in_g || filter_g || (padding_trainable && padding_data_g)) {
col.mutable_data<T>(col_shape, context.GetPlace());
@@ -161,17 +160,17 @@ class SequenceConvGradKernel : public framework::OpKernel<T> {
functor(context.device_context(), filter_g, 0);

Tensor filter_grad_ = *filter_g;
Tensor out_grad_ = *out_g;
LoDTensor out_grad_ = *out_g;

const LoDTensor* padding_data = nullptr;
const Tensor* padding_data = nullptr;
if (padding_trainable) {
padding_data = context.Input<LoDTensor>("PaddingData");
padding_data = context.Input<Tensor>("PaddingData");
}

sequence_width = static_cast<int>(in->dims()[1]);

LoDTensor* input = const_cast<LoDTensor*>(in);
LoDTensor* pad_data = const_cast<LoDTensor*>(padding_data);
Tensor* pad_data = const_cast<Tensor*>(padding_data);

seq_project_functor(context.device_context(), *input, *pad_data, col,
padding_trainable, context_start, context_length,
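
Between the kernels above and the Python test below, a rough numpy sketch (not part of the diff) of the projection that fills the col buffer, assuming a single sequence and zero padding (padding_trainable=False); it is an illustration, not the library's SequenceProjectFunctor.

import numpy as np

def seq_project(x, context_start, context_length):
    T, D = x.shape
    col = np.zeros((T, context_length * D), dtype=x.dtype)
    for i in range(T):                       # each timestep gets one row of col
        for j in range(context_length):      # gather the j-th row of its context window
            src = i + context_start + j
            if 0 <= src < T:                 # rows outside the sequence stay zero
                col[i, j * D:(j + 1) * D] = x[src]
    return col

# The kernel then multiplies col by the filter flattened to (context_length * D,),
# which is essentially what compute() in the updated test below does for the
# no-padding case.
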
128 changes: 34 additions & 94 deletions python/paddle/v2/framework/tests/test_seq_conv.py
@@ -20,24 +20,29 @@ def setUp(self):
# one level, batch size
x = np.random.uniform(0.1, 1, [self.input_size[0],
self.input_size[1]]).astype('float32')

self.begin_pad = np.max([0, -self.context_start])
self.end_pad = np.max([0, self.context_start + self.context_length - 1])
self.total_pad = self.begin_pad + self.end_pad
if self.total_pad == 0:
self.total_pad = 1

# PaddingData mast be not empty.
# Otherwise(EnforceNotMet: enforce numel() > 0 failed, 0 <= 0)
padding_data = np.random.uniform(
0.1, 1, [self.total_pad, self.input_size[1]]).astype('float32')
w = np.random.uniform(
0.1, 1, [self.context_length, self.input_size[1]]).astype('float32')

begin_pad = np.max([0, -self.context_start])
end_pad = np.max([0, self.context_start + self.context_length - 1])
total_pad = begin_pad + end_pad
padding_data = np.random.uniform(
0.1, 1, [total_pad, self.input_size[1]]).astype('float32')
self.pad_data = padding_data
self.inputs = {
'X': (x, self.lod),
'PaddingData': (padding_data, [[0, self.total_pad]]),
'Filter': (w, [[0, self.context_length]])
'Filter': w,
}
self.inputs_val = ['X', 'Filter']
self.inputs_val_no_x = ['Filter']
self.inputs_val_no_f = ['X']

if total_pad != 0:
self.inputs['PaddingData'] = padding_data
self.inputs_val = ['X', 'PaddingData', 'Filter']
self.inputs_val_no_x = ['PaddingData', 'Filter']
self.inputs_val_no_f = ['PaddingData', 'X']

self.attrs = {
'context_start': self.context_start,
'context_length': self.context_length,
Expand All @@ -51,7 +56,7 @@ def setUp(self):
def compute(self):
x, lod = self.inputs['X']
filter = self.inputs['Filter']
pading_data, _ = self.inputs['PaddingData']
pading_data = self.pad_data
out = np.zeros((self.input_size[0], self.context_length *
self.input_size[1])).astype('float32')
lod = lod[0]
@@ -90,12 +95,12 @@ def compute(self):
out[out_begin:out_end, j * self.input_size[1]:(j + 1) *
self.input_size[1]] += in_sub

filter_dim = filter[0].shape
filter_dim = filter.shape
output_dim = self.outputs['Out'].shape
filter[0].shape = filter_dim[0] * filter_dim[1]
filter.shape = filter_dim[0] * filter_dim[1]
self.outputs['Out'].shape = (output_dim[0], )
np.dot(out, filter[0], out=self.outputs['Out'])
filter[0].shape = filter_dim
np.dot(out, filter, out=self.outputs['Out'])
filter.shape = filter_dim
self.outputs['Out'].shape = output_dim

def test_check_output(self):
@@ -104,16 +109,14 @@ def test_check_output(self):
def test_check_grad(self):
if self.padding_trainable:
self.check_grad(
set(['X', 'PaddingData', 'Filter']),
'Out',
max_relative_error=0.05)
set(self.inputs_val), 'Out', max_relative_error=0.05)

def test_check_grad_input(self):
self.check_grad(
['X'],
'Out',
max_relative_error=0.05,
no_grad_set=set(['PaddingData', 'Filter']))
no_grad_set=set(self.inputs_val_no_x))

def test_check_grad_padding_data(self):
if self.padding_trainable:
Expand All @@ -128,27 +131,28 @@ def test_check_grad_Filter(self):
['Filter'],
'Out',
max_relative_error=0.05,
no_grad_set=set(['X', 'PaddingData']))
no_grad_set=set(self.inputs_val_no_f))

def test_check_grad_input_filter(self):
self.check_grad(
['X', 'Filter'],
'Out',
max_relative_error=0.05,
no_grad_set=set(['PaddingData']))
if self.padding_trainable:
self.check_grad(
['X', 'Filter'],
'Out',
max_relative_error=0.05,
no_grad_set=set(['PaddingData']))

def test_check_grad_padding_input(self):
if self.padding_trainable:
self.check_grad(
['X', 'PaddingData'],
self.inputs_val_no_f,
'Out',
max_relative_error=0.05,
no_grad_set=set(['Filter']))

def test_check_grad_padding_filter(self):
if self.padding_trainable:
self.check_grad(
['PaddingData', 'Filter'],
self.inputs_val_no_x,
'Out',
max_relative_error=0.05,
no_grad_set=set(['X']))
@@ -191,69 +195,5 @@ def init_test_case(self):
[self.input_size[0]]]


'''
class TestSeqProjectCases(TestSeqProject):
def setUp(self):
self.init_test_case()
self.op_type = 'sequence_project'
num = 0
for context_start in [-5, -3, -1, 0, 3]:
for context_length in [1, 2, 5, 7]:
for batch_size in [1, 2, 5, 7]:
for padding_trainable in [False, True]:
if context_length == 1 and context_start == 0 and padding_trainable:
continue
self.context_start = context_start
self.context_length = context_length
self.padding_trainable = padding_trainable
self.input_size = [batch_size, 23]
x = np.random.uniform(0.1, 1,
self.input_size).astype('float32')
self.lod = [[0, self.input_size[0]]]
if self.input_size[0] > 2:
idx = range(self.input_size[0])
del idx[0]
self.lod = [
[0] + np.sort(random.sample(idx, 2)).tolist() +
[self.input_size[0]]
]
self.begin_pad = np.max([0, -self.context_start])
self.end_pad = np.max([0, self.context_start + self.context_length - 1])
self.total_pad = self.begin_pad + self.end_pad
if self.total_pad == 0:
self.total_pad = 1
# PaddingData mast be not empty. Otherwise(EnforceNotMet: enforce numel() > 0 failed, 0 <= 0)
padding_data = np.random.uniform(
0.1, 1, [self.total_pad, self.input_size[1]]).astype('float32')
self.inputs = {
'X': (x, self.lod),
'PaddingData': (padding_data, [[0, self.total_pad]])
}
self.attrs = {
'context_start': self.context_start,
'context_length': self.context_length,
'padding_trainable': self.padding_trainable,
'context_stride': self.context_stride
}
out = np.zeros((self.input_size[0], self.input_size[1] *
self.context_length)).astype('float32')
self.outputs = {'Out': out}
print num
print self.attrs
print batch_size
print padding_trainable
print "$$$$$$$$$$$$$"
self.compute()
self.test_check_output()
num += 1
'''

if __name__ == '__main__':
unittest.main()
