Fix CPPLint issues in some tests in fluid/framework (#10068)
* Fix CPPLint in data_device_transform_test

* Fix compilation error

* Fix compilation error

* Fix CPPLint errors in data_layout_transform_test

* Fix CPPLint errors in data_type_transform_test

* Fix CPPLint errors in data_type_transform_test.cu

* Fix compilation error

* Fix CPPLint issues in threadpool_test

* Fix CPPLint issues in op_registry_test

* Fix CPPLint issues in operator_test

* Fix compilation error

* test
abhinavarora authored Apr 21, 2018
1 parent 12ae354 commit 6402b59
Showing 7 changed files with 240 additions and 174 deletions.
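
Every diff below applies the same fix: cpplint's build/namespaces rule ("Do not use namespace using-directives.") rejects using namespace, so the tests drop those directives and spell out fully qualified names such as paddle::framework::Tensor at each use. A minimal before/after sketch of the pattern, using a hypothetical demo namespace rather than any Paddle code:

// Before: cpplint reports [build/namespaces] on the using-directive.
//
//   using namespace demo;
//   Init(true);  // the reader cannot tell which namespace Init came from
//
// After: every call site names its namespace explicitly.
namespace demo {
void Init(bool verbose) { (void)verbose; }
}  // namespace demo

int main() {
  demo::Init(true);  // mirrors paddle::framework::InitDevices(true) below
  return 0;
}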
19 changes: 10 additions & 9 deletions paddle/fluid/framework/data_device_transform_test.cu

@@ -103,9 +103,7 @@ static void BuildVar(const std::string& param_name,
 }

 TEST(Operator, CPUtoGPU) {
-  using namespace paddle::framework;
-  using namespace paddle::platform;
-  InitDevices(true);
+  paddle::framework::InitDevices(true);

   paddle::framework::Scope scope;
   paddle::platform::CPUPlace cpu_place;
@@ -118,8 +116,9 @@ TEST(Operator, CPUtoGPU) {

   auto cpu_op = paddle::framework::OpRegistry::CreateOp(cpu_op_desc);
   // prepare input
-  auto* in_t = scope.Var("IN1")->GetMutable<LoDTensor>();
-  auto* src_ptr = in_t->mutable_data<float>({2, 3}, CPUPlace());
+  auto* in_t = scope.Var("IN1")->GetMutable<paddle::framework::LoDTensor>();
+  auto* src_ptr =
+      in_t->mutable_data<float>({2, 3}, paddle::platform::CPUPlace());
   for (int i = 0; i < 2 * 3; ++i) {
     src_ptr[i] = static_cast<float>(i);
   }
@@ -128,7 +127,7 @@ TEST(Operator, CPUtoGPU) {
   auto* output = scope.Var("OUT1");
   cpu_op->Run(scope, cpu_place);

-  auto* output_ptr = output->Get<LoDTensor>().data<float>();
+  auto* output_ptr = output->Get<paddle::framework::LoDTensor>().data<float>();
   for (int i = 0; i < 2 * 3; ++i) {
     ASSERT_EQ(output_ptr[i], static_cast<float>(i) * 2);
   }
@@ -153,12 +152,14 @@ TEST(Operator, CPUtoGPU) {
   VLOG(3) << "after gpu_op run";

   // auto* output2_ptr = output2->Get<LoDTensor>().data<float>();
-  DeviceContextPool& pool = DeviceContextPool::Instance();
+  paddle::platform::DeviceContextPool& pool =
+      paddle::platform::DeviceContextPool::Instance();
   auto dev_ctx = pool.Get(cuda_place);

   paddle::framework::Tensor output_tensor;
-  TensorCopy(output2->Get<LoDTensor>(), paddle::platform::CPUPlace(), *dev_ctx,
-             &output_tensor);
+  paddle::framework::TensorCopy(output2->Get<paddle::framework::LoDTensor>(),
+                                paddle::platform::CPUPlace(), *dev_ctx,
+                                &output_tensor);

   dev_ctx->Wait();
   float* output2_ptr = output_tensor.data<float>();
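
A note on the GPU half of this test: TensorCopy issued through a device context is asynchronous on CUDA places, which is presumably why the test calls dev_ctx->Wait() before reading output2_ptr. The sketch below shows the same synchronize-before-read discipline against the raw CUDA runtime API instead of Paddle's DeviceContext; the buffer sizes and names are illustrative only.

#include <cuda_runtime.h>

#include <cassert>
#include <vector>

int main() {
  std::vector<float> host_src(6, 2.0f);
  std::vector<float> host_dst(6, 0.0f);
  float* dev = nullptr;
  cudaMalloc(&dev, 6 * sizeof(float));
  cudaStream_t stream;
  cudaStreamCreate(&stream);

  // Both copies are queued on the stream and may not have completed
  // when these calls return.
  cudaMemcpyAsync(dev, host_src.data(), 6 * sizeof(float),
                  cudaMemcpyHostToDevice, stream);
  cudaMemcpyAsync(host_dst.data(), dev, 6 * sizeof(float),
                  cudaMemcpyDeviceToHost, stream);

  // Without this synchronization, host_dst could still hold stale zeros,
  // just as output2_ptr would be unsafe to read before dev_ctx->Wait().
  cudaStreamSynchronize(stream);
  assert(host_dst[0] == 2.0f);

  cudaStreamDestroy(stream);
  cudaFree(dev);
  return 0;
}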
41 changes: 21 additions & 20 deletions paddle/fluid/framework/data_layout_transform_test.cc

@@ -18,27 +18,28 @@
 #include "paddle/fluid/platform/device_context.h"

 TEST(DataTransform, DataLayoutFunction) {
-  using namespace paddle::framework;
-  using namespace paddle::platform;
-
-  auto place = CPUPlace();
-  Tensor in = Tensor();
-  Tensor out = Tensor();
-  in.mutable_data<double>(make_ddim({2, 3, 1, 2}), place);
-  in.set_layout(DataLayout::kNHWC);
-
-  auto kernel_nhwc = OpKernelType(proto::VarType::FP32, place,
-                                  DataLayout::kNHWC, LibraryType::kPlain);
-  auto kernel_ncwh = OpKernelType(proto::VarType::FP32, place,
-                                  DataLayout::kNCHW, LibraryType::kPlain);
-
-  TransDataLayout(kernel_nhwc, kernel_ncwh, in, &out);
-
-  EXPECT_TRUE(out.layout() == DataLayout::kNCHW);
-  EXPECT_TRUE(out.dims() == make_ddim({2, 2, 3, 1}));
+  auto place = paddle::platform::CPUPlace();
+  paddle::framework::Tensor in = paddle::framework::Tensor();
+  paddle::framework::Tensor out = paddle::framework::Tensor();
+  in.mutable_data<double>(paddle::framework::make_ddim({2, 3, 1, 2}), place);
+  in.set_layout(paddle::framework::DataLayout::kNHWC);
+
+  auto kernel_nhwc = paddle::framework::OpKernelType(
+      paddle::framework::proto::VarType::FP32, place,
+      paddle::framework::DataLayout::kNHWC,
+      paddle::framework::LibraryType::kPlain);
+  auto kernel_ncwh = paddle::framework::OpKernelType(
+      paddle::framework::proto::VarType::FP32, place,
+      paddle::framework::DataLayout::kNCHW,
+      paddle::framework::LibraryType::kPlain);
+
+  paddle::framework::TransDataLayout(kernel_nhwc, kernel_ncwh, in, &out);
+
+  EXPECT_TRUE(out.layout() == paddle::framework::DataLayout::kNCHW);
+  EXPECT_TRUE(out.dims() == paddle::framework::make_ddim({2, 2, 3, 1}));

   TransDataLayout(kernel_ncwh, kernel_nhwc, in, &out);

-  EXPECT_TRUE(in.layout() == DataLayout::kNHWC);
-  EXPECT_TRUE(in.dims() == make_ddim({2, 3, 1, 2}));
+  EXPECT_TRUE(in.layout() == paddle::framework::DataLayout::kNHWC);
+  EXPECT_TRUE(in.dims() == paddle::framework::make_ddim({2, 3, 1, 2}));
 }
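
One call above, TransDataLayout(kernel_ncwh, kernel_nhwc, in, &out), is left unqualified even though the using-directives are gone. It still compiles because of argument-dependent lookup: the arguments are types declared in paddle::framework, so that namespace is searched for the function automatically. A self-contained sketch of the mechanism; the pf namespace is a hypothetical stand-in, not Paddle code.

#include <iostream>

namespace pf {
struct Tensor {};
void TransDataLayout(const Tensor&, Tensor*) {
  std::cout << "resolved via argument-dependent lookup\n";
}
}  // namespace pf

int main() {
  pf::Tensor in;
  pf::Tensor out;
  // No using-directive and no pf:: qualifier, yet this compiles:
  // unqualified lookup also searches the namespaces of the argument
  // types (here pf) and finds pf::TransDataLayout.
  TransDataLayout(in, &out);
  return 0;
}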
126 changes: 76 additions & 50 deletions paddle/fluid/framework/data_type_transform_test.cc

@@ -17,43 +17,58 @@ limitations under the License. */
 #include "gtest/gtest.h"

 TEST(DataTypeTransform, CPUTransform) {
-  using namespace paddle::framework;
-  using namespace paddle::platform;
-
-  auto place = CPUPlace();
-
-  auto kernel_fp16 = OpKernelType(proto::VarType::FP16, place,
-                                  DataLayout::kAnyLayout, LibraryType::kPlain);
-  auto kernel_fp32 = OpKernelType(proto::VarType::FP32, place,
-                                  DataLayout::kAnyLayout, LibraryType::kPlain);
-  auto kernel_fp64 = OpKernelType(proto::VarType::FP64, place,
-                                  DataLayout::kAnyLayout, LibraryType::kPlain);
-  auto kernel_int32 = OpKernelType(proto::VarType::INT32, place,
-                                   DataLayout::kAnyLayout, LibraryType::kPlain);
-  auto kernel_int64 = OpKernelType(proto::VarType::INT64, place,
-                                   DataLayout::kAnyLayout, LibraryType::kPlain);
-  auto kernel_bool = OpKernelType(proto::VarType::BOOL, place,
-                                  DataLayout::kAnyLayout, LibraryType::kPlain);
+  auto place = paddle::platform::CPUPlace();
+
+  auto kernel_fp16 = paddle::framework::OpKernelType(
+      paddle::framework::proto::VarType::FP16, place,
+      paddle::framework::DataLayout::kAnyLayout,
+      paddle::framework::LibraryType::kPlain);
+
+  auto kernel_fp32 = paddle::framework::OpKernelType(
+      paddle::framework::proto::VarType::FP32, place,
+      paddle::framework::DataLayout::kAnyLayout,
+      paddle::framework::LibraryType::kPlain);
+
+  auto kernel_fp64 = paddle::framework::OpKernelType(
+      paddle::framework::proto::VarType::FP64, place,
+      paddle::framework::DataLayout::kAnyLayout,
+      paddle::framework::LibraryType::kPlain);
+
+  auto kernel_int32 = paddle::framework::OpKernelType(
+      paddle::framework::proto::VarType::INT32, place,
+      paddle::framework::DataLayout::kAnyLayout,
+      paddle::framework::LibraryType::kPlain);
+
+  auto kernel_int64 = paddle::framework::OpKernelType(
+      paddle::framework::proto::VarType::INT64, place,
+      paddle::framework::DataLayout::kAnyLayout,
+      paddle::framework::LibraryType::kPlain);
+
+  auto kernel_bool = paddle::framework::OpKernelType(
+      paddle::framework::proto::VarType::BOOL, place,
+      paddle::framework::DataLayout::kAnyLayout,
+      paddle::framework::LibraryType::kPlain);

   // data type transform from float32
   {
-    Tensor in;
-    Tensor out;
+    paddle::framework::Tensor in;
+    paddle::framework::Tensor out;

-    float* ptr = in.mutable_data<float>(make_ddim({2, 3}), place);
+    float* ptr =
+        in.mutable_data<float>(paddle::framework::make_ddim({2, 3}), place);
     int data_number = 2 * 3;

     for (int i = 0; i < data_number; ++i) {
       ptr[i] = i / 3;
     }

-    TransDataType(kernel_fp32, kernel_fp64, in, &out);
+    paddle::framework::TransDataType(kernel_fp32, kernel_fp64, in, &out);
     double* out_data_double = out.data<double>();
     for (int i = 0; i < data_number; ++i) {
       EXPECT_EQ(out_data_double[i], static_cast<double>(i / 3));
     }

-    TransDataType(kernel_fp32, kernel_int32, in, &out);
+    paddle::framework::TransDataType(kernel_fp32, kernel_int32, in, &out);
     int* out_data_int = out.data<int>();
     for (int i = 0; i < data_number; ++i) {
       EXPECT_EQ(out_data_int[i], static_cast<int>(i / 3));
@@ -62,105 +77,116 @@ TEST(DataTypeTransform, CPUTransform) {

   // data type transform from/to float16
   {
-    Tensor in;
-    Tensor out;
+    paddle::framework::Tensor in;
+    paddle::framework::Tensor out;

-    float16* ptr = in.mutable_data<float16>(make_ddim({2, 3}), place);
+    paddle::platform::float16* ptr = in.mutable_data<paddle::platform::float16>(
+        paddle::framework::make_ddim({2, 3}), place);
     int data_number = 2 * 3;

     for (int i = 0; i < data_number; ++i) {
       ptr[i] = i;
     }

     // transform from float16 to other data types
-    TransDataType(kernel_fp16, kernel_fp32, in, &out);
+    paddle::framework::TransDataType(kernel_fp16, kernel_fp32, in, &out);
     float* out_data_float = out.data<float>();
     for (int i = 0; i < data_number; ++i) {
       EXPECT_EQ(out_data_float[i], static_cast<float>(ptr[i]));
     }

-    TransDataType(kernel_fp16, kernel_fp64, in, &out);
+    paddle::framework::TransDataType(kernel_fp16, kernel_fp64, in, &out);
     double* out_data_double = out.data<double>();
     for (int i = 0; i < data_number; ++i) {
       EXPECT_EQ(out_data_double[i], static_cast<double>(ptr[i]));
     }

-    TransDataType(kernel_fp16, kernel_int32, in, &out);
+    paddle::framework::TransDataType(kernel_fp16, kernel_int32, in, &out);
     int* out_data_int = out.data<int>();
     for (int i = 0; i < data_number; ++i) {
       EXPECT_EQ(out_data_int[i], static_cast<int>(ptr[i]));
     }

-    TransDataType(kernel_fp16, kernel_int64, in, &out);
+    paddle::framework::TransDataType(kernel_fp16, kernel_int64, in, &out);
     int64_t* out_data_int64 = out.data<int64_t>();
     for (int i = 0; i < data_number; ++i) {
       EXPECT_EQ(out_data_int64[i], static_cast<int64_t>(ptr[i]));
     }

-    TransDataType(kernel_fp16, kernel_bool, in, &out);
+    paddle::framework::TransDataType(kernel_fp16, kernel_bool, in, &out);
     bool* out_data_bool = out.data<bool>();
     for (int i = 0; i < data_number; ++i) {
       EXPECT_EQ(out_data_bool[i], static_cast<bool>(ptr[i]));
     }

     // transform float to float16
-    float* in_data_float = in.mutable_data<float>(make_ddim({2, 3}), place);
+    float* in_data_float =
+        in.mutable_data<float>(paddle::framework::make_ddim({2, 3}), place);
     for (int i = 0; i < data_number; ++i) {
       in_data_float[i] = i;
     }

-    TransDataType(kernel_fp32, kernel_fp16, in, &out);
-    ptr = out.data<float16>();
+    paddle::framework::TransDataType(kernel_fp32, kernel_fp16, in, &out);
+    ptr = out.data<paddle::platform::float16>();
     for (int i = 0; i < data_number; ++i) {
-      EXPECT_EQ(ptr[i].x, static_cast<float16>(in_data_float[i]).x);
+      EXPECT_EQ(ptr[i].x,
+                static_cast<paddle::platform::float16>(in_data_float[i]).x);
     }

     // transform double to float16
-    double* in_data_double = in.mutable_data<double>(make_ddim({2, 3}), place);
+    double* in_data_double =
+        in.mutable_data<double>(paddle::framework::make_ddim({2, 3}), place);
     for (int i = 0; i < data_number; ++i) {
       in_data_double[i] = i;
     }

-    TransDataType(kernel_fp64, kernel_fp16, in, &out);
-    ptr = out.data<float16>();
+    paddle::framework::TransDataType(kernel_fp64, kernel_fp16, in, &out);
+    ptr = out.data<paddle::platform::float16>();
     for (int i = 0; i < data_number; ++i) {
-      EXPECT_EQ(ptr[i].x, static_cast<float16>(in_data_double[i]).x);
+      EXPECT_EQ(ptr[i].x,
+                static_cast<paddle::platform::float16>(in_data_double[i]).x);
     }

     // transform int to float16
-    int* in_data_int = in.mutable_data<int>(make_ddim({2, 3}), place);
+    int* in_data_int =
+        in.mutable_data<int>(paddle::framework::make_ddim({2, 3}), place);
     for (int i = 0; i < data_number; ++i) {
       in_data_int[i] = i;
     }

-    TransDataType(kernel_int32, kernel_fp16, in, &out);
-    ptr = out.data<float16>();
+    paddle::framework::TransDataType(kernel_int32, kernel_fp16, in, &out);
+    ptr = out.data<paddle::platform::float16>();
     for (int i = 0; i < data_number; ++i) {
-      EXPECT_EQ(ptr[i].x, static_cast<float16>(in_data_int[i]).x);
+      EXPECT_EQ(ptr[i].x,
+                static_cast<paddle::platform::float16>(in_data_int[i]).x);
     }

     // transform int64 to float16
-    int64_t* in_data_int64 = in.mutable_data<int64_t>(make_ddim({2, 3}), place);
+    int64_t* in_data_int64 =
+        in.mutable_data<int64_t>(paddle::framework::make_ddim({2, 3}), place);
     for (int i = 0; i < data_number; ++i) {
       in_data_int64[i] = i;
     }

-    TransDataType(kernel_int64, kernel_fp16, in, &out);
-    ptr = out.data<float16>();
+    paddle::framework::TransDataType(kernel_int64, kernel_fp16, in, &out);
+    ptr = out.data<paddle::platform::float16>();
    for (int i = 0; i < data_number; ++i) {
-      EXPECT_EQ(ptr[i].x, static_cast<float16>(in_data_int64[i]).x);
+      EXPECT_EQ(ptr[i].x,
+                static_cast<paddle::platform::float16>(in_data_int64[i]).x);
     }

     // transform bool to float16
-    bool* in_data_bool = in.mutable_data<bool>(make_ddim({2, 3}), place);
+    bool* in_data_bool =
+        in.mutable_data<bool>(paddle::framework::make_ddim({2, 3}), place);
     for (int i = 0; i < data_number; ++i) {
       in_data_bool[i] = i;
     }

-    TransDataType(kernel_bool, kernel_fp16, in, &out);
-    ptr = out.data<float16>();
+    paddle::framework::TransDataType(kernel_bool, kernel_fp16, in, &out);
+    ptr = out.data<paddle::platform::float16>();
     for (int i = 0; i < data_number; ++i) {
-      EXPECT_EQ(ptr[i].x, static_cast<float16>(in_data_bool[i]).x);
+      EXPECT_EQ(ptr[i].x,
+                static_cast<paddle::platform::float16>(in_data_bool[i]).x);
     }
   }
 }
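
A side note on the float16 assertions above: the tests compare the raw bit pattern ptr[i].x rather than the half-precision values themselves, which keeps the check bit-exact and independent of how (or whether) equality is defined for the type. A small sketch of the idea; Half and ToHalf below are hypothetical stand-ins for paddle::platform::float16, handling only small non-negative integers like the 0..5 values the test feeds through the transform.

#include <cassert>
#include <cstdint>

struct Half {
  uint16_t x;  // raw IEEE 754 binary16 bits, like float16::x
};

// Encode a small positive integer as binary16:
// 1 sign bit, 5 exponent bits (bias 15), 10 mantissa bits.
inline Half ToHalf(int v) {
  if (v == 0) return Half{0};
  int exp = 0;
  while ((v >> (exp + 1)) != 0) ++exp;  // floor(log2(v))
  uint16_t mant = static_cast<uint16_t>((v << (10 - exp)) & 0x3FF);
  return Half{static_cast<uint16_t>(((exp + 15) << 10) | mant)};
}

int main() {
  assert(ToHalf(1).x == 0x3C00);  // 1.0 in binary16
  assert(ToHalf(2).x == 0x4000);  // 2.0 in binary16
  // Bit-exact comparison, mirroring
  // EXPECT_EQ(ptr[i].x, static_cast<paddle::platform::float16>(v).x).
  assert(ToHalf(3).x == ToHalf(3).x);
  return 0;
}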
