Merge branch 'develop' of https://github.com/rainyfly/Paddle into update_record_interface_using_part2
rainyfly committed Feb 20, 2022
2 parents d79be47 + c6950ab commit 752ca5b
Showing 885 changed files with 4,358 additions and 4,339 deletions.
11 changes: 7 additions & 4 deletions cmake/pten.cmake
@@ -114,6 +114,9 @@ function(kernel_library TARGET)
if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/gpu/${TARGET}.cu)
list(APPEND gpu_srcs ${CMAKE_CURRENT_SOURCE_DIR}/gpu/${TARGET}.cu)
endif()
+ if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/gpu/${TARGET}.cu.cc)
+   list(APPEND gpu_srcs ${CMAKE_CURRENT_SOURCE_DIR}/gpu/${TARGET}.cu.cc)
+ endif()
endif()
if (WITH_XPU)
if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/xpu/${TARGET}.cc)
@@ -161,7 +164,7 @@ function(kernel_library TARGET)

# Build Target according different src organization
if((${cpu_srcs_len} GREATER 0 OR ${gpu_srcs_len} GREATER 0 OR
-     ${xpu_srcs_len} GREATER 0) AND (${common_srcs_len} GREATER 0 OR
+     ${xpu_srcs_len} GREATER 0) AND (${common_srcs_len} GREATER 0 OR
${selected_rows_srcs_len} GREATER 0))
# If the common_srcs/selected_rows_srcs depends on specific device srcs, build target using this rule.
if (WITH_GPU)
@@ -225,11 +228,11 @@ function(kernel_library TARGET)
cc_library(${TARGET} SRCS ${selected_rows_srcs} DEPS ${kernel_library_DEPS} ${kernel_deps})
endif()
else()
- message(FATAL_ERROR "Cannot find any implementation for ${TARGET}")
+ message(FATAL_ERROR "Cannot find any implementation for ${TARGET}")
endif()

if (${common_srcs_len} GREATER 0 OR ${cpu_srcs_len} GREATER 0 OR
-   ${gpu_srcs_len} GREATER 0 OR ${xpu_srcs_len} GREATER 0 OR
+   ${gpu_srcs_len} GREATER 0 OR ${xpu_srcs_len} GREATER 0 OR
${selected_rows_srcs_len} GREATER 0)
# append target into PTEN_KERNELS property
get_property(pten_kernels GLOBAL PROPERTY PTEN_KERNELS)
@@ -285,7 +288,7 @@ function(append_op_util_declare TARGET)
string(REGEX MATCH "(PT_REGISTER_BASE_KERNEL_NAME|PT_REGISTER_ARG_MAPPING_FN)\\([ \t\r\n]*[a-z0-9_]*" util_registrar "${target_content}")
string(REPLACE "PT_REGISTER_ARG_MAPPING_FN" "PT_DECLARE_ARG_MAPPING_FN" util_declare "${util_registrar}")
string(REPLACE "PT_REGISTER_BASE_KERNEL_NAME" "PT_DECLARE_BASE_KERNEL_NAME" util_declare "${util_declare}")
- string(APPEND util_declare ");")
+ string(APPEND util_declare ");\n")
file(APPEND ${op_utils_header} "${util_declare}")
endfunction()
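
The functional fix in append_op_util_declare is the trailing \n: each PT_REGISTER_* occurrence found in a source file is rewritten into a PT_DECLARE_* line and appended to the shared op_utils header, and without the newline successive declarations were concatenated onto a single line. A minimal sketch of the generated header after this change, with illustrative op names (not taken from this commit):

    // Hypothetical excerpt of the generated op_utils header; each declaration
    // now ends with ");\n", so every entry lands on its own line.
    PT_DECLARE_ARG_MAPPING_FN(scale);
    PT_DECLARE_BASE_KERNEL_NAME(elementwise_add);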

2 changes: 1 addition & 1 deletion paddle/fluid/distributed/common/sparse_sharding_merge.h
@@ -24,11 +24,11 @@
#include "glog/logging.h"
#include "paddle/fluid/distributed/common/utils.h"
#include "paddle/fluid/framework/blocking_queue.h"
#include "paddle/fluid/framework/dim.h"
#include "paddle/fluid/framework/framework.pb.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/string/split.h"
#include "paddle/pten/core/utils/dim.h"

constexpr int FG = 256 * 1024 * 1024;
constexpr int Q_SIZE = 10000;
4 changes: 2 additions & 2 deletions paddle/fluid/distributed/fleet_executor/dist_model.cc
@@ -44,7 +44,7 @@ bool LoadDataFromDistModelTensor(const DistModelTensor &input_data,
framework::LoDTensor *input_tensor,
const platform::Place &place) {
VLOG(3) << "Loading data from DistModelTensor for " << input_data.name;
- framework::DDim dims = framework::make_ddim(input_data.shape);
+ framework::DDim dims = pten::make_ddim(input_data.shape);
void *input_tensor_ptr;
if (input_data.dtype == DistModelDataType::INT64) {
input_tensor_ptr = input_tensor->mutable_data<int64_t>(dims, place);
@@ -518,7 +518,7 @@ bool DistModel::FetchResults(std::vector<DistModelTensor> *output_data,
template <typename T>
bool DistModel::FetchResult(const framework::LoDTensor &fetch,
DistModelTensor *output_data) {
- auto shape = framework::vectorize(fetch.dims());
+ auto shape = pten::vectorize(fetch.dims());
output_data->shape.assign(shape.begin(), shape.end());
const T *data = fetch.data<T>();
int64_t num_elems = fetch.numel();
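
Most of the C++ changes in this commit are this mechanical rename: DDim helpers formerly reached through paddle::framework are now taken from the pten namespace, while framework::DDim itself stays valid, as the hunks above show. A condensed before/after sketch (the shape values here are illustrative):

    // was: framework::make_ddim / framework::vectorize
    framework::DDim dims = pten::make_ddim({2, 3});      // build a DDim from ints
    std::vector<int64_t> shape = pten::vectorize(dims);  // flatten back to a vector
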
@@ -26,15 +26,15 @@ limitations under the License. */
#include "paddle/fluid/framework/program_desc.h"

USE_OP_ITSELF(elementwise_add);
- USE_OP(fill_constant);
+ USE_OP_ITSELF(fill_constant);

namespace paddle {
namespace distributed {

std::vector<framework::OperatorBase*> GetOps() {
framework::AttributeMap attrs;
attrs["dtype"] = framework::proto::VarType::FP32;
attrs["shape"] = framework::vectorize<int>({2, 3});
attrs["shape"] = pten::vectorize<int>({2, 3});
attrs["value"] = 1.0f;

auto zero_op = framework::OpRegistry::CreateOp("fill_constant", {},
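
The macro swap above deserves a note: on my reading (the diff does not spell this out), USE_OP(op) pulls in the operator definition together with its fluid kernel registrations, while USE_OP_ITSELF(op) declares only the operator, which is enough once the kernel has migrated to pten. The attribute map is built as before, just with the renamed helper; the output map in the last line is a hypothetical completion of the CreateOp call that the page truncates:

    framework::AttributeMap attrs;
    attrs["dtype"] = framework::proto::VarType::FP32;
    attrs["shape"] = pten::vectorize<int>({2, 3});  // was framework::vectorize<int>
    attrs["value"] = 1.0f;
    // {{"Out", {"x"}}} is illustrative; the real argument list is cut off above.
    auto zero_op = framework::OpRegistry::CreateOp("fill_constant", {},
                                                   {{"Out", {"x"}}}, attrs);
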
2 changes: 1 addition & 1 deletion paddle/fluid/distributed/ps/service/brpc_ps_client.cc
@@ -1227,7 +1227,7 @@ int32_t BrpcPsClient::recv_and_save_table(const uint64_t table_id,
framework::LoDTensor *var_tensor = var->GetMutable<framework::LoDTensor>();

std::vector<int64_t> vec_dim = {var_num, var_shape};
- var_tensor->Resize(framework::make_ddim(vec_dim));
+ var_tensor->Resize(pten::make_ddim(vec_dim));

// copy and save
float *tensor_data = var_tensor->mutable_data<float>(place);
8 changes: 4 additions & 4 deletions paddle/fluid/distributed/ps/service/brpc_utils.cc
@@ -103,7 +103,7 @@ void SerializeLodTensor(framework::Variable* var,
}
var_msg->set_data_type(static_cast<VarMsg::Type>(
framework::TransToProtoVarType(tensor->dtype())));
- for (auto& dim : framework::vectorize(tensor->dims())) {
+ for (auto& dim : pten::vectorize(tensor->dims())) {
var_msg->add_dims(dim);
}
// IO Buffer
@@ -148,7 +148,7 @@ void SerializeSelectedRows(framework::Variable* var,
memcpy(data_ptr, &((*rows)[0]), rows->size() * sizeof(int64_t));
var_msg->set_data_type(static_cast<VarMsg::Type>(
framework::TransToProtoVarType(tensor->dtype())));
- for (auto& dim : framework::vectorize(tensor->dims())) {
+ for (auto& dim : pten::vectorize(tensor->dims())) {
var_msg->add_dims(dim);
}
// IO Buffer
@@ -224,7 +224,7 @@ void DeserializeLodTensor(framework::Variable* var, const VarMsg& msg,
for (auto& x : msg.dims()) {
vec_dim.push_back(x);
}
- tensor->Resize(framework::make_ddim(vec_dim));
+ tensor->Resize(pten::make_ddim(vec_dim));

framework::LoD lod;
for (int i = 0; i < msg.lod_level(); ++i) {
@@ -278,7 +278,7 @@ void DeserializeSelectedRows(
for (auto& x : msg.dims()) {
vec_dim.push_back(x);
}
- tensor->Resize(framework::make_ddim(vec_dim));
+ tensor->Resize(pten::make_ddim(vec_dim));
void* tensor_data = tensor->mutable_data(
place,
framework::TransToPtenDataType(VarMessageToVarType(msg.data_type())));
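
The four hunks above stay symmetric through the rename: dims are flattened with pten::vectorize when a tensor is serialized into a VarMsg, and rebuilt with pten::make_ddim when it is deserialized. Condensed round trip, based only on the calls shown above:

    // send side: copy tensor dims into the proto message
    for (auto& dim : pten::vectorize(tensor->dims())) {
      var_msg->add_dims(dim);
    }
    // receive side: rebuild the DDim from the wire format
    std::vector<int64_t> vec_dim(msg.dims().begin(), msg.dims().end());
    tensor->Resize(pten::make_ddim(vec_dim));
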
@@ -866,7 +866,7 @@ bool AsyncCommunicator::Check(const std::vector<std::string> &var_tables) {
VLOG(3) << "send step_counter into queue";
auto tmp_var = std::make_shared<Variable>();
auto *tensor = tmp_var->GetMutable<framework::LoDTensor>();
- tensor->Resize(framework::make_ddim({1}));
+ tensor->Resize(pten::make_ddim({1}));
auto *out_d = tensor->mutable_data<int64_t>(platform::CPUPlace());
out_d[0] = 1;
send_varname_to_queue_[table_name]->Push(tmp_var);
12 changes: 6 additions & 6 deletions paddle/fluid/distributed/test/brpc_utils_test.cc
@@ -36,7 +36,7 @@ void CreateVarsOnScope(framework::Scope* scope, platform::Place* place,
// var 1
framework::Variable* var1 = scope->Var("x1");
auto* tensor1 = var1->GetMutable<framework::LoDTensor>();
- tensor1->Resize(framework::make_ddim({512, 8, 4, 2}));
+ tensor1->Resize(pten::make_ddim({512, 8, 4, 2}));
framework::LoD lod1;
lod1.push_back(framework::Vector<size_t>({1, 3, 8}));
tensor1->set_lod(lod1);
@@ -46,7 +46,7 @@ void CreateVarsOnScope(framework::Scope* scope, platform::Place* place,
// var 2
framework::Variable* var2 = scope->Var("x2");
auto* tensor2 = var2->GetMutable<framework::LoDTensor>();
- tensor2->Resize(framework::make_ddim({1000, 64}));
+ tensor2->Resize(pten::make_ddim({1000, 64}));
framework::LoD lod2;
lod2.push_back(framework::Vector<size_t>({1, 1}));
tensor2->set_lod(lod2);
@@ -59,7 +59,7 @@ void CreateVarsOnScope(framework::Scope* scope, platform::Place* place,
slr->set_height(564);
auto* tensor3 = slr->mutable_value();
auto* rows = slr->mutable_rows();
- tensor3->Resize(framework::make_ddim({564, 128}));
+ tensor3->Resize(pten::make_ddim({564, 128}));
tensor3->mutable_data<float>(*place);
pten::funcs::set_constant(ctx, tensor3, 32.7);
for (int i = 0; i < 564; ++i) rows->push_back(i);
@@ -92,7 +92,7 @@ void RunMultiVarMsg(platform::Place place) {
// check var1
framework::Variable* var1 = scope_recv.FindVar("x1");
auto* tensor1 = var1->GetMutable<framework::LoDTensor>();
- EXPECT_EQ(tensor1->dims(), framework::make_ddim({512, 8, 4, 2}));
+ EXPECT_EQ(tensor1->dims(), pten::make_ddim({512, 8, 4, 2}));
// EXPECT_EQ(tensor1->lod(), framework::Vector<size_t>({1, 3, 8}));
auto* tensor_data1 = const_cast<float*>(tensor1->data<float>());
int tensor_numel1 = 512 * 8 * 4 * 2;
@@ -102,7 +102,7 @@ void RunMultiVarMsg(platform::Place place) {
// check var2
framework::Variable* var2 = scope_recv.FindVar("x2");
auto* tensor2 = var2->GetMutable<framework::LoDTensor>();
- EXPECT_EQ(tensor2->dims(), framework::make_ddim({1000, 64}));
+ EXPECT_EQ(tensor2->dims(), pten::make_ddim({1000, 64}));
// EXPECT_EQ(tensor2->lod(), framework::Vector<size_t>({1, 1}));
auto* tensor_data2 = const_cast<int*>(tensor2->data<int>());
int tensor_numel2 = 1000 * 64;
@@ -117,7 +117,7 @@ void RunMultiVarMsg(platform::Place place) {
}

auto* tensor3 = slr->mutable_value();
- EXPECT_EQ(tensor3->dims(), framework::make_ddim({564, 128}));
+ EXPECT_EQ(tensor3->dims(), pten::make_ddim({564, 128}));
auto* tensor_data3 = const_cast<float*>(tensor3->data<float>());
int tensor_numel3 = 564 * 128;
for (int i = 0; i < tensor_numel3; ++i)
@@ -87,8 +87,8 @@ void ScaleAPI(const paddle::experimental::Tensor& x, float scale, float bias,
auto tensor_meta = pten::DenseTensorMeta(
dense_tensor->dtype(), dense_tensor->dims(), dense_tensor->layout());
auto place = dense_tensor->place();
- size_t bytes_size = paddle::framework::product(dense_tensor->dims()) *
-                     SizeOf(dense_tensor->dtype());
+ size_t bytes_size =
+     pten::product(dense_tensor->dims()) * SizeOf(dense_tensor->dtype());
auto dense_out = std::make_shared<pten::DenseTensor>(
pten::make_intrusive<paddle::experimental::SharedStorage>(
paddle::memory::Alloc(place, bytes_size)),
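
pten::product multiplies the entries of a DDim, so bytes_size is element count times element width; a FLOAT32 tensor of shape {2, 3}, for instance, allocates 6 * 4 = 24 bytes. The reflowed statement in isolation:

    // element count times per-element size; SizeOf is the dtype-width helper
    // already used in this file
    size_t bytes_size =
        pten::product(dense_tensor->dims()) * SizeOf(dense_tensor->dtype());
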
4 changes: 2 additions & 2 deletions paddle/fluid/eager/api/utils/tensor_utils.cc
@@ -42,8 +42,8 @@ paddle::experimental::Tensor CreateTensorWithValue(
const pten::DataType& dtype, const pten::DataLayout& layout, float value,
bool is_leaf) {
paddle::experimental::Tensor out = paddle::experimental::full(
-     paddle::framework::vectorize(ddim), paddle::experimental::Scalar(value),
-     dtype, pten::TransToPtenBackend(place));
+     pten::vectorize(ddim), paddle::experimental::Scalar(value), dtype,
+     pten::TransToPtenBackend(place));

auto meta = EagerUtils::autograd_meta(&out);
if (is_leaf) {
@@ -85,7 +85,7 @@ def GeneratePythonCFunction(fwd_api_name, forward_inputs_position_map,
get_eager_tensor_str += f" auto& {name} = GetTensorFromArgs(\"{fwd_api_name}\", \"{name}\", args, {pos}, false);\n"
dygraph_function_call_list[pos] = f"{name}"

parse_attributes_str = " paddle::framework::AttributeMap attrs;\n"
parse_attributes_str = ""
# Get Attributes
for name, atype, _, pos in forward_attrs_list:
parsing_function = FindParsingFunctionFromAttributeType(atype)
@@ -29,8 +29,8 @@ using namespace egr; // NOLINT

TEST(AccumulationNode, Tensor) {
// Construct Eager Tensor
- pten::DenseTensorMeta meta = pten::DenseTensorMeta(
-     pten::DataType::FLOAT16, paddle::framework::make_ddim({1, 1}));
+ pten::DenseTensorMeta meta =
+     pten::DenseTensorMeta(pten::DataType::FLOAT16, pten::make_ddim({1, 1}));
std::shared_ptr<pten::DenseTensor> dt0 = std::make_shared<pten::DenseTensor>(
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())
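
The eager-mode tests below all repeat one construction idiom, now spelled with pten::make_ddim. Condensed from the hunk above into a standalone snippet (the .get() on the temporary allocator is the tests' existing pattern, kept as-is):

    pten::DenseTensorMeta meta(pten::DataType::FLOAT16, pten::make_ddim({1, 1}));
    std::shared_ptr<pten::DenseTensor> dt0 = std::make_shared<pten::DenseTensor>(
        std::make_unique<paddle::experimental::DefaultAllocator>(
            paddle::platform::CPUPlace())
            .get(),
        meta);
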
@@ -39,8 +39,8 @@ TEST(AutogradMeta, MemberFunction) {
VLOG(6) << "Test Grad";
CHECK(tmp_auto->Grad().defined() == false);
auto* grad_t = tmp_auto->MutableGrad();
- pten::DenseTensorMeta meta = pten::DenseTensorMeta(
-     pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 2}));
+ pten::DenseTensorMeta meta =
+     pten::DenseTensorMeta(pten::DataType::FLOAT32, pten::make_ddim({1, 2}));
std::shared_ptr<pten::DenseTensor> dt = std::make_shared<pten::DenseTensor>(
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())
@@ -33,8 +33,8 @@ TEST(Tensor, Constructor) {
CHECK_EQ(et1.defined(), false);
CHECK_EQ(et2.name(), "et2");

- pten::DenseTensorMeta meta = pten::DenseTensorMeta(
-     pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 2}));
+ pten::DenseTensorMeta meta =
+     pten::DenseTensorMeta(pten::DataType::FLOAT32, pten::make_ddim({1, 2}));
std::shared_ptr<pten::DenseTensor> dt = std::make_shared<pten::DenseTensor>(
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())
@@ -63,8 +63,8 @@ TEST(Tensor, Constructor) {

TEST(Tensor, MemberFunction) {
paddle::experimental::Tensor et3;
- pten::DenseTensorMeta meta = pten::DenseTensorMeta(
-     pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 2}));
+ pten::DenseTensorMeta meta =
+     pten::DenseTensorMeta(pten::DataType::FLOAT32, pten::make_ddim({1, 2}));
std::shared_ptr<pten::DenseTensor> dt = std::make_shared<pten::DenseTensor>(
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())
@@ -84,7 +84,7 @@ TEST(Tensor, MemberFunction) {
CHECK_EQ(et3.is_cpu(), true);
CHECK_EQ(et3.is_cuda(), false);
CHECK_EQ(et3.numel(), 2);
- auto expected_dim = paddle::framework::make_ddim({1, 2});
+ auto expected_dim = pten::make_ddim({1, 2});
CHECK_EQ(et3.dims(), expected_dim);
CHECK_EQ(et3.type(), paddle::experimental::DataType::FLOAT32);
CHECK_EQ(et3.layout(), paddle::experimental::DataLayout::NCHW);
@@ -117,8 +117,8 @@ TEST(Tensor, MemberFunction) {

TEST(EagerVariable, Constructor) {
paddle::experimental::Tensor t3;
- pten::DenseTensorMeta meta = pten::DenseTensorMeta(
-     pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 2}));
+ pten::DenseTensorMeta meta =
+     pten::DenseTensorMeta(pten::DataType::FLOAT32, pten::make_ddim({1, 2}));
std::shared_ptr<pten::DenseTensor> dt = std::make_shared<pten::DenseTensor>(
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())
@@ -38,8 +38,8 @@ TEST(GradNodeInfo, GradNodeBase) {
/* val */ 5.0, /* in_num */ 2, /* out_num */ 2);
auto grad_test_node1 = std::make_shared<eager_test::GradTestNode>();
std::vector<std::vector<paddle::experimental::Tensor>> grads;
- pten::DenseTensorMeta meta = pten::DenseTensorMeta(
-     pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 1}));
+ pten::DenseTensorMeta meta =
+     pten::DenseTensorMeta(pten::DataType::FLOAT32, pten::make_ddim({1, 1}));
std::shared_ptr<pten::DenseTensor> dt = std::make_shared<pten::DenseTensor>(
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())
@@ -96,8 +96,8 @@ TEST(GradNodeInfo, GradNodeBase) {
auto gradient_hook = [](
const paddle::experimental::Tensor& et) -> paddle::experimental::Tensor {
paddle::experimental::Tensor res;
- pten::DenseTensorMeta meta = pten::DenseTensorMeta(
-     pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 1}));
+ pten::DenseTensorMeta meta =
+     pten::DenseTensorMeta(pten::DataType::FLOAT32, pten::make_ddim({1, 1}));
std::shared_ptr<pten::DenseTensor> dt = std::make_shared<pten::DenseTensor>(
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())
@@ -35,8 +35,8 @@ class GradTestNode : public egr::GradNodeBase {
override {
val_ = std::dynamic_pointer_cast<pten::DenseTensor>(grads[0][0].impl())
->data<float>()[0];
- pten::DenseTensorMeta meta = pten::DenseTensorMeta(
-     pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 1}));
+ pten::DenseTensorMeta meta =
+     pten::DenseTensorMeta(pten::DataType::FLOAT32, pten::make_ddim({1, 1}));
std::shared_ptr<pten::DenseTensor> dt = std::make_shared<pten::DenseTensor>(
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())
@@ -34,8 +34,8 @@ TEST(GradTensorHolder, Constructor) {
GradTensorHolder grad_tensor_holder2 = GradTensorHolder(grad_tensor_holder);

// Construct Eager Tensor
- pten::DenseTensorMeta meta = pten::DenseTensorMeta(
-     pten::DataType::FLOAT32, paddle::framework::make_ddim({2, 2}));
+ pten::DenseTensorMeta meta =
+     pten::DenseTensorMeta(pten::DataType::FLOAT32, pten::make_ddim({2, 2}));
std::shared_ptr<pten::DenseTensor> dt = std::make_shared<pten::DenseTensor>(
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())
@@ -51,8 +51,8 @@

TEST(GradTensorHolder, Interfaces) {
// Construct Eager Tensor
- pten::DenseTensorMeta meta = pten::DenseTensorMeta(
-     pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 1}));
+ pten::DenseTensorMeta meta =
+     pten::DenseTensorMeta(pten::DataType::FLOAT32, pten::make_ddim({1, 1}));
std::shared_ptr<pten::DenseTensor> dt0 = std::make_shared<pten::DenseTensor>(
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())
@@ -115,8 +115,7 @@ TEST(GradTensorHolder, SelectedRowsMergeAdd) {
auto sr2 = std::make_shared<pten::SelectedRows>(rows, table_size);

// initialize a sparse table 1
- sr1->mutable_value()->Resize(
-     pten::framework::make_ddim({table_size, embedding_width}));
+ sr1->mutable_value()->Resize(pten::make_ddim({table_size, embedding_width}));
auto* data_sr1 = sr1->mutable_value()->mutable_data<float>(cpu);
for (int64_t i = 0; i < table_size; ++i) {
for (int64_t j = 0; j < embedding_width; ++j) {
@@ -125,8 +124,7 @@ TEST(GradTensorHolder, SelectedRowsMergeAdd) {
}

// initialize a sparse table 2
- sr2->mutable_value()->Resize(
-     pten::framework::make_ddim({table_size, embedding_width}));
+ sr2->mutable_value()->Resize(pten::make_ddim({table_size, embedding_width}));
auto* data_sr2 = sr2->mutable_value()->mutable_data<float>(cpu);
for (int64_t i = 0; i < table_size; ++i) {
for (int64_t j = 0; j < embedding_width; ++j) {
@@ -22,8 +22,8 @@
TEST(TensorWrapper, Basic) {
VLOG(6) << "Test Full reserved";
paddle::experimental::Tensor et1;
- pten::DenseTensorMeta meta = pten::DenseTensorMeta(
-     pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 2}));
+ pten::DenseTensorMeta meta =
+     pten::DenseTensorMeta(pten::DataType::FLOAT32, pten::make_ddim({1, 2}));
std::shared_ptr<pten::DenseTensor> dt = std::make_shared<pten::DenseTensor>(
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())
@@ -49,8 +49,8 @@ TEST(TensorWrapper, Basic) {
egr::EagerUtils::OutRankInfo(et1).second);
VLOG(6) << "Test reconstruct";
paddle::experimental::Tensor et2;
- pten::DenseTensorMeta meta2 = pten::DenseTensorMeta(
-     pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 2}));
+ pten::DenseTensorMeta meta2 =
+     pten::DenseTensorMeta(pten::DataType::FLOAT32, pten::make_ddim({1, 2}));
std::shared_ptr<pten::DenseTensor> dt2 = std::make_shared<pten::DenseTensor>(
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())