Skip to content

Commit

Permalink
Refactored eager legacy namespace (PaddlePaddle#37659)
Browse files Browse the repository at this point in the history
  • Loading branch information
jim19930609 authored and Zjq9409 committed Dec 10, 2021
1 parent 00428ec commit 9ac9a4e
Show file tree
Hide file tree
Showing 14 changed files with 61 additions and 27 deletions.
8 changes: 4 additions & 4 deletions paddle/fluid/eager/auto_code_generator/eager_generator.cc
Original file line number Diff line number Diff line change
Expand Up @@ -779,7 +779,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
,ConstructDuplicableOutput(Out1Num)} };
// According to op_proto->attrs()
egr::RunOp("op_type", ins, outs, attr_map,
egr::legacy::RunOp("op_type", ins, outs, attr_map,
Controller.Instance().GetExpectedPlace(), {});
// According to fwd_outputs_names
Expand Down Expand Up @@ -894,7 +894,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
const char* FWD_TRACE_OP_TEMPLATE =
" paddle::framework::AttributeMap attrs = attr_map;\n"
" paddle::framework::AttributeMap default_attrs;\n"
" egr::RunOp(\"%s\", ins, outs, attrs, \n"
" egr::legacy::RunOp(\"%s\", ins, outs, attrs, \n"
" egr::Controller::Instance().GetExpectedPlace(),\n"
" &default_attrs, true, {});\n";
std::string trace_op_str =
Expand Down Expand Up @@ -1052,7 +1052,7 @@ static std::string GenerateGradNodeCCContents(
// Visit each OpBase
for(auto iter = "grad_node->begin()"; iter < "grad_node->end()"; iter++) {
// Simply pass entire attribute map to kernels
egr::RunOp("iter->Type()", ins, outs, this->attr_map_,
egr::legacy::RunOp("iter->Type()", ins, outs, this->attr_map_,
egr::Controller::Instance().ExpectedPlace(), false, {});
}
Expand Down Expand Up @@ -1180,7 +1180,7 @@ static std::string GenerateGradNodeCCContents(
" // Pass the entire attribute map to TraceOp\n"
" // The underlying kernel will pickup whatever attribute they need "
"at runtime\n"
" egr::RunOp(\"%s\", ins, outs, this->attr_map_,\n"
" egr::legacy::RunOp(\"%s\", ins, outs, this->attr_map_,\n"
" egr::Controller::Instance().GetExpectedPlace(),\n"
" &this->default_attr_map_, false, {});\n";
trace_opbase_str = paddle::string::Sprintf(TRACE_OP_TEMPLATE, op_base_type);
Expand Down
22 changes: 13 additions & 9 deletions paddle/fluid/eager/legacy/amp_auto_cast.cc
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
#include "paddle/fluid/framework/operator.h"

namespace egr {
namespace legacy {

AmpOperators::AmpOperators()
: allow_ops_(new std::unordered_set<std::string>()),
Expand Down Expand Up @@ -85,12 +86,12 @@ std::ostream& operator<<(std::ostream& os, AmpOperators& ops) {
inline std::string GetDtypeStr(
const std::shared_ptr<egr::EagerTensor>& tensor) {
return paddle::framework::DataTypeToString(
egr::GetDtypeFromVar(tensor->Var()));
egr::legacy::GetDtypeFromVar(tensor->Var()));
}

inline bool NeedCast(const std::shared_ptr<egr::EagerTensor>& tensor) {
auto place = egr::GetPlaceFromVar(tensor->Var());
auto data_type = egr::GetDtypeFromVar(tensor->Var());
auto place = egr::legacy::GetPlaceFromVar(tensor->Var());
auto data_type = egr::legacy::GetDtypeFromVar(tensor->Var());
if (paddle::platform::is_gpu_place(place) ||
paddle::platform::is_cuda_pinned_place(place) ||
paddle::platform::is_xpu_place(place)) {
Expand All @@ -109,7 +110,7 @@ static inline std::shared_ptr<egr::EagerTensor> CastToType(
const std::shared_ptr<egr::EagerTensor>& tensor,
const paddle::framework::proto::VarType::Type dst_type) {
NameTensorMap ins = {{"X", {tensor}}};
auto in_data_type = egr::GetDtypeFromVar(tensor->Var());
auto in_data_type = egr::legacy::GetDtypeFromVar(tensor->Var());
paddle::framework::AttributeMap attrs = {{"in_dtype", in_data_type},
{"out_dtype", dst_type}};
auto out = std::shared_ptr<egr::EagerTensor>(new egr::EagerTensor());
Expand All @@ -127,7 +128,8 @@ static inline std::shared_ptr<egr::EagerTensor> CastToType(
static inline std::shared_ptr<egr::EagerTensor> CastToFP16(
const std::shared_ptr<egr::EagerTensor>& tensor) {
auto dst_type = paddle::framework::proto::VarType::FP16;
if (NeedCast(tensor) && (egr::GetDtypeFromVar(tensor->Var()) != dst_type)) {
if (NeedCast(tensor) &&
(egr::legacy::GetDtypeFromVar(tensor->Var()) != dst_type)) {
return CastToType(tensor, dst_type);
}
return tensor;
Expand All @@ -136,7 +138,8 @@ static inline std::shared_ptr<egr::EagerTensor> CastToFP16(
static inline std::shared_ptr<egr::EagerTensor> CastToFP32(
const std::shared_ptr<egr::EagerTensor>& tensor) {
auto dst_type = paddle::framework::proto::VarType::FP32;
if (NeedCast(tensor) && (egr::GetDtypeFromVar(tensor->Var()) != dst_type)) {
if (NeedCast(tensor) &&
(egr::legacy::GetDtypeFromVar(tensor->Var()) != dst_type)) {
return CastToType(tensor, dst_type);
}
return tensor;
Expand All @@ -147,9 +150,9 @@ static inline paddle::framework::proto::VarType::Type GetPromoteType(
auto dst_type = paddle::framework::proto::VarType::FP16;
for (const auto& pair : ins) {
for (const auto& tensor : pair.second) {
if (egr::GetDtypeFromVar(tensor->Var()) ==
if (egr::legacy::GetDtypeFromVar(tensor->Var()) ==
paddle::framework::proto::VarType::FP32) {
dst_type = egr::GetDtypeFromVar(tensor->Var());
dst_type = egr::legacy::GetDtypeFromVar(tensor->Var());
break;
}
}
Expand All @@ -160,7 +163,7 @@ static inline paddle::framework::proto::VarType::Type GetPromoteType(
if (op_type == "moving_average_abs_max_scale") {
for (const auto& pair : ins) {
if (pair.first == "X" &&
egr::GetDtypeFromVar(pair.second.front()->Var()) ==
egr::legacy::GetDtypeFromVar(pair.second.front()->Var()) ==
paddle::framework::proto::VarType::FP16) {
dst_type = paddle::framework::proto::VarType::FP16;
}
Expand Down Expand Up @@ -255,4 +258,5 @@ NameTensorMap CastPureFp16Inputs(const std::string& op_type,
return new_ins;
}

} // namespace legacy
} // namespace egr
2 changes: 2 additions & 0 deletions paddle/fluid/eager/legacy/amp_auto_cast.h
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
#include "paddle/fluid/eager/legacy/type_def.h"

namespace egr {
namespace legacy {

// NOTE(zhiqiu): only O1 and O2 are valid now
enum class AmpLevel {
Expand Down Expand Up @@ -92,4 +93,5 @@ NameTensorMap AutoCastInputs(const std::string& op_type,
NameTensorMap CastPureFp16Inputs(const std::string& op_type,
const NameTensorMap& ins);

} // namespace legacy
} // namespace egr
2 changes: 2 additions & 0 deletions paddle/fluid/eager/legacy/execution_context.h
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
#include "paddle/fluid/framework/type_defs.h"
#include "paddle/fluid/framework/variable.h"
namespace egr {
namespace legacy {

class EagerExecutionContext : public paddle::framework::ExecutionContext {
using Variable = paddle::framework::Variable;
Expand Down Expand Up @@ -209,4 +210,5 @@ class EagerExecutionContext : public paddle::framework::ExecutionContext {
const paddle::framework::AttributeMap& default_attrs_;
};

} // namespace legacy
} // namespace egr
2 changes: 2 additions & 0 deletions paddle/fluid/eager/legacy/infer_shape_context.h
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
#include "paddle/fluid/framework/type_defs.h"
#include "paddle/fluid/framework/var_type.h"
namespace egr {
namespace legacy {

class EagerInferShapeContext : public paddle::framework::InferShapeContext {
using DDim = paddle::framework::DDim;
Expand Down Expand Up @@ -401,4 +402,5 @@ class EagerInferShapeContext : public paddle::framework::InferShapeContext {
const std::string op_type_;
};

} // namespace legacy
} // namespace egr
2 changes: 2 additions & 0 deletions paddle/fluid/eager/legacy/infer_var_type_context.h
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@
#include "paddle/pten/include/core.h"

namespace egr {
namespace legacy {

// infer var type context for imperative mode
class TensorRuntimeInferVarTypeContext
Expand Down Expand Up @@ -255,4 +256,5 @@ class TensorRuntimeInferVarTypeContext
const paddle::framework::AttributeMap& default_attrs_;
};

} // namespace legacy
} // namespace egr
13 changes: 8 additions & 5 deletions paddle/fluid/eager/legacy/op_runner.cc
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ DECLARE_string(tracer_mkldnn_ops_on);
DECLARE_string(tracer_mkldnn_ops_off);

namespace egr {
namespace legacy {

void OpRunImpl(const paddle::framework::OperatorBase& op,
const NameTensorMap& ins, const NameTensorMap& outs,
Expand All @@ -43,8 +44,8 @@ void OpRunImpl(const paddle::framework::OperatorBase& op,
"Only support operator with kernel in Dygraph mode."));
auto& info = op.Info();
if (info.infer_var_type_) {
egr::TensorRuntimeInferVarTypeContext infer_var_type_ctx(ins, outs, attrs,
default_attrs);
egr::legacy::TensorRuntimeInferVarTypeContext infer_var_type_ctx(
ins, outs, attrs, default_attrs);
info.infer_var_type_(&infer_var_type_ctx);
}

Expand Down Expand Up @@ -76,10 +77,10 @@ void OpRunImpl(const paddle::framework::OperatorBase& op,
* after the execution of op, but the original input is directly
 * overwritten in the previous dynamic graph implementation.
*/
auto prepared_op = egr::PreparedOp::Prepare(ins, outs, *op_kernel, place,
attrs, default_attrs);
auto prepared_op = egr::legacy::PreparedOp::Prepare(
ins, outs, *op_kernel, place, attrs, default_attrs);
auto tmp_ins_ptr =
egr::PrepareData(*op_kernel, ins, prepared_op.kernel_type());
egr::legacy::PrepareData(*op_kernel, ins, prepared_op.kernel_type());
if (tmp_ins_ptr == nullptr) {
prepared_op.Run(ins, outs, attrs, default_attrs);
} else {
Expand Down Expand Up @@ -188,4 +189,6 @@ void RunOp(const std::string& type, const NameTensorMap& ins,
// program_desc_tracer_->InsertOp(type, new_ins, outs, attrs);
// }
}

} // namespace legacy
} // namespace egr
6 changes: 4 additions & 2 deletions paddle/fluid/eager/legacy/op_runner.h
Original file line number Diff line number Diff line change
Expand Up @@ -14,16 +14,18 @@

#pragma once
#include "paddle/fluid/eager/legacy/type_def.h"
// TODO(Jiabin): We should not depend on this header; remove it later
#include "paddle/fluid/imperative/jit/program_desc_tracer.h"
#include "paddle/pten/core/tensor_meta.h"

namespace egr {
namespace legacy {

void RunOp(const std::string& type, const NameTensorMap& ins,
const NameTensorMap& outs, paddle::framework::AttributeMap attrs,
const paddle::platform::Place& place,
paddle::framework::AttributeMap* default_attrs,
bool override_default_attr_map,
const std::map<std::string, std::string>& inplace_map = {});
}

} // namespace legacy
} // namespace egr
9 changes: 6 additions & 3 deletions paddle/fluid/eager/legacy/prepared_operator.cc
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ DECLARE_bool(check_nan_inf);
DECLARE_bool(run_pten_kernel);

namespace egr {
namespace legacy {

const paddle::framework::Tensor* GetTensorFromVar(
const paddle::framework::Variable& var) {
Expand Down Expand Up @@ -96,9 +97,9 @@ PreparedOp PrepareImpl(const NameTensorMap& ins, const NameTensorMap& outs,
#endif

// 1. get expected kernel key
auto dygraph_exe_ctx =
egr::EagerExecutionContext(op, paddle::framework::Scope(), *dev_ctx, ctx,
ins, outs, attrs, default_attrs);
auto dygraph_exe_ctx = egr::legacy::EagerExecutionContext(
op, paddle::framework::Scope(), *dev_ctx, ctx, ins, outs, attrs,
default_attrs);
auto expected_kernel_key = op.GetExpectedKernelType(dygraph_exe_ctx);
VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

Expand Down Expand Up @@ -251,4 +252,6 @@ std::shared_ptr<NameTensorMap> PrepareData(
}
return tmp_ins_ptr;
}

} // namespace legacy
} // namespace egr
2 changes: 2 additions & 0 deletions paddle/fluid/eager/legacy/prepared_operator.h
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,7 @@ class DeviceContext;
} // namespace paddle

namespace egr {
namespace legacy {

const paddle::framework::Tensor* GetTensorFromVar(
const paddle::framework::Variable& var);
Expand Down Expand Up @@ -79,4 +80,5 @@ class PreparedOp {
paddle::platform::DeviceContext* dev_ctx_;
};

} // namespace legacy
} // namespace egr
3 changes: 3 additions & 0 deletions paddle/fluid/eager/legacy/tensor_helper.cc
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
#include "paddle/fluid/platform/place.h"

namespace egr {
namespace legacy {

void InitializeVariable(paddle::framework::Variable *var,
paddle::framework::proto::VarType::Type var_type) {
Expand Down Expand Up @@ -108,4 +109,6 @@ const paddle::platform::Place &GetPlaceFromVar(
paddle::framework::ToTypeName(var.Type())));
}
}

} // namespace legacy
} // namespace egr
6 changes: 5 additions & 1 deletion paddle/fluid/eager/legacy/tensor_helper.h
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,8 @@
#include "paddle/pten/api/all.h"
#include "paddle/pten/include/core.h"
namespace egr {
namespace legacy {

void InitializeVariable(paddle::framework::Variable* var,
paddle::framework::proto::VarType::Type var_type);
paddle::framework::proto::VarType::Type GetDtypeFromVar(
Expand All @@ -27,4 +29,6 @@ const paddle::platform::Place& GetPlaceFromVar(
const paddle::framework::Variable& var);
void CopyVariable(const paddle::framework::Variable& src_var,
paddle::framework::Variable* dst_var);
}

} // namespace legacy
} // namespace egr
5 changes: 5 additions & 0 deletions paddle/fluid/eager/legacy/type_def.h
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,9 @@
namespace egr {

class EagerTensor;

namespace legacy {

namespace details {
template <typename T>
struct NameVarMapTrait {};
Expand All @@ -36,4 +39,6 @@ template <typename T>
using NameMap = typename details::NameVarMapTrait<T>::Type;

using NameTensorMap = NameMap<EagerTensor>;

} // namespace legacy
} // namespace egr
6 changes: 3 additions & 3 deletions paddle/fluid/framework/details/nan_inf_utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -55,9 +55,9 @@ void CheckOpHasNanOrInfInDygraph(const std::string& op_type,
}

template <typename TensorType>
static void CheckOpHasNanOrInfInEager(const std::string& op_type,
const egr::NameMap<TensorType>& op_outs,
platform::Place place) {
static void CheckOpHasNanOrInfInEager(
const std::string& op_type, const egr::legacy::NameMap<TensorType>& op_outs,
platform::Place place) {
for (const auto& pair : op_outs) {
for (const auto& tensor : pair.second) {
auto* var = tensor->MutableVar();
Expand Down

0 comments on commit 9ac9a4e

Please sign in to comment.