diff --git a/cmake/cinn.cmake b/cmake/cinn.cmake
index a8ebe6a9a46ae..44d502fc4b792 100644
--- a/cmake/cinn.cmake
+++ b/cmake/cinn.cmake
@@ -164,8 +164,8 @@ cinn_cc_library(
 add_dependencies(cinnapi GEN_LLVM_RUNTIME_IR_HEADER ZLIB::ZLIB)
 add_dependencies(cinnapi GEN_LLVM_RUNTIME_IR_HEADER ${core_deps})
 if(NOT CINN_ONLY)
-  target_link_libraries(cinnapi pd_dialect phi)
-  add_dependencies(cinnapi pd_dialect phi)
+  target_link_libraries(cinnapi pd_op_dialect phi)
+  add_dependencies(cinnapi pd_op_dialect phi)
 endif()
 
 target_link_libraries(cinnapi ${PYTHON_LIBRARIES})
@@ -222,8 +222,8 @@ function(gen_cinncore LINKTYPE)
   add_dependencies(${CINNCORE_TARGET} GEN_LLVM_RUNTIME_IR_HEADER ZLIB::ZLIB)
   add_dependencies(${CINNCORE_TARGET} GEN_LLVM_RUNTIME_IR_HEADER ${core_deps})
   if(NOT CINN_ONLY)
-    target_link_libraries(${CINNCORE_TARGET} pd_dialect phi)
-    add_dependencies(${CINNCORE_TARGET} pd_dialect phi)
+    target_link_libraries(${CINNCORE_TARGET} pd_op_dialect phi)
+    add_dependencies(${CINNCORE_TARGET} pd_op_dialect phi)
   endif()
 
   add_dependencies(${CINNCORE_TARGET} pybind)
diff --git a/cmake/inference_lib.cmake b/cmake/inference_lib.cmake
index 13fce9613650f..f73b20d389ef4 100755
--- a/cmake/inference_lib.cmake
+++ b/cmake/inference_lib.cmake
@@ -269,10 +269,10 @@ else()
          DSTS ${PADDLE_INFERENCE_INSTALL_DIR}/paddle/lib)
   endif()
   if(WITH_SHARED_IR)
-    set(paddle_ir_lib ${PADDLE_BINARY_DIR}/paddle/ir/libir.*)
+    set(paddle_pir_lib ${PADDLE_BINARY_DIR}/paddle/pir/libpir.*)
     copy(
       inference_lib_dist
-      SRCS ${paddle_ir_lib}
+      SRCS ${paddle_pir_lib}
       DSTS ${PADDLE_INFERENCE_INSTALL_DIR}/paddle/lib)
   endif()
 endif()
diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt
index 92e302eb15acc..b5f2ffa394a89 100644
--- a/paddle/CMakeLists.txt
+++ b/paddle/CMakeLists.txt
@@ -3,7 +3,7 @@ set(PYTHON_TESTS_DIR
     CACHE INTERNAL "python tests directory")
 
 add_subdirectory(utils)
-add_subdirectory(ir)
+add_subdirectory(pir)
 add_subdirectory(scripts)
 add_subdirectory(testing)
 add_subdirectory(phi)
diff --git a/paddle/cinn/hlir/dialect/CMakeLists.txt b/paddle/cinn/hlir/dialect/CMakeLists.txt
index 5d30ab6d34504..3787fdf2b4b08 100755
--- a/paddle/cinn/hlir/dialect/CMakeLists.txt
+++ b/paddle/cinn/hlir/dialect/CMakeLists.txt
@@ -1,2 +1,2 @@
-add_subdirectory(cinn_dialect)
-add_subdirectory(runtime_dialect)
+add_subdirectory(operator)
+add_subdirectory(runtime)
diff --git a/paddle/cinn/hlir/dialect/generated/cinn_ops.parsed.yaml b/paddle/cinn/hlir/dialect/generated/cinn_ops.parsed.yaml
deleted file mode 100644
index b345bb699084e..0000000000000
--- a/paddle/cinn/hlir/dialect/generated/cinn_ops.parsed.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-- name: add
-  inputs:
-  - typename: Tensor
-    name: x
-    optional: false
-    no_need_buffer: false
-    data_transform: {}
-  - typename: Tensor
-    name: y
-    optional: false
-    no_need_buffer: false
-    data_transform: {}
-  attrs: []
-  outputs:
-  - {typename: Tensor, name: out, optional: false, intermediate: false}
-  no_need_buffer: null
-  data_transform: null
-  infer_meta:
-    func: ElementwiseInferMeta
-    param: [x, y]
-  kernel:
-    func: [add]
-    param: [x, y]
-    backend: null
-    layout: null
-    data_type: null
-    dispatch: {add: null}
-    force_backend: null
-  inplace: {out: x}
-  view: null
-  backward: null
diff --git a/paddle/cinn/hlir/dialect/cinn_dialect/CMakeLists.txt b/paddle/cinn/hlir/dialect/operator/CMakeLists.txt
similarity index 100%
rename from paddle/cinn/hlir/dialect/cinn_dialect/CMakeLists.txt
rename to paddle/cinn/hlir/dialect/operator/CMakeLists.txt
diff --git a/paddle/cinn/hlir/dialect/cinn_dialect/ir/CMakeLists.txt b/paddle/cinn/hlir/dialect/operator/ir/CMakeLists.txt
similarity index 71%
rename from paddle/cinn/hlir/dialect/cinn_dialect/ir/CMakeLists.txt
rename to paddle/cinn/hlir/dialect/operator/ir/CMakeLists.txt
index 5fa53f74cc4a9..896a727f7e59f 100644
--- a/paddle/cinn/hlir/dialect/cinn_dialect/ir/CMakeLists.txt
+++ b/paddle/cinn/hlir/dialect/operator/ir/CMakeLists.txt
@@ -1,31 +1,30 @@
-# TODO(Aurelius84): new_ir_compiler depends on pd_dialect and could
+# TODO(Aurelius84): new_ir_compiler depends on pd_op_dialect and could
 # not found under CINN_ONLY mode
 if(NOT CINN_ONLY)
   set(CINN_DIALECT_BINARY_DIR
-      "${PADDLE_BINARY_DIR}/paddle/cinn/hlir/dialect/cinn_dialect/ir")
+      "${PADDLE_BINARY_DIR}/paddle/cinn/hlir/dialect/operator/ir")
 
-  # Generate cinn_dialect files defining op using op_gen_file
+  # Generate cinn_op_dialect files defining op using op_gen_file
   set(cinn_op_gen_parsed_yaml_file
       ${PADDLE_SOURCE_DIR}/paddle/fluid/operators/generator/parse_op.py)
   set(cinn_op_gen_file
-      ${PADDLE_SOURCE_DIR}/paddle/fluid/ir/dialect/op_generator/op_gen.py)
+      ${PADDLE_SOURCE_DIR}/paddle/fluid/pir/dialect/op_generator/op_gen.py)
   set(cinn_op_compat_yaml_file
       ${PADDLE_SOURCE_DIR}/paddle/phi/api/yaml/op_compat.yaml)
   set(cinn_op_yaml_file
-      ${PADDLE_SOURCE_DIR}/paddle/cinn/hlir/dialect/cinn_dialect/ir/cinn_ops.yaml
-  )
+      ${PADDLE_SOURCE_DIR}/paddle/cinn/hlir/dialect/operator/ir/ops.yaml)
   set(parsed_op_dir ${PADDLE_SOURCE_DIR}/paddle/cinn/hlir/dialect/generated)
-  set(cinn_op_parsed_yaml_file ${parsed_op_dir}/cinn_ops.parsed.yaml)
+  set(cinn_op_parsed_yaml_file ${parsed_op_dir}/ops.parsed.yaml)
   set(cinn_op_parsed_yaml_files ${cinn_op_parsed_yaml_file})
   set(cinn_op_namespace cinn,dialect)
-  set(cinn_dialect_name cinn)
+  set(cinn_op_dialect_name cinn_op)
   set(cinn_op_header_file ${CINN_DIALECT_BINARY_DIR}/cinn_op.h)
   set(cinn_op_source_file ${CINN_DIALECT_BINARY_DIR}/cinn_op.cc)
   set(cinn_op_header_file_tmp ${cinn_op_header_file}.tmp)
@@ -44,7 +43,7 @@ if(NOT CINN_ONLY)
     ${PYTHON_EXECUTABLE} ${cinn_op_gen_file} --op_yaml_files
     ${cinn_op_parsed_yaml_files} --op_compat_yaml_file
     ${cinn_op_compat_yaml_file} --namespaces ${cinn_op_namespace}
-    --dialect_name ${cinn_dialect_name} --op_def_h_file
+    --dialect_name ${cinn_op_dialect_name} --op_def_h_file
     ${cinn_op_header_file_tmp} --op_def_cc_file ${cinn_op_source_file_tmp}
     COMMAND ${CMAKE_COMMAND} -E copy_if_different ${cinn_op_header_file_tmp}
     ${cinn_op_header_file}
@@ -54,8 +53,8 @@ if(NOT CINN_ONLY)
       ${cinn_op_compat_yaml_file}
     VERBATIM)
 
-  cinn_cc_library(cinn_dialect SRCS cinn_dialect.cc ${cinn_op_source_file} DEPS
-                  pd_dialect)
+  cinn_cc_library(cinn_op_dialect SRCS op_dialect.cc ${cinn_op_source_file}
+                  DEPS pd_op_dialect)
 
-  target_include_directories(cinn_dialect PRIVATE ${CINN_DIALECT_BINARY_DIR})
+  target_include_directories(cinn_op_dialect PRIVATE ${CINN_DIALECT_BINARY_DIR})
 endif()
diff --git a/paddle/cinn/hlir/dialect/cinn_dialect/ir/cinn_dialect.cc b/paddle/cinn/hlir/dialect/operator/ir/op_dialect.cc
similarity index 68%
rename from paddle/cinn/hlir/dialect/cinn_dialect/ir/cinn_dialect.cc
rename to paddle/cinn/hlir/dialect/operator/ir/op_dialect.cc
index 9e8ccfb6492e4..d8a3bc7b8b35a 100644
--- a/paddle/cinn/hlir/dialect/cinn_dialect/ir/cinn_dialect.cc
+++ b/paddle/cinn/hlir/dialect/operator/ir/op_dialect.cc
@@ -12,31 +12,32 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "paddle/cinn/hlir/dialect/cinn_dialect/ir/cinn_dialect.h" +#include "paddle/cinn/hlir/dialect/operator/ir/op_dialect.h" // NOTE(chenxi67): File cinn_op.h is generated by op_gen.py, see details in // paddle/cinn/hlir/dialect/CMakeLists.txt. -#include "paddle/cinn/hlir/dialect/cinn_dialect/ir/cinn_op.h" +#include "paddle/cinn/hlir/dialect/operator/ir/cinn_op.h" namespace cinn { namespace dialect { -CinnDialect::CinnDialect(::ir::IrContext* context) - : ::ir::Dialect( - name(), context, ::ir::TypeId::get()) { +OperatorDialect::OperatorDialect(::pir::IrContext* context) + : ::pir::Dialect(name(), + context, + ::pir::TypeId::get()) { this->initialize(); } -void CinnDialect::initialize() { +void OperatorDialect::initialize() { // NOTE(chenxi67): GET_OP_LIST is defined in cinn_op.h which is // generated by op_gen.py, see details in // paddle/cinn/hlir/dialect/CMakeLists.txt. RegisterOps< #define GET_OP_LIST -#include "paddle/cinn/hlir/dialect/cinn_dialect/ir/cinn_op.h" // NOLINT +#include "paddle/cinn/hlir/dialect/operator/ir/cinn_op.h" // NOLINT >(); } } // namespace dialect } // namespace cinn -IR_DEFINE_EXPLICIT_TYPE_ID(cinn::dialect::CinnDialect) +IR_DEFINE_EXPLICIT_TYPE_ID(cinn::dialect::OperatorDialect) diff --git a/paddle/cinn/hlir/dialect/cinn_dialect/ir/cinn_dialect.h b/paddle/cinn/hlir/dialect/operator/ir/op_dialect.h similarity index 75% rename from paddle/cinn/hlir/dialect/cinn_dialect/ir/cinn_dialect.h rename to paddle/cinn/hlir/dialect/operator/ir/op_dialect.h index 77fb96863ad37..58a0487e9e8f9 100644 --- a/paddle/cinn/hlir/dialect/cinn_dialect/ir/cinn_dialect.h +++ b/paddle/cinn/hlir/dialect/operator/ir/op_dialect.h @@ -14,16 +14,16 @@ #pragma once -#include "paddle/ir/core/dialect.h" +#include "paddle/pir/core/dialect.h" namespace cinn { namespace dialect { -class CinnDialect : public ::ir::Dialect { +class OperatorDialect : public ::pir::Dialect { public: - explicit CinnDialect(::ir::IrContext* context); + explicit OperatorDialect(::pir::IrContext* context); - static const char* name() { return "cinn"; } + static const char* name() { return "cinn_op"; } private: void initialize(); @@ -32,4 +32,4 @@ class CinnDialect : public ::ir::Dialect { } // namespace dialect } // namespace cinn -IR_DECLARE_EXPLICIT_TYPE_ID(cinn::dialect::CinnDialect) +IR_DECLARE_EXPLICIT_TYPE_ID(cinn::dialect::OperatorDialect) diff --git a/paddle/cinn/hlir/dialect/cinn_dialect/ir/cinn_ops.yaml b/paddle/cinn/hlir/dialect/operator/ir/ops.yaml similarity index 100% rename from paddle/cinn/hlir/dialect/cinn_dialect/ir/cinn_ops.yaml rename to paddle/cinn/hlir/dialect/operator/ir/ops.yaml diff --git a/paddle/cinn/hlir/dialect/runtime_dialect/CMakeLists.txt b/paddle/cinn/hlir/dialect/runtime/CMakeLists.txt similarity index 100% rename from paddle/cinn/hlir/dialect/runtime_dialect/CMakeLists.txt rename to paddle/cinn/hlir/dialect/runtime/CMakeLists.txt diff --git a/paddle/cinn/hlir/dialect/runtime/ir/CMakeLists.txt b/paddle/cinn/hlir/dialect/runtime/ir/CMakeLists.txt new file mode 100644 index 0000000000000..6023117faee09 --- /dev/null +++ b/paddle/cinn/hlir/dialect/runtime/ir/CMakeLists.txt @@ -0,0 +1,4 @@ +if(NOT CINN_ONLY) + cinn_cc_library(cinn_runtime_dialect SRCS runtime_dialect.cc jit_kernel_op.cc + DEPS pir_core) +endif() diff --git a/paddle/cinn/hlir/dialect/runtime_dialect/ir/jit_kernel_op.cc b/paddle/cinn/hlir/dialect/runtime/ir/jit_kernel_op.cc similarity index 80% rename from paddle/cinn/hlir/dialect/runtime_dialect/ir/jit_kernel_op.cc rename to 
paddle/cinn/hlir/dialect/runtime/ir/jit_kernel_op.cc index 49e3685a8475a..ed3d4a4045c59 100644 --- a/paddle/cinn/hlir/dialect/runtime_dialect/ir/jit_kernel_op.cc +++ b/paddle/cinn/hlir/dialect/runtime/ir/jit_kernel_op.cc @@ -12,10 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/cinn/hlir/dialect/runtime_dialect/ir/jit_kernel_op.h" +#include "paddle/cinn/hlir/dialect/runtime/ir/jit_kernel_op.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/enforce.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/enforce.h" namespace cinn { namespace dialect { @@ -28,13 +28,13 @@ void JitKernelOp::Verify() { auto& attributes = this->attributes(); IR_ENFORCE(attributes.count(kAttrName) > 0 && - attributes.at(kAttrName).isa<::ir::PointerAttribute>(), + attributes.at(kAttrName).isa<::pir::PointerAttribute>(), "Type of attribute: instruction is not right."); } hlir::framework::Instruction* JitKernelOp::instruction() { void* ptr = - attributes().at(kAttrName).dyn_cast().data(); + attributes().at(kAttrName).dyn_cast<::pir::PointerAttribute>().data(); return reinterpret_cast(ptr); } diff --git a/paddle/cinn/hlir/dialect/runtime_dialect/ir/jit_kernel_op.h b/paddle/cinn/hlir/dialect/runtime/ir/jit_kernel_op.h similarity index 91% rename from paddle/cinn/hlir/dialect/runtime_dialect/ir/jit_kernel_op.h rename to paddle/cinn/hlir/dialect/runtime/ir/jit_kernel_op.h index 37b9c66bb6e17..f410e4d46c021 100644 --- a/paddle/cinn/hlir/dialect/runtime_dialect/ir/jit_kernel_op.h +++ b/paddle/cinn/hlir/dialect/runtime/ir/jit_kernel_op.h @@ -14,7 +14,7 @@ #pragma once -#include "paddle/ir/core/op_base.h" +#include "paddle/pir/core/op_base.h" namespace cinn { @@ -40,10 +40,10 @@ namespace dialect { * temporarily, and will spilt executor information like * scope, inputs, outputs into InterpretorCore module. */ -class JitKernelOp : public ::ir::Op { +class JitKernelOp : public ::pir::Op { public: using Op::Op; - static const char* name() { return "cinn.jit_kernel"; } + static const char* name() { return "cinn_runtime.jit_kernel"; } // TODO(Aurelius84): Think deeply what should contains static constexpr uint32_t attributes_num = 1; static constexpr char* kAttrName = "instruction"; diff --git a/paddle/cinn/hlir/dialect/runtime_dialect/ir/runtime_dialect.cc b/paddle/cinn/hlir/dialect/runtime/ir/runtime_dialect.cc similarity index 73% rename from paddle/cinn/hlir/dialect/runtime_dialect/ir/runtime_dialect.cc rename to paddle/cinn/hlir/dialect/runtime/ir/runtime_dialect.cc index c21d21f11213e..40fd092e1329e 100644 --- a/paddle/cinn/hlir/dialect/runtime_dialect/ir/runtime_dialect.cc +++ b/paddle/cinn/hlir/dialect/runtime/ir/runtime_dialect.cc @@ -12,15 +12,16 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "paddle/cinn/hlir/dialect/runtime_dialect/ir/runtime_dialect.h" -#include "paddle/cinn/hlir/dialect/runtime_dialect/ir/jit_kernel_op.h" +#include "paddle/cinn/hlir/dialect/runtime/ir/runtime_dialect.h" +#include "paddle/cinn/hlir/dialect/runtime/ir/jit_kernel_op.h" namespace cinn { namespace dialect { -RuntimeDialect::RuntimeDialect(::ir::IrContext* context) - : ::ir::Dialect( - name(), context, ::ir::TypeId::get()) { +RuntimeDialect::RuntimeDialect(::pir::IrContext* context) + : ::pir::Dialect(name(), + context, + ::pir::TypeId::get()) { this->initialize(); } diff --git a/paddle/cinn/hlir/dialect/runtime_dialect/ir/runtime_dialect.h b/paddle/cinn/hlir/dialect/runtime/ir/runtime_dialect.h similarity index 81% rename from paddle/cinn/hlir/dialect/runtime_dialect/ir/runtime_dialect.h rename to paddle/cinn/hlir/dialect/runtime/ir/runtime_dialect.h index a35c7a24b8d7f..8ba0af9334498 100644 --- a/paddle/cinn/hlir/dialect/runtime_dialect/ir/runtime_dialect.h +++ b/paddle/cinn/hlir/dialect/runtime/ir/runtime_dialect.h @@ -14,16 +14,16 @@ #pragma once -#include "paddle/ir/core/dialect.h" +#include "paddle/pir/core/dialect.h" namespace cinn { namespace dialect { -class RuntimeDialect : public ::ir::Dialect { +class RuntimeDialect : public ::pir::Dialect { public: - explicit RuntimeDialect(::ir::IrContext* context); + explicit RuntimeDialect(::pir::IrContext* context); - static const char* name() { return "cinn"; } + static const char* name() { return "cinn_runtime"; } private: void initialize(); diff --git a/paddle/cinn/hlir/dialect/runtime_dialect/ir/CMakeLists.txt b/paddle/cinn/hlir/dialect/runtime_dialect/ir/CMakeLists.txt deleted file mode 100644 index 1df80a5bb3f75..0000000000000 --- a/paddle/cinn/hlir/dialect/runtime_dialect/ir/CMakeLists.txt +++ /dev/null @@ -1,4 +0,0 @@ -if(NOT CINN_ONLY) - cinn_cc_library(runtime_dialect SRCS runtime_dialect.cc jit_kernel_op.cc DEPS - ir_core) -endif() diff --git a/paddle/cinn/hlir/framework/CMakeLists.txt b/paddle/cinn/hlir/framework/CMakeLists.txt index d14ffa70234fc..5e202578b125c 100755 --- a/paddle/cinn/hlir/framework/CMakeLists.txt +++ b/paddle/cinn/hlir/framework/CMakeLists.txt @@ -23,13 +23,13 @@ gather_srcs( accuracy_checker.cc visualize_helper.cc) -# TODO(Aurelius84): new_ir_compiler depends on pd_dialect and could +# TODO(Aurelius84): new_ir_compiler depends on pd_op_dialect and could # not found under CINN_ONLY mode if(NOT CINN_ONLY) cinn_cc_library(new_ir_compiler SRCS new_ir_compiler.cc DEPS cinnapi - pd_dialect) + pd_op_dialect) cinn_cc_library(convert_to_dialect SRCS convert_to_dialect.cc DEPS cinnapi - cinn_dialect) + cinn_op_dialect) endif() if(WITH_CUDA) diff --git a/paddle/cinn/hlir/framework/convert_to_dialect.cc b/paddle/cinn/hlir/framework/convert_to_dialect.cc index 306e27dc1fea5..f76b49a54555f 100644 --- a/paddle/cinn/hlir/framework/convert_to_dialect.cc +++ b/paddle/cinn/hlir/framework/convert_to_dialect.cc @@ -17,34 +17,34 @@ #include #include -#include "paddle/cinn/hlir/dialect/runtime_dialect/ir/jit_kernel_op.h" -#include "paddle/cinn/hlir/dialect/runtime_dialect/ir/runtime_dialect.h" +#include "paddle/cinn/hlir/dialect/runtime/ir/jit_kernel_op.h" +#include "paddle/cinn/hlir/dialect/runtime/ir/runtime_dialect.h" #include "paddle/cinn/hlir/framework/program.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/program.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/program.h" namespace cinn { namespace hlir { namespace framework { -std::unique_ptr<::ir::Program> 
ConvertToRuntimeDialect( +std::unique_ptr<::pir::Program> ConvertToRuntimeDialect( const hlir::framework::Program& program) { - ::ir::IrContext* ctx = ::ir::IrContext::Instance(); + ::pir::IrContext* ctx = ::pir::IrContext::Instance(); ctx->GetOrRegisterDialect(); - auto ir_program = std::make_unique<::ir::Program>(ctx); + auto ir_program = std::make_unique<::pir::Program>(ctx); std::string jit_op_name = dialect::JitKernelOp::name(); - ::ir::OpInfo op_info = ctx->GetRegisteredOpInfo(jit_op_name); + ::pir::OpInfo op_info = ctx->GetRegisteredOpInfo(jit_op_name); auto& instrs = program.GetRunInstructions(); for (auto& instr : instrs) { - std::unordered_map op_attrs{ + std::unordered_map op_attrs{ {dialect::JitKernelOp::kAttrName, - ::ir::PointerAttribute::get(ctx, instr.get())}, + ::pir::PointerAttribute::get(ctx, instr.get())}, }; - ::ir::Operation* cinn_op = - ::ir::Operation::Create({}, op_attrs, {}, op_info); + ::pir::Operation* cinn_op = + ::pir::Operation::Create({}, op_attrs, {}, op_info); ir_program->block()->push_back(cinn_op); } return std::move(ir_program); diff --git a/paddle/cinn/hlir/framework/convert_to_dialect.h b/paddle/cinn/hlir/framework/convert_to_dialect.h index a88b5222b63bd..7ea0a2ace40c7 100644 --- a/paddle/cinn/hlir/framework/convert_to_dialect.h +++ b/paddle/cinn/hlir/framework/convert_to_dialect.h @@ -16,16 +16,16 @@ #include -namespace ir { +namespace pir { class Program; -} // namespace ir +} // namespace pir namespace cinn { namespace hlir { namespace framework { class Program; -std::unique_ptr<::ir::Program> ConvertToRuntimeDialect( +std::unique_ptr<::pir::Program> ConvertToRuntimeDialect( const hlir::framework::Program& program); } // namespace framework diff --git a/paddle/cinn/hlir/framework/new_ir/group.h b/paddle/cinn/hlir/framework/new_ir/group.h index b62c315873c70..1a67a02e58ca9 100644 --- a/paddle/cinn/hlir/framework/new_ir/group.h +++ b/paddle/cinn/hlir/framework/new_ir/group.h @@ -18,7 +18,7 @@ #include "paddle/cinn/hlir/framework/new_ir/utils.h" #include "paddle/cinn/hlir/framework/op.h" -#include "paddle/ir/core/operation.h" +#include "paddle/pir/core/operation.h" namespace cinn { namespace hlir { @@ -29,12 +29,12 @@ using framework::OpPatternKind; // TODO(Aurelius84): Need to be replaced with CinnGroupOp struct Group { public: - explicit Group(const std::vector<::ir::Operation*>& group_ops) + explicit Group(const std::vector<::pir::Operation*>& group_ops) : ops(group_ops) { Initialize(); } - explicit Group(std::initializer_list<::ir::Operation*> group_ops) + explicit Group(std::initializer_list<::pir::Operation*> group_ops) : ops(group_ops) { Initialize(); } @@ -42,7 +42,7 @@ struct Group { int group_id; std::string fn_name; OpPatternKind op_pattern_kind; - std::vector<::ir::Operation*> ops; + std::vector<::pir::Operation*> ops; std::vector input_names; std::vector output_names; diff --git a/paddle/cinn/hlir/framework/new_ir/op_lowering_impl.cc b/paddle/cinn/hlir/framework/new_ir/op_lowering_impl.cc index d291aba2e406e..235d545dc331f 100644 --- a/paddle/cinn/hlir/framework/new_ir/op_lowering_impl.cc +++ b/paddle/cinn/hlir/framework/new_ir/op_lowering_impl.cc @@ -23,7 +23,7 @@ #include "paddle/cinn/hlir/framework/new_ir/utils.h" #include "paddle/cinn/lang/placeholder.h" #include "paddle/cinn/utils/attribute_util.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_type.h" #include "paddle/phi/core/ddim.h" PD_DECLARE_bool(cinn_use_cuda_vectorize); @@ -39,7 +39,7 @@ using 
framework::OpPatternKind; using framework::StrategyFunction; namespace details { -ir::Tensor GetTensor(const ::ir::Value& value) { +ir::Tensor GetTensor(const ::pir::Value& value) { auto type_info = value.type().dyn_cast(); auto in_shape = phi::vectorize(type_info.dims()); auto dtype = type_info.dtype(); @@ -49,9 +49,9 @@ ir::Tensor GetTensor(const ::ir::Value& value) { } std::vector CollectInputTensor( - const ::ir::Operation* op, + const ::pir::Operation* op, std::vector* func_args, - std::unordered_map<::ir::Value, ir::Tensor>* tensor_map) { + std::unordered_map<::pir::Value, ir::Tensor>* tensor_map) { std::vector tensors; for (auto& operand : op->operands()) { CHECK(operand); @@ -72,7 +72,7 @@ std::vector CollectInputTensor( return tensors; } -void CollectOutputInfo(const ::ir::Operation* op, +void CollectOutputInfo(const ::pir::Operation* op, std::vector* out_types, std::vector>* out_shapes) { auto op_results = op->results(); @@ -88,7 +88,7 @@ void CollectOutputInfo(const ::ir::Operation* op, } } -NodeAttr CollectAttrs(const ::ir::Operation& op) { +NodeAttr CollectAttrs(const ::pir::Operation& op) { NodeAttr node_attrs; VLOG(4) << "op.attributes():" << op.attributes().size(); auto attrs = utils::ConvertAttributes(op.attributes()); @@ -134,18 +134,18 @@ std::vector OpLowererImpl::Lower(const GroupPtr& group, } } -bool OpLowererImpl::ElementwiseScheduleDetermineFunction(::ir::Operation* op) { +bool OpLowererImpl::ElementwiseScheduleDetermineFunction(::pir::Operation* op) { return true; } -bool OpLowererImpl::ReduceScheduleDetermineFunction(::ir::Operation* op) { +bool OpLowererImpl::ReduceScheduleDetermineFunction(::pir::Operation* op) { // TODO(Aurelius84): Support this. // auto& op_pattern_dict = Operator::GetAttrs("OpPattern"); // return op_pattern_dict[op] == framework::kReduction; return true; } -bool OpLowererImpl::NonFusibleScheduleDetermineFunction(::ir::Operation* op) { +bool OpLowererImpl::NonFusibleScheduleDetermineFunction(::pir::Operation* op) { return true; } @@ -160,7 +160,7 @@ std::vector OpLowererImpl::LowerGroup( return LowerCustomCall(group); } std::vector group_func_arg_tensors; - std::unordered_map<::ir::Value, ir::Tensor> tensor_map; + std::unordered_map<::pir::Value, ir::Tensor> tensor_map; bool do_op_schedule = apply_group_schedule || apply_op_schedule; std::vector func_bodies = LowerOps(ops, do_op_schedule, @@ -191,8 +191,8 @@ std::vector OpLowererImpl::LowerCustomCall( const GroupPtr& group) { auto& ops = group->ops; CHECK_EQ(ops.size(), 1); - ::ir::Operation* op = ops[0]; - std::unordered_map<::ir::Value, ir::Tensor> tensor_map; + ::pir::Operation* op = ops[0]; + std::unordered_map<::pir::Value, ir::Tensor> tensor_map; std::vector op_func_arg_tensors = details::CollectInputTensor(op, nullptr, &tensor_map); VLOG(4) << "inputs.size(): " << op_func_arg_tensors.size(); @@ -234,7 +234,7 @@ std::vector OpLowererImpl::LowerCustomCall( std::vector OpLowererImpl::PostProcess( const GroupPtr& group, - const std::unordered_map<::ir::Value, ir::Tensor>& tensor_map, + const std::unordered_map<::pir::Value, ir::Tensor>& tensor_map, bool done_op_schedule, ir::IRSchedule* ir_sch, std::vector* group_func_arg_tensors) { @@ -313,11 +313,11 @@ std::vector OpLowererImpl::PostProcess( } std::vector OpLowererImpl::LowerOps( - const std::vector<::ir::Operation*>& ops, + const std::vector<::pir::Operation*>& ops, bool apply_op_schedule, ScheduleDetermineFunction schedule_determine_func, std::vector* group_func_arg_tensors, - std::unordered_map<::ir::Value, ir::Tensor>* tensor_map) { + 
std::unordered_map<::pir::Value, ir::Tensor>* tensor_map) { auto& strategy = Operator::GetAttrs("CINNStrategy"); std::vector func_bodies; for (auto* op : ops) { @@ -359,8 +359,8 @@ std::vector OpLowererImpl::LowerOps( std::vector OpLowererImpl::DoOpLower( std::shared_ptr op_impl, - const ::ir::Operation* op, - std::unordered_map<::ir::Value, ir::Tensor>* tensor_map, + const ::pir::Operation* op, + std::unordered_map<::pir::Value, ir::Tensor>* tensor_map, std::vector* op_func_arg_tensors) { VLOG(4) << "Do lower with Compute, op: " << op->name(); std::vector cinn_inputs; diff --git a/paddle/cinn/hlir/framework/new_ir/op_lowering_impl.h b/paddle/cinn/hlir/framework/new_ir/op_lowering_impl.h index ffa6218299100..81e36d8bb7b3b 100644 --- a/paddle/cinn/hlir/framework/new_ir/op_lowering_impl.h +++ b/paddle/cinn/hlir/framework/new_ir/op_lowering_impl.h @@ -26,7 +26,7 @@ #include "paddle/cinn/ir/schedule/ir_schedule.h" #include "paddle/cinn/ir/schedule/ir_schedule_util.h" #include "paddle/cinn/lang/packed_func.h" -#include "paddle/ir/core/operation.h" +#include "paddle/pir/core/operation.h" // Fusion Op lowering, there are four kinds of lowering function: // Elementwise/Broadcast/Injective,Reduce,OutEWiseFusable,NonFusible. @@ -43,7 +43,7 @@ using GroupPtr = std::shared_ptr; using common::Target; class OpLowererImpl; -typedef bool (OpLowererImpl::*ScheduleDetermineFunction)(::ir::Operation*); +typedef bool (OpLowererImpl::*ScheduleDetermineFunction)(::pir::Operation*); class OpLowererImpl : public OpLowererImplBase { public: @@ -96,7 +96,7 @@ class OpLowererImpl : public OpLowererImplBase { */ std::vector PostProcess( const GroupPtr& group, - const std::unordered_map<::ir::Value, ir::Tensor>& tensor_map, + const std::unordered_map<::pir::Value, ir::Tensor>& tensor_map, bool done_op_schedule, ir::IRSchedule* ir_sch, std::vector* group_func_arg_tensors); @@ -114,11 +114,11 @@ class OpLowererImpl : public OpLowererImplBase { * @return The lowered func bodies of Op set. */ std::vector LowerOps( - const std::vector<::ir::Operation*>& ops, + const std::vector<::pir::Operation*>& ops, bool apply_op_schedule, ScheduleDetermineFunction schedule_determine_func, std::vector* group_func_arg_tensors, - std::unordered_map<::ir::Value, ir::Tensor>* tensor_map); + std::unordered_map<::pir::Value, ir::Tensor>* tensor_map); /** * @brief Lower an Op to CINN IR. The Compute and Lower processes will be @@ -131,8 +131,8 @@ class OpLowererImpl : public OpLowererImplBase { */ std::vector DoOpLower( std::shared_ptr op_impl, - const ::ir::Operation* op, - std::unordered_map<::ir::Value, ir::Tensor>* tensor_map, + const ::pir::Operation* op, + std::unordered_map<::pir::Value, ir::Tensor>* tensor_map, std::vector* op_func_arg_tensors); /** @@ -148,9 +148,9 @@ class OpLowererImpl : public OpLowererImplBase { // Functions used to determine which Ops to schedule at op level, define a // policy for each type of group. 
-  inline bool ReduceScheduleDetermineFunction(::ir::Operation* op);
-  inline bool ElementwiseScheduleDetermineFunction(::ir::Operation* op);
-  inline bool NonFusibleScheduleDetermineFunction(::ir::Operation* op);
+  inline bool ReduceScheduleDetermineFunction(::pir::Operation* op);
+  inline bool ElementwiseScheduleDetermineFunction(::pir::Operation* op);
+  inline bool NonFusibleScheduleDetermineFunction(::pir::Operation* op);
 
  private:
   Target target_;
diff --git a/paddle/cinn/hlir/framework/new_ir/utils.cc b/paddle/cinn/hlir/framework/new_ir/utils.cc
index 38bfcf05776e0..b027992af8c47 100644
--- a/paddle/cinn/hlir/framework/new_ir/utils.cc
+++ b/paddle/cinn/hlir/framework/new_ir/utils.cc
@@ -20,9 +20,9 @@ namespace framework {
 namespace newir {
 
 const std::unordered_map<std::string, std::string> CompatibleInfo::OP_NAMES = {
-    {"pd.full", "fill_constant"}};
+    {"pd_op.full", "fill_constant"}};
 
-std::string CompatibleInfo::OpName(const ::ir::Operation& op) {
+std::string CompatibleInfo::OpName(const ::pir::Operation& op) {
   std::string name = op.name();
   if (OP_NAMES.count(name)) {
     return OP_NAMES.at(name);
@@ -36,12 +36,12 @@ std::string CompatibleInfo::OpName(const ::ir::Operation& op) {
   return cinn_op_name;
 }
 
-std::string CompatibleInfo::ValueName(const ::ir::Value& value) {
+std::string CompatibleInfo::ValueName(const ::pir::Value& value) {
   return CompatibleInfo::kNamePrefix +
-         std::to_string(std::hash<::ir::Value>()(value));
+         std::to_string(std::hash<::pir::Value>()(value));
 }
 
-std::string CompatibleInfo::OpFuncName(const ::ir::Operation& op) {
+std::string CompatibleInfo::OpFuncName(const ::pir::Operation& op) {
   std::string op_name = OpName(op);
   std::string func_name =
       cinn::common::Context::Global().NewName("fn_" + op_name);
@@ -49,7 +49,7 @@ std::string CompatibleInfo::OpFuncName(const ::ir::Operation& op) {
 }
 
 std::string CompatibleInfo::GroupOpsName(
-    const std::vector<::ir::Operation*>& ops) {
+    const std::vector<::pir::Operation*>& ops) {
   std::string name = "fn";
   for (auto* op : ops) {
     std::string op_name = OpName(*op);
@@ -58,7 +58,7 @@ std::string CompatibleInfo::GroupOpsName(
   return name;
 }
 
-std::vector<std::string> CompatibleInfo::InputNames(const ::ir::Operation& op,
+std::vector<std::string> CompatibleInfo::InputNames(const ::pir::Operation& op,
                                                     bool allow_duplicate) {
   std::vector<std::string> names;
   std::unordered_set<std::string> repeat;
@@ -75,7 +75,7 @@ std::vector<std::string> CompatibleInfo::InputNames(const ::ir::Operation& op,
 }
 
 std::vector<std::string> CompatibleInfo::OutputNames(
-    const ::ir::Operation& op) {
+    const ::pir::Operation& op) {
   std::vector<std::string> names;
   for (int i = 0; i < op.num_results(); ++i) {
     auto value = op.result(i);
diff --git a/paddle/cinn/hlir/framework/new_ir/utils.h b/paddle/cinn/hlir/framework/new_ir/utils.h
index 4c437dd19ef8a..2a70cd9eedc17 100644
--- a/paddle/cinn/hlir/framework/new_ir/utils.h
+++ b/paddle/cinn/hlir/framework/new_ir/utils.h
@@ -16,7 +16,7 @@
 #include <string>
 #include <unordered_map>
 #include "paddle/cinn/common/context.h"
-#include "paddle/ir/core/operation.h"
+#include "paddle/pir/core/operation.h"
 
 namespace cinn {
 namespace hlir {
@@ -29,18 +29,18 @@ struct CompatibleInfo {
   // macros or attempt to unify Op name with Paddle and CINN.
   static const std::unordered_map<std::string, std::string> OP_NAMES;
 
-  static std::string OpName(const ::ir::Operation& op);
+  static std::string OpName(const ::pir::Operation& op);
 
-  static std::string ValueName(const ::ir::Value& value);
+  static std::string ValueName(const ::pir::Value& value);
 
-  static std::string OpFuncName(const ::ir::Operation& op);
+  static std::string OpFuncName(const ::pir::Operation& op);
 
-  static std::string GroupOpsName(const std::vector<::ir::Operation*>& ops);
+  static std::string GroupOpsName(const std::vector<::pir::Operation*>& ops);
 
-  static std::vector<std::string> InputNames(const ::ir::Operation& op,
+  static std::vector<std::string> InputNames(const ::pir::Operation& op,
                                              bool allow_duplicate = false);
 
-  static std::vector<std::string> OutputNames(const ::ir::Operation& op);
+  static std::vector<std::string> OutputNames(const ::pir::Operation& op);
 };
 
 }  // namespace newir
diff --git a/paddle/cinn/hlir/framework/new_ir_compiler.cc b/paddle/cinn/hlir/framework/new_ir_compiler.cc
index bcc7c0f1c2a05..9172a1d8b052f 100644
--- a/paddle/cinn/hlir/framework/new_ir_compiler.cc
+++ b/paddle/cinn/hlir/framework/new_ir_compiler.cc
@@ -17,8 +17,8 @@
 #include <unordered_map>
 #include "paddle/cinn/hlir/framework/new_ir/utils.h"
 #include "paddle/cinn/utils/attribute_util.h"
-#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h"
-#include "paddle/ir/core/builtin_type.h"
+#include "paddle/fluid/pir/dialect/operator/ir/op_type.h"
+#include "paddle/pir/core/builtin_type.h"
 
 namespace cinn {
 namespace hlir {
@@ -33,7 +33,7 @@ std::unique_ptr<Program> NewIRCompiler::Build() {
   std::vector<newir::GroupPtr> groups;
   for (auto it = program_.block()->begin(); it != program_.block()->end();
        ++it) {
-    std::vector<::ir::Operation*> ops = {*it};
+    std::vector<::pir::Operation*> ops = {*it};
     groups.push_back(std::make_shared<newir::Group>(ops));
   }
   VLOG(4) << "Groups size: " << groups.size();
@@ -123,11 +123,11 @@ std::vector<std::unique_ptr<Instruction>> NewIRCompiler::BuildInstructions(
 }
 
 std::shared_ptr<Scope> BuildScope(const Target& target,
-                                  const ::ir::Program& program) {
-  std::unordered_set<::ir::Value> visited;
+                                  const ::pir::Program& program) {
+  std::unordered_set<::pir::Value> visited;
   auto scope = std::make_shared<Scope>();
 
-  auto create_var = [&](::ir::Value value) {
+  auto create_var = [&](::pir::Value value) {
     if (visited.count(value) > 0) return;
     visited.emplace(value);
diff --git a/paddle/cinn/hlir/framework/new_ir_compiler.h b/paddle/cinn/hlir/framework/new_ir_compiler.h
index bb18da54bc4f3..62c3d97a21a41 100644
--- a/paddle/cinn/hlir/framework/new_ir_compiler.h
+++ b/paddle/cinn/hlir/framework/new_ir_compiler.h
@@ -17,7 +17,7 @@
 #include <memory>
 #include <unordered_map>
 #include "paddle/cinn/common/macros.h"
-#include "paddle/ir/core/program.h"
+#include "paddle/pir/core/program.h"
 
 #include "paddle/cinn/hlir/framework/graph_compiler.h"
 #include "paddle/cinn/hlir/framework/op_lowering.h"
@@ -30,7 +30,7 @@ namespace framework {
 // the co-existance with GraphCompiler.
 class NewIRCompiler final {
  public:
-  NewIRCompiler(const ::ir::Program& prog,
+  NewIRCompiler(const ::pir::Program& prog,
                 const Target& target,
                 const std::shared_ptr<Scope>& scope)
       : program_(prog),
@@ -45,14 +45,14 @@ class NewIRCompiler final {
  private:
   CINN_DISALLOW_COPY_AND_ASSIGN(NewIRCompiler);
 
-  std::vector<ir::LoweredFunc> GetOpFunc(const ::ir::Operation& op, int idx);
+  std::vector<ir::LoweredFunc> GetOpFunc(const ::pir::Operation& op, int idx);
 
   void ProcessFunction(const std::vector<ir::LoweredFunc>& lowered_funcs);
 
   std::vector<std::unique_ptr<Instruction>> BuildInstructions(
       const std::vector<newir::GroupPtr>& groups);
 
-  const ::ir::Program& program_;
+  const ::pir::Program& program_;
   ir::Module::Builder m_builder_;
   std::unique_ptr<backends::Compiler> compiler_{nullptr};
   Target target_;
@@ -60,7 +60,7 @@ class NewIRCompiler final {
   std::unordered_map<std::string, std::string> func_names_;
 };
 
-std::shared_ptr<Scope> BuildScope(const Target&, const ::ir::Program&);
+std::shared_ptr<Scope> BuildScope(const Target&, const ::pir::Program&);
 
 }  // namespace framework
 }  // namespace hlir
diff --git a/paddle/cinn/runtime/cuda/float16.h b/paddle/cinn/runtime/cuda/float16.h
index cae59186dc832..d64731387d596 100644
--- a/paddle/cinn/runtime/cuda/float16.h
+++ b/paddle/cinn/runtime/cuda/float16.h
@@ -597,9 +597,9 @@ __host__ __device__ inline bool(isfinite)(const float16& a) {
 __host__ __device__ inline float16(abs)(const float16& a) {
 #if defined(CINN_CUDA_FP16) && (defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530)
-  return float16(__habs(a.to_half()));
+  return static_cast<float16>(__habs(a.to_half()));
 #else
-  return float16(fabsf(static_cast<float>(a)));
+  return static_cast<float16>(fabsf(static_cast<float>(a)));
 #endif
 }
diff --git a/paddle/cinn/utils/attribute_util.h b/paddle/cinn/utils/attribute_util.h
index aaffed7085c7b..17c1471c38c2d 100644
--- a/paddle/cinn/utils/attribute_util.h
+++ b/paddle/cinn/utils/attribute_util.h
@@ -18,29 +18,29 @@
 
 #include "paddle/cinn/common/type.h"
 #include "paddle/cinn/utils/type_defs.h"
-#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_attribute.h"
-#include "paddle/ir/core/builtin_type.h"
+#include "paddle/fluid/pir/dialect/operator/ir/op_attribute.h"
 #include "paddle/phi/common/data_type.h"
+#include "paddle/pir/core/builtin_type.h"
 
 namespace cinn {
 namespace utils {
 
-using NewIR_AttributeMap = std::unordered_map<std::string, ::ir::Attribute>;
+using NewIR_AttributeMap = std::unordered_map<std::string, ::pir::Attribute>;
 
-Attribute ConvertAttribute(const ::ir::Attribute& src_attr) {
+Attribute ConvertAttribute(const ::pir::Attribute& src_attr) {
   Attribute dst_attr;
-  if (src_attr.isa<::ir::BoolAttribute>()) {
-    dst_attr = src_attr.dyn_cast<::ir::BoolAttribute>().data();
-  } else if (src_attr.isa<::ir::FloatAttribute>()) {
-    dst_attr = src_attr.dyn_cast<::ir::FloatAttribute>().data();
-  } else if (src_attr.isa<::ir::Int32Attribute>()) {
-    dst_attr = src_attr.dyn_cast<::ir::Int32Attribute>().data();
-  } else if (src_attr.isa<::ir::StrAttribute>()) {
-    dst_attr = src_attr.dyn_cast<::ir::StrAttribute>().AsString();
-  } else if (src_attr.isa<::ir::Int64Attribute>()) {
-    dst_attr = src_attr.dyn_cast<::ir::Int64Attribute>().data();
-  } else if (src_attr.isa<::ir::DoubleAttribute>()) {
-    dst_attr = src_attr.dyn_cast<::ir::DoubleAttribute>().data();
+  if (src_attr.isa<::pir::BoolAttribute>()) {
+    dst_attr = src_attr.dyn_cast<::pir::BoolAttribute>().data();
+  } else if (src_attr.isa<::pir::FloatAttribute>()) {
+    dst_attr = src_attr.dyn_cast<::pir::FloatAttribute>().data();
+  } else if (src_attr.isa<::pir::Int32Attribute>()) {
+    dst_attr = src_attr.dyn_cast<::pir::Int32Attribute>().data();
+  } else if (src_attr.isa<::pir::StrAttribute>()) {
+    dst_attr = src_attr.dyn_cast<::pir::StrAttribute>().AsString();
+  } else if (src_attr.isa<::pir::Int64Attribute>()) {
+    dst_attr = src_attr.dyn_cast<::pir::Int64Attribute>().data();
+  } else if (src_attr.isa<::pir::DoubleAttribute>()) {
+    dst_attr = src_attr.dyn_cast<::pir::DoubleAttribute>().data();
   } else if (src_attr.isa<paddle::dialect::IntArrayAttribute>()) {
     auto& arr = src_attr.dyn_cast<paddle::dialect::IntArrayAttribute>()
                     .data()
@@ -75,10 +75,10 @@ AttributeMap ConvertAttributes(const NewIR_AttributeMap& src_attrs) {
 }
 
 #define CASE_TYPE(src, dst) \
-  else if (type.isa<::ir::src>()) return common::dst();
+  else if (type.isa<::pir::src>()) return common::dst();
 
-common::Type ConvertIRType(::ir::Type type) {
-  if (type.isa<::ir::BFloat16Type>()) return common::BF16();
+common::Type ConvertIRType(::pir::Type type) {
+  if (type.isa<::pir::BFloat16Type>()) return common::BF16();
   CASE_TYPE(Float16Type, F16)
   CASE_TYPE(Float32Type, F32)
   CASE_TYPE(Float64Type, F64)
diff --git a/paddle/fluid/CMakeLists.txt b/paddle/fluid/CMakeLists.txt
index 628bf6d00c11c..c8e35ad43a36b 100644
--- a/paddle/fluid/CMakeLists.txt
+++ b/paddle/fluid/CMakeLists.txt
@@ -8,7 +8,7 @@ add_subdirectory(pybind)
 add_subdirectory(eager)
 add_subdirectory(prim)
 add_subdirectory(jit)
-add_subdirectory(ir)
+add_subdirectory(pir)
 add_subdirectory(ir_adaptor)
 add_subdirectory(primitive)
 # NOTE: please add subdirectory inference at last.
diff --git a/paddle/fluid/distributed/fleet_executor/interceptor.h b/paddle/fluid/distributed/fleet_executor/interceptor.h
index 7c9cf9c8112ef..7645abf24cfd3 100644
--- a/paddle/fluid/distributed/fleet_executor/interceptor.h
+++ b/paddle/fluid/distributed/fleet_executor/interceptor.h
@@ -29,8 +29,8 @@
 #include "paddle/fluid/platform/errors.h"
 #include "paddle/fluid/platform/macros.h"
 #include "paddle/fluid/platform/place.h"
-#include "paddle/ir/core/program.h"
-#include "paddle/ir/core/value.h"
+#include "paddle/pir/core/program.h"
+#include "paddle/pir/core/value.h"
 
 namespace paddle {
 namespace framework {
diff --git a/paddle/fluid/eager/auto_code_generator/CMakeLists.txt b/paddle/fluid/eager/auto_code_generator/CMakeLists.txt
index e27310dea5629..25d2f4dacfd16 100644
--- a/paddle/fluid/eager/auto_code_generator/CMakeLists.txt
+++ b/paddle/fluid/eager/auto_code_generator/CMakeLists.txt
@@ -65,7 +65,7 @@ if(WIN32)
     add_custom_command(
       OUTPUT ${eager_generator_path}/ir.dll
       COMMAND ${CMAKE_COMMAND} -E copy ${IR_LIB} ${eager_generator_path}
-      DEPENDS ir)
+      DEPENDS pir)
     list(APPEND EAGER_CODEGEN_DEPS ${eager_generator_path}/ir.dll)
   endif()
diff --git a/paddle/fluid/eager/to_static/run_program_op_node.h b/paddle/fluid/eager/to_static/run_program_op_node.h
index 72c61c1723a3b..b3d598712b803 100644
--- a/paddle/fluid/eager/to_static/run_program_op_node.h
+++ b/paddle/fluid/eager/to_static/run_program_op_node.h
@@ -19,13 +19,13 @@
 #include "paddle/fluid/eager/tensor_wrapper.h"
 #include "paddle/fluid/framework/new_executor/interpretercore.h"
 #include "paddle/fluid/framework/variable_helper.h"
-#include "paddle/fluid/ir/transforms/pd_op_to_kernel_pass.h"
 #include "paddle/fluid/ir_adaptor/translator/program_translator.h"
 #include "paddle/fluid/operators/run_program_op.h"
+#include "paddle/fluid/pir/transforms/pd_op_to_kernel_pass.h"
 #include "paddle/fluid/platform/enforce.h"
 #include "paddle/fluid/platform/profiler/event_tracing.h"
-#include "paddle/ir/core/program.h"
-#include "paddle/ir/core/value.h"
+#include "paddle/pir/core/program.h"
+#include "paddle/pir/core/value.h"
 
 PHI_DECLARE_bool(enable_new_ir_in_executor);
diff --git a/paddle/fluid/framework/CMakeLists.txt b/paddle/fluid/framework/CMakeLists.txt
index 3befea7d0fd2b..439fb3abc19f8 100755
--- a/paddle/fluid/framework/CMakeLists.txt
+++ b/paddle/fluid/framework/CMakeLists.txt
@@ -1035,7 +1035,7 @@ cc_library(
   executor_cache
   SRCS executor_cache.cc
   DEPS parallel_executor standalone_executor phi_kernel_adaptor pd_inplace_pass
-       pd_op_to_kernel_pass ir)
+       pd_op_to_kernel_pass pir)
 if(WITH_PSCORE)
   get_property(RPC_DEPS GLOBAL PROPERTY RPC_DEPS)
   if(WITH_HETERPS)
diff --git a/paddle/fluid/framework/executor_cache.cc b/paddle/fluid/framework/executor_cache.cc
index f5c4c745cfd51..2c9eaebdd577f 100644
--- a/paddle/fluid/framework/executor_cache.cc
+++ b/paddle/fluid/framework/executor_cache.cc
@@ -16,14 +16,14 @@
 
 #include "paddle/fluid/framework/new_executor/interpretercore.h"
 #include "paddle/fluid/framework/op_info.h"
-#include "paddle/fluid/ir/transforms/inplace_pass.h"
-#include "paddle/fluid/ir/transforms/pd_op_to_kernel_pass.h"
 #include "paddle/fluid/ir_adaptor/translator/translate.h"
-#include "paddle/ir/core/program.h"
-#include "paddle/ir/core/value.h"
-#include "paddle/ir/pass/pass.h"
-#include "paddle/ir/pass/pass_manager.h"
+#include "paddle/fluid/pir/transforms/inplace_pass.h"
+#include "paddle/fluid/pir/transforms/pd_op_to_kernel_pass.h"
 #include "paddle/phi/core/flags.h"
+#include "paddle/pir/core/program.h"
+#include "paddle/pir/core/value.h"
+#include "paddle/pir/pass/pass.h"
+#include "paddle/pir/pass/pass_manager.h"
 
 PHI_DECLARE_bool(new_ir_apply_inplace_pass);
 
@@ -325,7 +325,7 @@ std::shared_ptr<InterpreterCore> CreateProgramInterpreterCoreInfoToCache(
 }
 
 std::shared_ptr<InterpreterCore> CreateNewIRInterpreterCoreInfoToCache(
-    std::unique_ptr<::ir::Program> ir_program,
+    std::unique_ptr<::pir::Program> ir_program,
     const platform::Place &place,
     bool is_grad,
     int64_t program_id,
@@ -352,14 +352,14 @@ std::shared_ptr<InterpreterCore> CreateNewIRInterpreterCoreInfoToCache(
   return core;
 }
 
-std::unique_ptr<::ir::Program> ConstructFowardIrProgram(
+std::unique_ptr<::pir::Program> ConstructFowardIrProgram(
     const paddle::framework::BlockDesc *forward_global_block,
     const paddle::framework::BlockDesc *backward_global_block,
    const std::vector<std::string> output_names,
    const std::vector<paddle::Tensor> &x,
    const std::vector<paddle::Tensor> &params) {
-  auto ir_ctx = ::ir::IrContext::Instance();
-  auto program = std::make_unique<::ir::Program>(ir_ctx);
+  auto ir_ctx = ::pir::IrContext::Instance();
+  auto program = std::make_unique<::pir::Program>(ir_ctx);
 
   std::set<std::string> set_output_names;
   auto local_program =
@@ -448,22 +448,22 @@ std::unique_ptr<::ir::Program> ConstructFowardIrProgram(
   auto ir_res = paddle::dialect::PdOpLowerToKernelPass(program.get());
 
   if (FLAGS_new_ir_apply_inplace_pass) {
-    ::ir::PassManager pm(::ir::IrContext::Instance(), 3);
-    pm.AddPass(::ir::CreateInplacePass());
+    ::pir::PassManager pm(::pir::IrContext::Instance(), 3);
+    pm.AddPass(::pir::CreateInplacePass());
     pm.Run(ir_res.get());
   }
 
   return ir_res;
 }
 
-std::unique_ptr<::ir::Program> ConstructBackwardIrProgram(
+std::unique_ptr<::pir::Program> ConstructBackwardIrProgram(
     const paddle::framework::BlockDesc *backward_global_block,
    const std::vector<paddle::Tensor> &out_grad,
    const std::vector<paddle::Tensor *> &x_grad,
    const std::vector<paddle::Tensor *> &params_grad,
    const paddle::framework::Scope *scope) {
-  auto ir_ctx = ::ir::IrContext::Instance();
-  auto program = std::make_unique<::ir::Program>(ir_ctx);
+  auto ir_ctx = ::pir::IrContext::Instance();
+  auto program = std::make_unique<::pir::Program>(ir_ctx);
 
   auto local_program =
       paddle::framework::ProgramDesc(*(backward_global_block->Program()));
@@ -527,8 +527,8 @@ std::unique_ptr<::ir::Program> ConstructBackwardIrProgram(
   auto res = paddle::dialect::PdOpLowerToKernelPass(program.get());
 
   if (FLAGS_new_ir_apply_inplace_pass) {
-    ::ir::PassManager pm(::ir::IrContext::Instance(), 3);
-    pm.AddPass(::ir::CreateInplacePass());
+    ::pir::PassManager pm(::pir::IrContext::Instance(), 3);
+    pm.AddPass(::pir::CreateInplacePass());
     pm.Run(res.get());
   }
diff --git a/paddle/fluid/framework/executor_cache.h b/paddle/fluid/framework/executor_cache.h
index edbbc0e9420af..f9999b6358603 100644
--- a/paddle/fluid/framework/executor_cache.h
+++ b/paddle/fluid/framework/executor_cache.h
@@ -30,9 +30,9 @@
 #include "paddle/fluid/string/string_helper.h"
 
 #include "paddle/fluid/ir_adaptor/translator/program_translator.h"
-#include "paddle/ir/core/dialect.h"
-#include "paddle/ir/core/ir_context.h"
-#include "paddle/ir/core/program.h"
+#include "paddle/pir/core/dialect.h"
+#include "paddle/pir/core/ir_context.h"
+#include "paddle/pir/core/program.h"
 
 PHI_DECLARE_bool(enable_new_ir_in_executor);
 namespace paddle {
@@ -243,20 +243,20 @@ std::shared_ptr<InterpreterCore> CreateProgramInterpreterCoreInfoToCache(
     framework::Scope* scope);
 
 std::shared_ptr<InterpreterCore> CreateNewIRInterpreterCoreInfoToCache(
-    std::unique_ptr<::ir::Program> ir_prog,
+    std::unique_ptr<::pir::Program> ir_prog,
     const platform::Place& place,
     bool is_grad,
     int64_t program_id,
     framework::Scope* scope);
 
-std::unique_ptr<::ir::Program> ConstructFowardIrProgram(
+std::unique_ptr<::pir::Program> ConstructFowardIrProgram(
     const paddle::framework::BlockDesc* forward_global_block,
     const paddle::framework::BlockDesc* backward_global_block,
    const std::vector<std::string> output_names,
    const std::vector<paddle::Tensor>& x,
    const std::vector<paddle::Tensor>& params);
 
-std::unique_ptr<::ir::Program> ConstructBackwardIrProgram(
+std::unique_ptr<::pir::Program> ConstructBackwardIrProgram(
     const paddle::framework::BlockDesc* backward_global_block,
    const std::vector<paddle::Tensor>& out_grad,
    const std::vector<paddle::Tensor*>& x_grad,
diff --git a/paddle/fluid/framework/new_executor/CMakeLists.txt b/paddle/fluid/framework/new_executor/CMakeLists.txt
index 16b18c2d7d6bd..ae30121bc930b 100644
--- a/paddle/fluid/framework/new_executor/CMakeLists.txt
+++ b/paddle/fluid/framework/new_executor/CMakeLists.txt
@@ -11,13 +11,13 @@ set(STANDALONE_EXECUTOR_DEPS
     interpreter
     interpretercore_garbage_collector
     workqueue
-    pd_dialect
+    pd_op_dialect
    pd_op_to_kernel_pass
    phi_kernel_adaptor
    program_translator
    instruction_base
    pd_inplace_pass
-    ir)
+    pir)
 
 cc_library(
   standalone_executor
diff --git a/paddle/fluid/framework/new_executor/instruction/CMakeLists.txt b/paddle/fluid/framework/new_executor/instruction/CMakeLists.txt
index 8a9247859b85f..7706e462fef76 100644
--- a/paddle/fluid/framework/new_executor/instruction/CMakeLists.txt
+++ b/paddle/fluid/framework/new_executor/instruction/CMakeLists.txt
@@ -8,5 +8,5 @@ if(WITH_CINN AND NOT CINN_ONLY)
   cc_library(
     cinn_jit_instruction NOT_FOR_INFER
     SRCS cinn_jit_instruction.cc
-    DEPS phi cinnapi cinn_dialect runtime_dialect)
+    DEPS phi cinnapi cinn_op_dialect cinn_runtime_dialect)
 endif()
diff --git a/paddle/fluid/framework/new_executor/instruction/cinn_jit_instruction.cc b/paddle/fluid/framework/new_executor/instruction/cinn_jit_instruction.cc
index d56ccc7b7ba6b..b880cbf97435c 100644
--- a/paddle/fluid/framework/new_executor/instruction/cinn_jit_instruction.cc
+++ b/paddle/fluid/framework/new_executor/instruction/cinn_jit_instruction.cc
@@ -14,8 +14,8 @@
 
 #include "paddle/fluid/framework/new_executor/instruction/cinn_jit_instruction.h"
 
-#include "paddle/cinn/hlir/dialect/runtime_dialect/ir/jit_kernel_op.h"
-#include "paddle/cinn/hlir/dialect/runtime_dialect/ir/runtime_dialect.h"
+#include "paddle/cinn/hlir/dialect/runtime/ir/jit_kernel_op.h"
+#include "paddle/cinn/hlir/dialect/runtime/ir/runtime_dialect.h"
 #include "paddle/cinn/hlir/framework/instruction.h"
 #include "paddle/fluid/framework/paddle2cinn/transform_type.h"
 
@@ -93,7 +93,7 @@ class CinnJitInstruction::Impl {
 
 CinnJitInstruction::CinnJitInstruction(size_t id,
                                        const platform::Place& place,
-                                       ::ir::Operation* op,
+                                       ::pir::Operation* op,
                                        Scope* scope)
     : InstructionBase(id, place) {
   // TODO(Aurelius84): We shall simplify members of JitKernelOp to make it
diff --git a/paddle/fluid/framework/new_executor/instruction/cinn_jit_instruction.h b/paddle/fluid/framework/new_executor/instruction/cinn_jit_instruction.h
index b20f6e08d9afc..633b9b216f8d5 100644
--- a/paddle/fluid/framework/new_executor/instruction/cinn_jit_instruction.h
+++ b/paddle/fluid/framework/new_executor/instruction/cinn_jit_instruction.h
@@ -17,7 +17,7 @@
 #include <memory>
 #include "paddle/fluid/framework/new_executor/instruction/instruction_base.h"
 
-namespace ir {
+namespace pir {
 class Operation;
 }
 
@@ -29,7 +29,7 @@ class CinnJitInstruction : public InstructionBase {
  public:
   CinnJitInstruction(size_t id,
                      const platform::Place& place,
-                     ::ir::Operation* op,
+                     ::pir::Operation* op,
                      Scope* scope);
 
   // TODO(Aurelius84): Only implement core interface and need implement GC and
diff --git a/paddle/fluid/framework/new_executor/instruction/instruction_base.cc b/paddle/fluid/framework/new_executor/instruction/instruction_base.cc
index 56dafd3132c03..6836a7f306daa 100644
--- a/paddle/fluid/framework/new_executor/instruction/instruction_base.cc
+++ b/paddle/fluid/framework/new_executor/instruction/instruction_base.cc
@@ -20,7 +20,7 @@
 #include "paddle/fluid/framework/new_executor/interpreter/stream_analyzer.h"
 #include "paddle/fluid/platform/collective_helper.h"
 
-#include "paddle/ir/core/builtin_attribute.h"
+#include "paddle/pir/core/builtin_attribute.h"
 
 namespace paddle {
 namespace framework {
@@ -90,28 +90,28 @@ void InstructionBase::AddInplace(Variable* in, Variable* out) {
 void InstructionBase::ClearInplace() { vec_inplace_in_to_out_.clear(); }
 
 void InstructionBase::SetInputs(
-    const std::unordered_map<ir::Value, std::vector<int>>& inputs) {
+    const std::unordered_map<pir::Value, std::vector<int>>& inputs) {
   input_index_ = inputs;
 }
 
 void InstructionBase::SetOutputs(
-    const std::unordered_map<ir::Value, std::vector<int>>& outputs) {
+    const std::unordered_map<pir::Value, std::vector<int>>& outputs) {
   output_index_ = outputs;
 }
 
 void InstructionBase::InitInputsOutputsIds(
-    ::ir::Operation* op,
+    ::pir::Operation* op,
     Scope* inner_scope,
-    const std::unordered_map<::ir::Value, std::string>& value_2_var_name,
+    const std::unordered_map<pir::Value, std::string>& value_2_var_name,
     const std::map<std::string, int>& var_name_2_id,
     const std::unordered_map<const paddle::framework::Variable*, std::string>&
         variable_2_var_name) {
   auto op_attributes = op->attributes();
   auto op_name =
-      op_attributes.at("op_name").dyn_cast<::ir::StrAttribute>().AsString();
-  std::unordered_map<ir::Value, std::vector<int>> inputs;
+      op_attributes.at("op_name").dyn_cast<pir::StrAttribute>().AsString();
+  std::unordered_map<pir::Value, std::vector<int>> inputs;
   for (size_t i = 0; i < op->num_operands(); i++) {
-    ir::Value value = op->operand_source(i);
+    pir::Value value = op->operand_source(i);
     if (value) {
       PADDLE_ENFORCE_NE(
           value_2_var_name.find(value),
@@ -130,9 +130,9 @@ void InstructionBase::InitInputsOutputsIds(
   }
   SetInputs(inputs);
   VLOG(8) << "finish process inputs_index";
-  std::unordered_map<ir::Value, std::vector<int>> outputs;
+  std::unordered_map<pir::Value, std::vector<int>> outputs;
   for (size_t i = 0; i < op->num_results(); i++) {
-    ir::Value value = op->result(i);
+    pir::Value value = op->result(i);
     if (value && value.type()) {
       PADDLE_ENFORCE_NE(
           value_2_var_name.find(value),
diff --git a/paddle/fluid/framework/new_executor/instruction/instruction_base.h b/paddle/fluid/framework/new_executor/instruction/instruction_base.h
index b8271a0ea0012..b4eb34d069348 100644
--- a/paddle/fluid/framework/new_executor/instruction/instruction_base.h
+++ b/paddle/fluid/framework/new_executor/instruction/instruction_base.h
@@ -22,9 +22,9 @@
 #include "paddle/fluid/framework/new_executor/new_executor_defs.h"
 #include "paddle/fluid/platform/event.h"
 
-namespace ir {
+namespace pir {
 class Value;
-}  // namespace ir
+}  // namespace pir
 
 namespace paddle {
 namespace framework {
@@ -107,29 +107,29 @@ class InstructionBase {
   std::map<int, int>& GetMutableInplaceBackMap() { return inplace_back_map_; }
   const std::map<int, int>& GetInplaceBackMap() { return inplace_back_map_; }
 
-  const std::unordered_map<::ir::Value, std::vector<int>>& Inputs() const {
+  const std::unordered_map<::pir::Value, std::vector<int>>& Inputs() const {
     return input_index_;
   }
-  std::unordered_map<::ir::Value, std::vector<int>>& GetMutableInputs() {
+  std::unordered_map<::pir::Value, std::vector<int>>& GetMutableInputs() {
     return input_index_;
   }
   void SetInputs(
-      const std::unordered_map<::ir::Value, std::vector<int>>& inputs);
+      const std::unordered_map<::pir::Value, std::vector<int>>& inputs);
 
-  const std::unordered_map<::ir::Value, std::vector<int>>& Outputs() const {
+  const std::unordered_map<::pir::Value, std::vector<int>>& Outputs() const {
     return output_index_;
   }
-  std::unordered_map<::ir::Value, std::vector<int>>& GetMutableOutputs() {
+  std::unordered_map<::pir::Value, std::vector<int>>& GetMutableOutputs() {
     return output_index_;
   }
   void SetOutputs(
-      const std::unordered_map<::ir::Value, std::vector<int>>& outputs);
+      const std::unordered_map<::pir::Value, std::vector<int>>& outputs);
 
-  const std::unordered_set<::ir::Value>& NoNeedBuffer() const {
+  const std::unordered_set<::pir::Value>& NoNeedBuffer() const {
     return no_need_buffer_values_;
   }
   void SetNoNeedBuffer(
-      const std::unordered_set<::ir::Value>& no_need_buffer_values) {
+      const std::unordered_set<::pir::Value>& no_need_buffer_values) {
     no_need_buffer_values_ = no_need_buffer_values;
   }
 
@@ -138,9 +138,9 @@ class InstructionBase {
   virtual const std::string& Name() const = 0;
 
   void InitInputsOutputsIds(
-      ::ir::Operation* op,
+      ::pir::Operation* op,
       Scope* inner_scope,
-      const std::unordered_map<::ir::Value, std::string>& value_2_var_name,
+      const std::unordered_map<::pir::Value, std::string>& value_2_var_name,
       const std::map<std::string, int>& var_name_2_id,
       const std::unordered_map<const paddle::framework::Variable*,
                                std::string>& variable_2_var_name);
 
@@ -176,11 +176,11 @@ class InstructionBase {
 
   std::map<int, int> inplace_back_map_;
 
-  std::unordered_map<::ir::Value, std::vector<int>> input_index_;
+  std::unordered_map<::pir::Value, std::vector<int>> input_index_;
 
-  std::unordered_map<::ir::Value, std::vector<int>> output_index_;
+  std::unordered_map<::pir::Value, std::vector<int>> output_index_;
 
-  std::unordered_set<::ir::Value> no_need_buffer_values_;
+  std::unordered_set<::pir::Value> no_need_buffer_values_;
 };
 
 }  // namespace framework
diff --git a/paddle/fluid/framework/new_executor/instruction/instruction_util.cc b/paddle/fluid/framework/new_executor/instruction/instruction_util.cc
index dd6aa26a1ae53..fdcceb4896a81 100644
--- a/paddle/fluid/framework/new_executor/instruction/instruction_util.cc
+++ b/paddle/fluid/framework/new_executor/instruction/instruction_util.cc
@@ -22,22 +22,22 @@
 #include "paddle/fluid/framework/new_executor/new_executor_defs.h"
 #include "paddle/fluid/platform/device_context.h"
 #include "paddle/fluid/platform/event.h"
-#include "paddle/ir/core/builtin_attribute.h"
-#include "paddle/ir/core/operation.h"
-#include "paddle/ir/core/value.h"
+#include "paddle/pir/core/builtin_attribute.h"
+#include "paddle/pir/core/operation.h"
+#include "paddle/pir/core/value.h"
 
 #include "paddle/fluid/framework/new_executor/interpreter/interpreter_util.h"
 #include "paddle/fluid/framework/new_executor/interpreter/stream_analyzer.h"
-#include "paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.h"
+#include "paddle/fluid/pir/phi_kernel_adaptor/phi_kernel_util.h"
 #include "paddle/fluid/platform/collective_helper.h"
 
 namespace paddle {
 namespace framework {
 
 std::vector<int> GetValueIds(
-    ir::Value value,
+    pir::Value value,
     Scope* inner_scope,
-    const std::unordered_map<::ir::Value, std::string>& value_2_var_name,
+    const std::unordered_map<pir::Value, std::string>& value_2_var_name,
     const std::map<std::string, int>& var_name_2_id,
     const std::unordered_map<const paddle::framework::Variable*, std::string>&
         variable_2_var_name) {
@@ -56,14 +56,14 @@ std::vector<int> GetValueIds(
 }
 
 platform::DeviceContext* ParseDeviceContext(
-    ir::Operation* op,
+    pir::Operation* op,
     platform::DeviceContext* origin_dev_ctx,
     const platform::Place& place,
     const std::string& execution_stream,
     const int stream_priority) {
   auto& op_attributes = op->attributes();
   auto op_name =
-      op_attributes.at("op_name").dyn_cast<::ir::StrAttribute>().AsString();
+      op_attributes.at("op_name").dyn_cast<pir::StrAttribute>().AsString();
   interpreter::ContextManager& ctx_manager =
       interpreter::ContextManager::Instance();
 
@@ -109,10 +109,10 @@ platform::DeviceContext* ParseDeviceContext(
   // c_allreduce_op.h). Now it is just a temporary solution for ONLY
   // c_allreduce_sum which is used in ResNet50 distributed training.
   if (op_name == "c_allreduce_sum" && op_attributes.at("use_calc_stream")
-                                          .dyn_cast<::ir::BoolAttribute>()
+                                          .dyn_cast<pir::BoolAttribute>()
                                           .data() == false) {
     int ring_id =
-        op_attributes.at("ring_id").dyn_cast<::ir::Int32Attribute>().data();
+        op_attributes.at("ring_id").dyn_cast<pir::Int32Attribute>().data();
     return platform::NCCLCommContext::Instance()
         .Get(ring_id, place)
         ->dev_context();
@@ -126,8 +126,7 @@ platform::DeviceContext* ParseDeviceContext(
   return origin_dev_ctx;
 }
 
-OpFuncType AnalyseOpFuncType(::ir::Operation* op,
-                             const platform::Place& place) {
+OpFuncType AnalyseOpFuncType(pir::Operation* op, const platform::Place& place) {
   if (platform::is_cpu_place(place)) {
     return OpFuncType::kCpuSync;
   }
@@ -151,12 +150,12 @@ OpFuncType AnalyseOpFuncType(::ir::Operation* op,
   // and so that they would be dispatched to host thread.
auto& op_attributes = op->attributes(); auto op_name = - op_attributes.at("op_name").dyn_cast<::ir::StrAttribute>().AsString(); + op_attributes.at("op_name").dyn_cast().AsString(); if (op_name == kCoalesceTensor && (!platform::is_xpu_place(place) || - op->attribute("persist_output").data() == false) && - op->attribute("set_constant").data() == false && - op->attribute("copy_data").data() == false) { + op->attribute("persist_output").data() == false) && + op->attribute("set_constant").data() == false && + op->attribute("copy_data").data() == false) { return OpFuncType::kGpuSync; } diff --git a/paddle/fluid/framework/new_executor/instruction/instruction_util.h b/paddle/fluid/framework/new_executor/instruction/instruction_util.h index a41ce07957e4a..c555a101d8366 100644 --- a/paddle/fluid/framework/new_executor/instruction/instruction_util.h +++ b/paddle/fluid/framework/new_executor/instruction/instruction_util.h @@ -22,28 +22,29 @@ #include "paddle/fluid/framework/new_executor/new_executor_defs.h" #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/event.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/operation.h" -#include "paddle/ir/core/value.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/operation.h" +#include "paddle/pir/core/value.h" namespace paddle { namespace framework { std::vector GetValueIds( - ir::Value value, + pir::Value value, Scope* inner_scope, - const std::unordered_map<::ir::Value, std::string>& value_2_var_name, + const std::unordered_map<::pir::Value, std::string>& value_2_var_name, const std::map& var_name_2_id, const std::unordered_map& variable_2_var_name); platform::DeviceContext* ParseDeviceContext( - ir::Operation* op, + pir::Operation* op, platform::DeviceContext* origin_dev_ctx, const platform::Place& place, const std::string& execution_stream, const int stream_priority); -OpFuncType AnalyseOpFuncType(::ir::Operation* op, const platform::Place& place); +OpFuncType AnalyseOpFuncType(::pir::Operation* op, + const platform::Place& place); } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/new_executor/instruction/legacy_kernel_instruction.cc b/paddle/fluid/framework/new_executor/instruction/legacy_kernel_instruction.cc index 88037b15193d8..ac8c57c556d4b 100644 --- a/paddle/fluid/framework/new_executor/instruction/legacy_kernel_instruction.cc +++ b/paddle/fluid/framework/new_executor/instruction/legacy_kernel_instruction.cc @@ -18,11 +18,11 @@ #include "paddle/fluid/framework/new_executor/interpreter/interpreter_util.h" #include "paddle/fluid/framework/new_executor/interpreter/stream_analyzer.h" #include "paddle/fluid/framework/scope.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/interface/infermeta.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/interface/op_yaml_info.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/utils/op_yaml_info_parser.h" -#include "paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.h" +#include "paddle/fluid/pir/dialect/operator/interface/infermeta.h" +#include "paddle/fluid/pir/dialect/operator/interface/op_yaml_info.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_dialect.h" +#include "paddle/fluid/pir/dialect/operator/utils/op_yaml_info_parser.h" +#include "paddle/fluid/pir/phi_kernel_adaptor/phi_kernel_util.h" #include "paddle/fluid/platform/device_context.h" #include "paddle/phi/core/infermeta_utils.h" @@ -35,18 +35,19 @@ namespace 
@@ -35,18 +35,19 @@ namespace framework {
 LegacyKernelInstruction::LegacyKernelInstruction(
     size_t id,
     const platform::Place& place,
-    ir::Operation* op,
+    pir::Operation* op,
     Scope* scope,
     Scope* local_scope,
-    const std::unordered_map<::ir::Value, std::string>& value_2_var_name,
+    const std::unordered_map<::pir::Value, std::string>& value_2_var_name,
     const std::map<std::string, int>& var_name_2_id,
     const std::unordered_map<const paddle::framework::Variable*, std::string>&
         variable_2_var_name)
     : InstructionBase(id, place) {
   auto& op_attributes = op->attributes();
   auto op_name =
-      op_attributes.at("op_name").dyn_cast<::ir::StrAttribute>().AsString();
-  ir::OpInfo op_info = ir::IrContext::Instance()->GetRegisteredOpInfo(op_name);
+      op_attributes.at("op_name").dyn_cast<::pir::StrAttribute>().AsString();
+  pir::OpInfo op_info =
+      pir::IrContext::Instance()->GetRegisteredOpInfo(op_name);
   legacy_op_name_ = op_name;
   VLOG(6) << "construct phi kernel instruction for: " << legacy_op_name_;
@@ -55,17 +56,17 @@ LegacyKernelInstruction::LegacyKernelInstruction(
   // if (op_attributes.count("dist_attr") != 0) {
   //   if (op_attributes.count("execution_stream") != 0) {
   //     SetExecutionStream(op_attributes.at("execution_stream")
-  //                            .dyn_cast<::ir::StrAttribute>()
+  //                            .dyn_cast<::pir::StrAttribute>()
   //                            .data());
   //   }
   //   if (op_attributes.count("stream_priority") != 0) {
   //     SetStreamPriority(op_attributes.at("stream_priority")
-  //                           .dyn_cast<::ir::Int32Attribute>()
+  //                           .dyn_cast<::pir::Int32Attribute>()
   //                           .data());
   //   }
   //   if (op_attributes.count("scheduling_priority") != 0) {
   //     SetSchedulingPriority(op_attributes.at("scheduling_priority")
-  //                               .dyn_cast<::ir::Int64Attribute>()
+  //                               .dyn_cast<::pir::Int64Attribute>()
   //                               .data());
   //   }
   // } else {
@@ -98,7 +99,7 @@ LegacyKernelInstruction::LegacyKernelInstruction(
   VLOG(6) << "finish process yaml_info_parser";

   if (infer_meta_interface_) {
-    ::ir::BuildPhiContext<
+    pir::BuildPhiContext<
         phi::InferMetaContext,
         phi::MetaTensor,
         phi::MetaTensor,
@@ -114,7 +115,7 @@ LegacyKernelInstruction::LegacyKernelInstruction(
   VLOG(6) << "finish process infer meta context";

   auto kernel_name =
-      op_attributes.at("kernel_name").dyn_cast<ir::StrAttribute>().AsString();
+      op_attributes.at("kernel_name").dyn_cast<pir::StrAttribute>().AsString();
   auto kernel_key = op_attributes.at("kernel_key")
                         .dyn_cast<paddle::dialect::KernelAttribute>()
                         .data();
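The constructor resolves the concrete op through the IrContext singleton. A condensed sketch of that lookup under the renamed namespaces (op and attribute names mirror the code above; header paths assumed from the renamed tree):

#include <string>
#include "paddle/pir/core/builtin_attribute.h"
#include "paddle/pir/core/ir_context.h"
#include "paddle/pir/core/operation.h"

::pir::OpInfo ResolveOpInfo(::pir::Operation* op) {
  // Kernel-dialect ops carry the original op name as a string attribute.
  auto name =
      op->attributes().at("op_name").dyn_cast<::pir::StrAttribute>().AsString();
  // The process-wide IrContext owns every registered OpInfo.
  return ::pir::IrContext::Instance()->GetRegisteredOpInfo(name);
}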
@@ -127,7 +128,7 @@ LegacyKernelInstruction::LegacyKernelInstruction(
   Scope* inner_scope = local_scope == nullptr ? scope : local_scope;

-  operator_base_ = ir::BuildOperatorBase(
+  operator_base_ = pir::BuildOperatorBase(
       op, value_2_var_name, yaml_info_parser, variable_2_var_name, inner_scope);
   paddle::framework::VariableValueMap in_map;
   paddle::framework::VariableValueMap out_map;
@@ -136,12 +137,12 @@ LegacyKernelInstruction::LegacyKernelInstruction(
   runtime_context_ = std::make_shared<paddle::framework::RuntimeContext>(
       paddle::framework::RuntimeContext(in_map, out_map));
-  ir::BuildRuntimeContext(op,
-                          value_2_var_name,
-                          scope,
-                          local_scope,
-                          yaml_info_parser,
-                          runtime_context_.get());
+  pir::BuildRuntimeContext(op,
+                           value_2_var_name,
+                           scope,
+                           local_scope,
+                           yaml_info_parser,
+                           runtime_context_.get());
   kernel_context_ = new paddle::framework::ExecutionContext(
       *operator_base_, *local_scope, *dev_ctx, *(runtime_context_.get()));

@@ -160,7 +161,7 @@ LegacyKernelInstruction::LegacyKernelInstruction(
   VLOG(6) << "finish process inputs outputs index";

   auto& no_need_buffer_ids = yaml_info_parser.NoNeedBufferIds();
-  std::unordered_set<::ir::Value> no_need_buffer_values;
+  std::unordered_set<::pir::Value> no_need_buffer_values;
   for (size_t id = 0; id < no_need_buffer_ids.size(); id++) {
     no_need_buffer_values.insert(op->operand_source(no_need_buffer_ids[id]));
   }
diff --git a/paddle/fluid/framework/new_executor/instruction/legacy_kernel_instruction.h b/paddle/fluid/framework/new_executor/instruction/legacy_kernel_instruction.h
index 27c1cb133bec0..cb3738cecb1f1 100644
--- a/paddle/fluid/framework/new_executor/instruction/legacy_kernel_instruction.h
+++ b/paddle/fluid/framework/new_executor/instruction/legacy_kernel_instruction.h
@@ -16,10 +16,10 @@

 #include "paddle/fluid/framework/new_executor/instruction/instruction_base.h"

-namespace ir {
+namespace pir {
 class Operation;
 class Value;
-}  // namespace ir
+}  // namespace pir

 namespace paddle {
 namespace framework {
@@ -30,10 +30,10 @@ class LegacyKernelInstruction : public InstructionBase {
   LegacyKernelInstruction(
       size_t id,
       const platform::Place& place,
-      ::ir::Operation* op,
+      ::pir::Operation* op,
       Scope* scope,
       Scope* local_scope,
-      const std::unordered_map<::ir::Value, std::string>& value_2_var_name,
+      const std::unordered_map<::pir::Value, std::string>& value_2_var_name,
       const std::map<std::string, int>& var_name_2_id,
       const std::unordered_map<const paddle::framework::Variable*,
                                std::string>& variable_2_var_name);
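Note that the header hunk swaps forward declarations rather than includes. A self-contained sketch of the pattern: pointer and reference parameters only need the names declared, which keeps paddle/pir out of the public include graph of these headers:

namespace pir {
class Operation;  // defined in paddle/pir/core/operation.h
class Value;      // defined in paddle/pir/core/value.h
}  // namespace pir

namespace paddle {
namespace framework {
// Pointer parameter, so the forward declaration above is sufficient.
void Inspect(::pir::Operation* op);
}  // namespace framework
}  // namespace paddle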
"paddle/fluid/pir/phi_kernel_adaptor/phi_kernel_util.h" #include "paddle/fluid/platform/collective_helper.h" #include "paddle/fluid/platform/device_context.h" #include "paddle/phi/core/infermeta_utils.h" #include "paddle/phi/core/meta_tensor.h" #include "paddle/phi/core/type_defs.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/operation.h" -#include "paddle/ir/core/value.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/operation.h" +#include "paddle/pir/core/value.h" #include "paddle/fluid/framework/new_executor/instruction/instruction_util.h" namespace paddle { @@ -39,18 +39,19 @@ namespace framework { PhiKernelInstruction::PhiKernelInstruction( size_t id, const platform::Place& place, - ir::Operation* op, + pir::Operation* op, Scope* scope, Scope* local_scope, - const std::unordered_map<::ir::Value, std::string>& value_2_var_name, + const std::unordered_map& value_2_var_name, const std::map& var_name_2_id, const std::unordered_map& variable_2_var_name) : InstructionBase(id, place) { auto op_attributes = op->attributes(); auto op_name = - op_attributes.at("op_name").dyn_cast<::ir::StrAttribute>().AsString(); - ir::OpInfo op_info = ir::IrContext::Instance()->GetRegisteredOpInfo(op_name); + op_attributes.at("op_name").dyn_cast().AsString(); + pir::OpInfo op_info = + pir::IrContext::Instance()->GetRegisteredOpInfo(op_name); phi_op_name_ = op_name; VLOG(6) << "construct phi kernel instruction for: " << phi_op_name_; @@ -59,17 +60,17 @@ PhiKernelInstruction::PhiKernelInstruction( // if (op_attributes.count("dist_attr") != 0) { // if (op_attributes.count("execution_stream") != 0) { // SetExecutionStream(op_attributes.at("execution_stream") - // .dyn_cast<::ir::StrAttribute>() + // .dyn_cast() // .data()); // } // if (op_attributes.count("stream_priority") != 0) { // SetStreamPriority(op_attributes.at("stream_priority") - // .dyn_cast<::ir::Int32Attribute>() + // .dyn_cast() // .data()); // } // if (op_attributes.count("scheduling_priority") != 0) { // SetSchedulingPriority(op_attributes.at("scheduling_priority") - // .dyn_cast<::ir::Int64Attribute>() + // .dyn_cast() // .data()); // } // } else { @@ -102,7 +103,7 @@ PhiKernelInstruction::PhiKernelInstruction( VLOG(6) << "finish process yaml_info_parser"; if (infer_meta_interface_) { - ::ir::BuildPhiContext< + pir::BuildPhiContext< phi::InferMetaContext, phi::MetaTensor, phi::MetaTensor, @@ -118,7 +119,7 @@ PhiKernelInstruction::PhiKernelInstruction( VLOG(6) << "finish process infer meta context"; auto kernel_name = - op_attributes.at("kernel_name").dyn_cast().AsString(); + op_attributes.at("kernel_name").dyn_cast().AsString(); auto kernel_key = op_attributes.at("kernel_key") .dyn_cast() .data(); @@ -129,17 +130,17 @@ PhiKernelInstruction::PhiKernelInstruction( phi_kernel_->IsValid(), true, "not found kernel for [%s]", kernel_name); VLOG(6) << "finish process select kernel"; - ::ir::BuildPhiContext, - paddle::small_vector, - true>(op, - value_2_var_name, - scope, - local_scope, - yaml_info_parser, - &kernel_context_); + pir::BuildPhiContext, + paddle::small_vector, + true>(op, + value_2_var_name, + scope, + local_scope, + yaml_info_parser, + &kernel_context_); kernel_context_.SetDeviceContext(phi::DeviceContextPool::Instance().Get( phi::TransToPhiPlace(kernel_key.backend()))); VLOG(6) << "finish process kernel context"; @@ -159,7 +160,7 @@ PhiKernelInstruction::PhiKernelInstruction( VLOG(6) << "finish process inputs outputs index"; auto& no_need_buffer_ids = 
yaml_info_parser.NoNeedBufferIds(); - std::unordered_set<::ir::Value> no_need_buffer_values; + std::unordered_set no_need_buffer_values; for (size_t id = 0; id < no_need_buffer_ids.size(); id++) { no_need_buffer_values.insert(op->operand_source(no_need_buffer_ids[id])); } diff --git a/paddle/fluid/framework/new_executor/instruction/phi_kernel_instruction.h b/paddle/fluid/framework/new_executor/instruction/phi_kernel_instruction.h index c637cce8651fb..e7b5e12b3405b 100644 --- a/paddle/fluid/framework/new_executor/instruction/phi_kernel_instruction.h +++ b/paddle/fluid/framework/new_executor/instruction/phi_kernel_instruction.h @@ -16,9 +16,9 @@ #include "paddle/fluid/framework/new_executor/instruction/instruction_base.h" -namespace ir { +namespace pir { class Operation; -} // namespace ir +} // namespace pir namespace paddle { namespace framework { @@ -30,10 +30,10 @@ class PhiKernelInstruction : public InstructionBase { PhiKernelInstruction( size_t id, const platform::Place& place, - ::ir::Operation* op, + ::pir::Operation* op, Scope* scope, Scope* local_scope, - const std::unordered_map<::ir::Value, std::string>& value_2_var_name, + const std::unordered_map<::pir::Value, std::string>& value_2_var_name, const std::map& var_name_2_id, const std::unordered_map& variable_2_var_name); diff --git a/paddle/fluid/framework/new_executor/interpreter/interpreter_util.cc b/paddle/fluid/framework/new_executor/interpreter/interpreter_util.cc index a717a3ed09531..fe17c07700a5f 100644 --- a/paddle/fluid/framework/new_executor/interpreter/interpreter_util.cc +++ b/paddle/fluid/framework/new_executor/interpreter/interpreter_util.cc @@ -23,15 +23,15 @@ #include "paddle/fluid/framework/new_executor/interpreter/data_transfer.h" #include "paddle/fluid/framework/new_executor/interpreter/execution_config.h" #include "paddle/fluid/framework/new_executor/interpreter/static_build.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/interface/op_yaml_info.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/utils/op_yaml_info_parser.h" -#include "paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.h" #include "paddle/fluid/memory/stats.h" #include "paddle/fluid/operators/controlflow/conditional_block_op_helper.h" #include "paddle/fluid/operators/controlflow/recurrent_op_helper.h" #include "paddle/fluid/operators/controlflow/while_op_helper.h" #include "paddle/fluid/operators/ops_extra_info.h" +#include "paddle/fluid/pir/dialect/operator/interface/op_yaml_info.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_dialect.h" +#include "paddle/fluid/pir/dialect/operator/utils/op_yaml_info_parser.h" +#include "paddle/fluid/pir/phi_kernel_adaptor/phi_kernel_util.h" #include "paddle/fluid/platform/flags.h" #include "paddle/phi/core/distributed/comm_context_manager.h" #include "paddle/phi/core/kernel_context.h" @@ -191,7 +191,7 @@ bool IsMemcpyH2D(Instruction* instr) { } bool IsMemcpyH2D(paddle::framework::InstructionBase* instr) { - return instr->Name() == "pd.memcpy_h2d"; + return instr->Name() == "pd_op.memcpy_h2d"; } bool IsMemcpyOp(const Instruction& instr) { @@ -1016,23 +1016,23 @@ void BuildOpFuncList(const platform::Place& place, void BuildOpFuncList( const platform::Place& place, - ::ir::Block* block, + pir::Block* block, std::vector* vec_func_list, framework::Scope* scope, framework::Scope* local_scope, - const std::unordered_map<::ir::Value, std::string>& value_2_name_map, + const std::unordered_map& value_2_name_map, const ExecutionConfig& 
execution_config) { vec_func_list->reserve(block->size()); - ::ir::IrContext* ctx = ir::IrContext::Instance(); + pir::IrContext* ctx = pir::IrContext::Instance(); - ctx->GetOrRegisterDialect(); + ctx->GetOrRegisterDialect(); for (auto op : *block) { OpFuncNode op_func_node; auto attr_map = op->attributes(); auto op_name = - attr_map.at("op_name").dyn_cast<::ir::StrAttribute>().AsString(); + attr_map.at("op_name").dyn_cast().AsString(); op_func_node.phi_op_name_ = op_name; if (GetSpecialOpNames().count(op_name)) { @@ -1040,7 +1040,7 @@ void BuildOpFuncList( continue; } - ::ir::OpInfo op_info = ctx->GetRegisteredOpInfo(op_name); + pir::OpInfo op_info = ctx->GetRegisteredOpInfo(op_name); auto impl = op_info.GetInterfaceImpl(); @@ -1051,7 +1051,7 @@ void BuildOpFuncList( VLOG(6) << "op name" << op_func_node.phi_op_name_; dialect::OpYamlInfoParser op_yaml_info_parser(impl->get_op_info_()); if (op_func_node.infer_meta_interface_) { - ::ir::BuildPhiContext< + pir::BuildPhiContext< phi::InferMetaContext, phi::MetaTensor, phi::MetaTensor, @@ -1066,7 +1066,7 @@ void BuildOpFuncList( } auto kernel_name = - attr_map.at("kernel_name").dyn_cast().AsString(); + attr_map.at("kernel_name").dyn_cast().AsString(); auto kernel_key = attr_map.at("kernel_key") .dyn_cast() .data(); @@ -1081,17 +1081,17 @@ void BuildOpFuncList( "not found kernel for [%s]", kernel_name); - ::ir::BuildPhiContext, - paddle::small_vector, - true>(op, - value_2_name_map, - scope, - local_scope, - op_yaml_info_parser, - &(op_func_node.kernel_context_)); + pir::BuildPhiContext, + paddle::small_vector, + true>(op, + value_2_name_map, + scope, + local_scope, + op_yaml_info_parser, + &(op_func_node.kernel_context_)); VLOG(6) << "finish process kernel context"; op_func_node.kernel_context_.SetDeviceContext( @@ -1184,12 +1184,12 @@ void SetDeviceCommContext(framework::OperatorBase* operator_base, } } -void SetDeviceCommContext(::ir::Operation* op, +void SetDeviceCommContext(pir::Operation* op, platform::DeviceContext* dev_ctx) { auto op_attributes = op->attributes(); if (op_attributes.count("ring_id") != 0) { int ring_id = - op_attributes.at("ring_id").dyn_cast<::ir::Int32Attribute>().data(); + op_attributes.at("ring_id").dyn_cast().data(); const auto& comm_context_manager = phi::distributed::CommContextManager::GetInstance(); if (comm_context_manager.Has(std::to_string(ring_id))) { @@ -1200,7 +1200,7 @@ void SetDeviceCommContext(::ir::Operation* op, } else { VLOG(3) << "op: " << op_attributes.at("op_name") - .dyn_cast<::ir::StrAttribute>() + .dyn_cast() .AsString() << ", ring_id: " << ring_id << ", get comm_context failed!"; } @@ -1211,11 +1211,11 @@ std::unordered_set GetSpecialOpNames() { return { "builtin.combine", "builtin.slice", - "pd.feed", + "pd_op.feed", "builtin.set_parameter", "builtin.get_parameter", - "pd.data", - "pd.shadow_output", + "pd_op.data", + "pd_op.shadow_output", }; } diff --git a/paddle/fluid/framework/new_executor/interpreter/interpreter_util.h b/paddle/fluid/framework/new_executor/interpreter/interpreter_util.h index 33b89cac542d4..661b05b561b4b 100644 --- a/paddle/fluid/framework/new_executor/interpreter/interpreter_util.h +++ b/paddle/fluid/framework/new_executor/interpreter/interpreter_util.h @@ -106,11 +106,11 @@ void BuildOpFuncList(const platform::Place& place, void BuildOpFuncList( const platform::Place& place, - ::ir::Block* block, + ::pir::Block* block, std::vector* vec_func_list, framework::Scope* scope, framework::Scope* local_scope, - const std::unordered_map<::ir::Value, std::string>& 
value_2_name_map, + const std::unordered_map<::pir::Value, std::string>& value_2_name_map, const ExecutionConfig& execution_config); void BuildVariableScope(const framework::BlockDesc& block, @@ -124,7 +124,7 @@ void LogDeviceMemoryStats(const platform::Place& place); void SetDeviceCommContext(framework::OperatorBase* operator_base, platform::DeviceContext* dev_ctx); -void SetDeviceCommContext(::ir::Operation* op, +void SetDeviceCommContext(::pir::Operation* op, platform::DeviceContext* dev_ctx); std::unordered_set GetSpecialOpNames(); diff --git a/paddle/fluid/framework/new_executor/interpreter/plan.cc b/paddle/fluid/framework/new_executor/interpreter/plan.cc index 0217219302f6d..ce2f8b2718ff3 100644 --- a/paddle/fluid/framework/new_executor/interpreter/plan.cc +++ b/paddle/fluid/framework/new_executor/interpreter/plan.cc @@ -41,7 +41,7 @@ Plan::Plan(const std::vector>& job_list, Plan::Plan( const std::vector>& job_list, - const std::unordered_map>& + const std::unordered_map>& type_to_ir_program) : job_list_(job_list), type_to_ir_program_(type_to_ir_program), @@ -69,7 +69,7 @@ const ProgramDesc* Plan::Program(const std::string& job_type) const { return type_to_program_.at(job_type); } -std::shared_ptr<::ir::Program> Plan::IrProgram( +std::shared_ptr<::pir::Program> Plan::IrProgram( const std::string& job_type) const { return type_to_ir_program_.at(job_type); } diff --git a/paddle/fluid/framework/new_executor/interpreter/plan.h b/paddle/fluid/framework/new_executor/interpreter/plan.h index aac750a38f97b..8ce66db821305 100644 --- a/paddle/fluid/framework/new_executor/interpreter/plan.h +++ b/paddle/fluid/framework/new_executor/interpreter/plan.h @@ -21,8 +21,8 @@ #include "paddle/fluid/framework/new_executor/interpreter/job.h" #include "paddle/fluid/framework/program_desc.h" -#include "paddle/ir/core/program.h" #include "paddle/phi/core/macros.h" +#include "paddle/pir/core/program.h" namespace paddle { namespace framework { @@ -33,7 +33,7 @@ class Plan final { Plan(const std::vector>& job_list, const std::unordered_map& type_to_program); Plan(const std::vector>& job_list, - const std::unordered_map>& + const std::unordered_map>& type_to_ir_program); ~Plan() = default; @@ -41,14 +41,14 @@ class Plan final { const std::vector>& JobList() const; const ProgramDesc* Program(const std::string& job_type) const; - std::shared_ptr<::ir::Program> IrProgram(const std::string& job_type) const; + std::shared_ptr<::pir::Program> IrProgram(const std::string& job_type) const; int64_t MicroBatchNum() const; private: const std::vector> job_list_; const std::unordered_map type_to_program_; - const std::unordered_map> + const std::unordered_map> type_to_ir_program_; int64_t micro_batch_num_; }; diff --git a/paddle/fluid/framework/new_executor/interpreter/stream_analyzer.cc b/paddle/fluid/framework/new_executor/interpreter/stream_analyzer.cc index 3dc9175dbfd4b..ea5b4c6b29689 100644 --- a/paddle/fluid/framework/new_executor/interpreter/stream_analyzer.cc +++ b/paddle/fluid/framework/new_executor/interpreter/stream_analyzer.cc @@ -257,7 +257,7 @@ const std::unordered_set no_need_buffer_ins(Instruction* instr) { return std::unordered_set(); } -const std::unordered_set no_need_buffer_ins( +const std::unordered_set no_need_buffer_ins( const paddle::framework::InstructionBase* instr) { return instr->NoNeedBuffer(); } @@ -471,9 +471,9 @@ void analyse_event_info_for_two_instructions< // fused_var share the same tensor. However, as the dependency is implicit, we // can only add event for it with the help of depend_op. 
- if (has_data_dependency( + if (has_data_dependency( instructions[cur_instr_id], instructions[next_instr_id]) || - instructions[next_instr_id]->Name() == "pd.depend") { + instructions[next_instr_id]->Name() == "pd_op.depend") { waiter_instr_ids->insert(next_instr_id); return; } diff --git a/paddle/fluid/framework/new_executor/interpretercore.cc b/paddle/fluid/framework/new_executor/interpretercore.cc index 384c668ed2e56..a2c3c49e1c634 100644 --- a/paddle/fluid/framework/new_executor/interpretercore.cc +++ b/paddle/fluid/framework/new_executor/interpretercore.cc @@ -16,8 +16,8 @@ #include "paddle/fluid/framework/new_executor/new_ir_interpreter.h" #include "paddle/fluid/framework/new_executor/program_interpreter.h" -#include "paddle/ir/core/program.h" -#include "paddle/ir/core/value.h" +#include "paddle/pir/core/program.h" +#include "paddle/pir/core/value.h" PADDLE_DEFINE_EXPORTED_bool( new_executor_serial_run, @@ -50,7 +50,7 @@ InterpreterCore::InterpreterCore(const platform::Place& place, InterpreterCore::InterpreterCore( const platform::Place& place, const std::vector& fetch_var_names, - std::unique_ptr<::ir::Program> ir_prog, + std::unique_ptr<::pir::Program> ir_prog, framework::Scope* scope, const ExecutionConfig& execution_config) { VLOG(4) << "InterpreterCore(): " << this << " on " << place; diff --git a/paddle/fluid/framework/new_executor/interpretercore.h b/paddle/fluid/framework/new_executor/interpretercore.h index f01c12b27c3a1..52df30cbfd976 100644 --- a/paddle/fluid/framework/new_executor/interpretercore.h +++ b/paddle/fluid/framework/new_executor/interpretercore.h @@ -17,9 +17,9 @@ PD_DECLARE_bool(new_executor_use_local_scope); -namespace ir { +namespace pir { class Program; -} // namespace ir +} // namespace pir namespace paddle { namespace framework { @@ -38,7 +38,7 @@ class InterpreterCore { // This constructor is for New IR. 
InterpreterCore(const platform::Place& place, const std::vector& fetch_var_names, - std::unique_ptr<::ir::Program> ir_prog, + std::unique_ptr<::pir::Program> ir_prog, Scope* scope, const ExecutionConfig& execution_config = ExecutionConfig()); ~InterpreterCore(); diff --git a/paddle/fluid/framework/new_executor/new_executor_defs.h b/paddle/fluid/framework/new_executor/new_executor_defs.h index bf0c0880f385d..ee9f17034a45f 100644 --- a/paddle/fluid/framework/new_executor/new_executor_defs.h +++ b/paddle/fluid/framework/new_executor/new_executor_defs.h @@ -20,7 +20,7 @@ #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/framework/variable_helper.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/interface/infermeta.h" +#include "paddle/fluid/pir/dialect/operator/interface/infermeta.h" #include "paddle/fluid/platform/device_event_base.h" #include "paddle/fluid/platform/event.h" #include "paddle/phi/core/utils/rw_lock.h" diff --git a/paddle/fluid/framework/new_executor/new_ir_interpreter.cc b/paddle/fluid/framework/new_executor/new_ir_interpreter.cc index 94ef1e3af217e..393ce0f641136 100644 --- a/paddle/fluid/framework/new_executor/new_ir_interpreter.cc +++ b/paddle/fluid/framework/new_executor/new_ir_interpreter.cc @@ -41,16 +41,15 @@ #endif #include "paddle/fluid/framework/new_executor/instruction/legacy_kernel_instruction.h" #include "paddle/fluid/framework/new_executor/instruction/phi_kernel_instruction.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/utils/utils.h" -#include "paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_attribute.h" -#include "paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_dialect.h" -#include "paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_op.h" -#include "paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_type.h" -#include "paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.h" -#include "paddle/ir/core/builtin_attribute.h" +#include "paddle/fluid/pir/dialect/kernel/ir/kernel_attribute.h" +#include "paddle/fluid/pir/dialect/kernel/ir/kernel_dialect.h" +#include "paddle/fluid/pir/dialect/kernel/ir/kernel_op.h" +#include "paddle/fluid/pir/dialect/kernel/ir/kernel_type.h" +#include "paddle/fluid/pir/dialect/operator/utils/utils.h" +#include "paddle/fluid/pir/phi_kernel_adaptor/phi_kernel_util.h" +#include "paddle/pir/core/builtin_attribute.h" PHI_DECLARE_bool(enable_new_ir_in_executor); - PHI_DECLARE_bool(enable_new_ir_in_executor_trace_run); namespace paddle { @@ -59,7 +58,7 @@ namespace framework { NewIRInterpreter::NewIRInterpreter( const platform::Place& place, const std::vector& fetch_var_names, - std::unique_ptr<::ir::Program> ir_prog, + std::unique_ptr<::pir::Program> ir_prog, framework::Scope* scope, const ExecutionConfig& execution_config) : place_(place), @@ -349,79 +348,79 @@ void NewIRInterpreter::UpdateSyncOpNum() { void NewIRInterpreter::UpdateNcclOpNum() { static std::set nccl_op_set = { - "pd.c_softmax_with_cross_entropy", - "pd.c_allgather", - "pd.c_allreduce_max", - "pd.c_allreduce_min", - "pd.c_allreduce_sum", - "pd.c_allreduce_prod", - "pd.c_reduce_max", - "pd.c_reduce_min", - "pd.c_reduce_prod", - "pd.c_reducescatter", - "pd.c_broadcast", - "pd.c_broadcast_", - "pd.c_scatter", - "pd.partial_send", - "pd.partial_recv", - "pd.partial_allgather", - "pd.recv_v2", - "pd.send_v2", - "pd.mp_allreduce_sum", - "pd.barrier", - "pd.alltoall", - "pd.global_gather", - "pd.distributed_fused_lamb", - "pd.margin_cross_entropy", - "pd.sync_batch_norm", - "pd.sync_batch_norm_", - "pd.data_norm", - 
"pd.class_center_sample", - "pd.all_to_all", - "pd.dist_concat", - "pd.all_gather", - "pd.broadcast", - "pd.p_recv", - "pd.p_send", - "pd.reduce_scatter", - "pd.all_reduce", - "pd.reduce", - "pd.c_softmax_with_cross_entropy_grad", - "pd.c_allgather_grad", - "pd.c_allreduce_max_grad", - "pd.c_allreduce_min_grad", - "pd.c_allreduce_sum_grad", - "pd.c_allreduce_prod_grad", - "pd.c_reduce_max_grad", - "pd.c_reduce_min_grad", - "pd.c_reduce_prod_grad", - "pd.c_reducescatter_grad", - "pd.c_broadcast_grad", - "pd.c_scatter_grad", - "pd.partial_send_grad", - "pd.partial_recv_grad", - "pd.partial_allgather_grad", - "pd.recv_v2_grad", - "pd.send_v2_grad", - "pd.mp_allreduce_sum_grad", - "pd.barrier_grad", - "pd.alltoall_grad", - "pd.global_gather_grad", - "pd.distributed_fused_lamb_grad", - "pd.margin_cross_entropy_grad", - "pd.margin_cross_entropy_grad_" - "pd.sync_batch_norm_grad", - "pd.data_norm_grad", - "pd.class_center_sample_grad", - "pd.all_to_all_grad", - "pd.dist_concat_grad", - "pd.all_gather_grad", - "pd.broadcast_grad", - "pd.p_recv_grad", - "pd.p_send_grad", - "pd.reduce_scatter_grad", - "pd.all_reduce_grad", - "pd.reduce_grad"}; + "pd_op.c_softmax_with_cross_entropy", + "pd_op.c_allgather", + "pd_op.c_allreduce_max", + "pd_op.c_allreduce_min", + "pd_op.c_allreduce_sum", + "pd_op.c_allreduce_prod", + "pd_op.c_reduce_max", + "pd_op.c_reduce_min", + "pd_op.c_reduce_prod", + "pd_op.c_reducescatter", + "pd_op.c_broadcast", + "pd_op.c_broadcast_", + "pd_op.c_scatter", + "pd_op.partial_send", + "pd_op.partial_recv", + "pd_op.partial_allgather", + "pd_op.recv_v2", + "pd_op.send_v2", + "pd_op.mp_allreduce_sum", + "pd_op.barrier", + "pd_op.alltoall", + "pd_op.global_gather", + "pd_op.distributed_fused_lamb", + "pd_op.margin_cross_entropy", + "pd_op.sync_batch_norm", + "pd_op.sync_batch_norm_", + "pd_op.data_norm", + "pd_op.class_center_sample", + "pd_op.all_to_all", + "pd_op.dist_concat", + "pd_op.all_gather", + "pd_op.broadcast", + "pd_op.p_recv", + "pd_op.p_send", + "pd_op.reduce_scatter", + "pd_op.all_reduce", + "pd_op.reduce", + "pd_op.c_softmax_with_cross_entropy_grad", + "pd_op.c_allgather_grad", + "pd_op.c_allreduce_max_grad", + "pd_op.c_allreduce_min_grad", + "pd_op.c_allreduce_sum_grad", + "pd_op.c_allreduce_prod_grad", + "pd_op.c_reduce_max_grad", + "pd_op.c_reduce_min_grad", + "pd_op.c_reduce_prod_grad", + "pd_op.c_reducescatter_grad", + "pd_op.c_broadcast_grad", + "pd_op.c_scatter_grad", + "pd_op.partial_send_grad", + "pd_op.partial_recv_grad", + "pd_op.partial_allgather_grad", + "pd_op.recv_v2_grad", + "pd_op.send_v2_grad", + "pd_op.mp_allreduce_sum_grad", + "pd_op.barrier_grad", + "pd_op.alltoall_grad", + "pd_op.global_gather_grad", + "pd_op.distributed_fused_lamb_grad", + "pd_op.margin_cross_entropy_grad", + "pd_op.margin_cross_entropy_grad_" + "pd_op.sync_batch_norm_grad", + "pd_op.data_norm_grad", + "pd_op.class_center_sample_grad", + "pd_op.all_to_all_grad", + "pd_op.dist_concat_grad", + "pd_op.all_gather_grad", + "pd_op.broadcast_grad", + "pd_op.p_recv_grad", + "pd_op.p_send_grad", + "pd_op.reduce_scatter_grad", + "pd_op.all_reduce_grad", + "pd_op.reduce_grad"}; int64_t nccl_op_num = 0; for (auto& ins : vec_instruction_base_) { if (nccl_op_set.count(ins->Name())) { @@ -512,7 +511,7 @@ void NewIRInterpreter::BuildInstruction() { } else if (op->dialect()->name() == "pd_kernel") { auto op_name = op->attributes() .at("op_name") - .dyn_cast<::ir::StrAttribute>() + .dyn_cast<::pir::StrAttribute>() .AsString(); if (interpreter::GetSpecialOpNames().count(op_name)) { VLOG(6) << "skip 
process " << op_name; @@ -542,7 +541,7 @@ void NewIRInterpreter::BuildInstruction() { variable_2_var_name_)); } #ifdef PADDLE_WITH_CINN - } else if (op->dialect()->name() == "cinn") { + } else if (op->dialect()->name() == "cinn_runtime") { vec_instruction_base_.emplace_back( std::make_unique(op_idx++, place_, op, scope_)); #endif @@ -634,7 +633,7 @@ void NewIRInterpreter::BuildInstructionDependences() { void NewIRInterpreter::RecordMemcpyD2H(InstructionBase* instr_node) { // NOTE(zhiqiu): hot fix for jit input var - if (instr_node->Name() == "pd.memcpy_d2h") { + if (instr_node->Name() == "pd_op.memcpy_d2h") { platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); auto* default_dev_ctx = pool.Get(place_); for (auto& event : instr_node->EventsToWait()) { @@ -781,14 +780,14 @@ void NewIRInterpreter::CalculateLastLiveOps() { InstructionBase* instr = vec_instruction_base_[op_idx].get(); std::set gc_check_vars; - const std::unordered_map<::ir::Value, std::vector>& ins = + const std::unordered_map<::pir::Value, std::vector>& ins = instr->Inputs(); - const std::unordered_map<::ir::Value, std::vector>& outs = + const std::unordered_map<::pir::Value, std::vector>& outs = instr->Outputs(); - std::unordered_multimap<::ir::Value, std::vector> ins_and_outs{ + std::unordered_multimap<::pir::Value, std::vector> ins_and_outs{ ins.begin(), ins.end()}; - if (instr->Name() != "pd.fetch") { + if (instr->Name() != "pd_op.fetch") { ins_and_outs.insert(outs.begin(), outs.end()); } @@ -879,7 +878,8 @@ void NewIRInterpreter::ConstructEventForJitInput() { for (size_t i = 0; i < dependecy_count_->size(); ++i) { if ((*dependecy_count_)[i] == 0) { InstructionBase* inst = vec_instruction_base_[i].get(); - if (inst->Name() == "pd.memcpy_d2h" && platform::is_gpu_place(place_)) { + if (inst->Name() == "pd_op.memcpy_d2h" && + platform::is_gpu_place(place_)) { for (auto& item : inst->Inputs()) { for (auto var_id : item.second) { auto name = GetNameById(var_id); @@ -919,13 +919,13 @@ FetchList NewIRInterpreter::Run(const std::vector& feed_names, // Build std::stringstream ss; ss << this; - ::ir::BuildScope(*ir_program_->block(), - InnerScope(), - ss.str(), - &value_2_var_name_, - &variable_2_var_name_, - &var_name_2_id_, - &variable_list_); + ::pir::BuildScope(*ir_program_->block(), + InnerScope(), + ss.str(), + &value_2_var_name_, + &variable_2_var_name_, + &var_name_2_id_, + &variable_list_); interpreter::BuildId2VarName(var_name_2_id_, &id_2_var_name_); @@ -1281,7 +1281,7 @@ void NewIRInterpreter::PreAnalysis() { VLOG(4) << "Done UpdateNcclOpNum"; } -::ir::Value NewIRInterpreter::GetValueByName(const std::string& var_name) { +::pir::Value NewIRInterpreter::GetValueByName(const std::string& var_name) { for (auto kv : value_2_var_name_) { if (kv.second == var_name) { return kv.first; @@ -1293,16 +1293,16 @@ ::ir::Value NewIRInterpreter::GetValueByName(const std::string& var_name) { void NewIRInterpreter::SolvePersisableVarNames() { VLOG(6) << "SolvePersisableVarNames"; for (auto kv : value_2_var_name_) { - ::ir::Value value = kv.first; + ::pir::Value value = kv.first; const std::string& var_name = kv.second; - ::ir::OpResult result = value.dyn_cast<::ir::OpResult>(); + ::pir::OpResult result = value.dyn_cast<::pir::OpResult>(); auto* defining_op = value.GetDefiningOp(); if (defining_op->HasAttribute(kAttrIsPersisable)) { auto is_persisables = defining_op->attribute(kAttrIsPersisable) - .dyn_cast<::ir::ArrayAttribute>() + .dyn_cast<::pir::ArrayAttribute>() .AsVector(); if 
(is_persisables[result.GetResultIndex()] - .dyn_cast<::ir::BoolAttribute>() + .dyn_cast<::pir::BoolAttribute>() .data()) { VLOG(6) << "parameter_var_names_ include: " << var_name; parameter_var_names_.insert(var_name); diff --git a/paddle/fluid/framework/new_executor/new_ir_interpreter.h b/paddle/fluid/framework/new_executor/new_ir_interpreter.h index b37b26d107560..c0681a277d5f7 100644 --- a/paddle/fluid/framework/new_executor/new_ir_interpreter.h +++ b/paddle/fluid/framework/new_executor/new_ir_interpreter.h @@ -16,7 +16,7 @@ #include #include "paddle/fluid/framework/new_executor/instruction/instruction_base.h" #include "paddle/fluid/framework/new_executor/interpreter_base_impl.h" -#include "paddle/ir/core/value.h" +#include "paddle/pir/core/value.h" namespace ir { class Program; @@ -36,7 +36,7 @@ class NewIRInterpreter : public InterpreterBaseImpl { public: NewIRInterpreter(const platform::Place& place, const std::vector& fetch_var_names, - std::unique_ptr<::ir::Program> ir_prog, + std::unique_ptr<::pir::Program> ir_prog, Scope* scope, const ExecutionConfig& execution_config = ExecutionConfig()); @@ -184,7 +184,7 @@ class NewIRInterpreter : public InterpreterBaseImpl { void RecordMemcpyD2H(InstructionBase* instr_node); - ::ir::Value GetValueByName(const std::string& var_name); + ::pir::Value GetValueByName(const std::string& var_name); void CheckGC(InstructionBase* instr); @@ -198,11 +198,11 @@ class NewIRInterpreter : public InterpreterBaseImpl { InstructionSchedulingPriorityLess ir_instruction_scheduling_priority_less; - std::unique_ptr<::ir::Program> ir_program_{nullptr}; + std::unique_ptr<::pir::Program> ir_program_{nullptr}; std::vector> vec_instruction_base_; - std::unordered_map<::ir::Value, std::string> value_2_var_name_; + std::unordered_map<::pir::Value, std::string> value_2_var_name_; std::unordered_map variable_2_var_name_; diff --git a/paddle/fluid/framework/new_executor/standalone_executor.cc b/paddle/fluid/framework/new_executor/standalone_executor.cc index ed109f9cd0b96..a2ae422b814a3 100644 --- a/paddle/fluid/framework/new_executor/standalone_executor.cc +++ b/paddle/fluid/framework/new_executor/standalone_executor.cc @@ -19,13 +19,13 @@ #include "paddle/fluid/platform/profiler/event_tracing.h" #include "paddle/phi/core/flags.h" -#include "paddle/fluid/ir/transforms/pd_op_to_kernel_pass.h" +#include "paddle/fluid/pir/transforms/pd_op_to_kernel_pass.h" -#include "paddle/fluid/ir/transforms/inplace_pass.h" #include "paddle/fluid/ir_adaptor/translator/translate.h" -#include "paddle/ir/core/program.h" -#include "paddle/ir/pass/pass.h" -#include "paddle/ir/pass/pass_manager.h" +#include "paddle/fluid/pir/transforms/inplace_pass.h" +#include "paddle/pir/core/program.h" +#include "paddle/pir/pass/pass.h" +#include "paddle/pir/pass/pass_manager.h" PHI_DECLARE_bool(enable_new_ir_in_executor); PHI_DECLARE_bool(enable_new_ir_api); @@ -54,7 +54,7 @@ StandaloneExecutor::StandaloneExecutor(const platform::Place& place, for (const auto& job : jobs) { const std::string& job_type = job->Type(); std::shared_ptr program = nullptr; - std::shared_ptr<::ir::Program> ir_program = nullptr; + std::shared_ptr<::pir::Program> ir_program = nullptr; if (FLAGS_enable_new_ir_api) { ir_program = plan_.IrProgram(job_type); } else { @@ -79,18 +79,18 @@ StandaloneExecutor::StandaloneExecutor(const platform::Place& place, // TODO(phlrain) we only support cpu for now if (FLAGS_enable_new_ir_in_executor) { - std::shared_ptr<::ir::Program> base_program = ir_program; + std::shared_ptr<::pir::Program> 
base_program = ir_program; if (!FLAGS_enable_new_ir_api) { VLOG(6) << "begin to translate" << std::endl; base_program = paddle::TranslateLegacyProgramToProgram(*program); } auto block = base_program->block(); for (auto it = block->begin(); it != block->end(); ++it) { - if ((*it)->name() == "pd.fetch") { + if ((*it)->name() == "pd_op.fetch") { size_t index = (*it) ->attributes() .at("col") - .dyn_cast() + .dyn_cast() .data(); if (fetch_var_names_.size() < index + 1) { @@ -100,7 +100,7 @@ StandaloneExecutor::StandaloneExecutor(const platform::Place& place, fetch_var_names_[index] = (*it) ->attributes() .at("name") - .dyn_cast() + .dyn_cast() .AsString() + "@fetch"; } @@ -109,8 +109,8 @@ StandaloneExecutor::StandaloneExecutor(const platform::Place& place, paddle::dialect::PdOpLowerToKernelPass(base_program.get(), place); if (FLAGS_new_ir_apply_inplace_pass) { - ir::PassManager pm(ir::IrContext::Instance(), 3); - pm.AddPass(ir::CreateInplacePass()); + pir::PassManager pm(pir::IrContext::Instance(), 3); + pm.AddPass(pir::CreateInplacePass()); pm.Run(kernel_program.get()); } diff --git a/paddle/fluid/framework/new_executor/standalone_executor.h b/paddle/fluid/framework/new_executor/standalone_executor.h index bec52add981bf..e9ee5509d20be 100644 --- a/paddle/fluid/framework/new_executor/standalone_executor.h +++ b/paddle/fluid/framework/new_executor/standalone_executor.h @@ -24,7 +24,7 @@ #include "paddle/fluid/framework/new_executor/new_executor_defs.h" #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/platform/place.h" -#include "paddle/ir/core/program.h" +#include "paddle/pir/core/program.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/paddle2cinn/cinn_compiler.cc b/paddle/fluid/framework/paddle2cinn/cinn_compiler.cc index e0ddafd37da70..ff898db3819f6 100644 --- a/paddle/fluid/framework/paddle2cinn/cinn_compiler.cc +++ b/paddle/fluid/framework/paddle2cinn/cinn_compiler.cc @@ -48,9 +48,9 @@ #include "paddle/fluid/operators/cinn/cinn_launch_context.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/string/string_helper.h" -#include "paddle/ir/core/program.h" -#include "paddle/ir/core/value.h" #include "paddle/phi/core/flags.h" +#include "paddle/pir/core/program.h" +#include "paddle/pir/core/value.h" #include "paddle/utils/flags.h" PHI_DECLARE_bool(enable_pe_launch_cinn); diff --git a/paddle/fluid/framework/type_info.cc b/paddle/fluid/framework/type_info.cc index cb7dae540d119..03086f46ad216 100644 --- a/paddle/fluid/framework/type_info.cc +++ b/paddle/fluid/framework/type_info.cc @@ -16,7 +16,7 @@ limitations under the License. */ #include "paddle/fluid/framework/feed_fetch_type.h" #include "paddle/fluid/framework/raw_tensor.h" #include "paddle/fluid/framework/string_array.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_meta_tensor.h" +#include "paddle/fluid/pir/dialect/operator/ir/meta_tensor.h" #include "paddle/fluid/prim/utils/static/desc_tensor.h" #include "paddle/fluid/primitive/type/lazy_tensor.h" diff --git a/paddle/fluid/inference/CMakeLists.txt b/paddle/fluid/inference/CMakeLists.txt index 48c9f79f34de1..da39c21e84c03 100644 --- a/paddle/fluid/inference/CMakeLists.txt +++ b/paddle/fluid/inference/CMakeLists.txt @@ -103,8 +103,8 @@ set(SHARED_INFERENCE_SRCS # NOTE(Aurelius84): For inference library, some DEPS is usless # such as non-infer operator related targets et.al. 
-list(REMOVE_ITEM fluid_modules cinn_dialect) -# NOTE(Aurelisu84): Remove ir dialect related target DEPS for inference +list(REMOVE_ITEM fluid_modules cinn_op_dialect) +# NOTE(Aurelisu84): Remove pir dialect related target DEPS for inference # shared library to prune library size. list(REMOVE_ITEM fluid_modules ${not_infer_modules}) diff --git a/paddle/fluid/ir/dialect/CMakeLists.txt b/paddle/fluid/ir/dialect/CMakeLists.txt deleted file mode 100644 index 7500642867f30..0000000000000 --- a/paddle/fluid/ir/dialect/CMakeLists.txt +++ /dev/null @@ -1,2 +0,0 @@ -add_subdirectory(paddle_dialect) -add_subdirectory(paddle_kernel_dialect) diff --git a/paddle/fluid/ir/dialect/paddle_dialect/interface/CMakeLists.txt b/paddle/fluid/ir/dialect/paddle_dialect/interface/CMakeLists.txt deleted file mode 100644 index 5ee2f3510ca93..0000000000000 --- a/paddle/fluid/ir/dialect/paddle_dialect/interface/CMakeLists.txt +++ /dev/null @@ -1,7 +0,0 @@ -# All source files of pd_dialect, except for the source file of op, which is generated in the compilation directory. -file(GLOB PD_INTERFACE_SRCS "*.cc") - -cc_library( - pd_interface - SRCS ${PD_INTERFACE_SRCS} - DEPS ir_core phi_utils) diff --git a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_op.h b/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_op.h deleted file mode 100644 index c8a5e1658ec4d..0000000000000 --- a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_op.h +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#ifdef GET_MANUAL_OP_LIST -#undef GET_MANUAL_OP_LIST -paddle::dialect::AddNOp, paddle::dialect::SplitGradOp, paddle::dialect::IfOp - -#else - -#pragma once -#include - -#include "paddle/fluid/framework/infershape_utils.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/interface/infermeta.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/interface/op_yaml_info.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/trait/inplace.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/utils/op_yaml_info_util.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/utils/utils.h" -#include "paddle/ir/core/builder.h" -#include "paddle/ir/core/ir_printer.h" -#include "paddle/ir/core/op_base.h" -#include "paddle/ir/core/operation_utils.h" -#include "paddle/phi/core/infermeta_utils.h" - -namespace paddle { -namespace dialect { - -class AddNOp : public ir::Op { - public: - using Op::Op; - static const char *name() { return "pd.add_n"; } - static constexpr const char **attributes_name = nullptr; - static constexpr uint32_t attributes_num = 0; - static OpInfoTuple GetOpInfo(); - static void Build(ir::Builder &builder, // NOLINT - ir::OperationArgument &argument, // NOLINT - ir::OpResult inputs); - - void Verify(); - ir::Value inputs() { return operand_source(0); } - ir::OpResult out() { return result(0); } - static void InferMeta(phi::InferMetaContext *infer_meta); -}; - -class AddN_Op : public ir::Op { - public: - using Op::Op; - static const char *name() { return "pd.add_n_"; } - static constexpr const char **attributes_name = nullptr; - static constexpr uint32_t attributes_num = 0; - static OpInfoTuple GetOpInfo(); - static void Build(ir::Builder &builder, // NOLINT - ir::OperationArgument &argument, // NOLINT - ir::OpResult inputs_); - - void Verify(); - ir::Value inputs() { return operand_source(0); } - ir::OpResult out() { return result(0); } - - static void InferMeta(phi::InferMetaContext *infer_meta); -}; - -class AddNWithKernelOp : public ir::Op { - public: - using Op::Op; - static const char *name() { return "pd.add_n_with_kernel"; } - static constexpr const char **attributes_name = nullptr; - static constexpr uint32_t attributes_num = 0; - static OpInfoTuple GetOpInfo(); - static void Build(ir::Builder &builder, // NOLINT - ir::OperationArgument &argument, // NOLINT - ir::OpResult inputs_); - - void Verify(); - ir::Value inputs() { return operand_source(0); } - ir::OpResult out() { return result(0); } - - static void InferMeta(phi::InferMetaContext *infer_meta); -}; - -class FusedGemmEpilogueOp : public ir::Op { - public: - using Op::Op; - static const char *name() { return "pd.fused_gemm_epilogue"; } - static const char *attributes_name[3]; - static constexpr uint32_t attributes_num = 3; - static OpInfoTuple GetOpInfo(); - - static void Build(ir::Builder &builder, // NOLINT - ir::OperationArgument &argument, // NOLINT - ir::OpResult x_, - ir::OpResult y_, - ir::OpResult bias_, - ir::AttributeMap attributes); - void Verify(); - ir::Value x() { return operand_source(0); } - ir::Value y() { return operand_source(1); } - ir::Value bias() { return operand_source(2); } - ir::OpResult out() { return result(0); } - ir::OpResult reserve_space() { return result(1); } - - static void InferMeta(phi::InferMetaContext *infer_meta); -}; - -class FusedGemmEpilogueGradOp - : public ir::Op { - public: - using Op::Op; - static const char *name() { return "pd.fused_gemm_epilogue_grad"; } - static const char *attributes_name[3]; - static constexpr uint32_t attributes_num = 3; - static OpInfoTuple GetOpInfo(); 
- - static void Build(ir::Builder &builder, // NOLINT - ir::OperationArgument &argument, // NOLINT - ir::OpResult x_, - ir::OpResult y_, - ir::OpResult reserve_space_, - ir::OpResult out_grad_, - ir::AttributeMap attributes); - void Verify(); - ir::Value x() { return operand_source(0); } - ir::Value y() { return operand_source(1); } - ir::Value reserve_space() { return operand_source(2); } - ir::Value out_grad() { return operand_source(3); } - ir::OpResult x_grad() { return result(0); } - ir::OpResult y_grad() { return result(1); } - ir::OpResult bias_grad() { return result(2); } - - static void InferMeta(phi::InferMetaContext *infer_meta); -}; - -class SplitGradOp : public ir::Op { - public: - using Op::Op; - static const char *name() { return "pd.split_grad"; } - static const char *attributes_name[1]; - static constexpr uint32_t attributes_num = 1; - static OpInfoTuple GetOpInfo(); - static void Build(ir::Builder &builder, // NOLINT - ir::OperationArgument &argument, // NOLINT - ir::OpResult x_, - float axis = 0); - static void Build(ir::Builder &builder, // NOLINT - ir::OperationArgument &argument, // NOLINT - ir::OpResult out_grad_, - ir::OpResult axis_); - - void Verify(); - ir::Value out_grad() { return operand_source(0); } - ir::Value axis() { return operand_source(1); } - ir::OpResult x_grad() { return result(0); } - static void InferMeta(phi::InferMetaContext *infer_meta); -}; - -class IfOp : public ir::Op { - public: - using Op::Op; - static const char *name() { return "pd.if"; } - static constexpr const char **attributes_name = nullptr; - static constexpr uint32_t attributes_num = 0; - static void Build(ir::Builder &builder, // NOLINT - ir::OperationArgument &argument, // NOLINT - ir::OpResult cond, - std::vector &&output_types); - ir::Value cond() { return operand_source(0); } - ir::Block *true_block(); - ir::Block *false_block(); - void Print(ir::IrPrinter &printer); // NOLINT - void Verify(); -}; - -} // namespace dialect -} // namespace paddle - -IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::AddNOp) -IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::SplitGradOp) -IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::AddN_Op) -IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::AddNWithKernelOp) -IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::FusedGemmEpilogueOp) -IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::FusedGemmEpilogueGradOp) - -IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::IfOp) -#endif diff --git a/paddle/fluid/ir_adaptor/translator/CMakeLists.txt b/paddle/fluid/ir_adaptor/translator/CMakeLists.txt index 632411383db56..4ac1dc065143f 100644 --- a/paddle/fluid/ir_adaptor/translator/CMakeLists.txt +++ b/paddle/fluid/ir_adaptor/translator/CMakeLists.txt @@ -20,4 +20,4 @@ file(GLOB PD_PROGRAM_TRANSLATOR_SRCS "*.cc") cc_library( program_translator SRCS ${PD_PROGRAM_TRANSLATOR_SRCS} ${op_compat_source_file} - DEPS proto_desc pd_dialect ir framework_proto) + DEPS proto_desc pd_op_dialect pir framework_proto) diff --git a/paddle/fluid/ir_adaptor/translator/attribute_translator.cc b/paddle/fluid/ir_adaptor/translator/attribute_translator.cc index f6a4b94f2bfdf..ebb58cc0ebf61 100644 --- a/paddle/fluid/ir_adaptor/translator/attribute_translator.cc +++ b/paddle/fluid/ir_adaptor/translator/attribute_translator.cc @@ -17,14 +17,14 @@ #include #include -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_attribute.h" -#include "paddle/ir/core/enforce.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_attribute.h" #include "paddle/phi/common/data_type.h" #include "paddle/phi/common/int_array.h" 
#include "paddle/phi/common/layout.h" #include "paddle/phi/common/place.h" #include "paddle/phi/common/scalar.h" #include "paddle/phi/core/utils/data_type.h" +#include "paddle/pir/core/enforce.h" #include "paddle/utils/variant.h" namespace paddle { @@ -32,127 +32,128 @@ namespace translator { class AttributeVisitor { public: - ir::IrContext* ctx; - AttributeVisitor() { ctx = ir::IrContext::Instance(); } + pir::IrContext* ctx; + AttributeVisitor() { ctx = pir::IrContext::Instance(); } ~AttributeVisitor() = default; public: - virtual ir::Attribute operator()(int i) { + virtual pir::Attribute operator()(int i) { VLOG(10) << "translating int"; - return ir::Int32Attribute::get(ctx, i); + return pir::Int32Attribute::get(ctx, i); } - virtual ir::Attribute operator()(int64_t i) { + virtual pir::Attribute operator()(int64_t i) { VLOG(10) << "translating int"; - return ir::Int64Attribute::get(ctx, i); + return pir::Int64Attribute::get(ctx, i); } - virtual ir::Attribute operator()(float f) { + virtual pir::Attribute operator()(float f) { VLOG(10) << "translating float"; - return ir::FloatAttribute::get(ctx, f); + return pir::FloatAttribute::get(ctx, f); } - virtual ir::Attribute operator()(bool b) { + virtual pir::Attribute operator()(bool b) { VLOG(10) << "translating bool"; - return ir::BoolAttribute::get(ctx, b); + return pir::BoolAttribute::get(ctx, b); } - virtual ir::Attribute operator()(double d) { + virtual pir::Attribute operator()(double d) { VLOG(10) << "translating double"; - return ir::DoubleAttribute::get(ctx, d); + return pir::DoubleAttribute::get(ctx, d); } - virtual ir::Attribute operator()(const std::string& str) { + virtual pir::Attribute operator()(const std::string& str) { VLOG(10) << "translating string"; - return ir::StrAttribute::get(ctx, str); + return pir::StrAttribute::get(ctx, str); } - virtual ir::Attribute operator()(const paddle::experimental::Scalar& scalar) { + virtual pir::Attribute operator()( + const paddle::experimental::Scalar& scalar) { VLOG(10) << "translating scalar"; IR_THROW("not support translating paddle::experimental::Scalar"); } - virtual ir::Attribute operator()(const std::vector& strs) { + virtual pir::Attribute operator()(const std::vector& strs) { VLOG(10) << "translating vector"; - std::vector attrs; + std::vector attrs; attrs.reserve(strs.size()); for (const auto& v : strs) { - attrs.push_back(ir::StrAttribute::get(ctx, v)); + attrs.push_back(pir::StrAttribute::get(ctx, v)); } - return ir::ArrayAttribute::get(ctx, attrs); + return pir::ArrayAttribute::get(ctx, attrs); } - virtual ir::Attribute operator()(const std::vector& fs) { + virtual pir::Attribute operator()(const std::vector& fs) { VLOG(10) << "translating vector"; - std::vector attrs; + std::vector attrs; attrs.reserve(fs.size()); for (const auto& v : fs) { - attrs.push_back(ir::FloatAttribute::get(ctx, v)); + attrs.push_back(pir::FloatAttribute::get(ctx, v)); } - return ir::ArrayAttribute::get(ctx, attrs); + return pir::ArrayAttribute::get(ctx, attrs); } - virtual ir::Attribute operator()(const std::vector& is) { + virtual pir::Attribute operator()(const std::vector& is) { VLOG(10) << "translating vector"; - std::vector attrs; + std::vector attrs; attrs.reserve(is.size()); for (const auto& v : is) { - attrs.push_back(ir::Int32Attribute::get(ctx, v)); + attrs.push_back(pir::Int32Attribute::get(ctx, v)); } - return ir::ArrayAttribute::get(ctx, attrs); + return pir::ArrayAttribute::get(ctx, attrs); } - virtual ir::Attribute operator()(const std::vector& bs) { + virtual pir::Attribute 
 operator()(const std::vector<bool>& bs) {
     VLOG(10) << "translating vector";
-    std::vector<ir::Attribute> attrs;
+    std::vector<pir::Attribute> attrs;
     attrs.reserve(bs.size());
     for (const auto& v : bs) {
-      attrs.push_back(ir::BoolAttribute::get(ctx, v));
+      attrs.push_back(pir::BoolAttribute::get(ctx, v));
     }
-    return ir::ArrayAttribute::get(ctx, attrs);
+    return pir::ArrayAttribute::get(ctx, attrs);
   }

-  virtual ir::Attribute operator()(const std::vector<int64_t>& i64s) {
+  virtual pir::Attribute operator()(const std::vector<int64_t>& i64s) {
     VLOG(10) << "translating vector size: " << i64s.size();
-    std::vector<ir::Attribute> attrs;
+    std::vector<pir::Attribute> attrs;
     attrs.reserve(i64s.size());
     for (const auto& v : i64s) {
-      attrs.push_back(ir::Int64Attribute::get(ctx, v));
+      attrs.push_back(pir::Int64Attribute::get(ctx, v));
     }
-    return ir::ArrayAttribute::get(ctx, attrs);
+    return pir::ArrayAttribute::get(ctx, attrs);
   }

-  virtual ir::Attribute operator()(const std::vector<double>& ds) {
+  virtual pir::Attribute operator()(const std::vector<double>& ds) {
     VLOG(10) << "translating vector";
-    std::vector<ir::Attribute> attrs;
+    std::vector<pir::Attribute> attrs;
     attrs.reserve(ds.size());
     for (const auto& v : ds) {
-      attrs.push_back(ir::DoubleAttribute::get(ctx, v));
+      attrs.push_back(pir::DoubleAttribute::get(ctx, v));
     }
-    return ir::ArrayAttribute::get(ctx, attrs);
+    return pir::ArrayAttribute::get(ctx, attrs);
   }

-  virtual ir::Attribute operator()(
+  virtual pir::Attribute operator()(
       const std::vector<paddle::experimental::Scalar>& ss) {
     VLOG(10) << "translating vector";
-    std::vector<ir::Attribute> attrs;
+    std::vector<pir::Attribute> attrs;
     attrs.reserve(ss.size());
     for (const auto& v : ss) {
       attrs.push_back(dialect::ScalarAttribute::get(ctx, v));
     }
     VLOG(10) << "translating vector Done";
-    return ir::ArrayAttribute::get(ctx, attrs);
+    return pir::ArrayAttribute::get(ctx, attrs);
   }

-  virtual ir::Attribute operator()(const paddle::blank& blank) {
+  virtual pir::Attribute operator()(const paddle::blank& blank) {
     VLOG(10) << "translating paddle::blank";
-    return ir::Attribute(nullptr);
+    return pir::Attribute(nullptr);
   }

   template <typename T>
-  ir::Attribute operator()(T attr) {
+  pir::Attribute operator()(T attr) {
     VLOG(10) << "translating null type";
-    return ir::Attribute(nullptr);
+    return pir::Attribute(nullptr);
   }
 };

@@ -160,19 +161,19 @@ class Int64ArrayAttributeVisitor : public AttributeVisitor {
  public:
   using AttributeVisitor::AttributeVisitor;

-  ir::Attribute operator()(const std::vector<int>& is) override {
+  pir::Attribute operator()(const std::vector<int>& is) override {
     VLOG(10) << "translating vector";
-    std::vector<ir::Attribute> attrs;
+    std::vector<pir::Attribute> attrs;
     attrs.reserve(is.size());
     for (const auto& v : is) {
-      attrs.push_back(ir::Int64Attribute::get(ctx, v));
+      attrs.push_back(pir::Int64Attribute::get(ctx, v));
     }
-    return ir::ArrayAttribute::get(ctx, attrs);
+    return pir::ArrayAttribute::get(ctx, attrs);
   }

-  ir::Attribute operator()(const paddle::blank& blank) override {
+  pir::Attribute operator()(const paddle::blank& blank) override {
     VLOG(10) << "translating paddle::blank to int64[]";
-    return ir::ArrayAttribute::get(ctx, {});
+    return pir::ArrayAttribute::get(ctx, {});
   }
 };

@@ -180,22 +181,22 @@ class Int64AttributeVisitor : public AttributeVisitor {
  public:
   using AttributeVisitor::AttributeVisitor;

-  ir::Attribute operator()(int is) override {
+  pir::Attribute operator()(int is) override {
     VLOG(10) << "translating int to Int64Attribute";
-    return ir::Int64Attribute::get(ctx, is);
+    return pir::Int64Attribute::get(ctx, is);
   }
 };

 class IntArrayAttributeVisitor : public AttributeVisitor {
  public:
   using AttributeVisitor::AttributeVisitor;
-  ir::Attribute operator()(const std::vector<int>& is) override {
+  pir::Attribute operator()(const std::vector<int>& is) override {
     VLOG(10) << "translating vector to IntArray";
     phi::IntArray data(is);
     return paddle::dialect::IntArrayAttribute::get(ctx, data);
   }

-  ir::Attribute operator()(const std::vector<int64_t>& is) override {
+  pir::Attribute operator()(const std::vector<int64_t>& is) override {
     VLOG(10) << "translating vector to IntArray";
     phi::IntArray data(is);
     return paddle::dialect::IntArrayAttribute::get(ctx, data);
@@ -205,14 +206,14 @@ class IntArrayAttributeVisitor : public AttributeVisitor {
 class DataTypeAttributeVisitor : public AttributeVisitor {
  public:
   using AttributeVisitor::AttributeVisitor;
-  ir::Attribute operator()(int i) override {
+  pir::Attribute operator()(int i) override {
     VLOG(10) << "translating int to DataType: " << i;
     auto phi_dtype = phi::TransToPhiDataType(i);
     return paddle::dialect::DataTypeAttribute::get(ctx, phi_dtype);
   }

-  ir::Attribute operator()(const paddle::blank& blank) override {
+  pir::Attribute operator()(const paddle::blank& blank) override {
     VLOG(10) << "translating paddle::blank to DataType::UNDEFINED";
     return paddle::dialect::DataTypeAttribute::get(ctx, phi::DataType());
   }
@@ -222,7 +223,7 @@ class PlaceAttributeVisitor : public AttributeVisitor {
  public:
   using AttributeVisitor::AttributeVisitor;

-  ir::Attribute operator()(const paddle::blank& blank) override {
+  pir::Attribute operator()(const paddle::blank& blank) override {
     VLOG(10) << "translating paddle::blank to Place::UNDEFINED";
     phi::Place data(phi::AllocationType::UNDEFINED);
     return paddle::dialect::PlaceAttribute::get(ctx, data);
@@ -237,17 +238,17 @@ AttributeTranslator::AttributeTranslator() {
       new DataTypeAttributeVisitor();
   special_visitors["paddle::dialect::PlaceAttribute"] =
       new PlaceAttributeVisitor();
-  special_visitors["ir::ArrayAttribute<ir::Int64Attribute>"] =
+  special_visitors["pir::ArrayAttribute<pir::Int64Attribute>"] =
       new Int64ArrayAttributeVisitor();
-  special_visitors["ir::Int64Attribute"] = new Int64AttributeVisitor();
+  special_visitors["pir::Int64Attribute"] = new Int64AttributeVisitor();
 }

-ir::Attribute AttributeTranslator::operator()(
+pir::Attribute AttributeTranslator::operator()(
     const framework::Attribute& attr) {
   return paddle::visit(*general_visitor, attr);
 }

-ir::Attribute AttributeTranslator::operator()(
+pir::Attribute AttributeTranslator::operator()(
     const std::string& target_type, const framework::Attribute& attr) {
   if (special_visitors.find(target_type) == special_visitors.end()) {
     VLOG(10) << "[" << target_type << "] not found";
diff --git a/paddle/fluid/ir_adaptor/translator/attribute_translator.h b/paddle/fluid/ir_adaptor/translator/attribute_translator.h
index ea509c7e34673..2a716b0ef7d18 100644
--- a/paddle/fluid/ir_adaptor/translator/attribute_translator.h
+++ b/paddle/fluid/ir_adaptor/translator/attribute_translator.h
@@ -17,9 +17,9 @@
 #include "paddle/fluid/framework/attribute.h"
 #include "paddle/fluid/framework/type_defs.h"
-#include "paddle/ir/core/attribute.h"
-#include "paddle/ir/core/builtin_attribute.h"
-#include "paddle/ir/core/ir_context.h"
+#include "paddle/pir/core/attribute.h"
+#include "paddle/pir/core/builtin_attribute.h"
+#include "paddle/pir/core/ir_context.h"

 #pragma once

@@ -45,9 +45,9 @@ class AttributeTranslator {
     return attribute_translator;
   }

-  ir::Attribute operator()(const framework::Attribute& attr);
-  ir::Attribute operator()(const std::string& target_type,
-                           const framework::Attribute& attr);
+  pir::Attribute operator()(const framework::Attribute& attr);
+  pir::Attribute operator()(const std::string& target_type,
+                            const framework::Attribute& attr);
 };

 }  // namespace translator
diff --git a/paddle/fluid/ir_adaptor/translator/op_translator.cc b/paddle/fluid/ir_adaptor/translator/op_translator.cc
index 39a6acdd21b55..8e4f32fcf92a5 100644
--- a/paddle/fluid/ir_adaptor/translator/op_translator.cc
+++ b/paddle/fluid/ir_adaptor/translator/op_translator.cc
@@ -23,27 +23,27 @@
 #include
 #include "paddle/fluid/framework/op_desc.h"
-#include "paddle/fluid/ir/dialect/paddle_dialect/interface/op_yaml_info.h"
-#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_attribute.h"
-#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.h"
-#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h"
-#include "paddle/fluid/ir/dialect/paddle_dialect/utils/utils.h"
 #include "paddle/fluid/ir_adaptor/translator/attribute_translator.h"
 #include "paddle/fluid/ir_adaptor/translator/op_compat_info.h"
 #include "paddle/fluid/ir_adaptor/translator/program_translator.h"
 #include "paddle/fluid/ir_adaptor/translator/type_translator.h"
 #include "paddle/fluid/ir_adaptor/translator/utils.h"
-#include "paddle/ir/core/builder.h"
-#include "paddle/ir/core/builtin_op.h"
-#include "paddle/ir/core/builtin_type.h"
-#include "paddle/ir/core/enforce.h"
-#include "paddle/ir/core/ir_context.h"
-#include "paddle/ir/core/operation.h"
-#include "paddle/ir/core/value.h"
+#include "paddle/fluid/pir/dialect/operator/interface/op_yaml_info.h"
+#include "paddle/fluid/pir/dialect/operator/ir/op_attribute.h"
+#include "paddle/fluid/pir/dialect/operator/ir/op_dialect.h"
+#include "paddle/fluid/pir/dialect/operator/ir/op_type.h"
+#include "paddle/fluid/pir/dialect/operator/utils/utils.h"
+#include "paddle/pir/core/builder.h"
+#include "paddle/pir/core/builtin_op.h"
+#include "paddle/pir/core/builtin_type.h"
+#include "paddle/pir/core/enforce.h"
+#include "paddle/pir/core/ir_context.h"
+#include "paddle/pir/core/operation.h"
+#include "paddle/pir/core/value.h"

 // NOTE(zhangbo9674): File pd_op.h is generated by op_gen.py, see details in
-// paddle/fluid/ir/dialect/CMakeLists.txt.
+// paddle/fluid/pir/dialect/CMakeLists.txt.
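Taken together, the attribute_translator changes above are a mechanical `ir::` → `pir::` rename over a variant-visitor dispatch: one general visitor covers every `framework::Attribute` alternative, and `special_visitors` swaps in a per-target-type visitor (e.g. `Int64AttributeVisitor`) when the op definition asks for a different target attribute. A minimal standalone sketch of that dispatch pattern — `LegacyAttr`, `NewAttr`, and the visitor names here are illustrative stand-ins, not Paddle's API, so the example compiles without Paddle:

```cpp
#include <cstdint>
#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>
#include <variant>
#include <vector>

using LegacyAttr = std::variant<bool, int, float, std::vector<int64_t>>;
using NewAttr = std::string;  // placeholder for a pir::Attribute-like handle

struct Visitor {
  virtual ~Visitor() = default;
  // One overload per variant alternative, like the visitors in the diff.
  virtual NewAttr operator()(bool b) { return b ? "bool(true)" : "bool(false)"; }
  virtual NewAttr operator()(int i) { return "int32(" + std::to_string(i) + ")"; }
  virtual NewAttr operator()(float f) { return "float(" + std::to_string(f) + ")"; }
  virtual NewAttr operator()(const std::vector<int64_t>& v) {
    return "int64_array(size=" + std::to_string(v.size()) + ")";
  }
};

// A special visitor overrides only the cases whose target type differs,
// mirroring Int64AttributeVisitor overriding operator()(int).
struct Int64Visitor : Visitor {
  NewAttr operator()(int i) override { return "int64(" + std::to_string(i) + ")"; }
};

class Translator {
 public:
  Translator() { special_["Int64Attribute"] = std::make_unique<Int64Visitor>(); }
  NewAttr operator()(const LegacyAttr& a) { return std::visit(general_, a); }
  NewAttr operator()(const std::string& target, const LegacyAttr& a) {
    auto it = special_.find(target);
    if (it == special_.end()) return std::visit(general_, a);  // fallback
    return std::visit(*it->second, a);  // virtual dispatch picks the override
  }

 private:
  Visitor general_;
  std::unordered_map<std::string, std::unique_ptr<Visitor>> special_;
};

int main() {
  Translator t;
  std::cout << t(LegacyAttr{7}) << "\n";                    // int32(7)
  std::cout << t("Int64Attribute", LegacyAttr{7}) << "\n";  // int64(7)
}
```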
+#include "paddle/fluid/pir/dialect/operator/ir/pd_op.h" namespace paddle { namespace translator { @@ -56,7 +56,7 @@ using ResultIdx = std::tuple; using OpDesc = paddle::framework::OpDesc; using BlockDesc = paddle::framework::BlockDesc; using VarDesc = paddle::framework::VarDesc; -using OpOutputTypeList = std::vector; +using OpOutputTypeList = std::vector; using OpOutputMapping = std::unordered_map; using OpInputInfo = dialect::OpInputInfo; using OpInputInfoList = std::vector; @@ -64,16 +64,16 @@ using OpAttributeInfo = dialect::OpAttributeInfo; using OpAttributeInfoList = std::vector; using OpOutputInfo = dialect::OpOutputInfo; using OpOutputInfoList = std::vector; -using InputHandlerFn = std::function; -using AttributeHandlerFn = std::function; -constexpr char kTargetDialectPrefix[] = "pd."; // NOLINT -constexpr char kEmptyVarName[] = "@EMPTY@"; // NOLINT +using InputHandlerFn = std::function; +using AttributeHandlerFn = std::function; +constexpr char kTargetDialectPrefix[] = "pd_op."; // NOLINT +constexpr char kEmptyVarName[] = "@EMPTY@"; // NOLINT static const std::unordered_set SpecialNonInplaceOps = {}; @@ -126,47 +126,46 @@ inline std::string OpNameCompatibleMapping(std::string op_name) { return op_normalizer[op_name]; } -inline ir::Operation* InsertCombineOperationForTarget( - ir::IrContext* ctx, +inline pir::Operation* InsertCombineOperationForTarget( + pir::IrContext* ctx, TranslationContext* param_map, - ir::Program* program, + pir::Program* program, const std::vector& args) { - std::string combine_op_name(ir::CombineOp::name()); - ir::OpInfo op_info = ctx->GetRegisteredOpInfo(combine_op_name); + std::string combine_op_name(pir::CombineOp::name()); + pir::OpInfo op_info = ctx->GetRegisteredOpInfo(combine_op_name); - std::vector src_values; - std::vector types_in_vec; + std::vector src_values; + std::vector types_in_vec; for (const auto& arg_name : args) { auto defining_info = param_map->at(arg_name); src_values.push_back(defining_info.value); types_in_vec.push_back(defining_info.value.type()); } - ir::Type target_vec_type = ir::VectorType::get(ctx, types_in_vec); - ir::Operation* operation = - ir::Operation::Create(src_values, {}, {target_vec_type}, op_info); + pir::Type target_vec_type = pir::VectorType::get(ctx, types_in_vec); + pir::Operation* operation = + pir::Operation::Create(src_values, {}, {target_vec_type}, op_info); program->block()->push_back(operation); return operation; } -inline ir::Operation* InsertFullOperationForAttributeInput(ir::IrContext* ctx, - ir::Program* program, - ir::Attribute attr) { +inline pir::Operation* InsertFullOperationForAttributeInput( + pir::IrContext* ctx, pir::Program* program, pir::Attribute attr) { float data = 0.0f; phi::DataType dtype = phi::DataType::UNDEFINED; - if (attr.isa()) { - data = attr.dyn_cast().data(); + if (attr.isa()) { + data = attr.dyn_cast().data(); dtype = phi::DataType::FLOAT32; - } else if (attr.isa()) { - data = static_cast(attr.dyn_cast().data()); + } else if (attr.isa()) { + data = static_cast(attr.dyn_cast().data()); dtype = phi::DataType::FLOAT64; - } else if (attr.isa()) { - data = static_cast(attr.dyn_cast().data()); + } else if (attr.isa()) { + data = static_cast(attr.dyn_cast().data()); dtype = phi::DataType::INT32; - } else if (attr.isa()) { - data = static_cast(attr.dyn_cast().data()); + } else if (attr.isa()) { + data = static_cast(attr.dyn_cast().data()); dtype = phi::DataType::INT64; - } else if (attr.isa()) { - data = static_cast(attr.dyn_cast().data()); + } else if (attr.isa()) { + data = 
static_cast(attr.dyn_cast().data()); dtype = phi::DataType::BOOL; } else if (attr.isa()) { // TODO(phlrain) : need update here, downcast from double to float @@ -174,35 +173,35 @@ inline ir::Operation* InsertFullOperationForAttributeInput(ir::IrContext* ctx, attr.dyn_cast().data().to()); dtype = phi::DataType::FLOAT64; } - ir::Builder builder(ctx, program->block()); + pir::Builder builder(ctx, program->block()); dialect::FullOp full_op = builder.Build( std::vector{1}, data, dtype, phi::CPUPlace()); return full_op.operation(); } -inline ir::Operation* InsertFullArrayOperationForAttributeInput( - ir::IrContext* ctx, ir::Program* program, ir::Attribute attr) { +inline pir::Operation* InsertFullArrayOperationForAttributeInput( + pir::IrContext* ctx, pir::Program* program, pir::Attribute attr) { IR_ENFORCE(attr.isa(), "Encounter non IntArray type when trying to insert IntArray " "mutable attribute"); phi::IntArray int_array = attr.dyn_cast().data(); - ir::Builder builder(ctx, program->block()); + pir::Builder builder(ctx, program->block()); dialect::FullIntArrayOp full_int_array_op = builder.Build( int_array.GetData(), phi::DataType::INT64, phi::CPUPlace()); return full_int_array_op.operation(); } -inline ir::Operation* InsertStackOperationForTarget( - ir::IrContext* ctx, +inline pir::Operation* InsertStackOperationForTarget( + pir::IrContext* ctx, TranslationContext* param_map, - ir::Program* program, + pir::Program* program, const std::vector& args, int axis = 0) { auto* combine_op = InsertCombineOperationForTarget(ctx, param_map, program, args); - ir::Builder builder(ctx, program->block()); + pir::Builder builder(ctx, program->block()); dialect::StackOp stack_op = builder.Build(combine_op->result(0), axis); return stack_op.operation(); @@ -210,8 +209,8 @@ inline ir::Operation* InsertStackOperationForTarget( } // namespace -ir::OpInfo OpTranscriber::LoopkUpOpInfo(ir::IrContext* ctx, - const OpDesc& op_desc) { +pir::OpInfo OpTranscriber::LoopkUpOpInfo(pir::IrContext* ctx, + const OpDesc& op_desc) { std::string target_op_name = kTargetDialectPrefix + OpNameCompatibleMapping(op_desc.Type()); if (IsInplace(op_desc) && *target_op_name.rbegin() != '_') { @@ -230,11 +229,11 @@ ir::OpInfo OpTranscriber::LoopkUpOpInfo(ir::IrContext* ctx, } void OpTranscriber::InsertSliceOperationForInput( - ir::IrContext* ctx, + pir::IrContext* ctx, TranslationContext* param_map, const OpDesc& op_desc, const OpInputInfoList& input_infos, - ir::Program* program) { + pir::Program* program) { auto& op_normalizer = OpNameNormalizer::instance(); std::set yaml_input_set; for (const auto& info : input_infos) { @@ -265,10 +264,11 @@ void OpTranscriber::InsertSliceOperationForInput( } } -ir::OpResult OpTranscriber::GetAttributeAsInput(ir::IrContext* ctx, - ir::Program* program, - const OpDesc& op_desc, - const OpInputInfo& input_info) { +pir::OpResult OpTranscriber::GetAttributeAsInput( + pir::IrContext* ctx, + pir::Program* program, + const OpDesc& op_desc, + const OpInputInfo& input_info) { auto& attribute_translator = AttributeTranslator::instance(); auto& op_normalizer = OpNameNormalizer::instance(); @@ -283,10 +283,10 @@ ir::OpResult OpTranscriber::GetAttributeAsInput(ir::IrContext* ctx, paddle::framework::Attribute legacy_attr = op_desc.GetAttr(legacy_attr_name); VLOG(10) << "[" << op_desc.Type() << "][attribute]" << " name: " << legacy_attr_name << " " << legacy_attr.index(); - ir::Attribute new_attr = + pir::Attribute new_attr = attribute_translator(input_info.type_name, legacy_attr); - ir::Operation* defining_op = 
nullptr; + pir::Operation* defining_op = nullptr; bool is_int_array = (input_info.type_name.find("IntArrayAttribute") != input_info.type_name.npos); if (is_int_array) { @@ -299,13 +299,13 @@ ir::OpResult OpTranscriber::GetAttributeAsInput(ir::IrContext* ctx, return defining_op->result(0); } -std::vector OpTranscriber::GenerateOperationInput( - ir::IrContext* ctx, +std::vector OpTranscriber::GenerateOperationInput( + pir::IrContext* ctx, TranslationContext* param_map, const OpDesc& op_desc, const std::string& normalized_op_name, const OpInputInfoList& input_infos, - ir::Program* program) { + pir::Program* program) { VLOG(10) << "[op:" << op_desc.Type() << "][input] entrance"; auto& op_normalizer = OpNameNormalizer::instance(); @@ -314,11 +314,11 @@ std::vector OpTranscriber::GenerateOperationInput( VLOG(10) << "[op:" << op_desc.Type() << "][input] start"; - std::vector op_inputs; + std::vector op_inputs; for (const auto& info : input_infos) { if (auto special_handler = this->GetSpecialInputHandlers(info.name)) { - ir::OpResult ret = special_handler( + pir::OpResult ret = special_handler( ctx, param_map, op_desc, normalized_op_name, info, program); op_inputs.push_back(ret); continue; @@ -407,7 +407,7 @@ std::vector OpTranscriber::GenerateOperationInput( } std::tuple -OpTranscriber::GenerateOperationOutput(ir::IrContext* ctx, +OpTranscriber::GenerateOperationOutput(pir::IrContext* ctx, const OpDesc& op_desc, const OpOutputInfoList& output_infos) { OpOutputMapping arg_to_idx; @@ -457,7 +457,7 @@ OpTranscriber::GenerateOperationOutput(ir::IrContext* ctx, legacy_output_vars[0]); if (var->GetType() == paddle::framework::proto::VarType::LOD_TENSOR_ARRAY) { - ir::Type translated_var_type = + pir::Type translated_var_type = type_translator[var->GetType()](ctx, *var); op_output_types.push_back(translated_var_type); arg_to_idx[var->Name()] = {cur_output_idx, 0}; @@ -486,7 +486,8 @@ OpTranscriber::GenerateOperationOutput(ir::IrContext* ctx, << "[" << op_desc.Type() << "]" << info.name << " var: " << var_name << " type: " << var->GetType(); - ir::Type translated_var_type = type_translator[var->GetType()](ctx, *var); + pir::Type translated_var_type = + type_translator[var->GetType()](ctx, *var); arg_to_idx[var_name] = {cur_output_idx, 0}; op_output_types.push_back(translated_var_type); @@ -496,7 +497,7 @@ OpTranscriber::GenerateOperationOutput(ir::IrContext* ctx, VLOG(10) << "[output translating]" << "[" << op_desc.Type() << "]" << info.name << " :" << info.type_name << " var: " << legacy_output_name; - std::vector types; + std::vector types; for (IdxInVector idx_in_vec = 0; idx_in_vec < legacy_output_vars.size(); idx_in_vec++) { const auto& var_name = legacy_output_vars[idx_in_vec]; @@ -509,26 +510,26 @@ OpTranscriber::GenerateOperationOutput(ir::IrContext* ctx, VLOG(10) << "[output translating]" << "[" << op_desc.Type() << "]" << info.name << " var: " << var_name << " type: " << var->GetType(); - ir::Type translated_var_type = + pir::Type translated_var_type = type_translator[var->GetType()](ctx, *var); types.push_back(translated_var_type); arg_to_idx[var_name] = {cur_output_idx, idx_in_vec}; } - ir::Type vec_type = ir::VectorType::get(ctx, types); + pir::Type vec_type = pir::VectorType::get(ctx, types); op_output_types.push_back(vec_type); } } return {op_output_types, arg_to_idx}; } -ir::AttributeMap OpTranscriber::TranslateOpAttribute( - ir::IrContext* ctx, +pir::AttributeMap OpTranscriber::TranslateOpAttribute( + pir::IrContext* ctx, const std::string& normalized_op_name, const OpAttributeInfoList& 
op_attr_infos, const OpDesc& op_desc) { auto& attribute_translator = AttributeTranslator::instance(); auto& op_normalizer = OpNameNormalizer::instance(); - ir::AttributeMap attribute_map = {}; + pir::AttributeMap attribute_map = {}; for (const auto& info : op_attr_infos) { if (auto handler = this->GetSpecialAttributeHandlers(info.name)) { @@ -546,7 +547,7 @@ ir::AttributeMap OpTranscriber::TranslateOpAttribute( op_desc.GetAttr(legacy_attr_name); VLOG(10) << "attribute in " << op_desc.Type() << " name: " << legacy_attr_name << " " << legacy_attr.index(); - ir::Attribute new_attr = + pir::Attribute new_attr = attribute_translator(info.type_name, legacy_attr); attribute_map[info.name] = new_attr; if (!new_attr) { @@ -563,36 +564,36 @@ ir::AttributeMap OpTranscriber::TranslateOpAttribute( return attribute_map; } -void OpTranscriber::HandleNonexistentAttribute(ir::IrContext*, - ir::AttributeMap* attribute_map, +void OpTranscriber::HandleNonexistentAttribute(pir::IrContext*, + pir::AttributeMap* attribute_map, const OpAttributeInfo& info) { auto& attribute_translator = AttributeTranslator::instance(); (*attribute_map)[info.name] = attribute_translator(info.type_name, paddle::framework::Attribute()); } -void OpTranscriber::RecordOpResultMapping(ir::IrContext* ctx, +void OpTranscriber::RecordOpResultMapping(pir::IrContext* ctx, TranslationContext* param_map, const OpDesc& op_desc, - ir::Operation* operation, + pir::Operation* operation, const OpOutputMapping& arg_to_idx) { for (const auto& [arg_name, idx] : arg_to_idx) { const auto& [idx_in_op, idx_in_vec] = idx; VLOG(10) << "[output recording]" << "[" << op_desc.Type() << "]" << arg_name << " " << idx_in_op << " " << idx_in_vec; - ir::OpResult value = operation->result(idx_in_op); - bool generated_by_vector = value.type().isa(); + pir::OpResult value = operation->result(idx_in_op); + bool generated_by_vector = value.type().isa(); (*param_map)[arg_name] = VariableDefiningInfo( value, generated_by_vector, generated_by_vector ? 
idx_in_vec : -1); } } -ir::Operation* OpTranscriber::operator()(ir::IrContext* ctx, - TranslationContext* param_map, - const OpDesc& op_desc, - ir::Program* program) { +pir::Operation* OpTranscriber::operator()(pir::IrContext* ctx, + TranslationContext* param_map, + const OpDesc& op_desc, + pir::Program* program) { auto op_info = this->LoopkUpOpInfo(ctx, op_desc); auto* op_info_concept = op_info.GetInterfaceImpl(); @@ -618,8 +619,8 @@ ir::Operation* OpTranscriber::operator()(ir::IrContext* ctx, this->TranslateOpAttribute(ctx, op_info.name(), attr_infos, op_desc); VLOG(4) << "[general op][" << op_desc.Type() << "] preparation end."; - ir::Operation* operation = - ir::Operation::Create(op_inputs, attribute_map, op_output_types, op_info); + pir::Operation* operation = pir::Operation::Create( + op_inputs, attribute_map, op_output_types, op_info); VLOG(4) << "[general op][" << op_desc.Type() << "] opearation creation end."; program->block()->push_back(operation); @@ -630,13 +631,13 @@ ir::Operation* OpTranscriber::operator()(ir::IrContext* ctx, } struct CastOpTranscriber : public OpTranscriber { - ir::AttributeMap TranslateOpAttribute( - ir::IrContext*, + pir::AttributeMap TranslateOpAttribute( + pir::IrContext*, const std::string& normalized_op_name, const OpAttributeInfoList& op_attr_infos, const OpDesc& op_desc) override { auto& attribute_translator = AttributeTranslator::instance(); - ir::AttributeMap attribute_map = {}; + pir::AttributeMap attribute_map = {}; const OpAttributeInfo info = op_attr_infos[0]; std::string legacy_attr_name("out_dtype"); @@ -647,7 +648,7 @@ struct CastOpTranscriber : public OpTranscriber { } VLOG(10) << "attribute in " << op_desc.Type() << " name: " << legacy_attr_name << " " << legacy_attr.index(); - ir::Attribute new_attr = attribute_translator(info.type_name, legacy_attr); + pir::Attribute new_attr = attribute_translator(info.type_name, legacy_attr); attribute_map[info.name] = new_attr; return attribute_map; @@ -655,35 +656,35 @@ struct CastOpTranscriber : public OpTranscriber { }; struct EmbeddingOpTranscriber : public OpTranscriber { - void HandleNonexistentAttribute(ir::IrContext* ctx, - ir::AttributeMap* attribute_map, + void HandleNonexistentAttribute(pir::IrContext* ctx, + pir::AttributeMap* attribute_map, const OpAttributeInfo& info) override { if (info.name == "padding_idx") { - (*attribute_map)[info.name] = ir::Int64Attribute::get(ctx, -1); + (*attribute_map)[info.name] = pir::Int64Attribute::get(ctx, -1); } else if (info.name == "sparse") { - (*attribute_map)[info.name] = ir::BoolAttribute::get(ctx, false); + (*attribute_map)[info.name] = pir::BoolAttribute::get(ctx, false); } } }; struct IncrementOpTranscriber : public OpTranscriber { - ir::AttributeMap TranslateOpAttribute( - ir::IrContext* ctx, + pir::AttributeMap TranslateOpAttribute( + pir::IrContext* ctx, const std::string& normalized_op_name, const OpAttributeInfoList& op_attr_infos, const OpDesc& op_desc) override { auto& attribute_translator = AttributeTranslator::instance(); - ir::AttributeMap attribute_map = {}; + pir::AttributeMap attribute_map = {}; paddle::framework::Attribute legacy_attr; if (op_desc.HasAttr("step")) { legacy_attr = op_desc.GetAttr("step"); VLOG(10) << "attribute in " << op_desc.Type() << " step: " << " " << legacy_attr.index(); - ir::Attribute new_attr = attribute_translator(legacy_attr); + pir::Attribute new_attr = attribute_translator(legacy_attr); attribute_map["value"] = new_attr; } else { - attribute_map["value"] = ir::FloatAttribute::get(ctx, 1.0f); + 
attribute_map["value"] = pir::FloatAttribute::get(ctx, 1.0f); } return attribute_map; @@ -694,21 +695,23 @@ struct IncrementOpTranscriber : public OpTranscriber { // `legacy_ops.yaml`. For this op we simulate the logic in // python/paddle/tensor/creation.py::assign(x, output) struct AssignValueOpTranscriber : public OpTranscriber { - ir::OpInfo LoopkUpOpInfo(ir::IrContext* ctx, const OpDesc& op_desc) override { - std::string target_op_name = "pd.assign_value"; + pir::OpInfo LoopkUpOpInfo(pir::IrContext* ctx, + const OpDesc& op_desc) override { + std::string target_op_name = "pd_op.assign_value"; const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { IR_THROW( - "Op assign_value should have corresponding OpInfo pd.assign_value"); + "Op assign_value should have corresponding OpInfo " + "pd_op.assign_value"); } return op_info; } - ir::Operation* operator()(ir::IrContext* ctx, - TranslationContext* param_map, - const OpDesc& op_desc, - ir::Program* program) override { + pir::Operation* operator()(pir::IrContext* ctx, + TranslationContext* param_map, + const OpDesc& op_desc, + pir::Program* program) override { VLOG(10) << "[op assign_value] start transcribing"; auto op_info = this->LoopkUpOpInfo(ctx, op_desc); auto* op_info_concept = @@ -724,7 +727,7 @@ struct AssignValueOpTranscriber : public OpTranscriber { } auto& attribute_translator = AttributeTranslator::instance(); - ir::AttributeMap attribute_map; + pir::AttributeMap attribute_map; paddle::framework::Attribute legacy_attr; if (op_desc.HasAttr("shape")) { @@ -732,7 +735,7 @@ struct AssignValueOpTranscriber : public OpTranscriber { } else { IR_THROW("Op assign_value should have attribute `shape` but not find"); } - ir::Attribute attr_shape = + pir::Attribute attr_shape = attribute_translator(attr_info_maps.at("shape").type_name, legacy_attr); attribute_map["shape"] = attr_shape; @@ -741,11 +744,11 @@ struct AssignValueOpTranscriber : public OpTranscriber { } else { IR_THROW("Op assign_value should have attribute `dtype` but not find"); } - ir::Attribute attr_dtype = + pir::Attribute attr_dtype = attribute_translator(attr_info_maps.at("dtype").type_name, legacy_attr); attribute_map["dtype"] = attr_dtype; - ir::Attribute attr_place = + pir::Attribute attr_place = dialect::PlaceAttribute::get(ctx, phi::CPUPlace()); attribute_map["place"] = attr_place; @@ -764,20 +767,20 @@ struct AssignValueOpTranscriber : public OpTranscriber { "Op assign_value should have attribute `**_values` but not find"); } - ir::Attribute attr_values = attribute_translator( + pir::Attribute attr_values = attribute_translator( attr_info_maps.at("values").type_name, legacy_attr); attribute_map["values"] = attr_values; VLOG(10) << "[op assign_value] attribute translation done"; - std::vector op_inputs = {}; + std::vector op_inputs = {}; OpOutputMapping arg_to_idx; OpOutputTypeList op_output_types; std::tie(op_output_types, arg_to_idx) = this->GenerateOperationOutput(ctx, op_desc, output_infos); - ir::Operation* operation = ir::Operation::Create( + pir::Operation* operation = pir::Operation::Create( op_inputs, attribute_map, op_output_types, op_info); program->block()->push_back(operation); RecordOpResultMapping(ctx, param_map, op_desc, operation, arg_to_idx); @@ -792,12 +795,12 @@ struct AssignValueOpTranscriber : public OpTranscriber { // So we generate an input by `full` with same type of output `DropoutState` of // OpDesc And we still should be aware that `DropoutState` is an optional output // in static graph. 
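An aside before the Rnn-specific handler that the note above introduces: most transcribers in this file resolve their target op by name, so the recurring pattern is `kTargetDialectPrefix` (now `pd_op.` instead of `pd.`) + the normalized legacy name, with a trailing `_` for inplace variants. A standalone sketch of that naming rule as this patch leaves it — `TargetOpName` is a hypothetical helper, not a function in the diff:

```cpp
#include <cassert>
#include <string>

constexpr char kTargetDialectPrefix[] = "pd_op.";  // was "pd." before this patch

// Hypothetical helper mirroring the lookup logic above: prefix the normalized
// legacy op name, keeping the trailing-underscore convention for inplace ops.
std::string TargetOpName(const std::string& legacy_name, bool is_inplace) {
  std::string name = std::string(kTargetDialectPrefix) + legacy_name;
  if (is_inplace && name.back() != '_') name += '_';
  return name;
}

int main() {
  assert(TargetOpName("assign_value", false) == "pd_op.assign_value");
  assert(TargetOpName("add", true) == "pd_op.add_");
}
```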
-ir::OpResult TranslateDropOutStateIn(ir::IrContext* ctx, - TranslationContext* param_map, - const OpDesc& op_desc, - const std::string& normalized_op_name, - const OpInputInfo& input_info, - ir::Program* program) { +pir::OpResult TranslateDropOutStateIn(pir::IrContext* ctx, + TranslationContext* param_map, + const OpDesc& op_desc, + const std::string& normalized_op_name, + const OpInputInfo& input_info, + pir::Program* program) { const std::string legacy_output_name = "DropoutState"; std::vector legacy_output_vars; if (op_desc.HasOutput(legacy_output_name)) { @@ -806,7 +809,7 @@ ir::OpResult TranslateDropOutStateIn(ir::IrContext* ctx, if (legacy_output_vars.empty()) { VLOG(3) << "[input translating] not find output variable: DropoutState"; - return ir::OpResult(nullptr); + return pir::OpResult(nullptr); } // `DropoutState` is a tensor @@ -816,14 +819,14 @@ ir::OpResult TranslateDropOutStateIn(ir::IrContext* ctx, IR_THROW("Unexpected: Rnn Op should have a non-empty DropoutState"); } auto& type_translator = TypeTranslator::instance(); - ir::Type translated_var_type = + pir::Type translated_var_type = type_translator[dropout_state->GetType()](ctx, *dropout_state); IR_ENFORCE( translated_var_type.isa(), "Unexpected: Rnn Op's output DropoutState should be a DenseTensor"); auto tensor_type = translated_var_type.dyn_cast(); - ir::Builder builder(ctx, program->block()); + pir::Builder builder(ctx, program->block()); dialect::FullOp full_op = builder.Build( phi::vectorize(tensor_type.dims()), 0.0f, @@ -845,26 +848,27 @@ struct RnnOpTranscriber : public OpTranscriber { }; struct EmbeddingGradOpTranscriber : public OpTranscriber { - void HandleNonexistentAttribute(ir::IrContext* ctx, - ir::AttributeMap* attribute_map, + void HandleNonexistentAttribute(pir::IrContext* ctx, + pir::AttributeMap* attribute_map, const OpAttributeInfo& info) override { if (info.name == "padding_idx") { - (*attribute_map)[info.name] = ir::Int64Attribute::get(ctx, -1); + (*attribute_map)[info.name] = pir::Int64Attribute::get(ctx, -1); } else if (info.name == "sparse") { - (*attribute_map)[info.name] = ir::BoolAttribute::get(ctx, false); + (*attribute_map)[info.name] = pir::BoolAttribute::get(ctx, false); } } - ir::OpInfo LoopkUpOpInfo(ir::IrContext* ctx, const OpDesc& op_desc) override { + pir::OpInfo LoopkUpOpInfo(pir::IrContext* ctx, + const OpDesc& op_desc) override { std::string target_op_name = kTargetDialectPrefix + OpNameCompatibleMapping(op_desc.Type()); bool is_sparse = paddle::get(op_desc.GetAttr("is_sparse")); if (is_sparse) { - target_op_name = "pd.embedding_grad_sparse"; + target_op_name = "pd_op.embedding_grad_sparse"; } else { - target_op_name = "pd.embedding_grad_dense"; + target_op_name = "pd_op.embedding_grad_dense"; } VLOG(6) << "[op name normalizing: " << op_desc.Type() << " to " << target_op_name; @@ -880,45 +884,45 @@ struct EmbeddingGradOpTranscriber : public OpTranscriber { }; struct FeedOpTranscriber : public OpTranscriber { - ir::AttributeMap TranslateOpAttribute( - ir::IrContext* ctx, + pir::AttributeMap TranslateOpAttribute( + pir::IrContext* ctx, const std::string& normalized_op_name, const OpAttributeInfoList& op_attr_infos, const OpDesc& op_desc) override { - ir::AttributeMap attribute_map = { - {"name", ir::StrAttribute::get(ctx, op_desc.OutputArgumentNames()[0])}, + pir::AttributeMap attribute_map = { + {"name", pir::StrAttribute::get(ctx, op_desc.OutputArgumentNames()[0])}, {"col", - ir::Int32Attribute::get(ctx, op_desc.GetAttrIfExists("col"))}, + pir::Int32Attribute::get(ctx, 
op_desc.GetAttrIfExists("col"))}, }; return attribute_map; } - std::vector GenerateOperationInput( - ir::IrContext* ctx, + std::vector GenerateOperationInput( + pir::IrContext* ctx, TranslationContext* param_map, const OpDesc& op_desc, const std::string& normalized_op_name, const OpInputInfoList& input_infos, - ir::Program* program) override { + pir::Program* program) override { return {}; } }; struct DataOpTranscriber : public FeedOpTranscriber { - ir::AttributeMap TranslateOpAttribute( - ir::IrContext* ctx, + pir::AttributeMap TranslateOpAttribute( + pir::IrContext* ctx, const std::string& normalized_op_name, const OpAttributeInfoList& op_attr_infos, const OpDesc& op_desc) override { int allocate_type = paddle::get(op_desc.GetAttr("place")); auto& attribute_translator = AttributeTranslator::instance(); - ir::Attribute shape = attribute_translator( + pir::Attribute shape = attribute_translator( "paddle::dialect::IntArrayAttribute", op_desc.GetAttr("shape")); - ir::AttributeMap attribute_map = { + pir::AttributeMap attribute_map = { {"name", - ir::StrAttribute::get(ctx, - op_desc.GetAttrIfExists("name"))}, + pir::StrAttribute::get(ctx, + op_desc.GetAttrIfExists("name"))}, {"shape", shape}, {"dtype", paddle::dialect::DataTypeAttribute::get(ctx, phi::DataType::FLOAT32)}, @@ -932,18 +936,18 @@ struct DataOpTranscriber : public FeedOpTranscriber { }; struct SplitOpTranscriber : public OpTranscriber { - std::vector GenerateOperationInput( - ir::IrContext* ctx, + std::vector GenerateOperationInput( + pir::IrContext* ctx, TranslationContext* param_map, const OpDesc& op_desc, const std::string& normalized_op_name, const OpInputInfoList& input_infos, - ir::Program* program) override { + pir::Program* program) override { // input of split is [Tensor x, IntArray sections, Scalar(int) axis)] VLOG(10) << "[op:split][input] start"; - std::vector op_inputs; + std::vector op_inputs; // process first input auto x_input_vars = op_desc.Input("X"); IR_ENFORCE(x_input_vars.size() == 1, "x input of split MUST be a tensor"); @@ -963,7 +967,7 @@ struct SplitOpTranscriber : public OpTranscriber { op_inputs.push_back(combine_op->result(0)); } else { auto& attribute_translator = AttributeTranslator::instance(); - ir::Attribute new_attr = attribute_translator( + pir::Attribute new_attr = attribute_translator( "paddle::dialect::IntArrayAttribute", op_desc.GetAttr("sections")); auto sec_defin_op = InsertFullArrayOperationForAttributeInput(ctx, program, new_attr); @@ -982,8 +986,8 @@ struct SplitOpTranscriber : public OpTranscriber { op_inputs.push_back(axis_defining_info.value); } else { auto& attribute_translator = AttributeTranslator::instance(); - ir::Attribute new_attr = - attribute_translator("ir::Int32Attribute", op_desc.GetAttr("axis")); + pir::Attribute new_attr = + attribute_translator("pir::Int32Attribute", op_desc.GetAttr("axis")); auto sec_defin_op = InsertFullOperationForAttributeInput(ctx, program, new_attr); @@ -993,16 +997,16 @@ struct SplitOpTranscriber : public OpTranscriber { return op_inputs; } - ir::AttributeMap TranslateOpAttribute( - ir::IrContext* ctx, + pir::AttributeMap TranslateOpAttribute( + pir::IrContext* ctx, const std::string& normalized_op_name, const OpAttributeInfoList& op_attr_infos, const OpDesc& op_desc) override { int num = paddle::get(op_desc.GetAttr("num")); if (num > 0) { - ir::AttributeMap attribute_map = { + pir::AttributeMap attribute_map = { {"num", - ir::Int32Attribute::get(ctx, op_desc.GetAttrIfExists("num"))}, + pir::Int32Attribute::get(ctx, 
op_desc.GetAttrIfExists("num"))}, }; return attribute_map; @@ -1011,19 +1015,20 @@ struct SplitOpTranscriber : public OpTranscriber { return {}; } - ir::OpInfo LoopkUpOpInfo(ir::IrContext* ctx, const OpDesc& op_desc) override { + pir::OpInfo LoopkUpOpInfo(pir::IrContext* ctx, + const OpDesc& op_desc) override { int num = paddle::get(op_desc.GetAttr("num")); std::string target_op_name; if (num > 0) { - target_op_name = "pd.split_with_num"; + target_op_name = "pd_op.split_with_num"; } else { - target_op_name = "pd.split"; + target_op_name = "pd_op.split"; } const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - IR_THROW("Op assign_value should have corresponding OpInfo pd.split"); + IR_THROW("Op assign_value should have corresponding OpInfo pd_op.split"); } return op_info; @@ -1031,10 +1036,10 @@ struct SplitOpTranscriber : public OpTranscriber { }; struct FetchOpTranscriber : public OpTranscriber { - ir::Operation* operator()(ir::IrContext* ctx, - TranslationContext* param_map, - const OpDesc& op_desc, - ir::Program* program) override { + pir::Operation* operator()(pir::IrContext* ctx, + TranslationContext* param_map, + const OpDesc& op_desc, + pir::Program* program) override { auto op_info = this->LoopkUpOpInfo(ctx, op_desc); auto* op_info_concept = @@ -1052,14 +1057,14 @@ struct FetchOpTranscriber : public OpTranscriber { ctx, param_map, op_desc, op_info.name(), input_infos, program); OpOutputTypeList op_output_types; - ir::AttributeMap attribute_map = { - {"name", ir::StrAttribute::get(ctx, op_desc.InputArgumentNames()[0])}, + pir::AttributeMap attribute_map = { + {"name", pir::StrAttribute::get(ctx, op_desc.InputArgumentNames()[0])}, {"col", - ir::Int32Attribute::get(ctx, op_desc.GetAttrIfExists("col"))}, + pir::Int32Attribute::get(ctx, op_desc.GetAttrIfExists("col"))}, }; op_output_types.push_back(op_inputs[0].type()); - ir::Operation* operation = ir::Operation::Create( + pir::Operation* operation = pir::Operation::Create( op_inputs, attribute_map, op_output_types, op_info); program->block()->push_back(operation); @@ -1068,13 +1073,13 @@ struct FetchOpTranscriber : public OpTranscriber { }; struct ShadowOutputOpTranscriber : public OpTranscriber { - ir::Operation* operator()(ir::IrContext* ctx, - TranslationContext* param_map, - const OpDesc& op_desc, - ir::Program* program) override { - auto op_info = ctx->GetRegisteredOpInfo(ir::SetParameterOp::name()); + pir::Operation* operator()(pir::IrContext* ctx, + TranslationContext* param_map, + const OpDesc& op_desc, + pir::Program* program) override { + auto op_info = ctx->GetRegisteredOpInfo(pir::SetParameterOp::name()); - std::vector op_inputs; + std::vector op_inputs; auto legacy_input_vars = op_desc.Input("x", true); auto defining_info = (*param_map)[legacy_input_vars[0]]; @@ -1086,14 +1091,14 @@ struct ShadowOutputOpTranscriber : public OpTranscriber { op_inputs.push_back(defining_info.value); - ir::AttributeMap attribute_map = { + pir::AttributeMap attribute_map = { {"parameter_name", - ir::StrAttribute::get(ctx, - op_desc.GetAttrIfExists("name"))}, + pir::StrAttribute::get(ctx, + op_desc.GetAttrIfExists("name"))}, }; - ir::Operation* operation = - ir::Operation::Create(op_inputs, attribute_map, {}, op_info); + pir::Operation* operation = + pir::Operation::Create(op_inputs, attribute_map, {}, op_info); program->block()->push_back(operation); return operation; @@ -1102,7 +1107,8 @@ struct ShadowOutputOpTranscriber : public OpTranscriber { // NOTE, add_n op in legacy ops don't have a kernel, so we use a new op 
for now struct AddNOpTranscriber : public OpTranscriber { - ir::OpInfo LoopkUpOpInfo(ir::IrContext* ctx, const OpDesc& op_desc) override { + pir::OpInfo LoopkUpOpInfo(pir::IrContext* ctx, + const OpDesc& op_desc) override { std::string target_op_name = kTargetDialectPrefix + OpNameCompatibleMapping(op_desc.Type()); if (IsInplace(op_desc)) { @@ -1120,18 +1126,20 @@ struct AddNOpTranscriber : public OpTranscriber { }; struct TrilAndTriuOpTranscriber : public OpTranscriber { - ir::OpInfo LoopkUpOpInfo(ir::IrContext* ctx, const OpDesc& op_desc) override { + pir::OpInfo LoopkUpOpInfo(pir::IrContext* ctx, + const OpDesc& op_desc) override { bool lower = PADDLE_GET_CONST(bool, op_desc.GetAttr("lower")); std::string target_op_name = ""; if (lower) { - target_op_name = "pd.tril"; + target_op_name = "pd_op.tril"; } else { - target_op_name = "pd.triu"; + target_op_name = "pd_op.triu"; } const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { IR_THROW( - "Op tril_triu should have corresponding OpInfo pd.tril or pd.triu."); + "Op tril_triu should have corresponding OpInfo pd_op.tril or " + "pd_op.triu."); } return op_info; @@ -1139,27 +1147,28 @@ struct TrilAndTriuOpTranscriber : public OpTranscriber { }; struct FillConstant2FullTranscriber : public OpTranscriber { - ir::OpInfo LoopkUpOpInfo(ir::IrContext* ctx, const OpDesc& op_desc) override { + pir::OpInfo LoopkUpOpInfo(pir::IrContext* ctx, + const OpDesc& op_desc) override { const auto& op_info = ctx->GetRegisteredOpInfo(dialect::FullOp::name()); if (!op_info) { - IR_THROW("Op fill_constant should have corresponding OpInfo pd.full"); + IR_THROW("Op fill_constant should have corresponding OpInfo pd_op.full"); } return op_info; } - std::vector GenerateOperationInput( - ir::IrContext* ctx, + std::vector GenerateOperationInput( + pir::IrContext* ctx, TranslationContext* param_map, const OpDesc& op_desc, const std::string& normalized_op_name, const OpInputInfoList& input_infos, - ir::Program* program) override { + pir::Program* program) override { return {}; } - ir::AttributeMap TranslateOpAttribute( - ir::IrContext* ctx, + pir::AttributeMap TranslateOpAttribute( + pir::IrContext* ctx, const std::string& normalized_op_name, const OpAttributeInfoList& op_attr_infos, const OpDesc& op_desc) override { @@ -1168,9 +1177,9 @@ struct FillConstant2FullTranscriber : public OpTranscriber { float value = PADDLE_GET_CONST(float, op_desc.GetAttr("value")); int dtype = PADDLE_GET_CONST(int, op_desc.GetAttr("dtype")); - auto attr_value = ir::FloatAttribute::get(ctx, value); + auto attr_value = pir::FloatAttribute::get(ctx, value); - ir::AttributeMap attribute_map = { + pir::AttributeMap attribute_map = { {"shape", attribute_translator("paddle::dialect::IntArrayAttribute", shape_attr)}, @@ -1217,25 +1226,26 @@ struct FillConstant2FullTranscriber : public OpTranscriber { }; struct FillConstant2FullWithTensorTranscriber : public OpTranscriber { - ir::OpInfo LoopkUpOpInfo(ir::IrContext* ctx, const OpDesc& op_desc) override { - const auto& op_info = ctx->GetRegisteredOpInfo("pd.full_with_tensor"); + pir::OpInfo LoopkUpOpInfo(pir::IrContext* ctx, + const OpDesc& op_desc) override { + const auto& op_info = ctx->GetRegisteredOpInfo("pd_op.full_with_tensor"); if (!op_info) { IR_THROW( "Op fill_constant should have corresponding OpInfo " - "pd.full_with_tensor"); + "pd_op.full_with_tensor"); } return op_info; } - std::vector GenerateOperationInput( - ir::IrContext* ctx, + std::vector GenerateOperationInput( + pir::IrContext* ctx, TranslationContext* 
param_map, const OpDesc& op_desc, const std::string& normalized_op_name, const OpInputInfoList& input_infos, - ir::Program* program) override { - std::vector op_inputs; + pir::Program* program) override { + std::vector op_inputs; if (op_desc.HasInput("ShapeTensor", true) && op_desc.Input("ShapeTensor", true).size() > 0) { auto shape_tensor_vars = op_desc.Input("ShapeTensor", true); @@ -1250,7 +1260,7 @@ struct FillConstant2FullWithTensorTranscriber : public OpTranscriber { } else { auto& attribute_translator = AttributeTranslator::instance(); paddle::framework::Attribute shape_attr = op_desc.GetAttr("shape"); - ir::Attribute new_attr = attribute_translator( + pir::Attribute new_attr = attribute_translator( "paddle::dialect::IntArrayAttribute", shape_attr); auto defining_op = InsertFullArrayOperationForAttributeInput(ctx, program, new_attr); @@ -1264,7 +1274,7 @@ struct FillConstant2FullWithTensorTranscriber : public OpTranscriber { op_inputs.push_back(defining_info.value); } else { float value = PADDLE_GET_CONST(float, op_desc.GetAttr("value")); - ir::Attribute new_attr = ir::FloatAttribute::get(ctx, value); + pir::Attribute new_attr = pir::FloatAttribute::get(ctx, value); auto defining_op = InsertFullOperationForAttributeInput(ctx, program, new_attr); op_inputs.push_back(defining_op->result(0)); @@ -1272,14 +1282,14 @@ struct FillConstant2FullWithTensorTranscriber : public OpTranscriber { return op_inputs; } - ir::AttributeMap TranslateOpAttribute( - ir::IrContext* ctx, + pir::AttributeMap TranslateOpAttribute( + pir::IrContext* ctx, const std::string& normalized_op_name, const OpAttributeInfoList& op_attr_infos, const OpDesc& op_desc) override { int dtype = PADDLE_GET_CONST(int, op_desc.GetAttr("dtype")); - ir::AttributeMap attribute_map = { + pir::AttributeMap attribute_map = { {"dtype", paddle::dialect::DataTypeAttribute::get( ctx, @@ -1290,10 +1300,10 @@ struct FillConstant2FullWithTensorTranscriber : public OpTranscriber { }; struct FillConstantTranscriber : public OpTranscriber { - ir::Operation* operator()(ir::IrContext* ctx, - TranslationContext* param_map, - const OpDesc& op_desc, - ir::Program* program) override { + pir::Operation* operator()(pir::IrContext* ctx, + TranslationContext* param_map, + const OpDesc& op_desc, + pir::Program* program) override { bool has_mutable_attribute = op_desc.HasInput("ShapeTensor", true) && op_desc.Input("ShapeTensor", true).size() > 0; has_mutable_attribute |= op_desc.HasInput("ShapeTensorList", true) && @@ -1310,12 +1320,13 @@ struct FillConstantTranscriber : public OpTranscriber { } }; -ir::OpResult TranslateNumClassesForOneHot(ir::IrContext* ctx, - TranslationContext* param_map, - const OpDesc& op_desc, - const std::string& normalized_op_name, - const OpInputInfo& input_info, - ir::Program* program) { +pir::OpResult TranslateNumClassesForOneHot( + pir::IrContext* ctx, + TranslationContext* param_map, + const OpDesc& op_desc, + const std::string& normalized_op_name, + const OpInputInfo& input_info, + pir::Program* program) { const std::string legacy_attr_name = "depth"; const std::string legacy_tensor_name = "depth_tensor"; std::vector legacy_vars; @@ -1343,9 +1354,9 @@ ir::OpResult TranslateNumClassesForOneHot(ir::IrContext* ctx, paddle::framework::Attribute legacy_attr = op_desc.GetAttr(legacy_attr_name); VLOG(10) << "[" << op_desc.Type() << "][attribute]" << " name: " << legacy_attr_name << " " << legacy_attr.index(); - ir::Attribute new_attr = attribute_translator(legacy_attr); + pir::Attribute new_attr = 
attribute_translator(legacy_attr); - ir::Operation* defining_op = + pir::Operation* defining_op = InsertFullOperationForAttributeInput(ctx, program, new_attr); return defining_op->result(0); } @@ -1360,16 +1371,16 @@ struct OneHotTranscriber : public OpTranscriber { }; }; -ir::Attribute TranslateReduceAll(ir::IrContext* ctx, - const OpDesc& op_desc, - const OpAttributeInfo& attr_info) { +pir::Attribute TranslateReduceAll(pir::IrContext* ctx, + const OpDesc& op_desc, + const OpAttributeInfo& attr_info) { bool reduce_all = false; if (op_desc.HasAttr("reduce_all")) { reduce_all = paddle::get(op_desc.GetAttr("reduce_all")); } if (reduce_all) { - return ir::ArrayAttribute::get(ctx, std::vector{}); + return pir::ArrayAttribute::get(ctx, std::vector{}); } auto& attribute_translator = AttributeTranslator::instance(); @@ -1391,13 +1402,13 @@ struct ReduceOpTranscriber : public OpTranscriber { }; struct ElementwiseTranscriber : public OpTranscriber { - std::vector GenerateOperationInput( - ir::IrContext* ctx, + std::vector GenerateOperationInput( + pir::IrContext* ctx, TranslationContext* param_map, const OpDesc& op_desc, const std::string& normalized_op_name, const OpInputInfoList& input_infos, - ir::Program* program) override { + pir::Program* program) override { int axis = paddle::get(op_desc.GetAttr("axis")); if (axis == -1) { @@ -1421,12 +1432,12 @@ struct ElementwiseTranscriber : public OpTranscriber { ctx, param_map, program, x_defining_info, x_name); x_defining_info = param_map->at(x_name); } - ir::OpResult x_value = x_defining_info.value; + pir::OpResult x_value = x_defining_info.value; IR_ENFORCE(x_value, "Expected op[%s]'s input %s is not null", op_desc.Type(), x_name); - ir::Type x_type = x_value.type(); + pir::Type x_type = x_value.type(); IR_ENFORCE(x_type.isa(), "Expected op[%s]'s input %s is DenseTensor but got %s", op_desc.Type(), @@ -1452,12 +1463,12 @@ struct ElementwiseTranscriber : public OpTranscriber { ctx, param_map, program, y_defining_info, y_name); y_defining_info = param_map->at(y_name); } - ir::OpResult y_value = y_defining_info.value; + pir::OpResult y_value = y_defining_info.value; IR_ENFORCE(y_value, "Expected op[%s]'s input %s is not null", op_desc.Type(), y_name); - ir::Type y_type = y_value.type(); + pir::Type y_type = y_value.type(); IR_ENFORCE(y_type.isa(), "Expected op[%s]'s input %s is DenseTensor but got %s", op_desc.Type(), @@ -1482,8 +1493,8 @@ struct ElementwiseTranscriber : public OpTranscriber { axis, append_size); - ir::Builder builder(ctx, program->block()); - ir::OpResult y_new; + pir::Builder builder(ctx, program->block()); + pir::OpResult y_new; if (std::find(y_shape.begin(), y_shape.end(), -1) == y_shape.end()) { std::vector y_new_shape(y_shape); for (int i = 0; i <= append_size; i++) { @@ -1500,8 +1511,8 @@ struct ElementwiseTranscriber : public OpTranscriber { std::vector(append_size, 1), phi::DataType::INT64, phi::CPUPlace()); - auto y_true_shape_op = builder.Build( - std::vector{shape_op.out(), append_shape_op.out()}); + auto y_true_shape_op = builder.Build( + std::vector{shape_op.out(), append_shape_op.out()}); auto concat_op = builder.Build(y_true_shape_op.out(), 0); auto y_new_shape = concat_op.out(); @@ -1513,12 +1524,14 @@ struct ElementwiseTranscriber : public OpTranscriber { }; struct GradAddOpTranscriber : public ElementwiseTranscriber { - ir::OpInfo LoopkUpOpInfo(ir::IrContext* ctx, const OpDesc& op_desc) override { - const std::string& target_op_name = "pd.add"; + pir::OpInfo LoopkUpOpInfo(pir::IrContext* ctx, + const OpDesc& op_desc) 
override { + const std::string& target_op_name = "pd_op.add"; const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { IR_THROW( - "Op assign_value should have corresponding OpInfo pd.assign_value_"); + "Op assign_value should have corresponding OpInfo " + "pd_op.assign_value_"); } return op_info; @@ -1526,10 +1539,10 @@ struct GradAddOpTranscriber : public ElementwiseTranscriber { }; struct ElementwiseGradTranscriber : public OpTranscriber { - void RecordOpResultMapping(ir::IrContext* ctx, + void RecordOpResultMapping(pir::IrContext* ctx, TranslationContext* param_map, const OpDesc& op_desc, - ir::Operation* operation, + pir::Operation* operation, const OpOutputMapping& arg_to_idx) override { OpTranscriber::RecordOpResultMapping( ctx, param_map, op_desc, operation, arg_to_idx); @@ -1566,12 +1579,12 @@ struct ElementwiseGradTranscriber : public OpTranscriber { op_desc.Type(), y_name); auto y_defining_info = param_map->at(y_name); - ir::OpResult y_value = y_defining_info.value; + pir::OpResult y_value = y_defining_info.value; IR_ENFORCE(y_value, "Expected op[%s]'s input %s is not null", op_desc.Type(), y_name); - ir::Type y_type = y_value.type(); + pir::Type y_type = y_value.type(); IR_ENFORCE(y_type.isa(), "Expected op[%s]'s input %s is DenseTensor but got %s", op_desc.Type(), @@ -1581,8 +1594,8 @@ struct ElementwiseGradTranscriber : public OpTranscriber { y_type.dyn_cast(); std::vector y_shape = phi::vectorize(y_tensor_type.dims()); - ir::OpResult value = operation->result(idx_in_op); - ir::Builder builder(ctx, operation->GetParent()); + pir::OpResult value = operation->result(idx_in_op); + pir::Builder builder(ctx, operation->GetParent()); auto reshape_op = builder.Build(value, y_shape); (*param_map)[y_grad_var_name] = VariableDefiningInfo(reshape_op.out(), false, -1); @@ -1590,10 +1603,10 @@ struct ElementwiseGradTranscriber : public OpTranscriber { }; struct SetValueOpTranscriber : public OpTranscriber { - ir::OpResult GetAttributeAsInput(ir::IrContext* ctx, - ir::Program* program, - const OpDesc& op_desc, - const OpInputInfo& input_info) override { + pir::OpResult GetAttributeAsInput(pir::IrContext* ctx, + pir::Program* program, + const OpDesc& op_desc, + const OpInputInfo& input_info) override { auto& attribute_translator = AttributeTranslator::instance(); auto& op_normalizer = OpNameNormalizer::instance(); @@ -1608,23 +1621,24 @@ struct SetValueOpTranscriber : public OpTranscriber { framework::Attribute legacy_attr = op_desc.GetAttr(legacy_attr_name); VLOG(10) << "[" << op_desc.Type() << "][attribute]" << " name: " << legacy_attr_name << " " << legacy_attr.index(); - ir::Attribute new_attr = + pir::Attribute new_attr = attribute_translator("paddle::dialect::IntArrayAttribute", legacy_attr); - ir::Operation* defining_op = + pir::Operation* defining_op = InsertFullArrayOperationForAttributeInput(ctx, program, new_attr); return defining_op->result(0); } }; struct SetValueWithTensorOpTranscriber : public SetValueOpTranscriber { - ir::OpInfo LoopkUpOpInfo(ir::IrContext* ctx, const OpDesc& op_desc) override { + pir::OpInfo LoopkUpOpInfo(pir::IrContext* ctx, + const OpDesc& op_desc) override { std::string target_op_name = dialect::SetValueWithTensorOp::name(); const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { IR_THROW( "Op set_value should have corresponding OpInfo " - "pd.set_value_with_tensor"); + "pd_op.set_value_with_tensor"); } return op_info; @@ -1635,12 +1649,12 @@ struct SetValueWithTensorOpTranscriber : public 
SetValueOpTranscriber { if (input_name != "values") { return nullptr; } - return [](ir::IrContext* ctx, + return [](pir::IrContext* ctx, TranslationContext* param_map, const OpDesc& op_desc, const std::string&, const OpInputInfo& info, - ir::Program* program) -> ir::OpResult { + pir::Program* program) -> pir::OpResult { std::vector legacy_input_vars; IR_ENFORCE(op_desc.HasInput("ValueTensor"), "[set_value] should have ValueTensor"); @@ -1662,13 +1676,14 @@ struct SetValueWithTensorOpTranscriber : public SetValueOpTranscriber { }; struct SetValueGradOpTranscriber : public SetValueWithTensorOpTranscriber { - ir::OpInfo LoopkUpOpInfo(ir::IrContext* ctx, const OpDesc& op_desc) override { + pir::OpInfo LoopkUpOpInfo(pir::IrContext* ctx, + const OpDesc& op_desc) override { std::string target_op_name = dialect::SetValueGradOp::name(); const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { IR_THROW( "Op set_value_grad should have corresponding OpInfo " - "pd.set_value_grad"); + "pd_op.set_value_grad"); } return op_info; @@ -1676,10 +1691,10 @@ struct SetValueGradOpTranscriber : public SetValueWithTensorOpTranscriber { }; struct LegacySetValueDispatcher : public OpTranscriber { - ir::Operation* operator()(ir::IrContext* ctx, - TranslationContext* param_map, - const OpDesc& op_desc, - ir::Program* program) override { + pir::Operation* operator()(pir::IrContext* ctx, + TranslationContext* param_map, + const OpDesc& op_desc, + pir::Program* program) override { std::vector legacy_input_vars; // if op has input with name "ValueTensor", then use that input as value @@ -1698,8 +1713,8 @@ struct LegacySetValueDispatcher : public OpTranscriber { }; OpTranslator::OpTranslator() { - ir::IrContext* ctx = ir::IrContext::Instance(); - ctx->GetOrRegisterDialect(); + pir::IrContext* ctx = pir::IrContext::Instance(); + ctx->GetOrRegisterDialect(); general_handler = OpTranscriber(); special_handlers["add_n"] = AddNOpTranscriber(); diff --git a/paddle/fluid/ir_adaptor/translator/op_translator.h b/paddle/fluid/ir_adaptor/translator/op_translator.h index afc7566be12b3..2ae6643999b8d 100644 --- a/paddle/fluid/ir_adaptor/translator/op_translator.h +++ b/paddle/fluid/ir_adaptor/translator/op_translator.h @@ -20,12 +20,12 @@ #include "paddle/fluid/framework/block_desc.h" #include "paddle/fluid/framework/op_desc.h" #include "paddle/fluid/framework/var_desc.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/interface/op_yaml_info.h" #include "paddle/fluid/ir_adaptor/translator/program_translator.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/operation.h" -#include "paddle/ir/core/program.h" -#include "paddle/ir/core/value.h" +#include "paddle/fluid/pir/dialect/operator/interface/op_yaml_info.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/operation.h" +#include "paddle/pir/core/program.h" +#include "paddle/pir/core/value.h" namespace paddle { namespace translator { @@ -41,7 +41,7 @@ struct OpTranscriber { using IdxInVector = size_t; using ResultIdx = std::tuple; using OpDesc = paddle::framework::OpDesc; - using OpOutputTypeList = std::vector; + using OpOutputTypeList = std::vector; using OpOutputMapping = std::unordered_map; using OpInputInfo = dialect::OpInputInfo; using OpInputInfoList = std::vector; @@ -49,51 +49,51 @@ struct OpTranscriber { using OpAttributeInfoList = std::vector; using OpOutputInfo = dialect::OpOutputInfo; using OpOutputInfoList = std::vector; - using InputHandlerFn = std::function; - using AttributeHandlerFn = std::function; + 
using InputHandlerFn = std::function; + using AttributeHandlerFn = std::function; public: - virtual ir::Operation* operator()(ir::IrContext* ctx, - TranslationContext* param_map, - const OpDesc& op_desc, - ir::Program* program); + virtual pir::Operation* operator()(pir::IrContext* ctx, + TranslationContext* param_map, + const OpDesc& op_desc, + pir::Program* program); public: - virtual ir::OpInfo LoopkUpOpInfo(ir::IrContext* ctx, const OpDesc& op_desc); - virtual std::vector GenerateOperationInput( - ir::IrContext* ctx, + virtual pir::OpInfo LoopkUpOpInfo(pir::IrContext* ctx, const OpDesc& op_desc); + virtual std::vector GenerateOperationInput( + pir::IrContext* ctx, TranslationContext* param_map, const OpDesc& op_desc, const std::string& normalized_op_name, const OpInputInfoList& input_infos, - ir::Program* program); + pir::Program* program); virtual std::tuple GenerateOperationOutput( - ir::IrContext* ctx, + pir::IrContext* ctx, const OpDesc& op_desc, const OpOutputInfoList& output_infos); - virtual void HandleNonexistentAttribute(ir::IrContext*, - ir::AttributeMap* attribute_map, + virtual void HandleNonexistentAttribute(pir::IrContext*, + pir::AttributeMap* attribute_map, const OpAttributeInfo& info); - virtual ir::AttributeMap TranslateOpAttribute( - ir::IrContext* ctx, + virtual pir::AttributeMap TranslateOpAttribute( + pir::IrContext* ctx, const std::string& normalized_op_name, const OpAttributeInfoList& op_attr_infos, const OpDesc& op_desc); - virtual ir::OpResult GetAttributeAsInput(ir::IrContext* ctx, - ir::Program* program, - const OpDesc& op_desc, - const OpInputInfo& input_info); + virtual pir::OpResult GetAttributeAsInput(pir::IrContext* ctx, + pir::Program* program, + const OpDesc& op_desc, + const OpInputInfo& input_info); - virtual void RecordOpResultMapping(ir::IrContext* ctx, + virtual void RecordOpResultMapping(pir::IrContext* ctx, TranslationContext* param_map, const OpDesc& op_desc, - ir::Operation* operation, + pir::Operation* operation, const OpOutputMapping& arg_to_idx); public: @@ -105,11 +105,11 @@ struct OpTranscriber { const std::string& input_name) { return nullptr; } - virtual void InsertSliceOperationForInput(ir::IrContext* ctx, + virtual void InsertSliceOperationForInput(pir::IrContext* ctx, TranslationContext* param_map, const OpDesc& op_desc, const OpInputInfoList& input_infos, - ir::Program* program); + pir::Program* program); }; class OpTranslator { @@ -118,8 +118,8 @@ class OpTranslator { using OpDesc = paddle::framework::OpDesc; using BlockDesc = paddle::framework::BlockDesc; using VarDesc = paddle::framework::VarDesc; - using OpTranslateFn = std::function; + using OpTranslateFn = std::function; private: OpTranslator(); // Disallow instantiation outside of the class. 
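The header above also shows the shape of `OpTranslator` itself: a singleton holding one general `OpTranslateFn` plus a map of special handlers keyed by legacy op type, which is why each `*Transcriber` struct in the .cc only overrides the hooks it needs. A standalone sketch of that registry pattern, with placeholder types rather than Paddle's:

```cpp
#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>

struct Op { std::string type; };  // stand-in for framework::OpDesc
using TranslateFn = std::function<std::string(const Op&)>;

class Registry {
 public:
  static Registry& instance() {
    static Registry r;  // same Meyers-singleton shape as OpTranslator::instance()
    return r;
  }
  std::string Translate(const Op& op) {
    auto it = special_.find(op.type);
    return it != special_.end() ? it->second(op) : general_(op);
  }

 private:
  // The constructor registers handlers, like OpTranslator::OpTranslator().
  Registry() {
    general_ = [](const Op& op) { return "pd_op." + op.type; };
    special_["fill_constant"] = [](const Op&) { return "pd_op.full"; };
  }
  TranslateFn general_;
  std::unordered_map<std::string, TranslateFn> special_;
};

int main() {
  std::cout << Registry::instance().Translate({"relu"}) << "\n";           // pd_op.relu
  std::cout << Registry::instance().Translate({"fill_constant"}) << "\n";  // pd_op.full
}
```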
diff --git a/paddle/fluid/ir_adaptor/translator/program_translator.cc b/paddle/fluid/ir_adaptor/translator/program_translator.cc index 9065554781265..40ca1e77a1eab 100644 --- a/paddle/fluid/ir_adaptor/translator/program_translator.cc +++ b/paddle/fluid/ir_adaptor/translator/program_translator.cc @@ -23,15 +23,15 @@ #include "paddle/fluid/ir_adaptor/translator/op_translator.h" #include "paddle/fluid/ir_adaptor/translator/type_translator.h" #include "paddle/fluid/ir_adaptor/translator/utils.h" -#include "paddle/ir/core/attribute.h" -#include "paddle/ir/core/block.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/builtin_op.h" -#include "paddle/ir/core/builtin_type.h" -#include "paddle/ir/core/enforce.h" -#include "paddle/ir/core/operation.h" -#include "paddle/ir/core/value.h" #include "paddle/phi/core/enforce.h" +#include "paddle/pir/core/attribute.h" +#include "paddle/pir/core/block.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/builtin_op.h" +#include "paddle/pir/core/builtin_type.h" +#include "paddle/pir/core/enforce.h" +#include "paddle/pir/core/operation.h" +#include "paddle/pir/core/value.h" namespace paddle { namespace translator { @@ -46,9 +46,9 @@ const std::unordered_set ProgramTranslator::no_cast_var_names = { }; ProgramTranslator::ProgramTranslator(const ProgramDesc* legacy_program, - ir::Program* program) + pir::Program* program) : legacy_program_(legacy_program), program_(program) { - ctx_ = ir::IrContext::Instance(); + ctx_ = pir::IrContext::Instance(); } void ProgramTranslator::Translate() { @@ -84,31 +84,31 @@ void ProgramTranslator::Translate() { } } -inline ir::Operation* InsertGetParamaterOp(ir::IrContext* ctx, - const VarDesc* var) { +inline pir::Operation* InsertGetParamaterOp(pir::IrContext* ctx, + const VarDesc* var) { auto& type_translator = TypeTranslator::instance(); - std::string get_parameter_op_name(ir::GetParameterOp::name()); - ir::OpInfo op_info = ctx->GetRegisteredOpInfo(get_parameter_op_name); - std::unordered_map op_attribute_map = { - {"parameter_name", ir::StrAttribute::get(ctx, var->Name())}, + std::string get_parameter_op_name(pir::GetParameterOp::name()); + pir::OpInfo op_info = ctx->GetRegisteredOpInfo(get_parameter_op_name); + std::unordered_map op_attribute_map = { + {"parameter_name", pir::StrAttribute::get(ctx, var->Name())}, }; - ir::Type translated_var_type = type_translator[var->GetType()](ctx, *var); - ir::Operation* operation = ir::Operation::Create( + pir::Type translated_var_type = type_translator[var->GetType()](ctx, *var); + pir::Operation* operation = pir::Operation::Create( {}, op_attribute_map, {translated_var_type}, op_info); return operation; } -inline ir::Operation* InsertSetParamaterOp(ir::IrContext* ctx, - ir::OpResult defining_op_result, - const VarDesc* var) { - std::string set_parameter_op_name(ir::SetParameterOp::name()); - ir::OpInfo op_info = ctx->GetRegisteredOpInfo(set_parameter_op_name); - std::unordered_map op_attribute_map = { - {"parameter_name", ir::StrAttribute::get(ctx, var->Name())}, +inline pir::Operation* InsertSetParamaterOp(pir::IrContext* ctx, + pir::OpResult defining_op_result, + const VarDesc* var) { + std::string set_parameter_op_name(pir::SetParameterOp::name()); + pir::OpInfo op_info = ctx->GetRegisteredOpInfo(set_parameter_op_name); + std::unordered_map op_attribute_map = { + {"parameter_name", pir::StrAttribute::get(ctx, var->Name())}, }; - ir::Operation* operation = ir::Operation::Create( + pir::Operation* operation = pir::Operation::Create( 
{defining_op_result}, op_attribute_map, {}, op_info); return operation; } @@ -149,7 +149,7 @@ void ProgramTranslator::GetParameterForSingleBlock(const BlockDesc& block) { var_desc, phi::errors::PreconditionNotMet( "VarDesc of [%s] can not be nullptr", var_name)); - ir::Operation* op = InsertGetParamaterOp(ctx_, var_desc); + pir::Operation* op = InsertGetParamaterOp(ctx_, var_desc); program_->block()->push_back(op); param_map_[var_name] = VariableDefiningInfo(op->result(0)); VLOG(10) << "[op translated][get parameter]" << var_name; @@ -178,7 +178,7 @@ void ProgramTranslator::InsertOperationToSingleBlock(const BlockDesc& block) { continue; } } - ir::Operation* operation = fn(ctx_, ¶m_map_, *op, program_); + pir::Operation* operation = fn(ctx_, ¶m_map_, *op, program_); VLOG(10) << "[op translated][special]" << operation; } } @@ -203,7 +203,7 @@ void ProgramTranslator::SetParameterFromSingleBlock(const BlockDesc& block) { need_set_parameter_op &= (param_map_.count(var_name) != 0); need_set_parameter_op &= (!set_input_var_names.count(var_name)); if (need_set_parameter_op) { - ir::OpResult defining_op_result = param_map_[var_name].value; + pir::OpResult defining_op_result = param_map_[var_name].value; if (!defining_op_result) { continue; } @@ -214,11 +214,11 @@ void ProgramTranslator::SetParameterFromSingleBlock(const BlockDesc& block) { defining_op_result = param_map_.at(var_name).value; } - ir::Operation* op = InsertSetParamaterOp( + pir::Operation* op = InsertSetParamaterOp( ctx_, defining_op_result, parameter_name_mappings_[var_name]); - ir::Block* block = program_->block(); - ir::Block::iterator insert_pos = std::find( + pir::Block* block = program_->block(); + pir::Block::iterator insert_pos = std::find( block->begin(), block->end(), defining_op_result.owner()); IR_ENFORCE( @@ -249,7 +249,7 @@ void ProgramTranslator::SetStopGradientAttributeForAllValue( if (var == nullptr) { continue; } - ir::OpResult value = value_info.value; + pir::OpResult value = value_info.value; if (!value) { PADDLE_THROW(phi::errors::PreconditionNotMet( "Value of [%s] can not ber None", var_name)); @@ -261,19 +261,19 @@ void ProgramTranslator::SetStopGradientAttributeForAllValue( "Defining operator of [%s] can not be nullptr", var_name)); VLOG(8) << "[op translated][stop gradient]" << var_name << " from: " << defining_op->name(); - std::vector stop_gradients; + std::vector stop_gradients; if (defining_op->HasAttribute(kAttrStopGradients)) { stop_gradients = defining_op->attribute(kAttrStopGradients) - .dyn_cast() + .dyn_cast() .AsVector(); } else { - stop_gradients = std::vector( - defining_op->num_results(), ir::BoolAttribute::get(ctx_, false)); + stop_gradients = std::vector( + defining_op->num_results(), pir::BoolAttribute::get(ctx_, false)); } stop_gradients[value.GetResultIndex()] = - ir::BoolAttribute::get(ctx_, var->StopGradient()); + pir::BoolAttribute::get(ctx_, var->StopGradient()); defining_op->set_attribute(kAttrStopGradients, - ir::ArrayAttribute::get(ctx_, stop_gradients)); + pir::ArrayAttribute::get(ctx_, stop_gradients)); } } @@ -288,7 +288,7 @@ void ProgramTranslator::SetIsPersisableAttributeForAllValue( if (var == nullptr) { continue; } - ir::OpResult value = value_info.value; + pir::OpResult value = value_info.value; if (!value) { PADDLE_THROW(phi::errors::PreconditionNotMet( "Value of [%s] can not ber None", var_name)); @@ -300,19 +300,19 @@ void ProgramTranslator::SetIsPersisableAttributeForAllValue( "Defining operator of [%s] can not be nullptr", var_name)); VLOG(8) << "[op translated][is 
persisable]" << var_name << " from: " << defining_op->name(); - std::vector is_persisable; + std::vector is_persisable; if (defining_op->HasAttribute(kAttrIsPersisable)) { is_persisable = defining_op->attribute(kAttrIsPersisable) - .dyn_cast() + .dyn_cast() .AsVector(); } else { - is_persisable = std::vector( - defining_op->num_results(), ir::BoolAttribute::get(ctx_, false)); + is_persisable = std::vector( + defining_op->num_results(), pir::BoolAttribute::get(ctx_, false)); } is_persisable[value.GetResultIndex()] = - ir::BoolAttribute::get(ctx_, var->Persistable()); + pir::BoolAttribute::get(ctx_, var->Persistable()); defining_op->set_attribute(kAttrIsPersisable, - ir::ArrayAttribute::get(ctx_, is_persisable)); + pir::ArrayAttribute::get(ctx_, is_persisable)); } } diff --git a/paddle/fluid/ir_adaptor/translator/program_translator.h b/paddle/fluid/ir_adaptor/translator/program_translator.h index 88901376ae3cb..a0c7495f21364 100644 --- a/paddle/fluid/ir_adaptor/translator/program_translator.h +++ b/paddle/fluid/ir_adaptor/translator/program_translator.h @@ -20,15 +20,15 @@ #include #include "paddle/fluid/framework/program_desc.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/program.h" -#include "paddle/ir/core/value.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/program.h" +#include "paddle/pir/core/value.h" namespace paddle { namespace translator { struct VariableDefiningInfo { - VariableDefiningInfo(ir::OpResult value, + VariableDefiningInfo(pir::OpResult value, bool generated_by_vector = false, int idx_in_vector = -1) : value(value), @@ -36,7 +36,7 @@ struct VariableDefiningInfo { idx_in_vector(idx_in_vector) {} VariableDefiningInfo() {} - ir::OpResult value; + pir::OpResult value; bool generated_by_vector = false; // true if target variable is generated by Vector @@ -54,14 +54,14 @@ class ProgramTranslator { public: explicit ProgramTranslator(const ProgramDesc* legacy_program, - ir::Program* program); + pir::Program* program); void Translate(); private: const ProgramDesc* legacy_program_; // not owned - ir::Program* program_; // not owned - ir::IrContext* ctx_; // not owned + pir::Program* program_; // not owned + pir::IrContext* ctx_; // not owned TranslationContext param_map_; std::unordered_map parameter_name_mappings_; diff --git a/paddle/fluid/ir_adaptor/translator/translate.cc b/paddle/fluid/ir_adaptor/translator/translate.cc index 87bef41641a5f..0f98e557743fc 100644 --- a/paddle/fluid/ir_adaptor/translator/translate.cc +++ b/paddle/fluid/ir_adaptor/translator/translate.cc @@ -17,20 +17,20 @@ #include #include "paddle/fluid/framework/program_desc.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.h" #include "paddle/fluid/ir_adaptor/translator/program_translator.h" -#include "paddle/ir/core/builtin_dialect.h" -#include "paddle/ir/core/program.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_dialect.h" +#include "paddle/pir/core/builtin_dialect.h" +#include "paddle/pir/core/program.h" namespace paddle { using LegacyProgramDesc = ::paddle::framework::ProgramDesc; -using Program = ::ir::Program; +using Program = pir::Program; std::unique_ptr TranslateLegacyProgramToProgram( const LegacyProgramDesc& legacy_program) { - ir::IrContext* ctx = ir::IrContext::Instance(); - ctx->GetOrRegisterDialect(); + pir::IrContext* ctx = pir::IrContext::Instance(); + ctx->GetOrRegisterDialect(); auto program = std::make_unique(ctx); translator::ProgramTranslator program_translator(&legacy_program, program.get()); diff --git 
a/paddle/fluid/ir_adaptor/translator/translate.h b/paddle/fluid/ir_adaptor/translator/translate.h index 8f604a47761fc..47ad12003f807 100644 --- a/paddle/fluid/ir_adaptor/translator/translate.h +++ b/paddle/fluid/ir_adaptor/translator/translate.h @@ -17,12 +17,12 @@ #include #include "paddle/fluid/framework/program_desc.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.h" -#include "paddle/ir/core/program.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_dialect.h" +#include "paddle/pir/core/program.h" namespace paddle { -std::unique_ptr<::ir::Program> TranslateLegacyProgramToProgram( +std::unique_ptr<::pir::Program> TranslateLegacyProgramToProgram( const ::paddle::framework::ProgramDesc& legacy_program); } // namespace paddle diff --git a/paddle/fluid/ir_adaptor/translator/type_translator.cc b/paddle/fluid/ir_adaptor/translator/type_translator.cc index 5c3cbdbc240ce..ef1dbf543c671 100644 --- a/paddle/fluid/ir_adaptor/translator/type_translator.cc +++ b/paddle/fluid/ir_adaptor/translator/type_translator.cc @@ -15,9 +15,9 @@ #include "paddle/fluid/ir_adaptor/translator/type_translator.h" #include "paddle/fluid/framework/framework.pb.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type_storage.h" -#include "paddle/ir/core/builtin_type.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_type.h" +#include "paddle/fluid/pir/dialect/operator/ir/type_storage.h" +#include "paddle/pir/core/builtin_type.h" namespace paddle { namespace translator { @@ -34,59 +34,59 @@ using SelectedRowsTypeStorage = paddle::dialect::SelectedRowsTypeStorage; TypeTranslator::TypeTranslator() { handlers = { {VarType::BOOL, - [&](ir::IrContext* ctx, const VarDesc& var_desc) -> ir::Type { - return ir::BoolType::get(ctx); + [&](pir::IrContext* ctx, const VarDesc& var_desc) -> pir::Type { + return pir::BoolType::get(ctx); }}, {VarType::UINT8, - [&](ir::IrContext* ctx, const VarDesc& var_desc) -> ir::Type { - return ir::UInt8Type::get(ctx); + [&](pir::IrContext* ctx, const VarDesc& var_desc) -> pir::Type { + return pir::UInt8Type::get(ctx); }}, {VarType::INT8, - [&](ir::IrContext* ctx, const VarDesc& var_desc) -> ir::Type { - return ir::Int8Type::get(ctx); + [&](pir::IrContext* ctx, const VarDesc& var_desc) -> pir::Type { + return pir::Int8Type::get(ctx); }}, {VarType::INT16, - [&](ir::IrContext* ctx, const VarDesc& var_desc) -> ir::Type { - return ir::Int16Type::get(ctx); + [&](pir::IrContext* ctx, const VarDesc& var_desc) -> pir::Type { + return pir::Int16Type::get(ctx); }}, {VarType::INT32, - [&](ir::IrContext* ctx, const VarDesc& var_desc) -> ir::Type { - return ir::Int32Type::get(ctx); + [&](pir::IrContext* ctx, const VarDesc& var_desc) -> pir::Type { + return pir::Int32Type::get(ctx); }}, {VarType::INT64, - [&](ir::IrContext* ctx, const VarDesc& var_desc) -> ir::Type { - return ir::Int64Type::get(ctx); + [&](pir::IrContext* ctx, const VarDesc& var_desc) -> pir::Type { + return pir::Int64Type::get(ctx); }}, {VarType::FP16, - [&](ir::IrContext* ctx, const VarDesc& var_desc) -> ir::Type { - return ir::Float16Type::get(ctx); + [&](pir::IrContext* ctx, const VarDesc& var_desc) -> pir::Type { + return pir::Float16Type::get(ctx); }}, {VarType::FP32, - [&](ir::IrContext* ctx, const VarDesc& var_desc) -> ir::Type { - return ir::Float32Type::get(ctx); + [&](pir::IrContext* ctx, const VarDesc& var_desc) -> pir::Type { + return pir::Float32Type::get(ctx); }}, {VarType::FP64, - [&](ir::IrContext* ctx, const VarDesc& var_desc) -> 
ir::Type { - return ir::Float64Type::get(ctx); + [&](pir::IrContext* ctx, const VarDesc& var_desc) -> pir::Type { + return pir::Float64Type::get(ctx); }}, {VarType::BF16, - [&](ir::IrContext* ctx, const VarDesc& var_desc) -> ir::Type { - return ir::BFloat16Type::get(ctx); + [&](pir::IrContext* ctx, const VarDesc& var_desc) -> pir::Type { + return pir::BFloat16Type::get(ctx); }}, {VarType::COMPLEX64, - [&](ir::IrContext* ctx, const VarDesc& var_desc) -> ir::Type { - return ir::Complex64Type::get(ctx); + [&](pir::IrContext* ctx, const VarDesc& var_desc) -> pir::Type { + return pir::Complex64Type::get(ctx); }}, {VarType::COMPLEX128, - [&](ir::IrContext* ctx, const VarDesc& var_desc) -> ir::Type { - return ir::Complex128Type::get(ctx); + [&](pir::IrContext* ctx, const VarDesc& var_desc) -> pir::Type { + return pir::Complex128Type::get(ctx); }}, {VarType::LOD_TENSOR, - [&](ir::IrContext* ctx, const VarDesc& var_desc) -> ir::Type { + [&](pir::IrContext* ctx, const VarDesc& var_desc) -> pir::Type { VLOG(10) << "[vartype translating]" << "[" << var_desc.Name() << "] from LOD_TENSOR"; - ir::Type dtype = + pir::Type dtype = this->operator[](var_desc.GetDataType())(ctx, var_desc); DenseTensorTypeStorage::Dim dim = phi::make_ddim(var_desc.GetShape()); DenseTensorTypeStorage::DataLayout layout = @@ -96,18 +96,18 @@ TypeTranslator::TypeTranslator() { return DenseTensorType::get(ctx, dtype, dim, layout, lod, offset); }}, {VarType::LOD_TENSOR_ARRAY, - [&](ir::IrContext* ctx, const VarDesc& var_desc) -> ir::Type { + [&](pir::IrContext* ctx, const VarDesc& var_desc) -> pir::Type { VLOG(10) << "[vartype translating]" << "[" << var_desc.Name() << "] from LOD_TENSOR_ARRAY"; - return ir::VectorType::get(ctx, std::vector{}); + return pir::VectorType::get(ctx, std::vector{}); }}, {VarType::SELECTED_ROWS, - [&](ir::IrContext* ctx, const VarDesc& var_desc) -> ir::Type { + [&](pir::IrContext* ctx, const VarDesc& var_desc) -> pir::Type { VLOG(10) << "[vartype translating]" << "[" << var_desc.Name() << "] from SELECTED_ROWS"; - ir::Type dtype = + pir::Type dtype = this->operator[](var_desc.GetDataType())(ctx, var_desc); SelectedRowsTypeStorage::Dim dim = phi::make_ddim(var_desc.GetShape()); @@ -115,7 +115,7 @@ TypeTranslator::TypeTranslator() { SelectedRowsTypeStorage::DataLayout::UNDEFINED; SelectedRowsTypeStorage::LoD lod = {}; size_t offset = 0; - ir::Type SelectedRows = + pir::Type SelectedRows = SelectedRowsType::get(ctx, dtype, dim, layout, lod, offset); return SelectedRows; }}, diff --git a/paddle/fluid/ir_adaptor/translator/type_translator.h b/paddle/fluid/ir_adaptor/translator/type_translator.h index d93be9a9db371..255795c92d807 100644 --- a/paddle/fluid/ir_adaptor/translator/type_translator.h +++ b/paddle/fluid/ir_adaptor/translator/type_translator.h @@ -20,15 +20,15 @@ #include "paddle/fluid/framework/op_desc.h" #include "paddle/fluid/framework/var_desc.h" #include "paddle/fluid/ir_adaptor/translator/program_translator.h" -#include "paddle/ir/core/builtin_type.h" -#include "paddle/ir/core/dialect.h" -#include "paddle/ir/core/ir_context.h" +#include "paddle/pir/core/builtin_type.h" +#include "paddle/pir/core/dialect.h" +#include "paddle/pir/core/ir_context.h" namespace paddle { namespace translator { using TypeTranslateFn = - std::function; + std::function; class TypeTranslator { public: diff --git a/paddle/fluid/ir_adaptor/translator/utils.cc b/paddle/fluid/ir_adaptor/translator/utils.cc index 38f3f5fd8c90b..4a591eeedf083 100644 --- a/paddle/fluid/ir_adaptor/translator/utils.cc +++ 
b/paddle/fluid/ir_adaptor/translator/utils.cc @@ -16,43 +16,43 @@ #include -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.h" #include "paddle/fluid/ir_adaptor/translator/op_translator.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/builtin_type.h" -#include "paddle/ir/core/enforce.h" -#include "paddle/ir/core/utils.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_dialect.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/builtin_type.h" +#include "paddle/pir/core/enforce.h" +#include "paddle/pir/core/utils.h" namespace paddle { namespace translator { -ir::Operation* InsertSliceOperationForTarget( - ir::IrContext* ctx, +pir::Operation* InsertSliceOperationForTarget( + pir::IrContext* ctx, TranslationContext* param_map, - ir::Program* program, + pir::Program* program, const VariableDefiningInfo& defining_info, const std::string& arg_name) { - std::string slice_op_name(ir::SliceOp::name()); - ir::OpInfo op_info = ctx->GetRegisteredOpInfo(slice_op_name); - std::unordered_map op_attribute_map = { - {"index", ir::Int32Attribute::get(ctx, defining_info.idx_in_vector)}, + std::string slice_op_name(pir::SliceOp::name()); + pir::OpInfo op_info = ctx->GetRegisteredOpInfo(slice_op_name); + std::unordered_map op_attribute_map = { + {"index", pir::Int32Attribute::get(ctx, defining_info.idx_in_vector)}, }; - ir::VectorType src_vec_type = - defining_info.value.type().dyn_cast(); - ir::Operation* operation = - ir::Operation::Create({defining_info.value}, - op_attribute_map, - {src_vec_type[defining_info.idx_in_vector]}, - op_info); + pir::VectorType src_vec_type = + defining_info.value.type().dyn_cast(); + pir::Operation* operation = + pir::Operation::Create({defining_info.value}, + op_attribute_map, + {src_vec_type[defining_info.idx_in_vector]}, + op_info); program->block()->push_back(operation); - ir::OpResult target_op_result = operation->result(0); + pir::OpResult target_op_result = operation->result(0); (*param_map)[arg_name] = VariableDefiningInfo(target_op_result); return operation; } std::ostream& operator<<(std::ostream& os, const std::vector& vec_str) { - ir::PrintInterleave( + pir::PrintInterleave( vec_str.begin(), vec_str.end(), [&os](std::string s) { os << s; }, @@ -61,7 +61,7 @@ std::ostream& operator<<(std::ostream& os, } std::vector CheckUnregisteredOperationInBlock( - ir::IrContext* ctx, const framework::BlockDesc& block) { + pir::IrContext* ctx, const framework::BlockDesc& block) { auto& op_translator = OpTranslator::instance(); std::vector unregistered_ops; for (auto op : block.AllOps()) { @@ -71,7 +71,7 @@ std::vector CheckUnregisteredOperationInBlock( OpTranscriber general_handler; try { general_handler.LoopkUpOpInfo(ctx, *op); - } catch (ir::IrNotMetException& e) { + } catch (pir::IrNotMetException& e) { unregistered_ops.push_back(op->Type()); } } @@ -79,8 +79,8 @@ std::vector CheckUnregisteredOperationInBlock( } std::vector CheckUnregisteredOperation( - ir::IrContext* ctx, const framework::ProgramDesc& legacy_program) { - ctx->GetOrRegisterDialect(); + pir::IrContext* ctx, const framework::ProgramDesc& legacy_program) { + ctx->GetOrRegisterDialect(); std::vector unregistered_ops; for (size_t block_idx = 0; block_idx < legacy_program.Size(); block_idx++) { diff --git a/paddle/fluid/ir_adaptor/translator/utils.h b/paddle/fluid/ir_adaptor/translator/utils.h index 20e462b5bbde1..63bbde06d2ec0 100644 --- a/paddle/fluid/ir_adaptor/translator/utils.h +++ b/paddle/fluid/ir_adaptor/translator/utils.h @@ -19,17 +19,17 
@@ #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/ir_adaptor/translator/program_translator.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/operation.h" -#include "paddle/ir/core/program.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/operation.h" +#include "paddle/pir/core/program.h" namespace paddle { namespace translator { -ir::Operation* InsertSliceOperationForTarget( - ir::IrContext* ctx, +pir::Operation* InsertSliceOperationForTarget( + pir::IrContext* ctx, TranslationContext* param_map, - ir::Program* program, + pir::Program* program, const VariableDefiningInfo& defining_info, const std::string& arg_name); @@ -37,7 +37,7 @@ std::ostream& operator<<(std::ostream& os, const std::vector& vec_str); std::vector CheckUnregisteredOperation( - ir::IrContext* ctx, const framework::ProgramDesc& legacy_program); + pir::IrContext* ctx, const framework::ProgramDesc& legacy_program); } // namespace translator } // namespace paddle diff --git a/paddle/fluid/jit/engine/interpreter_engine.cc b/paddle/fluid/jit/engine/interpreter_engine.cc index 23cb3ee8b5a20..9c5f7b20d9fd6 100644 --- a/paddle/fluid/jit/engine/interpreter_engine.cc +++ b/paddle/fluid/jit/engine/interpreter_engine.cc @@ -20,9 +20,9 @@ #include "paddle/fluid/framework/ir/pass.h" #include "paddle/fluid/framework/new_executor/interpretercore.h" #include "paddle/fluid/framework/program_desc.h" -#include "paddle/ir/core/program.h" -#include "paddle/ir/core/value.h" #include "paddle/phi/core/enforce.h" +#include "paddle/pir/core/program.h" +#include "paddle/pir/core/value.h" namespace paddle { namespace jit { diff --git a/paddle/fluid/operators/cinn/cinn_launch_context.cc b/paddle/fluid/operators/cinn/cinn_launch_context.cc index fc23dbf88064c..0700028807fc0 100644 --- a/paddle/fluid/operators/cinn/cinn_launch_context.cc +++ b/paddle/fluid/operators/cinn/cinn_launch_context.cc @@ -42,9 +42,9 @@ #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/place.h" #include "paddle/fluid/string/printf.h" -#include "paddle/ir/core/program.h" -#include "paddle/ir/core/value.h" #include "paddle/phi/core/ddim.h" +#include "paddle/pir/core/program.h" +#include "paddle/pir/core/value.h" #include "paddle/utils/string/string_helper.h" namespace paddle { diff --git a/paddle/fluid/operators/cinn/cinn_launch_op.h b/paddle/fluid/operators/cinn/cinn_launch_op.h index 2913da9bc5c39..02e70c549cfc2 100644 --- a/paddle/fluid/operators/cinn/cinn_launch_op.h +++ b/paddle/fluid/operators/cinn/cinn_launch_op.h @@ -29,9 +29,9 @@ #include "paddle/fluid/operators/cinn/cinn_launch_context.h" #include "paddle/fluid/operators/cinn/cinn_op_helper.h" #include "paddle/fluid/platform/profiler.h" -#include "paddle/ir/core/program.h" -#include "paddle/ir/core/value.h" #include "paddle/phi/core/flags.h" +#include "paddle/pir/core/program.h" +#include "paddle/pir/core/value.h" PHI_DECLARE_bool(enable_pe_launch_cinn); PHI_DECLARE_bool(enable_interpretercore_launch_cinn); diff --git a/paddle/fluid/ir/CMakeLists.txt b/paddle/fluid/pir/CMakeLists.txt similarity index 100% rename from paddle/fluid/ir/CMakeLists.txt rename to paddle/fluid/pir/CMakeLists.txt diff --git a/paddle/fluid/pir/dialect/CMakeLists.txt b/paddle/fluid/pir/dialect/CMakeLists.txt new file mode 100644 index 0000000000000..17a73237c5fdb --- /dev/null +++ b/paddle/fluid/pir/dialect/CMakeLists.txt @@ -0,0 +1,2 @@ +add_subdirectory(operator) +add_subdirectory(kernel) diff --git 
a/paddle/fluid/ir/dialect/paddle_kernel_dialect/CMakeLists.txt b/paddle/fluid/pir/dialect/kernel/CMakeLists.txt similarity index 100% rename from paddle/fluid/ir/dialect/paddle_kernel_dialect/CMakeLists.txt rename to paddle/fluid/pir/dialect/kernel/CMakeLists.txt diff --git a/paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/CMakeLists.txt b/paddle/fluid/pir/dialect/kernel/ir/CMakeLists.txt similarity index 80% rename from paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/CMakeLists.txt rename to paddle/fluid/pir/dialect/kernel/ir/CMakeLists.txt index af5e5c4fc9016..bdfdb75410524 100644 --- a/paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/CMakeLists.txt +++ b/paddle/fluid/pir/dialect/kernel/ir/CMakeLists.txt @@ -2,4 +2,4 @@ file(GLOB PADDLE_KERNEL_DIALECT_SRCS "*.cc") cc_library( pd_kernel_dialect SRCS ${PADDLE_KERNEL_DIALECT_SRCS} - DEPS pd_dialect_core) + DEPS pd_op_dialect_core) diff --git a/paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_attribute_storage.h b/paddle/fluid/pir/dialect/kernel/ir/attribute_storage.h similarity index 88% rename from paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_attribute_storage.h rename to paddle/fluid/pir/dialect/kernel/ir/attribute_storage.h index 18312b88b8ae2..1c8b4f9150b25 100644 --- a/paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_attribute_storage.h +++ b/paddle/fluid/pir/dialect/kernel/ir/attribute_storage.h @@ -14,16 +14,16 @@ #pragma once -#include "paddle/ir/core/attribute.h" -#include "paddle/ir/core/attribute_base.h" -#include "paddle/ir/core/utils.h" #include "paddle/phi/common/data_type.h" #include "paddle/phi/core/kernel_factory.h" +#include "paddle/pir/core/attribute.h" +#include "paddle/pir/core/attribute_base.h" +#include "paddle/pir/core/utils.h" namespace paddle { namespace dialect { -struct KernelAttributeStorage : public ir::AttributeStorage { +struct KernelAttributeStorage : public pir::AttributeStorage { using ParamKey = phi::KernelKey; explicit KernelAttributeStorage(const ParamKey &key) { kernel_key_ = key; } diff --git a/paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_attribute.cc b/paddle/fluid/pir/dialect/kernel/ir/kernel_attribute.cc similarity index 89% rename from paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_attribute.cc rename to paddle/fluid/pir/dialect/kernel/ir/kernel_attribute.cc index 43ed52ffc6701..f8c23f993ca2d 100644 --- a/paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_attribute.cc +++ b/paddle/fluid/pir/dialect/kernel/ir/kernel_attribute.cc @@ -12,6 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
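For orientation in the kernel-dialect hunks that follow: KernelAttribute wraps a phi::KernelKey, and after this rename callers reach it through the pir casting API. A minimal sketch of that read path, mirroring the kernel_key() accessors changed later in this diff (GetKernelKey is an illustrative helper, not part of the patch):

// Read the phi::KernelKey stored on an operation's attribute map.
// dyn_cast yields a null attribute when the cast fails, so real callers
// verify the attribute first (see PhiKernelOp::Verify below).
#include "paddle/fluid/pir/dialect/kernel/ir/kernel_attribute.h"
#include "paddle/pir/core/operation.h"

phi::KernelKey GetKernelKey(pir::Operation* op) {
  return op->attributes()
      .at("kernel_key")
      .dyn_cast<paddle::dialect::KernelAttribute>()
      .data();
}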
-#include "paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_attribute.h" +#include "paddle/fluid/pir/dialect/kernel/ir/kernel_attribute.h" IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::KernelAttribute) diff --git a/paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_attribute.h b/paddle/fluid/pir/dialect/kernel/ir/kernel_attribute.h similarity index 86% rename from paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_attribute.h rename to paddle/fluid/pir/dialect/kernel/ir/kernel_attribute.h index fa17b823f0278..7b6bc2336813a 100644 --- a/paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_attribute.h +++ b/paddle/fluid/pir/dialect/kernel/ir/kernel_attribute.h @@ -14,14 +14,14 @@ #pragma once -#include "paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_attribute_storage.h" -#include "paddle/ir/core/attribute.h" +#include "paddle/fluid/pir/dialect/kernel/ir/attribute_storage.h" #include "paddle/phi/core/enforce.h" +#include "paddle/pir/core/attribute.h" namespace paddle { namespace dialect { -class KernelAttribute : public ir::Attribute { +class KernelAttribute : public pir::Attribute { public: using Attribute::Attribute; diff --git a/paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_dialect.cc b/paddle/fluid/pir/dialect/kernel/ir/kernel_dialect.cc similarity index 76% rename from paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_dialect.cc rename to paddle/fluid/pir/dialect/kernel/ir/kernel_dialect.cc index c2f4dfefb4d2b..592319dcfd36e 100644 --- a/paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_dialect.cc +++ b/paddle/fluid/pir/dialect/kernel/ir/kernel_dialect.cc @@ -12,26 +12,26 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_dialect.h" -#include "paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_attribute.h" -#include "paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_op.h" -#include "paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_type.h" +#include "paddle/fluid/pir/dialect/kernel/ir/kernel_dialect.h" +#include "paddle/fluid/pir/dialect/kernel/ir/kernel_attribute.h" +#include "paddle/fluid/pir/dialect/kernel/ir/kernel_op.h" +#include "paddle/fluid/pir/dialect/kernel/ir/kernel_type.h" #include "paddle/fluid/platform/init_phi.h" -#include "paddle/ir/core/ir_printer.h" #include "paddle/phi/common/place.h" #include "paddle/phi/core/ddim.h" +#include "paddle/pir/core/ir_printer.h" REGISTER_FILE_SYMBOLS(kernel_dialect); namespace paddle { namespace dialect { -PaddleKernelDialect::PaddleKernelDialect(ir::IrContext *context) - : ir::Dialect(name(), context, ir::TypeId::get()) { +KernelDialect::KernelDialect(pir::IrContext *context) + : pir::Dialect(name(), context, pir::TypeId::get()) { initialize(); } -void PaddleKernelDialect::initialize() { +void KernelDialect::initialize() { RegisterTypes(); RegisterTypes(); @@ -39,7 +39,7 @@ void PaddleKernelDialect::initialize() { RegisterAttributes(); } -void PaddleKernelDialect::PrintType(ir::Type type, std::ostream &os) const { +void KernelDialect::PrintType(pir::Type type, std::ostream &os) const { if (type.isa()) { AllocatedDenseTensorType tensor_type = type.dyn_cast(); @@ -67,16 +67,16 @@ void PaddleKernelDialect::PrintType(ir::Type type, std::ostream &os) const { } } -void PaddleKernelDialect::PrintAttribute(ir::Attribute attr, - std::ostream &os) const { +void KernelDialect::PrintAttribute(pir::Attribute attr, + std::ostream &os) const { phi::KernelKey kernel = 
attr.dyn_cast().data(); os << ""; } -void PaddleKernelDialect::PrintOperation(ir::Operation *op, - ir::IrPrinter &printer) const { +void KernelDialect::PrintOperation(pir::Operation *op, + pir::IrPrinter &printer) const { if (op->dyn_cast() || op->dyn_cast()) { auto &os = printer.os; printer.PrintOpResult(op); @@ -86,7 +86,7 @@ void PaddleKernelDialect::PrintOperation(ir::Operation *op, if (op->attributes().count("is_inplace") != 0 && op->attributes() .at("is_inplace") - .dyn_cast() + .dyn_cast() .data()) { kernel_name = kernel_name + "_"; } @@ -97,7 +97,7 @@ void PaddleKernelDialect::PrintOperation(ir::Operation *op, if (op->attributes().count("is_inplace") != 0 && op->attributes() .at("is_inplace") - .dyn_cast() + .dyn_cast() .data()) { kernel_name = kernel_name + "_"; } @@ -117,4 +117,4 @@ void PaddleKernelDialect::PrintOperation(ir::Operation *op, } // namespace dialect } // namespace paddle -IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::PaddleKernelDialect) +IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::KernelDialect) diff --git a/paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_dialect.h b/paddle/fluid/pir/dialect/kernel/ir/kernel_dialect.h similarity index 64% rename from paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_dialect.h rename to paddle/fluid/pir/dialect/kernel/ir/kernel_dialect.h index 8099e1d1da093..d2fbcadaf8cf2 100644 --- a/paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_dialect.h +++ b/paddle/fluid/pir/dialect/kernel/ir/kernel_dialect.h @@ -14,23 +14,23 @@ #pragma once -#include "paddle/ir/core/dialect.h" +#include "paddle/pir/core/dialect.h" namespace paddle { namespace dialect { -class PaddleKernelDialect : public ir::Dialect { +class KernelDialect : public pir::Dialect { public: - explicit PaddleKernelDialect(ir::IrContext* context); + explicit KernelDialect(pir::IrContext* context); static const char* name() { return "pd_kernel"; } - void PrintType(ir::Type type, std::ostream& os) const override; + void PrintType(pir::Type type, std::ostream& os) const override; - void PrintAttribute(ir::Attribute attr, std::ostream& os) const override; + void PrintAttribute(pir::Attribute attr, std::ostream& os) const override; - void PrintOperation(ir::Operation* op, - ir::IrPrinter& printer) const override; // NOLINT + void PrintOperation(pir::Operation* op, + pir::IrPrinter& printer) const override; // NOLINT private: void initialize(); @@ -39,4 +39,4 @@ class PaddleKernelDialect : public ir::Dialect { } // namespace dialect } // namespace paddle -IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::PaddleKernelDialect) +IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::KernelDialect) diff --git a/paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_op.cc b/paddle/fluid/pir/dialect/kernel/ir/kernel_op.cc similarity index 78% rename from paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_op.cc rename to paddle/fluid/pir/dialect/kernel/ir/kernel_op.cc index 4a934505aad55..62c1129f84620 100644 --- a/paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_op.cc +++ b/paddle/fluid/pir/dialect/kernel/ir/kernel_op.cc @@ -12,10 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. 
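Note what the dialect rename means at registration sites: only the C++ class name changes (PaddleKernelDialect becomes KernelDialect), while the dialect's string name "pd_kernel" and the op names built on it are untouched. A minimal sketch of the registration call, following the GetOrRegisterDialect pattern used elsewhere in this diff (the template argument here is an inferred assumption, not quoted from the patch):

// Register the renamed kernel dialect with the global IrContext.
// Ops such as "pd_kernel.phi_kernel" keep their names; only the
// C++ identifier changed.
#include "paddle/fluid/pir/dialect/kernel/ir/kernel_dialect.h"
#include "paddle/pir/core/ir_context.h"

void RegisterKernelDialect() {
  pir::IrContext* ctx = pir::IrContext::Instance();
  ctx->GetOrRegisterDialect<paddle::dialect::KernelDialect>();
}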
-#include "paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_op.h" -#include "paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_attribute.h" -#include "paddle/ir/core/builtin_attribute.h" +#include "paddle/fluid/pir/dialect/kernel/ir/kernel_op.h" +#include "paddle/fluid/pir/dialect/kernel/ir/kernel_attribute.h" #include "paddle/phi/core/enforce.h" +#include "paddle/pir/core/builtin_attribute.h" namespace paddle { namespace dialect { @@ -31,12 +31,12 @@ void PhiKernelOp::Verify() { auto& attributes = this->attributes(); PADDLE_ENFORCE(attributes.count("op_name") > 0 && - attributes.at("op_name").isa(), + attributes.at("op_name").isa(), phi::errors::PreconditionNotMet( "Type of attribute: op_name is not right.")); PADDLE_ENFORCE(attributes.count("kernel_name") > 0 && - attributes.at("kernel_name").isa(), + attributes.at("kernel_name").isa(), phi::errors::PreconditionNotMet( "Type of attribute: kernel_name is not right.")); @@ -47,10 +47,13 @@ void PhiKernelOp::Verify() { } std::string PhiKernelOp::op_name() { - return attributes().at("op_name").dyn_cast().AsString(); + return attributes().at("op_name").dyn_cast().AsString(); } std::string PhiKernelOp::kernel_name() { - return attributes().at("kernel_name").dyn_cast().AsString(); + return attributes() + .at("kernel_name") + .dyn_cast() + .AsString(); } phi::KernelKey PhiKernelOp::kernel_key() { return attributes().at("kernel_key").dyn_cast().data(); @@ -67,12 +70,12 @@ void LegacyKernelOp::Verify() { auto& attributes = this->attributes(); PADDLE_ENFORCE(attributes.count("op_name") > 0 && - attributes.at("op_name").isa(), + attributes.at("op_name").isa(), phi::errors::PreconditionNotMet( "Type of attribute: op_name is not right.")); PADDLE_ENFORCE(attributes.count("kernel_name") > 0 && - attributes.at("kernel_name").isa(), + attributes.at("kernel_name").isa(), phi::errors::PreconditionNotMet( "Type of attribute: kernel_name is not right.")); @@ -83,10 +86,13 @@ void LegacyKernelOp::Verify() { } std::string LegacyKernelOp::op_name() { - return attributes().at("op_name").dyn_cast().AsString(); + return attributes().at("op_name").dyn_cast().AsString(); } std::string LegacyKernelOp::kernel_name() { - return attributes().at("kernel_name").dyn_cast().AsString(); + return attributes() + .at("kernel_name") + .dyn_cast() + .AsString(); } phi::KernelKey LegacyKernelOp::kernel_key() { return attributes().at("kernel_key").dyn_cast().data(); diff --git a/paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_op.h b/paddle/fluid/pir/dialect/kernel/ir/kernel_op.h similarity index 89% rename from paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_op.h rename to paddle/fluid/pir/dialect/kernel/ir/kernel_op.h index 0a574bc60b218..8a18959665e0c 100644 --- a/paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_op.h +++ b/paddle/fluid/pir/dialect/kernel/ir/kernel_op.h @@ -14,13 +14,13 @@ #pragma once -#include "paddle/ir/core/builder.h" -#include "paddle/ir/core/op_base.h" #include "paddle/phi/core/kernel_factory.h" +#include "paddle/pir/core/builder.h" +#include "paddle/pir/core/op_base.h" namespace paddle { namespace dialect { -class PhiKernelOp : public ir::Op { +class PhiKernelOp : public pir::Op { public: using Op::Op; static const char *name() { return "pd_kernel.phi_kernel"; } @@ -32,7 +32,7 @@ class PhiKernelOp : public ir::Op { void Verify(); }; -class LegacyKernelOp : public ir::Op { +class LegacyKernelOp : public pir::Op { public: using Op::Op; static const char *name() { return "pd_kernel.legacy_kernel"; } diff --git 
a/paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_type.cc b/paddle/fluid/pir/dialect/kernel/ir/kernel_type.cc similarity index 91% rename from paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_type.cc rename to paddle/fluid/pir/dialect/kernel/ir/kernel_type.cc index 9740f1296a51b..60a722f13dab5 100644 --- a/paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_type.cc +++ b/paddle/fluid/pir/dialect/kernel/ir/kernel_type.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_type.h" +#include "paddle/fluid/pir/dialect/kernel/ir/kernel_type.h" namespace paddle { namespace dialect { @@ -21,7 +21,7 @@ const phi::Place& AllocatedDenseTensorType::place() const { return storage()->place_; } -const ir::Type& AllocatedDenseTensorType::dtype() const { +const pir::Type& AllocatedDenseTensorType::dtype() const { return storage()->dense_tensor_type_.dtype(); } @@ -45,7 +45,7 @@ const phi::Place& AllocatedSelectedRowsType::place() const { return storage()->place_; } -const ir::Type& AllocatedSelectedRowsType::dtype() const { +const pir::Type& AllocatedSelectedRowsType::dtype() const { return storage()->selected_rows_type_.dtype(); } diff --git a/paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_type.h b/paddle/fluid/pir/dialect/kernel/ir/kernel_type.h similarity index 74% rename from paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_type.h rename to paddle/fluid/pir/dialect/kernel/ir/kernel_type.h index b00f2e5320dde..e2d851020c197 100644 --- a/paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_type.h +++ b/paddle/fluid/pir/dialect/kernel/ir/kernel_type.h @@ -14,30 +14,30 @@ #pragma once -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h" -#include "paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_type_storage.h" -#include "paddle/ir/core/type.h" +#include "paddle/fluid/pir/dialect/kernel/ir/type_storage.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_type.h" +#include "paddle/pir/core/type.h" namespace paddle { namespace dialect { -class AllocatedDenseTensorType : public ir::Type { +class AllocatedDenseTensorType : public pir::Type { public: using Type::Type; DECLARE_TYPE_UTILITY_FUNCTOR(AllocatedDenseTensorType, AllocatedDenseTensorTypeStorage); - static AllocatedDenseTensorType get(ir::IrContext *ctx, + static AllocatedDenseTensorType get(pir::IrContext *ctx, const phi::Place &place, dialect::DenseTensorType type) { - return ir::TypeManager::template get( + return pir::TypeManager::template get( ctx, place, type); } - static AllocatedDenseTensorType get(ir::IrContext *ctx, + static AllocatedDenseTensorType get(pir::IrContext *ctx, const phi::Place &place, - const ir::Type &dtype, + const pir::Type &dtype, const phi::DDim &dims, const phi::DataLayout &layout, const phi::LoD &lod, @@ -45,13 +45,13 @@ class AllocatedDenseTensorType : public ir::Type { dialect::DenseTensorType dense_tensor_type = dialect::DenseTensorType::get(ctx, dtype, dims, layout, lod, offset); - return ir::TypeManager::template get( + return pir::TypeManager::template get( ctx, place, dense_tensor_type); } const phi::Place &place() const; - const ir::Type &dtype() const; + const pir::Type &dtype() const; const phi::DDim &dims() const; @@ -62,23 +62,23 @@ class AllocatedDenseTensorType : public ir::Type { const size_t &offset() const; }; -class AllocatedSelectedRowsType : public ir::Type { +class AllocatedSelectedRowsType : public pir::Type 
{ public: using Type::Type; DECLARE_TYPE_UTILITY_FUNCTOR(AllocatedSelectedRowsType, AllocatedSelectedRowsTypeStorage); - static AllocatedSelectedRowsType get(ir::IrContext *ctx, + static AllocatedSelectedRowsType get(pir::IrContext *ctx, const phi::Place &place, dialect::SelectedRowsType type) { - return ir::TypeManager::template get( + return pir::TypeManager::template get( ctx, place, type); } - static AllocatedSelectedRowsType get(ir::IrContext *ctx, + static AllocatedSelectedRowsType get(pir::IrContext *ctx, const phi::Place &place, - const ir::Type &dtype, + const pir::Type &dtype, const phi::DDim &dims, const phi::DataLayout &layout, const phi::LoD &lod, @@ -86,13 +86,13 @@ class AllocatedSelectedRowsType : public ir::Type { dialect::SelectedRowsType type = dialect::SelectedRowsType::get(ctx, dtype, dims, layout, lod, offset); - return ir::TypeManager::template get( + return pir::TypeManager::template get( ctx, place, type); } const phi::Place &place() const; - const ir::Type &dtype() const; + const pir::Type &dtype() const; const phi::DDim &dims() const; diff --git a/paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_type_storage.h b/paddle/fluid/pir/dialect/kernel/ir/type_storage.h similarity index 72% rename from paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_type_storage.h rename to paddle/fluid/pir/dialect/kernel/ir/type_storage.h index 1913dd6e6346c..46622587e51f5 100644 --- a/paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_type_storage.h +++ b/paddle/fluid/pir/dialect/kernel/ir/type_storage.h @@ -16,10 +16,10 @@ #include -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h" -#include "paddle/ir/core/type.h" -#include "paddle/ir/core/utils.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_type.h" #include "paddle/phi/core/tensor_meta.h" +#include "paddle/pir/core/type.h" +#include "paddle/pir/core/utils.h" namespace paddle { namespace dialect { @@ -30,7 +30,7 @@ namespace dialect { /// following methods: (1)declare ParamKey, (2)define Construction method, /// (3)define HashValue method, (4)overload operator==. /// -struct AllocatedDenseTensorTypeStorage : public ir::TypeStorage { +struct AllocatedDenseTensorTypeStorage : public pir::TypeStorage { using Place = phi::Place; /// /// \brief Declare ParamKey according to parameter type. @@ -56,18 +56,19 @@ struct AllocatedDenseTensorTypeStorage : public ir::TypeStorage { static std::size_t HashValue(const ParamKey& key) { std::size_t hash_value = 0; // hash place - hash_value = ir::hash_combine(hash_value, std::get<0>(key).HashValue()); + hash_value = pir::hash_combine(hash_value, std::get<0>(key).HashValue()); // hash dtype auto dense_tensor_type = std::get<1>(key); - hash_value = ir::hash_combine(hash_value, - dialect::DenseTensorTypeStorage::HashValue( - dialect::DenseTensorTypeStorage::ParamKey( - dense_tensor_type.dtype(), - dense_tensor_type.dims(), - dense_tensor_type.data_layout(), - dense_tensor_type.lod(), - dense_tensor_type.offset()))); + hash_value = + pir::hash_combine(hash_value, + dialect::DenseTensorTypeStorage::HashValue( + dialect::DenseTensorTypeStorage::ParamKey( + dense_tensor_type.dtype(), + dense_tensor_type.dims(), + dense_tensor_type.data_layout(), + dense_tensor_type.lod(), + dense_tensor_type.offset()))); return hash_value; } @@ -92,7 +93,7 @@ struct AllocatedDenseTensorTypeStorage : public ir::TypeStorage { /// \brief Define Parametric TypeStorage for AllocatedSelectedRowsTypeStorage. 
/// /// -struct AllocatedSelectedRowsTypeStorage : public ir::TypeStorage { +struct AllocatedSelectedRowsTypeStorage : public pir::TypeStorage { using Place = phi::Place; /// /// \brief Declare ParamKey according to parameter type. @@ -118,18 +119,19 @@ struct AllocatedSelectedRowsTypeStorage : public ir::TypeStorage { static std::size_t HashValue(const ParamKey& key) { std::size_t hash_value = 791; // hash place - hash_value = ir::hash_combine(hash_value, std::get<0>(key).HashValue()); + hash_value = pir::hash_combine(hash_value, std::get<0>(key).HashValue()); // hash dtype auto selected_rows_type = std::get<1>(key); - hash_value = ir::hash_combine(hash_value, - dialect::DenseTensorTypeStorage::HashValue( - dialect::DenseTensorTypeStorage::ParamKey( - selected_rows_type.dtype(), - selected_rows_type.dims(), - selected_rows_type.data_layout(), - selected_rows_type.lod(), - selected_rows_type.offset()))); + hash_value = + pir::hash_combine(hash_value, + dialect::DenseTensorTypeStorage::HashValue( + dialect::DenseTensorTypeStorage::ParamKey( + selected_rows_type.dtype(), + selected_rows_type.dims(), + selected_rows_type.data_layout(), + selected_rows_type.lod(), + selected_rows_type.offset()))); return hash_value; } diff --git a/paddle/fluid/ir/dialect/op_generator/api_gen.py b/paddle/fluid/pir/dialect/op_generator/api_gen.py similarity index 94% rename from paddle/fluid/ir/dialect/op_generator/api_gen.py rename to paddle/fluid/pir/dialect/op_generator/api_gen.py index cae035c657b69..7d6787ef7707e 100644 --- a/paddle/fluid/ir/dialect/op_generator/api_gen.py +++ b/paddle/fluid/pir/dialect/op_generator/api_gen.py @@ -25,11 +25,11 @@ #include -#include "paddle/ir/core/value.h" +#include "paddle/pir/core/value.h" #include "paddle/phi/common/data_type.h" #include "paddle/phi/common/place.h" #include "paddle/phi/common/scalar.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_api.h" +#include "paddle/fluid/pir/dialect/operator/ir/manual_api.h" {body} @@ -37,11 +37,11 @@ CPP_FILE_TEMPLATE = """ -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_api.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/api_builder.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_op.h" -#include "paddle/ir/core/builder.h" -#include "paddle/ir/core/builtin_op.h" +#include "paddle/fluid/pir/dialect/operator/ir/pd_api.h" +#include "paddle/fluid/pir/dialect/operator/ir/api_builder.h" +#include "paddle/fluid/pir/dialect/operator/ir/pd_op.h" +#include "paddle/pir/core/builder.h" +#include "paddle/pir/core/builtin_op.h" {body} @@ -71,16 +71,16 @@ """ COMBINE_OP_TEMPLATE = """ - auto {op_name} = APIBuilder::Instance().GetBuilder()->Build({in_name});""" + auto {op_name} = APIBuilder::Instance().GetBuilder()->Build({in_name});""" SPLIT_OP_TEMPLATE = """ - auto {op_name} = APIBuilder::Instance().GetBuilder()->Build({in_name});""" + auto {op_name} = APIBuilder::Instance().GetBuilder()->Build({in_name});""" COMPUTE_OP_TEMPLATE = """ paddle::dialect::{op_class_name} {op_inst_name} = APIBuilder::Instance().GetBuilder()->Build({args});""" -OP_RESULT = 'ir::OpResult' -VECTOR_TYPE = 'ir::VectorType' +OP_RESULT = 'pir::OpResult' +VECTOR_TYPE = 'pir::VectorType' PD_MANUAL_OP_LIST = ['add_n'] @@ -91,9 +91,9 @@ def get_op_class_name(op_name): class CodeGen: def __init__(self) -> None: self._type_map = { - 'paddle::dialect::DenseTensorType': 'ir::OpResult', - 'paddle::dialect::SelectedRowsType': 'ir::OpResult', - 'ir::VectorType': 'std::vector', + 'paddle::dialect::DenseTensorType': 'pir::OpResult', + 
'paddle::dialect::SelectedRowsType': 'pir::OpResult', + 'pir::VectorType': 'std::vector', } def _parse_yaml(self, op_yaml_files, op_compat_yaml_file): diff --git a/paddle/fluid/ir/dialect/op_generator/op_build_gen.py b/paddle/fluid/pir/dialect/op_generator/op_build_gen.py similarity index 89% rename from paddle/fluid/ir/dialect/op_generator/op_build_gen.py rename to paddle/fluid/pir/dialect/op_generator/op_build_gen.py index 66d1094c9e5fc..9aa02ca01780f 100644 --- a/paddle/fluid/ir/dialect/op_generator/op_build_gen.py +++ b/paddle/fluid/pir/dialect/op_generator/op_build_gen.py @@ -42,16 +42,16 @@ def GenBuildInputArgsStr( attr_args_is_map=False, ): ''' - Example: ir::Builder &builder, ir::OperationArgument &argument, ir::OpResult x_, phi::DataType dtype=phi::DataType::UNDEFINED, phi::Place place={} + Example: pir::Builder &builder, pir::OperationArgument &argument, pir::OpResult x_, phi::DataType dtype=phi::DataType::UNDEFINED, phi::Place place={} ''' # add inputs - build_args_str = "ir::Builder &builder, ir::OperationArgument &argument" + build_args_str = "pir::Builder &builder, pir::OperationArgument &argument" if len(op_input_name_list) > 0: for input_name in op_input_name_list: - build_args_str += ", ir::OpResult " + input_name + "_" + build_args_str += ", pir::OpResult " + input_name + "_" if attr_args_is_map: - build_args_str += ", ir::AttributeMap attributes" + build_args_str += ", pir::AttributeMap attributes" else: if not mutable_attr_is_input: # add attributes @@ -86,7 +86,7 @@ def GenBuildInputArgsStr( # add mutable attributes as inputs if len(op_mutable_attribute_name_list) > 0: for mutable_attr in op_mutable_attribute_name_list: - build_args_str += ", ir::OpResult " + mutable_attr + "_" + build_args_str += ", pir::OpResult " + mutable_attr + "_" # add non-mutable attributes for attr_idx in range(len(op_non_mutable_attribute_name_list)): @@ -146,11 +146,11 @@ def GenBuildInserFullForMutableAttribute( build_mutable_attribute = "" BUILD_INTARRAY_ATTRIBUTE_TEMPLATE = """ // Generate int_array mutable attribute: {attr_name} paddle::dialect::FullIntArrayOp full_{attr_name}_op = builder.Build({attr_name}, {phi_dtype}, phi::CPUPlace()); - ir::OpResult {attr_name}_ = full_{attr_name}_op->result(0); + pir::OpResult {attr_name}_ = full_{attr_name}_op->result(0); """ BUILD_SCALAR_ATTRIBUTE_TEMPLATE = """ // Generate scalar mutable attribute: {attr_name} paddle::dialect::FullOp full_{attr_name}_op = builder.Build(std::vector{{1}}, {attr_name}, {phi_dtype}, phi::CPUPlace()); - ir::OpResult {attr_name}_ = full_{attr_name}_op->result(0); + pir::OpResult {attr_name}_ = full_{attr_name}_op->result(0); """ for idx in range(len(op_mutable_attribute_name_list)): attr_name = op_mutable_attribute_name_list[idx] @@ -177,7 +177,7 @@ def GenBuildInserFullForMutableAttribute( def GenBuildInputs(op_input_name_list, op_mutable_attribute_name_list): - BUILD_INPUT_TEMPLATE = """ std::vector argument_inputs = {{{inputs_args}}}; + BUILD_INPUT_TEMPLATE = """ std::vector argument_inputs = {{{inputs_args}}}; argument.AddOperands(argument_inputs.begin(), argument_inputs.end()); """ build_input_str = ' VLOG(4) << "Builder construction inputs";\n' @@ -194,24 +194,25 @@ def GenBuildInputs(op_input_name_list, op_mutable_attribute_name_list): def GenBuildAttributes( op_non_mutable_attribute_name_list, op_non_mutable_attribute_type_list ): - INTARRAY_STR_TEMPLATE = """ ir::Attribute attr_{attr_name} = {op_attribute_type}::get(ir::IrContext::Instance(), phi::IntArray({attr})); + INTARRAY_STR_TEMPLATE = """ 
pir::Attribute attr_{attr_name} = {op_attribute_type}::get(pir::IrContext::Instance(), phi::IntArray({attr})); """ - SCALAR_STR_TEMPLATE = """ ir::Attribute attr_{attr_name} = paddle::dialect::TransToIrAttribute({attr}, ir::IrContext::Instance()); + SCALAR_STR_TEMPLATE = """ pir::Attribute attr_{attr_name} = paddle::dialect::TransToIrAttribute({attr}, pir::IrContext::Instance()); """ - STR_TEMPLATE = """ ir::Attribute attr_{attr_name} = {op_attribute_type}::get(ir::IrContext::Instance(), {attr}); + STR_TEMPLATE = """ pir::Attribute attr_{attr_name} = {op_attribute_type}::get(pir::IrContext::Instance(), {attr}); """ - ARRAY_ATTRIBUTE_TEMPLATE = """ std::vector vec_{attr_name}; + ARRAY_ATTRIBUTE_TEMPLATE = """ std::vector vec_{attr_name}; for (size_t i = 0; i < static_cast({attr_size}); i++) {{ {create_attribute} vec_{attr_name}.push_back(attr_{attr_name}); }} - ir::Attribute attr_{attr_name} = ir::ArrayAttribute::get(ir::IrContext::Instance(), vec_{attr_name}); + pir::Attribute attr_{attr_name} = pir::ArrayAttribute::get(pir::IrContext::Instance(), vec_{attr_name}); """ attr_str = ' VLOG(4) << "Builder construction attributes";\n' + array_attr_type = "pir::ArrayAttribute<" for idx in range(len(op_non_mutable_attribute_name_list)): - if "ir::ArrayAttribute<" in op_non_mutable_attribute_type_list[idx]: + if array_attr_type in op_non_mutable_attribute_type_list[idx]: inner_attribute_type = op_non_mutable_attribute_type_list[idx][ - 19:-1 + len(array_attr_type) : -1 ] if inner_attribute_type == "paddle::dialect::IntArrayAttribute": attr_str += ARRAY_ATTRIBUTE_TEMPLATE.format( @@ -322,7 +323,7 @@ def GenBuildOutputs( CREATE_SCALAR_MUTABLE_ATTRIBUE_TEMPLATE = """ {dtype} {name} = {name}_.owner()->dyn_cast().attributes().at("value").dyn_cast().data().to<{dtype}>(); (void){name};\n""" CREATE_INTARRAY_MUTABLE_ATTRIBUE_WITH_UNKONW_DATA_TEMPLATE = """ phi::IntArray {name}; - if ({name}_.owner()->info().id() == ir::TypeId::get()) {{ + if ({name}_.owner()->info().id() == pir::TypeId::get()) {{ {name} = std::move(phi::IntArray({name}_.owner() ->dyn_cast() .attributes() @@ -330,8 +331,8 @@ def GenBuildOutputs( .dyn_cast() .data() .GetData())); - }} else if ({name}_.type().isa()) {{ - size_t {name}_size = {name}_.type().dyn_cast().size(); + }} else if ({name}_.type().isa()) {{ + size_t {name}_size = {name}_.type().dyn_cast().size(); {name} = std::move(phi::IntArray(std::vector({name}_size, -1))); {name}.SetFromTensor(true); }} else if ({name}_.type().isa()) {{ @@ -343,7 +344,7 @@ def GenBuildOutputs( }}\n""" CREATE_SCALAR_MUTABLE_ATTRIBUE_WITH_UNKONW_DATA_TEMPLATE = """ phi::Scalar {name}; - if ({name}_.owner()->info().id() == ir::TypeId::get()) {{ + if ({name}_.owner()->info().id() == pir::TypeId::get()) {{ {name} = std::move(phi::Scalar({name}_.owner() ->dyn_cast() .attributes() @@ -373,8 +374,8 @@ def GenBuildOutputs( # Prepar input type for idx in range(len(op_input_name_list)): # is a vector - if 'ir::VectorType' in op_input_type_list[idx]: - build_output_str += " ir::VectorType {name} = {name}_.type().dyn_cast(); (void){name};\n".format( + if 'pir::VectorType' in op_input_type_list[idx]: + build_output_str += " pir::VectorType {name} = {name}_.type().dyn_cast(); (void){name};\n".format( name=op_input_name_list[idx] ) # is a Tensor @@ -414,7 +415,7 @@ def GenBuildOutputs( ) ) # string - elif attr_dtype[0] == "ir::StrAttribute": + elif attr_dtype[0] == "pir::StrAttribute": build_output_str += "" else: assert "mutable attribtue type is not right." 
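The attribute templates above all expand to one small C++ pattern: build each element attribute, collect the elements into a vector, then wrap the vector in a pir::ArrayAttribute. A hand-written equivalent of what ARRAY_ATTRIBUTE_TEMPLATE emits for a bool[] attribute (MakeBoolArrayAttr is an illustrative name, not generated code):

#include <vector>

#include "paddle/pir/core/builtin_attribute.h"
#include "paddle/pir/core/ir_context.h"

// Build a pir::ArrayAttribute element by element, exactly as the
// generated build functions and the translator's stop-gradient code do.
pir::Attribute MakeBoolArrayAttr(pir::IrContext* ctx,
                                 const std::vector<bool>& flags) {
  std::vector<pir::Attribute> elements;
  for (bool flag : flags) {
    elements.push_back(pir::BoolAttribute::get(ctx, flag));
  }
  return pir::ArrayAttribute::get(ctx, elements);
}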
@@ -430,7 +431,7 @@ def GenBuildOutputs( ) not in infer_meta_args: # is a vector if ( - 'ir::VectorType' + 'pir::VectorType' in op_input_type_list[ op_input_name_list.index( op_infer_meta_map['param'][idx] @@ -456,7 +457,7 @@ def GenBuildOutputs( # Prepare outputs_meta_tensor for infer meta for idx in range(len(op_output_name_list)): # is a vector - if 'ir::VectorType' in op_output_type_list[idx]: + if 'pir::VectorType' in op_output_type_list[idx]: build_output_str += CREATE_OUTPUT_VEC_METATENSOR_TEMPLATE.format( name=op_output_name_list[idx], output_size=op_output_size_list[idx], @@ -488,23 +489,23 @@ def GenBuildOutputs( ) # use dense_{name} or vec_dense_{name} to create Outputs type - build_output_str += "\n std::vector argument_outputs;" + build_output_str += "\n std::vector argument_outputs;" CREATE_OUTPUT_DENSE_TENSOR_TEMPLATE = """ - ir::Type {name}_dense_tensor_type = paddle::dialect::DenseTensorType::get(ir::IrContext::Instance(), paddle::dialect::TransToIrDataType(dense_{name}.dtype()), dense_{name}.dims(), dense_{name}.layout(), dense_{name}.lod(), dense_{name}.offset()); + pir::Type {name}_dense_tensor_type = paddle::dialect::DenseTensorType::get(pir::IrContext::Instance(), paddle::dialect::TransToIrDataType(dense_{name}.dtype()), dense_{name}.dims(), dense_{name}.layout(), dense_{name}.lod(), dense_{name}.offset()); argument_outputs.push_back({name}_dense_tensor_type); """ CREATE_OUTPUT_VEC_DENSE_TENSOR_TEMPLATE = """ - std::vector {name}_types; + std::vector {name}_types; for (size_t i=0; i < static_cast({output_size}); i++) {{ - {name}_types.push_back(paddle::dialect::DenseTensorType::get(ir::IrContext::Instance(), paddle::dialect::TransToIrDataType(vec_dense_{name}[i].dtype()), vec_dense_{name}[i].dims(), vec_dense_{name}[i].layout(), vec_dense_{name}[i].lod(), vec_dense_{name}[i].offset())); + {name}_types.push_back(paddle::dialect::DenseTensorType::get(pir::IrContext::Instance(), paddle::dialect::TransToIrDataType(vec_dense_{name}[i].dtype()), vec_dense_{name}[i].dims(), vec_dense_{name}[i].layout(), vec_dense_{name}[i].lod(), vec_dense_{name}[i].offset())); }} - ir::Type {name}_vector_type = ir::VectorType::get(ir::IrContext::Instance(), {name}_types); + pir::Type {name}_vector_type = pir::VectorType::get(pir::IrContext::Instance(), {name}_types); argument_outputs.push_back({name}_vector_type); """ for idx in range(len(op_output_name_list)): # is a vector - if 'ir::VectorType' in op_output_type_list[idx]: + if 'pir::VectorType' in op_output_type_list[idx]: build_output_str += CREATE_OUTPUT_VEC_DENSE_TENSOR_TEMPLATE.format( name=op_output_name_list[idx], output_size=op_output_size_list[idx], @@ -606,12 +607,12 @@ def gen_build_func_str( {attr_type} {attribute_name} = attributes.at("{attribute_name}").dyn_cast<{attr_ir_type}>().data(); """ GET_STR_ATTRIBUTES_FROM_MAP_TEMPLATE = """ - {attr_type} {attribute_name} = attributes.at("{attribute_name}").dyn_cast().AsString(); + {attr_type} {attribute_name} = attributes.at("{attribute_name}").dyn_cast().AsString(); """ GET_ARRAY_ATTRIBUTE_FROM_MAP_TEMPLATE = """ {attr_type} {attribute_name}; - for (size_t i = 0; i < attributes.at("{attribute_name}").dyn_cast().size(); i++) {{ - {attribute_name}.push_back(attributes.at("{attribute_name}").dyn_cast().at(i).dyn_cast<{inner_type}>().{data_name}()); + for (size_t i = 0; i < attributes.at("{attribute_name}").dyn_cast().size(); i++) {{ + {attribute_name}.push_back(attributes.at("{attribute_name}").dyn_cast().at(i).dyn_cast<{inner_type}>().{data_name}()); }} """ 
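Read in the other direction, the GET_*_ATTRIBUTES_FROM_MAP templates in this hunk expand to plain dyn_cast chains over a pir::AttributeMap. Hand-expanded sketches of the str and bool[] cases (the attribute names "name" and "flags" are illustrative, and attributes is assumed to be a pir::AttributeMap in scope):

// What GET_STR_ATTRIBUTES_FROM_MAP_TEMPLATE emits for a str attribute:
std::string name =
    attributes.at("name").dyn_cast<pir::StrAttribute>().AsString();

// What GET_ARRAY_ATTRIBUTE_FROM_MAP_TEMPLATE emits for a bool[] attribute:
std::vector<bool> flags;
for (size_t i = 0;
     i < attributes.at("flags").dyn_cast<pir::ArrayAttribute>().size();
     i++) {
  flags.push_back(attributes.at("flags")
                      .dyn_cast<pir::ArrayAttribute>()
                      .at(i)
                      .dyn_cast<pir::BoolAttribute>()
                      .data());
}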
GET_INTARRAY_ATTRIBUTE_FROM_MAP_TEMPLATE = """ @@ -622,6 +623,7 @@ def gen_build_func_str( """ get_attributes_str = "" + array_attr_str = "pir::ArrayAttribute" if attr_args_is_map: for idx in range(len(op_attribute_name_list)): attr_type = op_attribute_build_arg_type_list[idx] @@ -629,10 +631,13 @@ def gen_build_func_str( attr_type = attr_type.replace("&", "") # if op_attribute_build_arg_type_list[idx] == "const std::vector&": # attr_type = "std::vector" - if "ir::ArrayAttribute" in op_attribute_type_list[idx]: - inner_type = op_attribute_type_list[idx][19:-1] + + if array_attr_str in op_attribute_type_list[idx]: + inner_type = op_attribute_type_list[idx][ + len(array_attr_str) + 1 : -1 + ] data_name = "data" - if inner_type == "ir::StrAttribute": + if inner_type == "pir::StrAttribute": data_name = "AsString" get_attributes_str += ( GET_ARRAY_ATTRIBUTE_FROM_MAP_TEMPLATE.format( @@ -662,7 +667,7 @@ def gen_build_func_str( attribute_name=op_attribute_name_list[idx], ) ) - elif "ir::StrAttribute" in op_attribute_type_list[idx]: + elif "pir::StrAttribute" in op_attribute_type_list[idx]: get_attributes_str += ( GET_STR_ATTRIBUTES_FROM_MAP_TEMPLATE.format( attr_type=attr_type, diff --git a/paddle/fluid/ir/dialect/op_generator/op_gen.py b/paddle/fluid/pir/dialect/op_generator/op_gen.py similarity index 94% rename from paddle/fluid/ir/dialect/op_generator/op_gen.py rename to paddle/fluid/pir/dialect/op_generator/op_gen.py index 8663d23059d45..2c84d0a404131 100644 --- a/paddle/fluid/ir/dialect/op_generator/op_gen.py +++ b/paddle/fluid/pir/dialect/op_generator/op_gen.py @@ -41,22 +41,22 @@ #undef GET_OP_LIST {op_declare} #else -// This file is generated by "paddle/fluid/ir/dialect/op_generator/op_gen.py" +// This file is generated by "paddle/fluid/pir/dialect/op_generator/op_gen.py" #include -#include "paddle/ir/core/builder.h" -#include "paddle/ir/core/operation_utils.h" -#include "paddle/ir/core/op_base.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/utils/utils.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/utils/op_yaml_info_util.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/interface/op_yaml_info.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/interface/infermeta.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/interface/vjp.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/trait/inplace.h" +#include "paddle/pir/core/builder.h" +#include "paddle/pir/core/operation_utils.h" +#include "paddle/pir/core/op_base.h" +#include "paddle/fluid/pir/dialect/operator/utils/utils.h" +#include "paddle/fluid/pir/dialect/operator/utils/op_yaml_info_util.h" +#include "paddle/fluid/pir/dialect/operator/interface/op_yaml_info.h" +#include "paddle/fluid/pir/dialect/operator/interface/infermeta.h" +#include "paddle/fluid/pir/dialect/operator/interface/vjp.h" +#include "paddle/fluid/pir/dialect/operator/trait/inplace.h" #include "paddle/fluid/framework/infershape_utils.h" #include "paddle/phi/core/infermeta_utils.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_op.h" +#include "paddle/fluid/pir/dialect/operator/ir/manual_op.h" {input} @@ -72,7 +72,7 @@ """ OP_DECLARE_TEMPLATE = """ -class {op_name} : public ir::Op<{op_name}{interfaces}{traits}> {{ +class {op_name} : public pir::Op<{op_name}{interfaces}{traits}> {{ public: using Op::Op; static const char *name() {{ return "{dialect_op_name}"; }} @@ -97,15 +97,15 @@ class {op_name} : public ir::Op<{op_name}{interfaces}{traits}> {{ # ===================================== # String Template for cc file code gen # 
===================================== -CC_FILE_TEMPLATE = """// This file is generated by "paddle/fluid/ir/dialect/op_generator/op_gen.py" +CC_FILE_TEMPLATE = """// This file is generated by "paddle/fluid/pir/dialect/op_generator/op_gen.py" #include "{h_file}" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_attribute.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_meta_tensor.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/builtin_type.h" -#include "paddle/ir/core/builtin_op.h" -#include "paddle/ir/core/ir_context.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_type.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_attribute.h" +#include "paddle/fluid/pir/dialect/operator/ir/meta_tensor.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/builtin_type.h" +#include "paddle/pir/core/builtin_op.h" +#include "paddle/pir/core/ir_context.h" #include "paddle/phi/core/enforce.h" #include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/infermeta/binary.h" @@ -117,7 +117,7 @@ class {op_name} : public ir::Op<{op_name}{interfaces}{traits}> {{ #include "paddle/phi/infermeta/fusion.h" #include "paddle/phi/api/lib/utils/allocator.h" #include "paddle/fluid/primitive/rule/vjp/vjp.h" -#include "paddle/ir/core/op_base.h" +#include "paddle/pir/core/op_base.h" {input} @@ -126,13 +126,13 @@ class {op_name} : public ir::Op<{op_name}{interfaces}{traits}> {{ # ===================================== # String Template for pd_op_vjp.cc file code gen # ===================================== -VJP_CC_FILE_TEMPLATE = """// This file is generated by "paddle/fluid/ir/dialect/op_generator/op_gen.py" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_attribute.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_op.h" +VJP_CC_FILE_TEMPLATE = """// This file is generated by "paddle/fluid/pir/dialect/op_generator/op_gen.py" +#include "paddle/fluid/pir/dialect/operator/ir/op_attribute.h" +#include "paddle/fluid/pir/dialect/operator/ir/pd_op.h" #include "paddle/fluid/primitive/rule/vjp/vjp.h" #include "paddle/fluid/primitive/type/lazy_tensor.h" -#include "paddle/ir/core/builtin_op.h" -#include "paddle/ir/core/op_base.h" +#include "paddle/pir/core/builtin_op.h" +#include "paddle/pir/core/op_base.h" #include "paddle/phi/common/int_array.h" namespace paddle {{ @@ -166,11 +166,11 @@ class {op_name} : public ir::Op<{op_name}{interfaces}{traits}> {{ """ scalar_type_maps = { - 'int': 'ir::Int32Attribute', - 'int64_t': 'ir::Int64Attribute', - 'float': 'ir::FloatAttribute', - 'dobule': 'ir::DoubleAttribute', - 'bool': 'ir::BoolAttribute', + 'int': 'pir::Int32Attribute', + 'int64_t': 'pir::Int64Attribute', + 'float': 'pir::FloatAttribute', + 'dobule': 'pir::DoubleAttribute', + 'bool': 'pir::BoolAttribute', } _NO_NEED_GEN_OPS = {'add_n', 'add_n_', 'add_n_with_kernel', 'split_grad'} @@ -255,33 +255,33 @@ def __init__(self, op_yaml_item, op_compat_item): self.attr_types_map = { 'IntArray': ['paddle::dialect::IntArrayAttribute', 'IntArray'], 'Scalar': ['paddle::dialect::ScalarAttribute', 'Scalar'], - 'Scalar(int)': ['ir::Int32Attribute', 'int'], - 'Scalar(int64_t)': ['ir::Int64Attribute', 'int64_t'], - 'Scalar(float)': ['ir::FloatAttribute', 'float'], - 'Scalar(dobule)': ['ir::DoubleAttribute', 'dobule'], + 'Scalar(int)': ['pir::Int32Attribute', 'int'], + 'Scalar(int64_t)': ['pir::Int64Attribute', 'int64_t'], + 'Scalar(float)': ['pir::FloatAttribute', 'float'], + 'Scalar(dobule)': 
['pir::DoubleAttribute', 'dobule'], 'Scalar[]': [ - 'ir::ArrayAttribute', + 'pir::ArrayAttribute', 'const std::vector&', ], - 'int': ['ir::Int32Attribute', 'int'], - 'int32_t': ['ir::Int32Attribute', 'int32_t'], - 'int64_t': ['ir::Int64Attribute', 'int64_t'], - 'long': ['ir::LongAttribute', 'long'], - 'size_t': ['ir::Size_tAttribute', 'size_t'], - 'float': ['ir::FloatAttribute', 'float'], + 'int': ['pir::Int32Attribute', 'int'], + 'int32_t': ['pir::Int32Attribute', 'int32_t'], + 'int64_t': ['pir::Int64Attribute', 'int64_t'], + 'long': ['pir::LongAttribute', 'long'], + 'size_t': ['pir::Size_tAttribute', 'size_t'], + 'float': ['pir::FloatAttribute', 'float'], 'float[]': [ - 'ir::ArrayAttribute', + 'pir::ArrayAttribute', 'const std::vector&', ], - 'double': ['ir::DoubleAttribute', 'double'], - 'bool': ['ir::BoolAttribute', 'bool'], + 'double': ['pir::DoubleAttribute', 'double'], + 'bool': ['pir::BoolAttribute', 'bool'], 'bool[]': [ - 'ir::ArrayAttribute', + 'pir::ArrayAttribute', 'const std::vector&', ], - 'str': ['ir::StrAttribute', 'const std::string&'], + 'str': ['pir::StrAttribute', 'const std::string&'], 'str[]': [ - 'ir::ArrayAttribute', + 'pir::ArrayAttribute', 'const std::vector&', ], 'Place': ['paddle::dialect::PlaceAttribute', 'const Place&'], @@ -291,11 +291,11 @@ def __init__(self, op_yaml_item, op_compat_item): ], 'DataType': ['paddle::dialect::DataTypeAttribute', 'DataType'], 'int64_t[]': [ - 'ir::ArrayAttribute', + 'pir::ArrayAttribute', 'const std::vector&', ], 'int[]': [ - 'ir::ArrayAttribute', + 'pir::ArrayAttribute', 'const std::vector&', ], } @@ -517,7 +517,7 @@ def parse_input_name_list(self): def parse_input_type_list(self): input_types_map = { 'Tensor': 'paddle::dialect::DenseTensorType', - 'Tensor[]': 'ir::VectorType', + 'Tensor[]': 'pir::VectorType', } type_list = [] for input_info in self.op_yaml_item['inputs']: @@ -554,7 +554,7 @@ def parse_output_name_list(self): def parse_output_type_list(self): output_type_map = { 'Tensor': 'paddle::dialect::DenseTensorType', - 'Tensor[]': 'ir::VectorType', + 'Tensor[]': 'pir::VectorType', 'SelectedRows': 'paddle::dialect::SelectedRowsType', } type_list = [] diff --git a/paddle/fluid/ir/dialect/op_generator/op_interface_gen.py b/paddle/fluid/pir/dialect/op_generator/op_interface_gen.py similarity index 91% rename from paddle/fluid/ir/dialect/op_generator/op_interface_gen.py rename to paddle/fluid/pir/dialect/op_generator/op_interface_gen.py index 4c02c09bae518..f73d4d38cb2de 100644 --- a/paddle/fluid/ir/dialect/op_generator/op_interface_gen.py +++ b/paddle/fluid/pir/dialect/op_generator/op_interface_gen.py @@ -26,8 +26,8 @@ {input_type} {input_name}(std::make_shared(op_obj.{input_name}()));""" OP_VJP_FORWARD_MULTI_INPUT_TEMPLATE = """ - ir::CombineOp combine_op_obj = - op_obj.{input_name}().GetDefiningOp()->dyn_cast(); + pir::CombineOp combine_op_obj = + op_obj.{input_name}().GetDefiningOp()->dyn_cast(); std::vector {input_name}; for (size_t idx = 0; idx < combine_op_obj.inputs().size(); idx++) {{ {input_name}.emplace_back( @@ -57,18 +57,18 @@ {inputs_list}stop_gradients);""" OP_VJP_STOPGRADIENT_TEMPLATE = """ - std::vector> res(tensor_res.size()); + std::vector> res(tensor_res.size()); for (size_t i = 0; i < tensor_res.size(); ++i) { res[i].resize(tensor_res[i].size()); for (size_t j = 0; j < tensor_res[i].size(); ++j) { if(tensor_res[i][j].defined()){ - res[i][j] = std::static_pointer_cast(tensor_res[i][j].impl())->value().dyn_cast(); + res[i][j] = std::static_pointer_cast(tensor_res[i][j].impl())->value().dyn_cast(); } } }""" 
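The substantive change in the op_gen.py hunks above, beyond the ir::-to-pir:: rename, is that gen_build_func_str stops slicing with the magic offset [19:-1] (the length of "ir::ArrayAttribute<") and instead derives the offset from len(array_attr_str); the renamed prefix "pir::ArrayAttribute" is one character longer, so the hard-coded slice would have left a stray '<' at the front of the inner type. A minimal Python sketch of the fixed extraction (a standalone illustration, not the generator's own code):

ARRAY_ATTR = "pir::ArrayAttribute"  # renamed from "ir::ArrayAttribute"

def inner_attr_type(attr_type: str) -> str:
    """Extract T from 'pir::ArrayAttribute<T>' without a magic offset."""
    assert attr_type.startswith(ARRAY_ATTR + "<") and attr_type.endswith(">")
    return attr_type[len(ARRAY_ATTR) + 1 : -1]  # +1 skips '<', -1 drops '>'

inner = inner_attr_type("pir::ArrayAttribute<pir::StrAttribute>")
data_name = "AsString" if inner == "pir::StrAttribute" else "data"  # as in the hunk
print(inner, data_name)  # pir::StrAttribute AsString

The same prefix-length pattern recurs in the op_verify_gen.py hunks further down; see the sketch after them.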
OP_VJP_DEFINE_TEMPLATE = """ -std::vector> {op_class_name}::Vjp(ir::Operation* op, const std::vector>& out_grads, const std::vector>& stop_gradients){{ +std::vector> {op_class_name}::Vjp(pir::Operation* op, const std::vector>& out_grads, const std::vector>& stop_gradients){{ {op_class_name} op_obj = op->dyn_cast<{op_class_name}>(); VLOG(6) << "Prepare inputs of {op_grad_name}"; @@ -89,11 +89,11 @@ input_types_map = { 'paddle::dialect::DenseTensorType': 'Tensor', - 'ir::VectorType': 'Tensor[]', + 'pir::VectorType': 'Tensor[]', } attr_data_map = { - 'ir::StrAttribute': 'AsString', + 'pir::StrAttribute': 'AsString', } @@ -218,5 +218,5 @@ def gen_exclusive_interface_str(op_info): " static void InferMeta( phi::InferMetaContext *infer_meta );" ) if op_info.op_phi_name[0] in vjp_interface_declare_gen_op_list: - exclusive_interface_str += "\n static std::vector> Vjp(ir::Operation* op, const std::vector>& out_grads, const std::vector>& stop_gradients);" + exclusive_interface_str += "\n static std::vector> Vjp(pir::Operation* op, const std::vector>& out_grads, const std::vector>& stop_gradients);" return exclusive_interface_str diff --git a/paddle/fluid/ir/dialect/op_generator/op_member_func_gen.py b/paddle/fluid/pir/dialect/op_generator/op_member_func_gen.py similarity index 88% rename from paddle/fluid/ir/dialect/op_generator/op_member_func_gen.py rename to paddle/fluid/pir/dialect/op_generator/op_member_func_gen.py index 9bc2c75ccf8a9..1cf32a44c5f60 100644 --- a/paddle/fluid/ir/dialect/op_generator/op_member_func_gen.py +++ b/paddle/fluid/pir/dialect/op_generator/op_member_func_gen.py @@ -14,9 +14,9 @@ # generator op member function -OP_GET_INPUT_TEMPLATE = """ ir::Value {input_name}() {{ return operand_source({input_index}); }} +OP_GET_INPUT_TEMPLATE = """ pir::Value {input_name}() {{ return operand_source({input_index}); }} """ -OP_GET_OUTPUT_TEMPLATE = """ ir::OpResult {output_name}() {{ return result({output_index}); }} +OP_GET_OUTPUT_TEMPLATE = """ pir::OpResult {output_name}() {{ return result({output_index}); }} """ diff --git a/paddle/fluid/ir/dialect/op_generator/op_verify_gen.py b/paddle/fluid/pir/dialect/op_generator/op_verify_gen.py similarity index 91% rename from paddle/fluid/ir/dialect/op_generator/op_verify_gen.py rename to paddle/fluid/pir/dialect/op_generator/op_verify_gen.py index 917728f2c8b17..4dffdb2c7b814 100644 --- a/paddle/fluid/ir/dialect/op_generator/op_verify_gen.py +++ b/paddle/fluid/pir/dialect/op_generator/op_verify_gen.py @@ -43,7 +43,7 @@ PADDLE_ENFORCE((*this)->operand_source({index}).type().isa<{standard}>(), phi::errors::PreconditionNotMet("Type validation failed for the {index}th input."));""" INPUT_VECTORTYPE_CHECK_TEMPLATE = """ - if (auto vec_type = (*this)->operand_source({index}).type().dyn_cast()) {{ + if (auto vec_type = (*this)->operand_source({index}).type().dyn_cast()) {{ for (size_t i = 0; i < vec_type.size(); ++i) {{ PADDLE_ENFORCE(vec_type[i].isa<{standard}>(), phi::errors::PreconditionNotMet("Type validation failed for the {index}th input.")); @@ -60,7 +60,7 @@ }}""" INPUT_OPTIONAL_VECTORTYPE_CHECK_TEMPLATE = """ if (auto val = (*this)->operand({index})) {{ - if (auto vec_type = val.type().dyn_cast()) {{ + if (auto vec_type = val.type().dyn_cast()) {{ for (size_t i = 0; i < vec_type.size(); i++) {{ PADDLE_ENFORCE(vec_type[i].isa<{standard}>(), phi::errors::PreconditionNotMet("Type validation failed for the {index}th input.")); @@ -75,10 +75,10 @@ PADDLE_ENFORCE(attributes.count("{attribute_name}")>0 && 
attributes.at("{attribute_name}").isa<{standard}>(), phi::errors::PreconditionNotMet("Type of attribute: {attribute_name} is not right."));""" ATTRIBUTE_VECTOR_CHECK_TEMPLATE = """ - PADDLE_ENFORCE(attributes.count("{attribute_name}")>0 && attributes.at("{attribute_name}").isa(), + PADDLE_ENFORCE(attributes.count("{attribute_name}")>0 && attributes.at("{attribute_name}").isa(), phi::errors::PreconditionNotMet("Type of attribute: {attribute_name} is not right.")); - for (size_t i = 0; i < attributes.at("{attribute_name}").dyn_cast().size(); i++) {{ - PADDLE_ENFORCE(attributes.at("{attribute_name}").dyn_cast().at(i).isa<{standard}>(), + for (size_t i = 0; i < attributes.at("{attribute_name}").dyn_cast().size(); i++) {{ + PADDLE_ENFORCE(attributes.at("{attribute_name}").dyn_cast().at(i).isa<{standard}>(), phi::errors::PreconditionNotMet("Type of attribute: {attribute_name} is not right.")); }}""" OUTPUT_TYPE_CHECK_TEMPLATE = """ @@ -86,7 +86,7 @@ phi::errors::PreconditionNotMet("Type validation failed for the {index}th output."));""" OUTPUT_VECTORTYPE_CHECK_TEMPLATE = """ auto output_{index}_type = (*this)->result({index}).type(); - if (auto vec_type = output_{index}_type.dyn_cast()) {{ + if (auto vec_type = output_{index}_type.dyn_cast()) {{ for (size_t i = 0; i < vec_type.size(); i++) {{ PADDLE_ENFORCE(vec_type[i].isa<{standard}>(), phi::errors::PreconditionNotMet("Type validation failed for the {index}th output.")); @@ -103,7 +103,7 @@ }}""" OUTPUT_OPTIONAL_VECTORTYPE_CHECK_TEMPLATE = """ if (auto output_{index}_type = (*this)->result({index}).type()) {{ - if (auto vec_type = output_{index}_type.dyn_cast()) {{ + if (auto vec_type = output_{index}_type.dyn_cast()) {{ for (size_t i = 0; i < vec_type.size(); ++i) {{ PADDLE_ENFORCE(vec_type[i].isa<{standard}>(), phi::errors::PreconditionNotMet("Type validation failed for the {index}th output.")); @@ -128,13 +128,14 @@ def gen_inputs_type_check_str( // Inputs num is 0, not need to check inputs type.""" else: inputs_type_check_str = "" + vector_type_str = "pir::VectorType<" for idx in range(len(op_input_type_list)): input_type = op_input_type_list[idx] is_optional = op_input_optional_list[idx] is_vector = False - if input_type.startswith("ir::VectorType<"): + if input_type.startswith(vector_type_str): is_vector = True - input_type = input_type[15:-1] + input_type = input_type[len(vector_type_str) : -1] check_str = "" if is_optional == "true": if is_vector: @@ -182,11 +183,13 @@ def gen_attributes_type_check_str( else: attributes_check_str = """ auto& attributes = this->attributes();""" + array_attr_str = "pir::ArrayAttribute<" for idx in range(len(op_non_mutable_attribute_name_list)): attribute_name = op_non_mutable_attribute_name_list[idx] attribute_type = op_non_mutable_attribute_type_list[idx] - if attribute_type.startswith("ir::ArrayAttribute<"): - attribute_type = attribute_type[19:-1] + + if attribute_type.startswith(array_attr_str): + attribute_type = attribute_type[len(array_attr_str) : -1] attributes_check_str += ATTRIBUTE_VECTOR_CHECK_TEMPLATE.format( attribute_name=attribute_name, standard=attribute_type, @@ -205,13 +208,14 @@ def gen_outputs_type_check_str(op_output_type_list, op_output_optional_list): // Outputs num is 0, not need to check outputs type.""" else: outputs_type_check_str = "" + vector_type_str = "pir::VectorType<" for idx in range(len(op_output_type_list)): output_type = op_output_type_list[idx] is_optional = op_output_optional_list[idx] is_vector = False - if output_type.startswith("ir::VectorType<"): + if 
output_type.startswith(vector_type_str): is_vector = True - output_type = output_type[15:-1] + output_type = output_type[len(vector_type_str) : -1] check_str = "" if is_optional == "true": if is_vector: diff --git a/paddle/fluid/ir/dialect/op_generator/ops_api_gen.py b/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py similarity index 100% rename from paddle/fluid/ir/dialect/op_generator/ops_api_gen.py rename to paddle/fluid/pir/dialect/op_generator/ops_api_gen.py diff --git a/paddle/fluid/ir/dialect/op_generator/python_c_gen.py b/paddle/fluid/pir/dialect/op_generator/python_c_gen.py similarity index 99% rename from paddle/fluid/ir/dialect/op_generator/python_c_gen.py rename to paddle/fluid/pir/dialect/op_generator/python_c_gen.py index a890a8db5d249..10f20da3ffe2a 100644 --- a/paddle/fluid/ir/dialect/op_generator/python_c_gen.py +++ b/paddle/fluid/pir/dialect/op_generator/python_c_gen.py @@ -46,7 +46,7 @@ CPP_FILE_TEMPLATE = """ #include "paddle/fluid/pybind/static_op_function.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_api.h" +#include "paddle/fluid/pir/dialect/operator/ir/pd_api.h" #include "paddle/fluid/pybind/eager_utils.h" #include "paddle/fluid/pybind/exception.h" #include "paddle/fluid/pybind/op_function_common.h" diff --git a/paddle/fluid/ir/dialect/op_generator/vjp_interface_gen_op_list.py b/paddle/fluid/pir/dialect/op_generator/vjp_interface_gen_op_list.py similarity index 100% rename from paddle/fluid/ir/dialect/op_generator/vjp_interface_gen_op_list.py rename to paddle/fluid/pir/dialect/op_generator/vjp_interface_gen_op_list.py diff --git a/paddle/fluid/ir/dialect/paddle_dialect/CMakeLists.txt b/paddle/fluid/pir/dialect/operator/CMakeLists.txt similarity index 100% rename from paddle/fluid/ir/dialect/paddle_dialect/CMakeLists.txt rename to paddle/fluid/pir/dialect/operator/CMakeLists.txt diff --git a/paddle/fluid/pir/dialect/operator/interface/CMakeLists.txt b/paddle/fluid/pir/dialect/operator/interface/CMakeLists.txt new file mode 100644 index 0000000000000..a6496585e7790 --- /dev/null +++ b/paddle/fluid/pir/dialect/operator/interface/CMakeLists.txt @@ -0,0 +1,7 @@ +# All source files of pd_op_dialect, except the op source files, which are generated into the build directory. +file(GLOB PD_INTERFACE_SRCS "*.cc") + +cc_library( + pd_interface + SRCS ${PD_INTERFACE_SRCS} + DEPS pir_core phi_utils) diff --git a/paddle/fluid/ir/dialect/paddle_dialect/interface/infermeta.h b/paddle/fluid/pir/dialect/operator/interface/infermeta.h similarity index 85% rename from paddle/fluid/ir/dialect/paddle_dialect/interface/infermeta.h rename to paddle/fluid/pir/dialect/operator/interface/infermeta.h index ba3d54c59439b..2c01a006a0cdf 100644 --- a/paddle/fluid/ir/dialect/paddle_dialect/interface/infermeta.h +++ b/paddle/fluid/pir/dialect/operator/interface/infermeta.h @@ -13,12 +13,12 @@ // limitations under the License.
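The op_verify_gen.py hunks above apply the same fix to both the input and output type checks: "pir::VectorType<" is one character longer than "ir::VectorType<", so the old hard-coded slice [15:-1] would no longer strip the prefix cleanly, and the new code derives the offset from the prefix string itself. A short sketch under the same assumption (illustration only, with assumed helper names):

VECTOR_TYPE = "pir::VectorType<"  # renamed from "ir::VectorType<"

def split_vector_type(type_str: str) -> tuple[bool, str]:
    """Return (is_vector, element_type) for a generated type string."""
    if type_str.startswith(VECTOR_TYPE):
        return True, type_str[len(VECTOR_TYPE) : -1]  # drop prefix and trailing '>'
    return False, type_str

print(split_vector_type("pir::VectorType<paddle::dialect::DenseTensorType>"))
# (True, 'paddle::dialect::DenseTensorType')
print(split_vector_type("paddle::dialect::DenseTensorType"))
# (False, 'paddle::dialect::DenseTensorType')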
#pragma once -#include "paddle/ir/core/op_base.h" #include "paddle/phi/core/infermeta_utils.h" +#include "paddle/pir/core/op_base.h" namespace paddle { namespace dialect { -class InferMetaInterface : public ir::OpInterfaceBase { +class InferMetaInterface : public pir::OpInterfaceBase { public: struct Concept { explicit Concept(void (*infer_meta)(phi::InferMetaContext *)) @@ -35,8 +35,8 @@ class InferMetaInterface : public ir::OpInterfaceBase { Model() : Concept(InferMeta) {} }; - InferMetaInterface(ir::Operation *op, Concept *impl) - : ir::OpInterfaceBase(op), impl_(impl) {} + InferMetaInterface(pir::Operation *op, Concept *impl) + : pir::OpInterfaceBase(op), impl_(impl) {} void InferMeta(phi::InferMetaContext *infer_meta) { impl_->infer_meta_(infer_meta); diff --git a/paddle/fluid/ir/dialect/paddle_dialect/interface/interface.cc b/paddle/fluid/pir/dialect/operator/interface/interface.cc similarity index 79% rename from paddle/fluid/ir/dialect/paddle_dialect/interface/interface.cc rename to paddle/fluid/pir/dialect/operator/interface/interface.cc index 12b14de308640..92b3bf0ba2168 100644 --- a/paddle/fluid/ir/dialect/paddle_dialect/interface/interface.cc +++ b/paddle/fluid/pir/dialect/operator/interface/interface.cc @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/ir/dialect/paddle_dialect/interface/infermeta.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/interface/op_yaml_info.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/interface/vjp.h" +#include "paddle/fluid/pir/dialect/operator/interface/infermeta.h" +#include "paddle/fluid/pir/dialect/operator/interface/op_yaml_info.h" +#include "paddle/fluid/pir/dialect/operator/interface/vjp.h" IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::InferMetaInterface) IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::OpYamlInfoInterface) diff --git a/paddle/fluid/ir/dialect/paddle_dialect/interface/op_yaml_info.h b/paddle/fluid/pir/dialect/operator/interface/op_yaml_info.h similarity index 83% rename from paddle/fluid/ir/dialect/paddle_dialect/interface/op_yaml_info.h rename to paddle/fluid/pir/dialect/operator/interface/op_yaml_info.h index 7663fb2029a43..33011f5613eb5 100644 --- a/paddle/fluid/ir/dialect/paddle_dialect/interface/op_yaml_info.h +++ b/paddle/fluid/pir/dialect/operator/interface/op_yaml_info.h @@ -14,8 +14,8 @@ #pragma once -#include "paddle/fluid/ir/dialect/paddle_dialect/utils/op_yaml_info_util.h" -#include "paddle/ir/core/op_base.h" +#include "paddle/fluid/pir/dialect/operator/utils/op_yaml_info_util.h" +#include "paddle/pir/core/op_base.h" using OpInfoTuple = std::tuple, std::vector, @@ -25,7 +25,7 @@ using OpInfoTuple = std::tuple, namespace paddle { namespace dialect { -class OpYamlInfoInterface : public ir::OpInterfaceBase { +class OpYamlInfoInterface : public pir::OpInterfaceBase { public: struct Concept { explicit Concept(OpInfoTuple (*get_op_info)()) @@ -40,8 +40,8 @@ class OpYamlInfoInterface : public ir::OpInterfaceBase { Model() : Concept(GetOpInfo) {} }; - OpYamlInfoInterface(ir::Operation *op, Concept *impl) - : ir::OpInterfaceBase(op), impl_(impl) {} + OpYamlInfoInterface(pir::Operation *op, Concept *impl) + : pir::OpInterfaceBase(op), impl_(impl) {} OpInfoTuple GetOpInfo() { return impl_->get_op_info_(); } diff --git a/paddle/fluid/ir/dialect/paddle_dialect/interface/vjp.h b/paddle/fluid/pir/dialect/operator/interface/vjp.h similarity index 62% rename from paddle/fluid/ir/dialect/paddle_dialect/interface/vjp.h rename to 
paddle/fluid/pir/dialect/operator/interface/vjp.h index a373cd0bacca4..56c814db89088 100644 --- a/paddle/fluid/ir/dialect/paddle_dialect/interface/vjp.h +++ b/paddle/fluid/pir/dialect/operator/interface/vjp.h @@ -13,29 +13,29 @@ // limitations under the License. #pragma once -#include "paddle/ir/core/op_base.h" +#include "paddle/pir/core/op_base.h" namespace paddle { namespace dialect { -class VjpInterface : public ir::OpInterfaceBase { +class VjpInterface : public pir::OpInterfaceBase { public: struct Concept { - explicit Concept(std::vector> (*vjp)( - ir::Operation* op, - const std::vector>& out_grads, + explicit Concept(std::vector> (*vjp)( + pir::Operation* op, + const std::vector>& out_grads, const std::vector>& stop_gradients)) : vjp_(vjp) {} - std::vector> (*vjp_)( - ir::Operation* op, - const std::vector>& out_grads, + std::vector> (*vjp_)( + pir::Operation* op, + const std::vector>& out_grads, const std::vector>& stop_gradients); }; template struct Model : public Concept { - static std::vector> Vjp( - ir::Operation* op, - const std::vector>& out_grads, + static std::vector> Vjp( + pir::Operation* op, + const std::vector>& out_grads, const std::vector>& stop_gradients) { return ConcreteOp::Vjp(op, out_grads, stop_gradients); } @@ -43,12 +43,12 @@ class VjpInterface : public ir::OpInterfaceBase { Model() : Concept(Vjp) {} }; - VjpInterface(ir::Operation* op, Concept* impl) - : ir::OpInterfaceBase(op), impl_(impl) {} + VjpInterface(pir::Operation* op, Concept* impl) + : pir::OpInterfaceBase(op), impl_(impl) {} - std::vector> Vjp( - ir::Operation* op, - const std::vector>& out_grads, + std::vector> Vjp( + pir::Operation* op, + const std::vector>& out_grads, const std::vector>& stop_gradients) { return impl_->vjp_(op, out_grads, stop_gradients); } diff --git a/paddle/fluid/ir/dialect/paddle_dialect/ir/.gitignore b/paddle/fluid/pir/dialect/operator/ir/.gitignore similarity index 100% rename from paddle/fluid/ir/dialect/paddle_dialect/ir/.gitignore rename to paddle/fluid/pir/dialect/operator/ir/.gitignore diff --git a/paddle/fluid/ir/dialect/paddle_dialect/ir/CMakeLists.txt b/paddle/fluid/pir/dialect/operator/ir/CMakeLists.txt similarity index 83% rename from paddle/fluid/ir/dialect/paddle_dialect/ir/CMakeLists.txt rename to paddle/fluid/pir/dialect/operator/ir/CMakeLists.txt index 64e3e982133be..71df1b6811bf7 100644 --- a/paddle/fluid/ir/dialect/paddle_dialect/ir/CMakeLists.txt +++ b/paddle/fluid/pir/dialect/operator/ir/CMakeLists.txt @@ -1,12 +1,12 @@ set(PD_DIALECT_BINARY_DIR - "${PADDLE_BINARY_DIR}/paddle/fluid/ir/dialect/paddle_dialect/ir") + "${PADDLE_BINARY_DIR}/paddle/fluid/pir/dialect/operator/ir") -# Generate pd_dialect files defining op using op_gen_file +# Generate pd_op_dialect files defining op using op_gen_file set(op_gen_parsed_yaml_file ${PADDLE_SOURCE_DIR}/paddle/fluid/operators/generator/parse_op.py) set(op_gen_file - ${PADDLE_SOURCE_DIR}/paddle/fluid/ir/dialect/op_generator/op_gen.py) + ${PADDLE_SOURCE_DIR}/paddle/fluid/pir/dialect/op_generator/op_gen.py) set(op_compat_yaml_file ${PADDLE_SOURCE_DIR}/paddle/phi/api/yaml/op_compat.yaml) set(op_forward_yaml_file1 ${PADDLE_SOURCE_DIR}/paddle/fluid/operators/generator/parsed_ops/ops.parsed.yaml @@ -28,23 +28,22 @@ set(fused_op_backward_yaml_file ) set(pd_op_forward_yaml_file - ${PADDLE_SOURCE_DIR}/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_ops.yaml) + ${PADDLE_SOURCE_DIR}/paddle/fluid/pir/dialect/operator/ir/ops.yaml) set(pd_op_backward_yaml_file - 
${PADDLE_SOURCE_DIR}/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_ops_backward.yaml -) + ${PADDLE_SOURCE_DIR}/paddle/fluid/pir/dialect/operator/ir/ops_backward.yaml) set(parsed_op_dir - ${PADDLE_SOURCE_DIR}/paddle/fluid/ir/dialect/paddle_dialect/ir/generated) + ${PADDLE_SOURCE_DIR}/paddle/fluid/pir/dialect/operator/ir/generated) -set(op_yaml_file3 ${parsed_op_dir}/pd_ops.parsed.yaml) -set(op_yaml_file4 ${parsed_op_dir}/pd_ops_backward.parsed.yaml) +set(op_yaml_file3 ${parsed_op_dir}/ops.parsed.yaml) +set(op_yaml_file4 ${parsed_op_dir}/ops_backward.parsed.yaml) set(op_yaml_files ${op_forward_yaml_file1},${op_forward_yaml_file2},${op_backward_yaml_file1},${op_backward_yaml_file2},${fused_op_forward_yaml_file},${fused_op_backward_yaml_file},${op_yaml_file3},${op_yaml_file4} ) set(op_namespace paddle,dialect) -set(dialect_name pd) +set(dialect_name pd_op) set(op_header_file ${PD_DIALECT_BINARY_DIR}/pd_op.h) set(op_source_file ${PD_DIALECT_BINARY_DIR}/pd_op.cc) set(op_header_file_tmp ${op_header_file}.tmp) @@ -96,7 +95,7 @@ set(api_gen_yaml_files ${op_forward_yaml_file1},${op_forward_yaml_file2},${op_backward_yaml_file1},${op_backward_yaml_file2},${op_yaml_file3},${op_yaml_file4} ) set(api_gen_file - ${PADDLE_SOURCE_DIR}/paddle/fluid/ir/dialect/op_generator/api_gen.py) + ${PADDLE_SOURCE_DIR}/paddle/fluid/pir/dialect/op_generator/api_gen.py) set(api_header_file ${PD_DIALECT_BINARY_DIR}/pd_api.h) set(api_source_file ${PD_DIALECT_BINARY_DIR}/pd_api.cc) set(api_header_file_tmp ${api_header_file}.tmp) @@ -125,7 +124,7 @@ add_custom_command( VERBATIM) set(python_c_gen_file - ${PADDLE_SOURCE_DIR}/paddle/fluid/ir/dialect/op_generator/python_c_gen.py) + ${PADDLE_SOURCE_DIR}/paddle/fluid/pir/dialect/op_generator/python_c_gen.py) set(python_c_header_file ${PADDLE_SOURCE_DIR}/paddle/fluid/pybind/static_op_function.h) set(python_c_source_file @@ -160,7 +159,7 @@ add_custom_target(static_op_function_gen ALL DEPENDS ${python_c_header_file} ${python_c_source_file}) set(ops_api_gen_file - ${PADDLE_SOURCE_DIR}/paddle/fluid/ir/dialect/op_generator/ops_api_gen.py) + ${PADDLE_SOURCE_DIR}/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py) set(ops_api_source_file ${PADDLE_SOURCE_DIR}/paddle/fluid/pybind/ops_api.cc) set(ops_api_source_file_tmp ${ops_api_source_file}.tmp) @@ -186,26 +185,26 @@ add_custom_command( add_custom_target(ops_api_gen ALL DEPENDS ${ops_api_source_file}) cc_library( - pd_dialect_core - SRCS pd_attribute.cc pd_type.cc pd_meta_tensor.cc + pd_op_dialect_core + SRCS op_attribute.cc op_type.cc meta_tensor.cc DEPS phi pd_interface pd_trait type_info) cc_library( - pd_dialect_op - SRCS ${op_source_file} pd_manual_op.cc - DEPS pd_dialect_core) + pd_op_dialect_op + SRCS ${op_source_file} manual_op.cc + DEPS pd_op_dialect_core) cc_library( api_builder SRCS api_builder.cc - DEPS ir_core) + DEPS pir_core) cc_library( - pd_dialect_api - SRCS ${api_source_file} pd_manual_api.cc - DEPS api_builder pd_dialect_op) + pd_op_dialect_api + SRCS ${api_source_file} manual_api.cc + DEPS api_builder pd_op_dialect_op) -target_include_directories(pd_dialect_api PRIVATE ${PD_DIALECT_BINARY_DIR}) +target_include_directories(pd_op_dialect_api PRIVATE ${PD_DIALECT_BINARY_DIR}) cc_library( - pd_dialect - SRCS pd_dialect.cc pd_manual_op_vjp.cc ${op_vjp_source_file} - DEPS pd_dialect_api param_to_variable primitive_vjp_experimental - pd_dialect_utils op_yaml_info_parser) + pd_op_dialect + SRCS op_dialect.cc manual_op_vjp.cc ${op_vjp_source_file} + DEPS pd_op_dialect_api param_to_variable primitive_vjp_experimental + 
pd_op_dialect_utils op_yaml_info_parser) diff --git a/paddle/fluid/ir/dialect/paddle_dialect/ir/api_builder.cc b/paddle/fluid/pir/dialect/operator/ir/api_builder.cc similarity index 80% rename from paddle/fluid/ir/dialect/paddle_dialect/ir/api_builder.cc rename to paddle/fluid/pir/dialect/operator/ir/api_builder.cc index 0ded4ee1a5de8..893c664b78b08 100644 --- a/paddle/fluid/ir/dialect/paddle_dialect/ir/api_builder.cc +++ b/paddle/fluid/pir/dialect/operator/ir/api_builder.cc @@ -12,22 +12,22 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/api_builder.h" -#include "paddle/ir/core/enforce.h" -#include "paddle/ir/core/ir_context.h" +#include "paddle/fluid/pir/dialect/operator/ir/api_builder.h" +#include "paddle/pir/core/enforce.h" +#include "paddle/pir/core/ir_context.h" namespace paddle { namespace dialect { APIBuilder::APIBuilder() : builder_(nullptr) { - ctx_ = ir::IrContext::Instance(); + ctx_ = pir::IrContext::Instance(); } -void APIBuilder::SetProgram(ir::Program* program) { - builder_ = std::make_shared(ctx_, program->block()); +void APIBuilder::SetProgram(pir::Program* program) { + builder_ = std::make_shared(ctx_, program->block()); } -void APIBuilder::SetInsertionPoint(ir::Operation* op) { +void APIBuilder::SetInsertionPoint(pir::Operation* op) { IR_ENFORCE(builder_ != nullptr, "builder doesn't hold program, please call SetProgram for " "initialization."); diff --git a/paddle/fluid/ir/dialect/paddle_dialect/ir/api_builder.h b/paddle/fluid/pir/dialect/operator/ir/api_builder.h similarity index 78% rename from paddle/fluid/ir/dialect/paddle_dialect/ir/api_builder.h rename to paddle/fluid/pir/dialect/operator/ir/api_builder.h index 029c79c2110c0..a06f529d2c5be 100644 --- a/paddle/fluid/ir/dialect/paddle_dialect/ir/api_builder.h +++ b/paddle/fluid/pir/dialect/operator/ir/api_builder.h @@ -15,9 +15,9 @@ #pragma once #include -#include "paddle/ir/core/builder.h" -#include "paddle/ir/core/macros.h" -#include "paddle/ir/core/program.h" +#include "paddle/pir/core/builder.h" +#include "paddle/pir/core/macros.h" +#include "paddle/pir/core/program.h" namespace paddle { namespace dialect { @@ -30,25 +30,25 @@ class APIBuilder { static APIBuilder api_builder; return api_builder; } - void SetProgram(ir::Program* program); + void SetProgram(pir::Program* program); /// Set the insertion point to the specified operation, which will cause /// subsequent insertions to go right before it. 
- void SetInsertionPoint(ir::Operation* op); + void SetInsertionPoint(pir::Operation* op); void ResetInsertionPointToStart(); void ResetInsertionPointToEnd(); - std::shared_ptr GetBuilder() { return builder_; } + std::shared_ptr GetBuilder() { return builder_; } private: APIBuilder(); DISABLE_COPY_AND_ASSIGN(APIBuilder); - ir::IrContext* ctx_; - std::shared_ptr builder_; + pir::IrContext* ctx_; + std::shared_ptr builder_; }; } // namespace dialect diff --git a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_attribute_storage.h b/paddle/fluid/pir/dialect/operator/ir/attribute_storage.h similarity index 84% rename from paddle/fluid/ir/dialect/paddle_dialect/ir/pd_attribute_storage.h rename to paddle/fluid/pir/dialect/operator/ir/attribute_storage.h index 1877e5043fc65..68f066b009329 100644 --- a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_attribute_storage.h +++ b/paddle/fluid/pir/dialect/operator/ir/attribute_storage.h @@ -14,17 +14,17 @@ #pragma once -#include "paddle/ir/core/attribute.h" -#include "paddle/ir/core/attribute_base.h" -#include "paddle/ir/core/utils.h" #include "paddle/phi/common/data_type.h" #include "paddle/phi/common/int_array.h" #include "paddle/phi/common/layout.h" #include "paddle/phi/common/place.h" +#include "paddle/pir/core/attribute.h" +#include "paddle/pir/core/attribute_base.h" +#include "paddle/pir/core/utils.h" namespace paddle { namespace dialect { -struct IntArrayAttributeStorage : public ir::AttributeStorage { +struct IntArrayAttributeStorage : public pir::AttributeStorage { using ParamKey = phi::IntArray; explicit IntArrayAttributeStorage(const ParamKey &key) { data_ = key; } @@ -36,9 +36,9 @@ struct IntArrayAttributeStorage : public ir::AttributeStorage { static std::size_t HashValue(const ParamKey &key) { size_t hash_value = 0; hash_value = - ir::hash_combine(hash_value, std::hash()(key.FromTensor())); + pir::hash_combine(hash_value, std::hash()(key.FromTensor())); for (auto value : key.GetData()) { - hash_value = ir::hash_combine(hash_value, std::hash()(value)); + hash_value = pir::hash_combine(hash_value, std::hash()(value)); } return hash_value; } @@ -54,7 +54,7 @@ struct IntArrayAttributeStorage : public ir::AttributeStorage { phi::IntArray data_; }; -struct DataTypeAttributeStorage : public ir::AttributeStorage { +struct DataTypeAttributeStorage : public pir::AttributeStorage { using ParamKey = phi::DataType; explicit DataTypeAttributeStorage(const ParamKey &key) { data_ = key; } @@ -75,7 +75,7 @@ struct DataTypeAttributeStorage : public ir::AttributeStorage { phi::DataType data_; }; -struct PlaceAttributeStorage : public ir::AttributeStorage { +struct PlaceAttributeStorage : public pir::AttributeStorage { using ParamKey = phi::Place; explicit PlaceAttributeStorage(const ParamKey &key) { data_ = key; } @@ -94,7 +94,7 @@ struct PlaceAttributeStorage : public ir::AttributeStorage { phi::Place data_; }; -struct DataLayoutAttributeStorage : public ir::AttributeStorage { +struct DataLayoutAttributeStorage : public pir::AttributeStorage { using ParamKey = phi::DataLayout; explicit DataLayoutAttributeStorage(const ParamKey &key) { data_ = key; } diff --git a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_api.cc b/paddle/fluid/pir/dialect/operator/ir/manual_api.cc similarity index 66% rename from paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_api.cc rename to paddle/fluid/pir/dialect/operator/ir/manual_api.cc index b95d78a74f470..5c3e107686dfd 100644 --- a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_api.cc +++ 
b/paddle/fluid/pir/dialect/operator/ir/manual_api.cc @@ -12,17 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_api.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/api_builder.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_op.h" -#include "paddle/ir/core/builtin_op.h" +#include "paddle/fluid/pir/dialect/operator/ir/manual_api.h" +#include "paddle/fluid/pir/dialect/operator/ir/api_builder.h" +#include "paddle/fluid/pir/dialect/operator/ir/pd_op.h" +#include "paddle/pir/core/builtin_op.h" namespace paddle { namespace dialect { -ir::OpResult split_grad(std::vector out_grads, - ir::OpResult axis) { +pir::OpResult split_grad(std::vector out_grads, + pir::OpResult axis) { auto combine_op = - APIBuilder::Instance().GetBuilder()->Build(out_grads); + APIBuilder::Instance().GetBuilder()->Build(out_grads); paddle::dialect::SplitGradOp split_grad_op = APIBuilder::Instance().GetBuilder()->Build( combine_op.out(), axis); @@ -30,9 +30,9 @@ ir::OpResult split_grad(std::vector out_grads, return split_grad_op.x_grad(); } -ir::OpResult split_grad(std::vector out_grads, int axis) { +pir::OpResult split_grad(std::vector out_grads, int axis) { auto combine_op = - APIBuilder::Instance().GetBuilder()->Build(out_grads); + APIBuilder::Instance().GetBuilder()->Build(out_grads); paddle::dialect::SplitGradOp split_grad_op = APIBuilder::Instance().GetBuilder()->Build( combine_op.out(), axis); diff --git a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_api.h b/paddle/fluid/pir/dialect/operator/ir/manual_api.h similarity index 79% rename from paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_api.h rename to paddle/fluid/pir/dialect/operator/ir/manual_api.h index 5eba73e5182bd..b98746aa88454 100644 --- a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_api.h +++ b/paddle/fluid/pir/dialect/operator/ir/manual_api.h @@ -16,15 +16,16 @@ #include -#include "paddle/ir/core/value.h" #include "paddle/phi/common/data_type.h" #include "paddle/phi/common/place.h" +#include "paddle/pir/core/value.h" namespace paddle { namespace dialect { -ir::OpResult split_grad(std::vector out_grads, ir::OpResult axis); +pir::OpResult split_grad(std::vector out_grads, + pir::OpResult axis); -ir::OpResult split_grad(std::vector out_grads, int axis); +pir::OpResult split_grad(std::vector out_grads, int axis); } // namespace dialect } // namespace paddle diff --git a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_op.cc b/paddle/fluid/pir/dialect/operator/ir/manual_op.cc similarity index 84% rename from paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_op.cc rename to paddle/fluid/pir/dialect/operator/ir/manual_op.cc index 058a08a384d2d..d029b3c673ece 100644 --- a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_op.cc +++ b/paddle/fluid/pir/dialect/operator/ir/manual_op.cc @@ -12,20 +12,20 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_op.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_attribute.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_op.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/builtin_op.h" -#include "paddle/ir/core/builtin_type.h" -#include "paddle/ir/core/ir_context.h" +#include "paddle/fluid/pir/dialect/operator/ir/manual_op.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_attribute.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_type.h" +#include "paddle/fluid/pir/dialect/operator/ir/pd_op.h" #include "paddle/phi/api/lib/utils/allocator.h" #include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/core/enforce.h" #include "paddle/phi/infermeta/backward.h" #include "paddle/phi/infermeta/fusion.h" #include "paddle/phi/infermeta/multiary.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/builtin_op.h" +#include "paddle/pir/core/builtin_type.h" +#include "paddle/pir/core/ir_context.h" namespace paddle { namespace dialect { @@ -33,7 +33,7 @@ namespace dialect { OpInfoTuple AddNOp::GetOpInfo() { std::vector inputs = { OpInputInfo("inputs", - "ir::VectorType", + "pir::VectorType", false, false, false, @@ -57,7 +57,8 @@ void AddNOp::Verify() { 1u, phi::errors::PreconditionNotMet( "The size %d of inputs must be equal to 1.", input_size)); - if (auto vec_type = (*this)->operand(0).type().dyn_cast()) { + if (auto vec_type = + (*this)->operand(0).type().dyn_cast()) { for (size_t i = 0; i < vec_type.size(); ++i) { PADDLE_ENFORCE(vec_type[i].isa() || vec_type[i].isa(), @@ -96,17 +97,17 @@ void AddNOp::Verify() { VLOG(4) << "End Verifying for: AddNOp."; } -void AddNOp::Build(ir::Builder &builder, // NOLINT - ir::OperationArgument &argument, // NOLINT - ir::OpResult inputs) { +void AddNOp::Build(pir::Builder &builder, // NOLINT + pir::OperationArgument &argument, // NOLINT + pir::OpResult inputs) { VLOG(4) << "Builder construction inputs"; - std::vector argument_inputs = {inputs}; + std::vector argument_inputs = {inputs}; argument.AddOperands(argument_inputs.begin(), argument_inputs.end()); VLOG(4) << "Builder construction attributes"; VLOG(4) << "Builder construction outputs"; - ir::VectorType x = inputs.type().dyn_cast(); + pir::VectorType x = inputs.type().dyn_cast(); (void)x; std::vector vec_dense_x; @@ -137,9 +138,9 @@ void AddNOp::Build(ir::Builder &builder, // NOLINT phi::AddNInferMeta(meta_x, &meta_out); - std::vector argument_outputs; - ir::Type out_dense_tensor_type = paddle::dialect::DenseTensorType::get( - ir::IrContext::Instance(), + std::vector argument_outputs; + pir::Type out_dense_tensor_type = paddle::dialect::DenseTensorType::get( + pir::IrContext::Instance(), TransToIrDataType(dense_out.dtype()), dense_out.dims(), dense_out.layout(), @@ -158,7 +159,7 @@ OpInfoTuple AddN_Op::GetOpInfo() { std::vector inputs = { paddle::dialect::OpInputInfo( "inputs", - "ir::VectorType", + "pir::VectorType", false, false, false, @@ -172,17 +173,17 @@ OpInfoTuple AddN_Op::GetOpInfo() { return std::make_tuple(inputs, attributes, outputs, run_time_info, "add_n_"); } -void AddN_Op::Build(ir::Builder &builder, - ir::OperationArgument &argument, - ir::OpResult inputs_) { +void AddN_Op::Build(pir::Builder &builder, + pir::OperationArgument &argument, + pir::OpResult inputs_) { VLOG(4) << "Builder construction inputs"; - std::vector argument_inputs = {inputs_}; + std::vector argument_inputs = {inputs_}; 
argument.AddOperands(argument_inputs.begin(), argument_inputs.end()); VLOG(4) << "Builder construction attributes"; VLOG(4) << "Builder construction outputs"; - ir::VectorType inputs = inputs_.type().dyn_cast(); + pir::VectorType inputs = inputs_.type().dyn_cast(); std::vector vec_dense_inputs; for (size_t i = 0; i < static_cast(inputs.size()); i++) { vec_dense_inputs.push_back(phi::DenseTensor( @@ -213,9 +214,9 @@ void AddN_Op::Build(ir::Builder &builder, phi::AddNInferMeta(meta_inputs, &meta_out); - std::vector argument_outputs; - ir::Type out_dense_tensor_type = paddle::dialect::DenseTensorType::get( - ir::IrContext::Instance(), + std::vector argument_outputs; + pir::Type out_dense_tensor_type = paddle::dialect::DenseTensorType::get( + pir::IrContext::Instance(), paddle::dialect::TransToIrDataType(dense_out.dtype()), dense_out.dims(), dense_out.layout(), @@ -236,7 +237,7 @@ void AddN_Op::Verify() { phi::errors::PreconditionNotMet( "The size %d of inputs must be equal to 1.", input_size)); if (auto vec_type = - (*this)->operand_source(0).type().dyn_cast()) { + (*this)->operand_source(0).type().dyn_cast()) { for (size_t i = 0; i < vec_type.size(); ++i) { PADDLE_ENFORCE(vec_type[i].isa() || vec_type[i].isa(), @@ -285,7 +286,7 @@ OpInfoTuple AddNWithKernelOp::GetOpInfo() { std::vector inputs = { paddle::dialect::OpInputInfo( "inputs", - "ir::VectorType", + "pir::VectorType", false, false, false, @@ -300,17 +301,17 @@ OpInfoTuple AddNWithKernelOp::GetOpInfo() { inputs, attributes, outputs, run_time_info, "add_n_with_kernel"); } -void AddNWithKernelOp::Build(ir::Builder &builder, - ir::OperationArgument &argument, - ir::OpResult inputs_) { +void AddNWithKernelOp::Build(pir::Builder &builder, + pir::OperationArgument &argument, + pir::OpResult inputs_) { VLOG(4) << "Builder construction inputs"; - std::vector argument_inputs = {inputs_}; + std::vector argument_inputs = {inputs_}; argument.AddOperands(argument_inputs.begin(), argument_inputs.end()); VLOG(4) << "Builder construction attributes"; VLOG(4) << "Builder construction outputs"; - ir::VectorType inputs = inputs_.type().dyn_cast(); + pir::VectorType inputs = inputs_.type().dyn_cast(); std::vector vec_dense_inputs; for (size_t i = 0; i < static_cast(inputs.size()); i++) { vec_dense_inputs.push_back(phi::DenseTensor( @@ -341,9 +342,9 @@ void AddNWithKernelOp::Build(ir::Builder &builder, phi::AddNInferMeta(meta_inputs, &meta_out); - std::vector argument_outputs; - ir::Type out_dense_tensor_type = paddle::dialect::DenseTensorType::get( - ir::IrContext::Instance(), + std::vector argument_outputs; + pir::Type out_dense_tensor_type = paddle::dialect::DenseTensorType::get( + pir::IrContext::Instance(), paddle::dialect::TransToIrDataType(dense_out.dtype()), dense_out.dims(), dense_out.layout(), @@ -365,7 +366,7 @@ void AddNWithKernelOp::Verify() { phi::errors::PreconditionNotMet( "The size %d of inputs must be equal to 1.", input_size)); if (auto vec_type = - (*this)->operand_source(0).type().dyn_cast()) { + (*this)->operand_source(0).type().dyn_cast()) { for (size_t i = 0; i < vec_type.size(); ++i) { PADDLE_ENFORCE(vec_type[i].isa() || vec_type[i].isa(), @@ -426,9 +427,9 @@ OpInfoTuple FusedGemmEpilogueOp::GetOpInfo() { false, false)}; std::vector attributes = { - paddle::dialect::OpAttributeInfo("trans_x", "ir::BoolAttribute", ""), - paddle::dialect::OpAttributeInfo("trans_y", "ir::BoolAttribute", ""), - paddle::dialect::OpAttributeInfo("activation", "ir::StrAttribute", "")}; + paddle::dialect::OpAttributeInfo("trans_x", "pir::BoolAttribute", 
""), + paddle::dialect::OpAttributeInfo("trans_y", "pir::BoolAttribute", ""), + paddle::dialect::OpAttributeInfo("activation", "pir::StrAttribute", "")}; std::vector outputs = { paddle::dialect::OpOutputInfo( "out", "paddle::dialect::DenseTensorType", false, false), @@ -448,32 +449,32 @@ OpInfoTuple FusedGemmEpilogueOp::GetOpInfo() { inputs, attributes, outputs, run_time_info, "fused_gemm_epilogue"); } -void FusedGemmEpilogueOp::Build(ir::Builder &builder, - ir::OperationArgument &argument, - ir::OpResult x_, - ir::OpResult y_, - ir::OpResult bias_, - ir::AttributeMap attributes) { - bool trans_x = attributes.at("trans_x").dyn_cast().data(); +void FusedGemmEpilogueOp::Build(pir::Builder &builder, + pir::OperationArgument &argument, + pir::OpResult x_, + pir::OpResult y_, + pir::OpResult bias_, + pir::AttributeMap attributes) { + bool trans_x = attributes.at("trans_x").dyn_cast().data(); - bool trans_y = attributes.at("trans_y").dyn_cast().data(); + bool trans_y = attributes.at("trans_y").dyn_cast().data(); std::string activation = - attributes.at("activation").dyn_cast().AsString(); + attributes.at("activation").dyn_cast().AsString(); VLOG(4) << "Builder construction inputs"; - std::vector argument_inputs = {x_, y_, bias_}; + std::vector argument_inputs = {x_, y_, bias_}; argument.AddOperands(argument_inputs.begin(), argument_inputs.end()); VLOG(4) << "Builder construction attributes"; - ir::Attribute attr_trans_x = - ir::BoolAttribute::get(ir::IrContext::Instance(), trans_x); + pir::Attribute attr_trans_x = + pir::BoolAttribute::get(pir::IrContext::Instance(), trans_x); argument.AddAttribute("trans_x", attr_trans_x); - ir::Attribute attr_trans_y = - ir::BoolAttribute::get(ir::IrContext::Instance(), trans_y); + pir::Attribute attr_trans_y = + pir::BoolAttribute::get(pir::IrContext::Instance(), trans_y); argument.AddAttribute("trans_y", attr_trans_y); - ir::Attribute attr_activation = - ir::StrAttribute::get(ir::IrContext::Instance(), activation); + pir::Attribute attr_activation = + pir::StrAttribute::get(pir::IrContext::Instance(), activation); argument.AddAttribute("activation", attr_activation); VLOG(4) << "Builder construction outputs"; @@ -540,9 +541,9 @@ void FusedGemmEpilogueOp::Build(ir::Builder &builder, &meta_out, activation == "none" ? nullptr : &meta_reserve_space); - std::vector argument_outputs; - ir::Type out_dense_tensor_type = paddle::dialect::DenseTensorType::get( - ir::IrContext::Instance(), + std::vector argument_outputs; + pir::Type out_dense_tensor_type = paddle::dialect::DenseTensorType::get( + pir::IrContext::Instance(), paddle::dialect::TransToIrDataType(dense_out.dtype()), dense_out.dims(), dense_out.layout(), @@ -550,11 +551,11 @@ void FusedGemmEpilogueOp::Build(ir::Builder &builder, dense_out.offset()); argument_outputs.push_back(out_dense_tensor_type); - ir::Type reserve_space_dense_tensor_type = + pir::Type reserve_space_dense_tensor_type = activation == "none" - ? ir::Type() + ? 
pir::Type() : paddle::dialect::DenseTensorType::get( - ir::IrContext::Instance(), + pir::IrContext::Instance(), paddle::dialect::TransToIrDataType(dense_reserve_space.dtype()), dense_reserve_space.dims(), dense_reserve_space.layout(), @@ -599,15 +600,15 @@ void FusedGemmEpilogueOp::Verify() { { auto &attributes = this->attributes(); PADDLE_ENFORCE(attributes.count("trans_x") > 0 && - attributes.at("trans_x").isa(), + attributes.at("trans_x").isa(), phi::errors::PreconditionNotMet( "Type of attribute: trans_x is not right.")); PADDLE_ENFORCE(attributes.count("trans_y") > 0 && - attributes.at("trans_y").isa(), + attributes.at("trans_y").isa(), phi::errors::PreconditionNotMet( "Type of attribute: trans_y is not right.")); PADDLE_ENFORCE(attributes.count("activation") > 0 && - attributes.at("activation").isa(), + attributes.at("activation").isa(), phi::errors::PreconditionNotMet( "Type of attribute: activation is not right.")); } @@ -659,10 +660,10 @@ OpInfoTuple FusedGemmEpilogueGradOp::GetOpInfo() { false, false)}; std::vector attributes = { - paddle::dialect::OpAttributeInfo("trans_x", "ir::BoolAttribute", ""), - paddle::dialect::OpAttributeInfo("trans_y", "ir::BoolAttribute", ""), + paddle::dialect::OpAttributeInfo("trans_x", "pir::BoolAttribute", ""), + paddle::dialect::OpAttributeInfo("trans_y", "pir::BoolAttribute", ""), paddle::dialect::OpAttributeInfo( - "activation_grad", "ir::StrAttribute", "")}; + "activation_grad", "pir::StrAttribute", "")}; std::vector outputs = { paddle::dialect::OpOutputInfo( "x_grad", "paddle::dialect::DenseTensorType", false, false), @@ -689,34 +690,34 @@ OpInfoTuple FusedGemmEpilogueGradOp::GetOpInfo() { inputs, attributes, outputs, run_time_info, "fused_gemm_epilogue_grad"); } -void FusedGemmEpilogueGradOp::Build(ir::Builder &builder, - ir::OperationArgument &argument, - ir::OpResult x_, - ir::OpResult y_, - ir::OpResult reserve_space_, - ir::OpResult out_grad_, - ir::AttributeMap attributes) { - bool trans_x = attributes.at("trans_x").dyn_cast().data(); +void FusedGemmEpilogueGradOp::Build(pir::Builder &builder, + pir::OperationArgument &argument, + pir::OpResult x_, + pir::OpResult y_, + pir::OpResult reserve_space_, + pir::OpResult out_grad_, + pir::AttributeMap attributes) { + bool trans_x = attributes.at("trans_x").dyn_cast().data(); - bool trans_y = attributes.at("trans_y").dyn_cast().data(); + bool trans_y = attributes.at("trans_y").dyn_cast().data(); std::string activation_grad = - attributes.at("activation_grad").dyn_cast().AsString(); + attributes.at("activation_grad").dyn_cast().AsString(); VLOG(4) << "Builder construction inputs"; - std::vector argument_inputs = { + std::vector argument_inputs = { x_, y_, reserve_space_, out_grad_}; argument.AddOperands(argument_inputs.begin(), argument_inputs.end()); VLOG(4) << "Builder construction attributes"; - ir::Attribute attr_trans_x = - ir::BoolAttribute::get(ir::IrContext::Instance(), trans_x); + pir::Attribute attr_trans_x = + pir::BoolAttribute::get(pir::IrContext::Instance(), trans_x); argument.AddAttribute("trans_x", attr_trans_x); - ir::Attribute attr_trans_y = - ir::BoolAttribute::get(ir::IrContext::Instance(), trans_y); + pir::Attribute attr_trans_y = + pir::BoolAttribute::get(pir::IrContext::Instance(), trans_y); argument.AddAttribute("trans_y", attr_trans_y); - ir::Attribute attr_activation_grad = - ir::StrAttribute::get(ir::IrContext::Instance(), activation_grad); + pir::Attribute attr_activation_grad = + pir::StrAttribute::get(pir::IrContext::Instance(), activation_grad); 
argument.AddAttribute("activation_grad", attr_activation_grad); VLOG(4) << "Builder construction outputs"; @@ -809,9 +810,9 @@ void FusedGemmEpilogueGradOp::Build(ir::Builder &builder, &meta_y_grad, &meta_bias_grad); - std::vector argument_outputs; - ir::Type x_grad_dense_tensor_type = paddle::dialect::DenseTensorType::get( - ir::IrContext::Instance(), + std::vector argument_outputs; + pir::Type x_grad_dense_tensor_type = paddle::dialect::DenseTensorType::get( + pir::IrContext::Instance(), paddle::dialect::TransToIrDataType(dense_x_grad.dtype()), dense_x_grad.dims(), dense_x_grad.layout(), @@ -819,8 +820,8 @@ void FusedGemmEpilogueGradOp::Build(ir::Builder &builder, dense_x_grad.offset()); argument_outputs.push_back(x_grad_dense_tensor_type); - ir::Type y_grad_dense_tensor_type = paddle::dialect::DenseTensorType::get( - ir::IrContext::Instance(), + pir::Type y_grad_dense_tensor_type = paddle::dialect::DenseTensorType::get( + pir::IrContext::Instance(), paddle::dialect::TransToIrDataType(dense_y_grad.dtype()), dense_y_grad.dims(), dense_y_grad.layout(), @@ -828,8 +829,8 @@ void FusedGemmEpilogueGradOp::Build(ir::Builder &builder, dense_y_grad.offset()); argument_outputs.push_back(y_grad_dense_tensor_type); - ir::Type bias_grad_dense_tensor_type = paddle::dialect::DenseTensorType::get( - ir::IrContext::Instance(), + pir::Type bias_grad_dense_tensor_type = paddle::dialect::DenseTensorType::get( + pir::IrContext::Instance(), paddle::dialect::TransToIrDataType(dense_bias_grad.dtype()), dense_bias_grad.dims(), dense_bias_grad.layout(), @@ -851,7 +852,7 @@ const char *SplitGradOp::attributes_name[1] = {"axis"}; OpInfoTuple SplitGradOp::GetOpInfo() { std::vector inputs = { OpInputInfo("out_grad", - "ir::VectorType", + "pir::VectorType", false, false, false, @@ -879,23 +880,23 @@ OpInfoTuple SplitGradOp::GetOpInfo() { inputs, attributes, outputs, run_time_info, "split_grad"); } -void SplitGradOp::Build(ir::Builder &builder, - ir::OperationArgument &argument, - ir::OpResult out_grad_, +void SplitGradOp::Build(pir::Builder &builder, + pir::OperationArgument &argument, + pir::OpResult out_grad_, float axis) { // Generate scalar mutable attribute: axis paddle::dialect::FullOp full_axis_op = builder.Build( std::vector{1}, axis, phi::DataType::FLOAT32, phi::CPUPlace()); - ir::OpResult axis_ = full_axis_op->result(0); + pir::OpResult axis_ = full_axis_op->result(0); VLOG(4) << "Builder construction inputs"; - std::vector argument_inputs = {out_grad_, axis_}; + std::vector argument_inputs = {out_grad_, axis_}; argument.AddOperands(argument_inputs.begin(), argument_inputs.end()); VLOG(4) << "Builder construction attributes"; VLOG(4) << "Builder construction outputs"; - ir::VectorType out_grad = out_grad_.type().dyn_cast(); + pir::VectorType out_grad = out_grad_.type().dyn_cast(); std::vector vec_dense_out_grad; for (size_t i = 0; i < static_cast(out_grad.size()); i++) { vec_dense_out_grad.push_back(phi::DenseTensor( @@ -930,9 +931,9 @@ void SplitGradOp::Build(ir::Builder &builder, phi::ConcatInferMeta(meta_out_grad, axis, &meta_x_grad); - std::vector argument_outputs; - ir::Type x_grad_dense_tensor_type = paddle::dialect::DenseTensorType::get( - ir::IrContext::Instance(), + std::vector argument_outputs; + pir::Type x_grad_dense_tensor_type = paddle::dialect::DenseTensorType::get( + pir::IrContext::Instance(), paddle::dialect::TransToIrDataType(dense_x_grad.dtype()), dense_x_grad.dims(), dense_x_grad.layout(), @@ -942,18 +943,18 @@ void SplitGradOp::Build(ir::Builder &builder, 
argument.AddOutputs(argument_outputs.begin(), argument_outputs.end()); } -void SplitGradOp::Build(ir::Builder &builder, - ir::OperationArgument &argument, - ir::OpResult out_grad_, - ir::OpResult axis_) { +void SplitGradOp::Build(pir::Builder &builder, + pir::OperationArgument &argument, + pir::OpResult out_grad_, + pir::OpResult axis_) { VLOG(4) << "Builder construction inputs"; - std::vector argument_inputs = {out_grad_, axis_}; + std::vector argument_inputs = {out_grad_, axis_}; argument.AddOperands(argument_inputs.begin(), argument_inputs.end()); VLOG(4) << "Builder construction attributes"; VLOG(4) << "Builder construction outputs"; - ir::VectorType out_grad = out_grad_.type().dyn_cast(); + pir::VectorType out_grad = out_grad_.type().dyn_cast(); int axis = axis_.owner() ->dyn_cast() .attributes() @@ -995,9 +996,9 @@ void SplitGradOp::Build(ir::Builder &builder, phi::ConcatInferMeta(meta_out_grad, axis, &meta_x_grad); - std::vector argument_outputs; - ir::Type x_grad_dense_tensor_type = paddle::dialect::DenseTensorType::get( - ir::IrContext::Instance(), + std::vector argument_outputs; + pir::Type x_grad_dense_tensor_type = paddle::dialect::DenseTensorType::get( + pir::IrContext::Instance(), TransToIrDataType(dense_x_grad.dtype()), dense_x_grad.dims(), dense_x_grad.layout(), @@ -1018,7 +1019,7 @@ void SplitGradOp::Verify() { phi::errors::PreconditionNotMet( "The size %d of inputs must be equal to 2.", input_size)); if (auto vec_type = - (*this)->operand_source(0).type().dyn_cast()) { + (*this)->operand_source(0).type().dyn_cast()) { for (size_t i = 0; i < vec_type.size(); ++i) { PADDLE_ENFORCE(vec_type[i].isa(), phi::errors::PreconditionNotMet( @@ -1064,29 +1065,29 @@ void SplitGradOp::InferMeta(phi::InferMetaContext *infer_meta) { fn(infer_meta); } -void IfOp::Build(ir::Builder &builder, // NOLINT - ir::OperationArgument &argument, // NOLINT - ir::OpResult cond, - std::vector &&output_types) { +void IfOp::Build(pir::Builder &builder, // NOLINT + pir::OperationArgument &argument, // NOLINT + pir::OpResult cond, + std::vector &&output_types) { argument.num_regions = 2; argument.AddOperand(cond); argument.output_types.swap(output_types); } -ir::Block *IfOp::true_block() { - ir::Region &true_region = (*this)->region(0); +pir::Block *IfOp::true_block() { + pir::Region &true_region = (*this)->region(0); if (true_region.empty()) true_region.emplace_back(); return true_region.front(); } -ir::Block *IfOp::false_block() { - ir::Region &false_region = (*this)->region(1); +pir::Block *IfOp::false_block() { + pir::Region &false_region = (*this)->region(1); if (false_region.empty()) false_region.emplace_back(); return false_region.front(); } -void IfOp::Print(ir::IrPrinter &printer) { +void IfOp::Print(pir::IrPrinter &printer) { auto &os = printer.os; auto op = operation(); printer.PrintOpResult(op); - os << " = pd.if"; + os << " = pd_op.if"; printer.PrintOpOperands(op); os << " -> "; printer.PrintOpReturnType(op); diff --git a/paddle/fluid/pir/dialect/operator/ir/manual_op.h b/paddle/fluid/pir/dialect/operator/ir/manual_op.h new file mode 100644 index 0000000000000..8cd8b9021858f --- /dev/null +++ b/paddle/fluid/pir/dialect/operator/ir/manual_op.h @@ -0,0 +1,204 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifdef GET_MANUAL_OP_LIST
+#undef GET_MANUAL_OP_LIST
+paddle::dialect::AddNOp, paddle::dialect::SplitGradOp, paddle::dialect::IfOp
+
+#else
+
+#pragma once
+#include <vector>
+
+#include "paddle/fluid/framework/infershape_utils.h"
+#include "paddle/fluid/pir/dialect/operator/interface/infermeta.h"
+#include "paddle/fluid/pir/dialect/operator/interface/op_yaml_info.h"
+#include "paddle/fluid/pir/dialect/operator/trait/inplace.h"
+#include "paddle/fluid/pir/dialect/operator/utils/op_yaml_info_util.h"
+#include "paddle/fluid/pir/dialect/operator/utils/utils.h"
+#include "paddle/phi/core/infermeta_utils.h"
+#include "paddle/pir/core/builder.h"
+#include "paddle/pir/core/ir_printer.h"
+#include "paddle/pir/core/op_base.h"
+#include "paddle/pir/core/operation_utils.h"
+
+namespace paddle {
+namespace dialect {
+
+class AddNOp : public pir::Op<AddNOp,
+                              paddle::dialect::OpYamlInfoInterface,
+                              paddle::dialect::InferMetaInterface> {
+ public:
+  using Op::Op;
+  static const char *name() { return "pd_op.add_n"; }
+  static constexpr const char **attributes_name = nullptr;
+  static constexpr uint32_t attributes_num = 0;
+  static OpInfoTuple GetOpInfo();
+  static void Build(pir::Builder &builder,             // NOLINT
+                    pir::OperationArgument &argument,  // NOLINT
+                    pir::OpResult inputs);
+
+  void Verify();
+  pir::Value inputs() { return operand_source(0); }
+  pir::OpResult out() { return result(0); }
+  static void InferMeta(phi::InferMetaContext *infer_meta);
+};
+
+class AddN_Op : public pir::Op<AddN_Op,
+                               paddle::dialect::OpYamlInfoInterface,
+                               paddle::dialect::InferMetaInterface> {
+ public:
+  using Op::Op;
+  static const char *name() { return "pd_op.add_n_"; }
+  static constexpr const char **attributes_name = nullptr;
+  static constexpr uint32_t attributes_num = 0;
+  static OpInfoTuple GetOpInfo();
+  static void Build(pir::Builder &builder,             // NOLINT
+                    pir::OperationArgument &argument,  // NOLINT
+                    pir::OpResult inputs_);
+
+  void Verify();
+  pir::Value inputs() { return operand_source(0); }
+  pir::OpResult out() { return result(0); }
+
+  static void InferMeta(phi::InferMetaContext *infer_meta);
+};
+
+class AddNWithKernelOp : public pir::Op<AddNWithKernelOp,
+                                        paddle::dialect::OpYamlInfoInterface,
+                                        paddle::dialect::InferMetaInterface> {
+ public:
+  using Op::Op;
+  static const char *name() { return "pd_op.add_n_with_kernel"; }
+  static constexpr const char **attributes_name = nullptr;
+  static constexpr uint32_t attributes_num = 0;
+  static OpInfoTuple GetOpInfo();
+  static void Build(pir::Builder &builder,             // NOLINT
+                    pir::OperationArgument &argument,  // NOLINT
+                    pir::OpResult inputs_);
+
+  void Verify();
+  pir::Value inputs() { return operand_source(0); }
+  pir::OpResult out() { return result(0); }
+
+  static void InferMeta(phi::InferMetaContext *infer_meta);
+};
+
+class FusedGemmEpilogueOp
+    : public pir::Op<FusedGemmEpilogueOp,
+                     paddle::dialect::OpYamlInfoInterface,
+                     paddle::dialect::InferMetaInterface> {
+ public:
+  using Op::Op;
+  static const char *name() { return "pd_op.fused_gemm_epilogue"; }
+  static const char *attributes_name[3];
+  static constexpr uint32_t attributes_num = 3;
+  static OpInfoTuple GetOpInfo();
+
+  static void Build(pir::Builder &builder,             // NOLINT
+                    pir::OperationArgument &argument,  // NOLINT
+                    pir::OpResult x_,
+                    pir::OpResult y_,
+                    pir::OpResult bias_,
+                    pir::AttributeMap attributes);
+  void Verify();
+  pir::Value x() { return operand_source(0); }
+  pir::Value y() { return operand_source(1); }
+  pir::Value bias() { return operand_source(2); }
+  pir::OpResult out() { return result(0); }
+  pir::OpResult reserve_space() { return result(1); }
+
+  static void InferMeta(phi::InferMetaContext *infer_meta);
+};
+
+class FusedGemmEpilogueGradOp
+    : public pir::Op<FusedGemmEpilogueGradOp,
+                     paddle::dialect::OpYamlInfoInterface,
+                     paddle::dialect::InferMetaInterface> {
+ public:
+  using Op::Op;
+  static const char *name() { return "pd_op.fused_gemm_epilogue_grad"; }
+  static const char *attributes_name[3];
+  static constexpr uint32_t attributes_num = 3;
+  static OpInfoTuple GetOpInfo();
+
+  static void Build(pir::Builder &builder,             // NOLINT
+                    pir::OperationArgument &argument,  // NOLINT
+                    pir::OpResult x_,
+                    pir::OpResult y_,
+                    pir::OpResult reserve_space_,
+                    pir::OpResult out_grad_,
+                    pir::AttributeMap attributes);
+  void Verify();
+  pir::Value x() { return operand_source(0); }
+  pir::Value y() { return operand_source(1); }
+  pir::Value reserve_space() { return operand_source(2); }
+  pir::Value out_grad() { return operand_source(3); }
+  pir::OpResult x_grad() { return result(0); }
+  pir::OpResult y_grad() { return result(1); }
+  pir::OpResult bias_grad() { return result(2); }
+
+  static void InferMeta(phi::InferMetaContext *infer_meta);
+};
+
+class SplitGradOp : public pir::Op<SplitGradOp,
+                                   paddle::dialect::OpYamlInfoInterface,
+                                   paddle::dialect::InferMetaInterface> {
+ public:
+  using Op::Op;
+  static const char *name() { return "pd_op.split_grad"; }
+  static const char *attributes_name[1];
+  static constexpr uint32_t attributes_num = 1;
+  static OpInfoTuple GetOpInfo();
+  static void Build(pir::Builder &builder,             // NOLINT
+                    pir::OperationArgument &argument,  // NOLINT
+                    pir::OpResult x_,
+                    float axis = 0);
+  static void Build(pir::Builder &builder,             // NOLINT
+                    pir::OperationArgument &argument,  // NOLINT
+                    pir::OpResult out_grad_,
+                    pir::OpResult axis_);
+
+  void Verify();
+  pir::Value out_grad() { return operand_source(0); }
+  pir::Value axis() { return operand_source(1); }
+  pir::OpResult x_grad() { return result(0); }
+  static void InferMeta(phi::InferMetaContext *infer_meta);
+};
+
+class IfOp : public pir::Op<IfOp> {
+ public:
+  using Op::Op;
+  static const char *name() { return "pd_op.if"; }
+  static constexpr const char **attributes_name = nullptr;
+  static constexpr uint32_t attributes_num = 0;
+  static void Build(pir::Builder &builder,             // NOLINT
+                    pir::OperationArgument &argument,  // NOLINT
+                    pir::OpResult cond,
+                    std::vector<pir::Type> &&output_types);
+  pir::Value cond() { return operand_source(0); }
+  pir::Block *true_block();
+  pir::Block *false_block();
+  void Print(pir::IrPrinter &printer);  // NOLINT
+  void Verify();
+};
+
+}  // namespace dialect
+}  // namespace paddle
+
+IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::AddNOp)
+IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::SplitGradOp)
+IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::AddN_Op)
+IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::AddNWithKernelOp)
+IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::FusedGemmEpilogueOp)
+IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::FusedGemmEpilogueGradOp)
+IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::IfOp)
+#endif
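An aside on the header above: these are the few ops written by hand instead of being generated from YAML, and they are created through the same builder path as generated ops. A minimal sketch of that usage, assuming the usual `pir::Builder::Build<OpT>(...)` forwarding to `OpT::Build` and a `SetInsertionPointToStart` helper on the builder (neither call appears in this diff, so treat both as assumptions):

```cpp
#include "paddle/fluid/pir/dialect/operator/ir/manual_op.h"
#include "paddle/pir/core/builder.h"

// Builds a pd_op.if with the given condition and result types, then moves
// the builder into the true branch. true_block() lazily creates the first
// block of region 0, so it is safe to call right after construction.
paddle::dialect::IfOp BuildIf(pir::Builder &builder,  // NOLINT
                              pir::OpResult cond,
                              std::vector<pir::Type> &&out_types) {
  auto if_op =
      builder.Build<paddle::dialect::IfOp>(cond, std::move(out_types));
  builder.SetInsertionPointToStart(if_op.true_block());
  // ... emit the ops of the true branch here ...
  return if_op;
}
```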
-#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_attribute.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_op.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_attribute.h" +#include "paddle/fluid/pir/dialect/operator/ir/pd_op.h" #include "paddle/fluid/primitive/rule/vjp/vjp.h" #include "paddle/fluid/primitive/type/lazy_tensor.h" -#include "paddle/ir/core/builtin_op.h" -#include "paddle/ir/core/op_base.h" #include "paddle/phi/common/int_array.h" +#include "paddle/pir/core/builtin_op.h" +#include "paddle/pir/core/op_base.h" // TODO(wanghao107) // this file will be generated in pd_op.cc @@ -27,9 +27,9 @@ namespace paddle { namespace dialect { using IntArray = paddle::experimental::IntArray; -std::vector> SumOp::Vjp( - ir::Operation* op, - const std::vector>& out_grads, +std::vector> SumOp::Vjp( + pir::Operation* op, + const std::vector>& out_grads, const std::vector>& stop_gradients) { SumOp op_obj = op->dyn_cast(); Tensor x(std::make_shared(op_obj.x())); @@ -37,16 +37,16 @@ std::vector> SumOp::Vjp( Tensor axis(std::make_shared(op_obj.axis())); - bool keepdim = op->attribute("keepdim").dyn_cast().data(); + bool keepdim = op->attribute("keepdim").dyn_cast().data(); bool reduce_all = false; std::vector> tensor_res = primitive::sum_vjp( x, out_grad, axis, keepdim, reduce_all, stop_gradients); - std::vector> res(2, std::vector(1)); + std::vector> res(2, std::vector(1)); if (tensor_res[0][0].defined()) { res[0][0] = std::static_pointer_cast(tensor_res[0][0].impl()) ->value() - .dyn_cast(); + .dyn_cast(); } return res; } diff --git a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_meta_tensor.cc b/paddle/fluid/pir/dialect/operator/ir/meta_tensor.cc similarity index 95% rename from paddle/fluid/ir/dialect/paddle_dialect/ir/pd_meta_tensor.cc rename to paddle/fluid/pir/dialect/operator/ir/meta_tensor.cc index 2da7b098a6556..1985413ecb95d 100644 --- a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_meta_tensor.cc +++ b/paddle/fluid/pir/dialect/operator/ir/meta_tensor.cc @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_meta_tensor.h" +#include "paddle/fluid/pir/dialect/operator/ir/meta_tensor.h" -#include "paddle/ir/core/enforce.h" +#include "paddle/pir/core/enforce.h" namespace paddle { namespace dialect { diff --git a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_meta_tensor.h b/paddle/fluid/pir/dialect/operator/ir/meta_tensor.h similarity index 100% rename from paddle/fluid/ir/dialect/paddle_dialect/ir/pd_meta_tensor.h rename to paddle/fluid/pir/dialect/operator/ir/meta_tensor.h diff --git a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_attribute.cc b/paddle/fluid/pir/dialect/operator/ir/op_attribute.cc similarity index 85% rename from paddle/fluid/ir/dialect/paddle_dialect/ir/pd_attribute.cc rename to paddle/fluid/pir/dialect/operator/ir/op_attribute.cc index 72cc98447e10e..3b69d68eb65f3 100644 --- a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_attribute.cc +++ b/paddle/fluid/pir/dialect/operator/ir/op_attribute.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_attribute.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_attribute.h" namespace paddle { namespace dialect { @@ -29,18 +29,18 @@ phi::DataLayout DataLayoutAttribute::data() const { } phi::Scalar ScalarAttribute::data() { - if (isa()) { - return phi::Scalar(dyn_cast().data()); - } else if (isa()) { - return phi::Scalar(dyn_cast().data()); - } else if (isa()) { - return phi::Scalar(dyn_cast().data()); - } else if (isa()) { - return phi::Scalar(dyn_cast().data()); - } else if (isa()) { - return phi::Scalar(dyn_cast().data()); - } else if (isa()) { - return phi::Scalar(dyn_cast().AsString()); + if (isa()) { + return phi::Scalar(dyn_cast().data()); + } else if (isa()) { + return phi::Scalar(dyn_cast().data()); + } else if (isa()) { + return phi::Scalar(dyn_cast().data()); + } else if (isa()) { + return phi::Scalar(dyn_cast().data()); + } else if (isa()) { + return phi::Scalar(dyn_cast().data()); + } else if (isa()) { + return phi::Scalar(dyn_cast().AsString()); } else { PADDLE_THROW(phi::errors::Unimplemented( "Unsupported ir attribute when casting it into " @@ -48,7 +48,7 @@ phi::Scalar ScalarAttribute::data() { } } -IntArrayAttribute IntArrayAttribute::Parse(ir::IrParser &parser) { // NOLINT +IntArrayAttribute IntArrayAttribute::Parse(pir::IrParser &parser) { // NOLINT Token buket_token = parser.ConsumeToken(); std::vector vec{}; while (parser.PeekToken().val_ != "]") { @@ -66,7 +66,7 @@ IntArrayAttribute IntArrayAttribute::Parse(ir::IrParser &parser) { // NOLINT // |int32|uint64|int64|float32|complex64 // |complex128|Undefined|psting|flaot16 // |bfloat16|num_data_types|all_dtype -DataTypeAttribute DataTypeAttribute::Parse(ir::IrParser &parser) { // NOLINT +DataTypeAttribute DataTypeAttribute::Parse(pir::IrParser &parser) { // NOLINT std::unordered_map StringToDataType{ {"bool", phi::DataType::BOOL}, {"uint8", phi::DataType::UINT8}, @@ -96,7 +96,7 @@ DataTypeAttribute DataTypeAttribute::Parse(ir::IrParser &parser) { // NOLINT // Parse a PlaceAttribute // PlaceAttribute := Place(cpu)|Place(gpu:0)|Place(gpu_pinned) // |Place(xpu:0)|Place(ipu:0)|Place(:0)|undefined -PlaceAttribute PlaceAttribute::Parse(ir::IrParser &parser) { // NOLINT +PlaceAttribute PlaceAttribute::Parse(pir::IrParser &parser) { // NOLINT std::unordered_map StringToPlace{ {"cpu", phi::CPUPlace{}}, {"gpu", phi::GPUPlace{}}, @@ -126,7 +126,7 @@ PlaceAttribute PlaceAttribute::Parse(ir::IrParser &parser) { // NOLINT // |SPARSE_COO|SPARSE_CSR|NDHWC // |NCDHW|PSTRING_UNION|STRIDED DataLayoutAttribute DataLayoutAttribute::Parse( - ir::IrParser &parser) { // NOLINT + pir::IrParser &parser) { // NOLINT std::unordered_map StringToDataLayout{ {"NHWC", phi::DataLayout::kNHWC}, {"NCHW", phi::DataLayout::kNCHW}, diff --git a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_attribute.h b/paddle/fluid/pir/dialect/operator/ir/op_attribute.h similarity index 65% rename from paddle/fluid/ir/dialect/paddle_dialect/ir/pd_attribute.h rename to paddle/fluid/pir/dialect/operator/ir/op_attribute.h index e1d3daab7191d..78f9ca10d3254 100644 --- a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_attribute.h +++ b/paddle/fluid/pir/dialect/operator/ir/op_attribute.h @@ -14,17 +14,17 @@ #pragma once -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_attribute_storage.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/utils/utils.h" -#include "paddle/ir/core/attribute.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/ir_parser.h" +#include 
"paddle/fluid/pir/dialect/operator/ir/attribute_storage.h" +#include "paddle/fluid/pir/dialect/operator/utils/utils.h" #include "paddle/phi/common/scalar.h" #include "paddle/phi/core/enforce.h" +#include "paddle/pir/core/attribute.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/ir_parser.h" namespace paddle { namespace dialect { -class IntArrayAttribute : public ir::Attribute { +class IntArrayAttribute : public pir::Attribute { public: using Attribute::Attribute; @@ -35,32 +35,32 @@ class IntArrayAttribute : public ir::Attribute { return storage() < right.storage(); } - static IntArrayAttribute Parse(ir::IrParser &parser); // NOLINT + static IntArrayAttribute Parse(pir::IrParser &parser); // NOLINT const phi::IntArray &data() const; }; -class ScalarAttribute : public ir::Attribute { +class ScalarAttribute : public pir::Attribute { public: using Attribute::Attribute; - static bool classof(ir::Attribute val) { - return (val.type_id() == ir::BoolAttribute::type_id()) || - (val.type_id() == ir::FloatAttribute::type_id()) || - (val.type_id() == ir::DoubleAttribute::type_id()) || - (val.type_id() == ir::Int32Attribute::type_id()) || - (val.type_id() == ir::Int64Attribute::type_id()) || - (val.type_id() == ir::StrAttribute::type_id()); + static bool classof(pir::Attribute val) { + return (val.type_id() == pir::BoolAttribute::type_id()) || + (val.type_id() == pir::FloatAttribute::type_id()) || + (val.type_id() == pir::DoubleAttribute::type_id()) || + (val.type_id() == pir::Int32Attribute::type_id()) || + (val.type_id() == pir::Int64Attribute::type_id()) || + (val.type_id() == pir::StrAttribute::type_id()); } - static ir::Attribute get(ir::IrContext *ctx, phi::Scalar scalar) { + static pir::Attribute get(pir::IrContext *ctx, phi::Scalar scalar) { return TransToIrAttribute(scalar, ctx); } phi::Scalar data(); }; -class DataTypeAttribute : public ir::Attribute { +class DataTypeAttribute : public pir::Attribute { public: using Attribute::Attribute; @@ -71,12 +71,12 @@ class DataTypeAttribute : public ir::Attribute { return storage() < right.storage(); } - static DataTypeAttribute Parse(ir::IrParser &parser); // NOLINT + static DataTypeAttribute Parse(pir::IrParser &parser); // NOLINT phi::DataType data() const; }; -class PlaceAttribute : public ir::Attribute { +class PlaceAttribute : public pir::Attribute { public: using Attribute::Attribute; @@ -86,12 +86,12 @@ class PlaceAttribute : public ir::Attribute { return storage() < right.storage(); } - static PlaceAttribute Parse(ir::IrParser &parser); // NOLINT + static PlaceAttribute Parse(pir::IrParser &parser); // NOLINT phi::Place data() const; }; -class DataLayoutAttribute : public ir::Attribute { +class DataLayoutAttribute : public pir::Attribute { public: using Attribute::Attribute; @@ -102,7 +102,7 @@ class DataLayoutAttribute : public ir::Attribute { return storage() < right.storage(); } - static DataLayoutAttribute Parse(ir::IrParser &parser); // NOLINT + static DataLayoutAttribute Parse(pir::IrParser &parser); // NOLINT phi::DataLayout data() const; }; diff --git a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.cc b/paddle/fluid/pir/dialect/operator/ir/op_dialect.cc similarity index 75% rename from paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.cc rename to paddle/fluid/pir/dialect/operator/ir/op_dialect.cc index 82169dafc5969..2c85ea18d3da3 100644 --- a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.cc +++ b/paddle/fluid/pir/dialect/operator/ir/op_dialect.cc @@ -12,26 +12,26 @@ // See the License 
diff --git a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.cc b/paddle/fluid/pir/dialect/operator/ir/op_dialect.cc
similarity index 75%
rename from paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.cc
rename to paddle/fluid/pir/dialect/operator/ir/op_dialect.cc
index 82169dafc5969..2c85ea18d3da3 100644
--- a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.cc
+++ b/paddle/fluid/pir/dialect/operator/ir/op_dialect.cc
@@ -12,26 +12,26 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.h"
-#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_attribute.h"
+#include "paddle/fluid/pir/dialect/operator/ir/op_dialect.h"
+#include "paddle/fluid/pir/dialect/operator/ir/op_attribute.h"
 // NOTE(zhangbo9674): File pd_op.h is generated by op_gen.py, see details in
-// paddle/fluid/ir/dialect/CMakeLists.txt.
-#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_op.h"
-#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h"
-#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type_storage.h"
-#include "paddle/fluid/ir/dialect/paddle_dialect/transforms/param_to_variable.h"
-#include "paddle/ir/core/ir_printer.h"
-#include "paddle/ir/core/utils.h"
+// paddle/fluid/pir/dialect/CMakeLists.txt.
+#include "paddle/fluid/pir/dialect/operator/ir/op_type.h"
+#include "paddle/fluid/pir/dialect/operator/ir/pd_op.h"
+#include "paddle/fluid/pir/dialect/operator/ir/type_storage.h"
+#include "paddle/fluid/pir/dialect/operator/transforms/param_to_variable.h"
+#include "paddle/pir/core/ir_printer.h"
+#include "paddle/pir/core/utils.h"

 namespace paddle {
 namespace dialect {

-PaddleDialect::PaddleDialect(ir::IrContext *context)
-    : ir::Dialect(name(), context, ir::TypeId::get<PaddleDialect>()) {
+OperatorDialect::OperatorDialect(pir::IrContext *context)
+    : pir::Dialect(name(), context, pir::TypeId::get<OperatorDialect>()) {
   initialize();
 }

-void PaddleDialect::initialize() {
+void OperatorDialect::initialize() {
   RegisterTypes<paddle::dialect::DenseTensorType>();
   RegisterTypes<paddle::dialect::SelectedRowsType>();
@@ -42,12 +42,12 @@ void PaddleDialect::initialize() {
   // NOTE(zhangbo9674): GET_OP_LIST is defined in pd_op.h which is
   // generated by op_gen.py, see details in
-  // paddle/fluid/ir/dialect/CMakeLists.txt.
-  // NOTE(Ruting)GET_MANUAL_OP_LIST is define in pd_manual_op.h"
+  // paddle/fluid/pir/dialect/CMakeLists.txt.
+  // NOTE(Ruting)GET_MANUAL_OP_LIST is define in manual_op.h"
   // use RegisterOps when list has more than two ops.
RegisterOps< #define GET_OP_LIST -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_op.h" // NOLINT +#include "paddle/fluid/pir/dialect/operator/ir/pd_op.h" // NOLINT >(); RegisterOps(); } -void PaddleDialect::PrintType(ir::Type type, std::ostream &os) const { +void OperatorDialect::PrintType(pir::Type type, std::ostream &os) const { os << type.dialect().name(); os << '.'; if (auto tensor_type = type.dyn_cast()) { @@ -82,7 +82,8 @@ void PaddleDialect::PrintType(ir::Type type, std::ostream &os) const { } } -void PaddleDialect::PrintAttribute(ir::Attribute attr, std::ostream &os) const { +void OperatorDialect::PrintAttribute(pir::Attribute attr, + std::ostream &os) const { os << "(" << attr.dialect().name(); os << '.'; if (auto int_array_attr = attr.dyn_cast()) { @@ -90,7 +91,7 @@ void PaddleDialect::PrintAttribute(ir::Attribute attr, std::ostream &os) const { os << "IntArray)" << "["; const auto &inner_data = data.GetData(); - ir::PrintInterleave( + pir::PrintInterleave( inner_data.begin(), inner_data.end(), [&os](int64_t i) { os << i; }, @@ -107,8 +108,8 @@ void PaddleDialect::PrintAttribute(ir::Attribute attr, std::ostream &os) const { } } -ir::Type PaddleDialect::ParseType(ir::IrParser &parser) { // NOLINT - parser.ConsumeAToken("pd.tensor"); +pir::Type OperatorDialect::ParseType(pir::IrParser &parser) { // NOLINT + parser.ConsumeAToken("pd_op.tensor"); parser.ConsumeAToken("<"); std::vector dim{}; Token dim_token = parser.PeekToken(); @@ -126,7 +127,7 @@ ir::Type PaddleDialect::ParseType(ir::IrParser &parser) { // NOLINT } } phi::DDim ddim = phi::make_ddim(dim); - ir::Type dtype = parser.ParseType(); + pir::Type dtype = parser.ParseType(); std::vector> lod; std::vector lodv; lodv.push_back(0); @@ -136,7 +137,8 @@ ir::Type PaddleDialect::ParseType(ir::IrParser &parser) { // NOLINT parser.ctx, dtype, ddim, phi::DataLayout::UNDEFINED, lod, 0); } -ir::Attribute PaddleDialect::ParseAttribute(ir::IrParser &parser) { // NOLINT +pir::Attribute OperatorDialect::ParseAttribute( + pir::IrParser &parser) { // NOLINT std::string type_name = parser.ConsumeToken().val_; std::string attribute_name = type_name.substr(type_name.find('.') + 1, std::string::npos); @@ -155,8 +157,8 @@ ir::Attribute PaddleDialect::ParseAttribute(ir::IrParser &parser) { // NOLINT } } -void PaddleDialect::PrintOperation(ir::Operation *op, - ir::IrPrinter &printer) const { +void OperatorDialect::PrintOperation(pir::Operation *op, + pir::IrPrinter &printer) const { if (auto if_op = op->dyn_cast()) { if_op.Print(printer); } else { @@ -167,4 +169,4 @@ void PaddleDialect::PrintOperation(ir::Operation *op, } // namespace dialect } // namespace paddle -IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::PaddleDialect) +IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::OperatorDialect) diff --git a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.h b/paddle/fluid/pir/dialect/operator/ir/op_dialect.h similarity index 54% rename from paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.h rename to paddle/fluid/pir/dialect/operator/ir/op_dialect.h index 285a796982f85..bc85b789a058b 100644 --- a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.h +++ b/paddle/fluid/pir/dialect/operator/ir/op_dialect.h @@ -14,25 +14,25 @@ #pragma once -#include "paddle/ir/core/dialect.h" +#include "paddle/pir/core/dialect.h" namespace paddle { namespace dialect { -class PaddleDialect : public ir::Dialect { +class OperatorDialect : public pir::Dialect { public: - explicit PaddleDialect(ir::IrContext* context); + explicit OperatorDialect(pir::IrContext* 
context); - static const char* name() { return "pd"; } + static const char* name() { return "pd_op"; } - ir::Type ParseType(ir::IrParser& parser) override; // NOLINT - ir::Attribute ParseAttribute(ir::IrParser& parser) override; // NOLINT + pir::Type ParseType(pir::IrParser& parser) override; // NOLINT + pir::Attribute ParseAttribute(pir::IrParser& parser) override; // NOLINT - void PrintType(ir::Type type, std::ostream& os) const override; - void PrintAttribute(ir::Attribute type, std::ostream& os) const override; + void PrintType(pir::Type type, std::ostream& os) const override; + void PrintAttribute(pir::Attribute type, std::ostream& os) const override; - void PrintOperation(ir::Operation* op, - ir::IrPrinter& printer) const override; // NOLINT + void PrintOperation(pir::Operation* op, + pir::IrPrinter& printer) const override; // NOLINT private: void initialize(); @@ -41,4 +41,4 @@ class PaddleDialect : public ir::Dialect { } // namespace dialect } // namespace paddle -IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::PaddleDialect) +IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::OperatorDialect) diff --git a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.cc b/paddle/fluid/pir/dialect/operator/ir/op_type.cc similarity index 88% rename from paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.cc rename to paddle/fluid/pir/dialect/operator/ir/op_type.cc index 31ba23b0e1bbc..c9fc8bcd65b10 100644 --- a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.cc +++ b/paddle/fluid/pir/dialect/operator/ir/op_type.cc @@ -12,11 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_type.h" namespace paddle { namespace dialect { -const ir::Type& SelectedRowsType::dtype() const { return storage()->dtype_; } +const pir::Type& SelectedRowsType::dtype() const { return storage()->dtype_; } const phi::DDim& SelectedRowsType::dims() const { return storage()->dims_; } diff --git a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h b/paddle/fluid/pir/dialect/operator/ir/op_type.h similarity index 79% rename from paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h rename to paddle/fluid/pir/dialect/operator/ir/op_type.h index 9525e1a88b346..a09a84c31d84a 100644 --- a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h +++ b/paddle/fluid/pir/dialect/operator/ir/op_type.h @@ -14,20 +14,20 @@ #pragma once -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type_storage.h" -#include "paddle/ir/core/builtin_type.h" -#include "paddle/ir/core/type.h" +#include "paddle/fluid/pir/dialect/operator/ir/type_storage.h" +#include "paddle/pir/core/builtin_type.h" +#include "paddle/pir/core/type.h" namespace paddle { namespace dialect { -using DenseTensorType = ir::DenseTensorType; -class SelectedRowsType : public ir::Type { +using DenseTensorType = pir::DenseTensorType; +class SelectedRowsType : public pir::Type { public: using Type::Type; DECLARE_TYPE_UTILITY_FUNCTOR(SelectedRowsType, SelectedRowsTypeStorage); - const ir::Type &dtype() const; + const pir::Type &dtype() const; const phi::DDim &dims() const; diff --git a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_ops.yaml b/paddle/fluid/pir/dialect/operator/ir/ops.yaml similarity index 100% rename from paddle/fluid/ir/dialect/paddle_dialect/ir/pd_ops.yaml rename to paddle/fluid/pir/dialect/operator/ir/ops.yaml diff --git a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_ops_backward.yaml 
similarity index 100%
rename from paddle/fluid/ir/dialect/paddle_dialect/ir/pd_ops_backward.yaml
rename to paddle/fluid/pir/dialect/operator/ir/ops_backward.yaml
diff --git a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type_storage.h b/paddle/fluid/pir/dialect/operator/ir/type_storage.h
similarity index 78%
rename from paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type_storage.h
rename to paddle/fluid/pir/dialect/operator/ir/type_storage.h
index 1a74b6d6c1059..e001f7b78716b 100644
--- a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type_storage.h
+++ b/paddle/fluid/pir/dialect/operator/ir/type_storage.h
@@ -16,17 +16,17 @@

 #include <type_traits>

-#include "paddle/ir/core/builtin_type_storage.h"
-#include "paddle/ir/core/type.h"
-#include "paddle/ir/core/type_base.h"
-#include "paddle/ir/core/utils.h"
 #include "paddle/phi/core/tensor_meta.h"
+#include "paddle/pir/core/builtin_type_storage.h"
+#include "paddle/pir/core/type.h"
+#include "paddle/pir/core/type_base.h"
+#include "paddle/pir/core/utils.h"

 namespace paddle {
 namespace dialect {

-using DenseTensorTypeStorage = ir::DenseTensorTypeStorage;
+using DenseTensorTypeStorage = pir::DenseTensorTypeStorage;

-struct SelectedRowsTypeStorage : public ir::TypeStorage {
+struct SelectedRowsTypeStorage : public pir::TypeStorage {
   using DataLayout = phi::DataLayout;
   using Dim = phi::DDim;
   using LoD = std::vector<std::vector<size_t>>;
@@ -34,9 +34,9 @@ struct SelectedRowsTypeStorage : public ir::TypeStorage {
   ///
   /// \brief Declare ParamKey according to parameter type.
   ///
   using ParamKey =
-      std::tuple<ir::Type, phi::DDim, phi::DataLayout, phi::LoD, size_t>;
+      std::tuple<pir::Type, phi::DDim, phi::DataLayout, phi::LoD, size_t>;

-  SelectedRowsTypeStorage(const ir::Type& dtype,
+  SelectedRowsTypeStorage(const pir::Type& dtype,
                           const phi::DDim& dims,
                           const phi::DataLayout& layout,
                           const phi::LoD& lod,
@@ -66,22 +66,22 @@ struct SelectedRowsTypeStorage : public ir::TypeStorage {
     std::size_t hash_value = 317;
     // hash dtype
     hash_value =
-        ir::hash_combine(hash_value, std::hash<ir::Type>()(std::get<0>(key)));
+        pir::hash_combine(hash_value, std::hash<pir::Type>()(std::get<0>(key)));
     // hash dims
     hash_value =
-        ir::hash_combine(hash_value, std::hash<phi::DDim>()(std::get<1>(key)));
+        pir::hash_combine(hash_value, std::hash<phi::DDim>()(std::get<1>(key)));
     // hash layout
-    hash_value = ir::hash_combine(
+    hash_value = pir::hash_combine(
         hash_value,
         std::hash<std::underlying_type<phi::DataLayout>::type>()(
             static_cast<std::underlying_type<phi::DataLayout>::type>(
                 std::get<2>(key))));
     // hash lod
     hash_value =
-        ir::hash_combine(hash_value, std::hash<phi::LoD>()(std::get<3>(key)));
+        pir::hash_combine(hash_value, std::hash<phi::LoD>()(std::get<3>(key)));
     // hash offset
     hash_value =
-        ir::hash_combine(hash_value, std::hash<size_t>()(std::get<4>(key)));
+        pir::hash_combine(hash_value, std::hash<size_t>()(std::get<4>(key)));
     return hash_value;
   }
@@ -100,7 +100,7 @@ struct SelectedRowsTypeStorage : public ir::TypeStorage {
   /// \brief DenseTensorTypeStorage include five parameters: dims, dtype,
   /// layout, lod, offset.
   ///
-  ir::Type dtype_;
+  pir::Type dtype_;
   phi::DDim dims_;
   phi::DataLayout layout_;
   phi::LoD lod_;
diff --git a/paddle/fluid/ir/dialect/paddle_dialect/trait/CMakeLists.txt b/paddle/fluid/pir/dialect/operator/trait/CMakeLists.txt
similarity index 83%
rename from paddle/fluid/ir/dialect/paddle_dialect/trait/CMakeLists.txt
rename to paddle/fluid/pir/dialect/operator/trait/CMakeLists.txt
index 53c3060d6f182..0689edb35655e 100644
--- a/paddle/fluid/ir/dialect/paddle_dialect/trait/CMakeLists.txt
+++ b/paddle/fluid/pir/dialect/operator/trait/CMakeLists.txt
@@ -3,4 +3,4 @@ file(GLOB PD_INTERFACE_SRCS "*.cc")
 cc_library(
   pd_trait
   SRCS ${PD_INTERFACE_SRCS}
-  DEPS ir_core)
+  DEPS pir_core)
diff --git a/paddle/fluid/ir/dialect/paddle_dialect/trait/inplace.h b/paddle/fluid/pir/dialect/operator/trait/inplace.h
similarity index 80%
rename from paddle/fluid/ir/dialect/paddle_dialect/trait/inplace.h
rename to paddle/fluid/pir/dialect/operator/trait/inplace.h
index 38dfaaeac000e..e50f1e3a8349d 100644
--- a/paddle/fluid/ir/dialect/paddle_dialect/trait/inplace.h
+++ b/paddle/fluid/pir/dialect/operator/trait/inplace.h
@@ -14,14 +14,14 @@

 #pragma once

-#include "paddle/ir/core/op_base.h"
+#include "paddle/pir/core/op_base.h"

 namespace paddle {
 namespace dialect {

-class InplaceTrait : public ir::OpTraitBase<InplaceTrait> {
+class InplaceTrait : public pir::OpTraitBase<InplaceTrait> {
  public:
-  explicit InplaceTrait(ir::Operation *op)
-      : ir::OpTraitBase<InplaceTrait>(op) {}
+  explicit InplaceTrait(pir::Operation *op)
+      : pir::OpTraitBase<InplaceTrait>(op) {}
 };

 }  // namespace dialect
diff --git a/paddle/fluid/ir/dialect/paddle_dialect/trait/trait.cc b/paddle/fluid/pir/dialect/operator/trait/trait.cc
similarity index 91%
rename from paddle/fluid/ir/dialect/paddle_dialect/trait/trait.cc
rename to paddle/fluid/pir/dialect/operator/trait/trait.cc
index c086b98e34bc7..d88ba207edf17 100644
--- a/paddle/fluid/ir/dialect/paddle_dialect/trait/trait.cc
+++ b/paddle/fluid/pir/dialect/operator/trait/trait.cc
@@ -12,6 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/fluid/ir/dialect/paddle_dialect/trait/inplace.h"
+#include "paddle/fluid/pir/dialect/operator/trait/inplace.h"

 IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::InplaceTrait)
diff --git a/paddle/fluid/ir/dialect/paddle_dialect/transforms/CMakeLists.txt b/paddle/fluid/pir/dialect/operator/transforms/CMakeLists.txt
similarity index 68%
rename from paddle/fluid/ir/dialect/paddle_dialect/transforms/CMakeLists.txt
rename to paddle/fluid/pir/dialect/operator/transforms/CMakeLists.txt
index 8d90edd3feb74..7116a12be50ef 100644
--- a/paddle/fluid/ir/dialect/paddle_dialect/transforms/CMakeLists.txt
+++ b/paddle/fluid/pir/dialect/operator/transforms/CMakeLists.txt
@@ -1,4 +1,4 @@
 cc_library(
   param_to_variable
   SRCS param_to_variable.cc
-  DEPS pd_dialect_core)
+  DEPS pd_op_dialect_core)
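An aside on the storage struct above: it follows the standard pir type-uniquing recipe, where `ParamKey` collects the constructor arguments and `HashValue` folds each field into a seed with `pir::hash_combine`, hashing enum-like fields through their underlying integer type. A reduced sketch of that idiom; `HashTwoFields` is illustrative and not part of the diff:

```cpp
#include <cstddef>
#include <functional>
#include <type_traits>

#include "paddle/phi/core/tensor_meta.h"
#include "paddle/pir/core/utils.h"

// Folds a precomputed dtype hash and a layout enum into one hash value,
// mirroring SelectedRowsTypeStorage::HashValue (same arbitrary seed 317).
std::size_t HashTwoFields(std::size_t dtype_hash, phi::DataLayout layout) {
  std::size_t hash_value = 317;
  hash_value = pir::hash_combine(hash_value, dtype_hash);
  hash_value = pir::hash_combine(
      hash_value,
      std::hash<std::underlying_type<phi::DataLayout>::type>()(
          static_cast<std::underlying_type<phi::DataLayout>::type>(layout)));
  return hash_value;
}
```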
-#include "paddle/fluid/ir/dialect/paddle_dialect/transforms/param_to_variable.h" +#include "paddle/fluid/pir/dialect/operator/transforms/param_to_variable.h" #include "paddle/fluid/framework/convert_utils.h" #include "paddle/fluid/framework/data_type.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/utils/utils.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_type.h" +#include "paddle/fluid/pir/dialect/operator/utils/utils.h" #include "paddle/phi/common/data_type.h" #include "paddle/phi/core/dense_tensor.h" namespace paddle { namespace dialect { std::shared_ptr -ParameterConvertInterface::ParameterToVariable(ir::Parameter *parameter) { +ParameterConvertInterface::ParameterToVariable(pir::Parameter *parameter) { if (parameter->type().isa()) { VLOG(4) << "Convert a DenseTensor Parameter to a variable."; std::shared_ptr var = @@ -56,21 +56,21 @@ ParameterConvertInterface::ParameterToVariable(ir::Parameter *parameter) { } } -std::unique_ptr ParameterConvertInterface::VariableToParameter( +std::unique_ptr ParameterConvertInterface::VariableToParameter( paddle::framework::Variable *var) { if (var->IsType()) { phi::DenseTensor *tensor = var->GetMutable(); // Get Meta - ir::IrContext *ctx = ir::IrContext::Instance(); - ir::Type data_type = TransToIrDataType(tensor->dtype(), ctx); + pir::IrContext *ctx = pir::IrContext::Instance(); + pir::Type data_type = TransToIrDataType(tensor->dtype(), ctx); void *data = tensor->data(); - ir::Type dense_tensor_type = DenseTensorType::get(ctx, - data_type, - tensor->dims(), - tensor->layout(), - tensor->lod(), - tensor->meta().offset); - return std::make_unique( + pir::Type dense_tensor_type = DenseTensorType::get(ctx, + data_type, + tensor->dims(), + tensor->layout(), + tensor->lod(), + tensor->meta().offset); + return std::make_unique( data, tensor->numel() * phi::SizeOf(tensor->dtype()), dense_tensor_type); diff --git a/paddle/fluid/ir/dialect/paddle_dialect/transforms/param_to_variable.h b/paddle/fluid/pir/dialect/operator/transforms/param_to_variable.h similarity index 76% rename from paddle/fluid/ir/dialect/paddle_dialect/transforms/param_to_variable.h rename to paddle/fluid/pir/dialect/operator/transforms/param_to_variable.h index 4194cbae53ddf..bdb7bed12c970 100644 --- a/paddle/fluid/ir/dialect/paddle_dialect/transforms/param_to_variable.h +++ b/paddle/fluid/pir/dialect/operator/transforms/param_to_variable.h @@ -14,21 +14,21 @@ #pragma once #include "paddle/fluid/framework/variable.h" -#include "paddle/ir/core/dialect_interface.h" -#include "paddle/ir/core/parameter.h" +#include "paddle/pir/core/dialect_interface.h" +#include "paddle/pir/core/parameter.h" namespace paddle { namespace dialect { class ParameterConvertInterface - : public ir::DialectInterface::Base { + : public pir::DialectInterface::Base { public: - explicit ParameterConvertInterface(ir::Dialect* dialect) : Base(dialect) {} + explicit ParameterConvertInterface(pir::Dialect* dialect) : Base(dialect) {} // NOTE(zhangbo): Only support new a CPU Variable. 
   std::shared_ptr<paddle::framework::Variable> ParameterToVariable(
-      ir::Parameter* parameter);
+      pir::Parameter* parameter);

-  std::unique_ptr<ir::Parameter> VariableToParameter(
+  std::unique_ptr<pir::Parameter> VariableToParameter(
       paddle::framework::Variable* var);
 };
diff --git a/paddle/fluid/ir/dialect/paddle_dialect/utils/CMakeLists.txt b/paddle/fluid/pir/dialect/operator/utils/CMakeLists.txt
similarity index 64%
rename from paddle/fluid/ir/dialect/paddle_dialect/utils/CMakeLists.txt
rename to paddle/fluid/pir/dialect/operator/utils/CMakeLists.txt
index 325f13f619b51..58eafb2cc3921 100644
--- a/paddle/fluid/ir/dialect/paddle_dialect/utils/CMakeLists.txt
+++ b/paddle/fluid/pir/dialect/operator/utils/CMakeLists.txt
@@ -1,5 +1,5 @@
 cc_library(op_yaml_info_parser SRCS op_yaml_info_parser.cc)
 cc_library(
-  pd_dialect_utils
+  pd_op_dialect_utils
   SRCS utils.cc
-  DEPS pd_dialect_core)
+  DEPS pd_op_dialect_core)
diff --git a/paddle/fluid/ir/dialect/paddle_dialect/utils/op_yaml_info_parser.cc b/paddle/fluid/pir/dialect/operator/utils/op_yaml_info_parser.cc
similarity index 98%
rename from paddle/fluid/ir/dialect/paddle_dialect/utils/op_yaml_info_parser.cc
rename to paddle/fluid/pir/dialect/operator/utils/op_yaml_info_parser.cc
index 8b5be8ff00cfd..eeb41ed3620ac 100644
--- a/paddle/fluid/ir/dialect/paddle_dialect/utils/op_yaml_info_parser.cc
+++ b/paddle/fluid/pir/dialect/operator/utils/op_yaml_info_parser.cc
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/fluid/ir/dialect/paddle_dialect/utils/op_yaml_info_parser.h"
+#include "paddle/fluid/pir/dialect/operator/utils/op_yaml_info_parser.h"

 namespace paddle {
 namespace dialect {
diff --git a/paddle/fluid/ir/dialect/paddle_dialect/utils/op_yaml_info_parser.h b/paddle/fluid/pir/dialect/operator/utils/op_yaml_info_parser.h
similarity index 97%
rename from paddle/fluid/ir/dialect/paddle_dialect/utils/op_yaml_info_parser.h
rename to paddle/fluid/pir/dialect/operator/utils/op_yaml_info_parser.h
index acbc1b8e19649..9557a3d5b7763 100644
--- a/paddle/fluid/ir/dialect/paddle_dialect/utils/op_yaml_info_parser.h
+++ b/paddle/fluid/pir/dialect/operator/utils/op_yaml_info_parser.h
@@ -14,7 +14,7 @@

 #pragma once

-#include "paddle/fluid/ir/dialect/paddle_dialect/interface/op_yaml_info.h"
+#include "paddle/fluid/pir/dialect/operator/interface/op_yaml_info.h"

 namespace paddle {
 namespace dialect {
diff --git a/paddle/fluid/ir/dialect/paddle_dialect/utils/op_yaml_info_util.h b/paddle/fluid/pir/dialect/operator/utils/op_yaml_info_util.h
similarity index 96%
rename from paddle/fluid/ir/dialect/paddle_dialect/utils/op_yaml_info_util.h
rename to paddle/fluid/pir/dialect/operator/utils/op_yaml_info_util.h
index 3df6ce5e22c15..462e88f4da327 100644
--- a/paddle/fluid/ir/dialect/paddle_dialect/utils/op_yaml_info_util.h
+++ b/paddle/fluid/pir/dialect/operator/utils/op_yaml_info_util.h
@@ -14,9 +14,9 @@

 #pragma once

-#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type_storage.h"
-#include "paddle/ir/core/builtin_attribute.h"
-#include "paddle/ir/core/builtin_type.h"
+#include "paddle/fluid/pir/dialect/operator/ir/type_storage.h"
+#include "paddle/pir/core/builtin_attribute.h"
+#include "paddle/pir/core/builtin_type.h"

 namespace paddle {
 namespace dialect {
diff --git a/paddle/fluid/ir/dialect/paddle_dialect/utils/utils.cc b/paddle/fluid/pir/dialect/operator/utils/utils.cc
similarity index 64%
rename from paddle/fluid/ir/dialect/paddle_dialect/utils/utils.cc
rename to paddle/fluid/pir/dialect/operator/utils/utils.cc
index e0ec875ca00d6..72e5f63d28673 100644
--- a/paddle/fluid/ir/dialect/paddle_dialect/utils/utils.cc
+++ b/paddle/fluid/pir/dialect/operator/utils/utils.cc
@@ -12,24 +12,24 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/fluid/ir/dialect/paddle_dialect/utils/utils.h"
-#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_attribute.h"
+#include "paddle/fluid/pir/dialect/operator/utils/utils.h"
+#include "paddle/fluid/pir/dialect/operator/ir/op_attribute.h"

 namespace paddle {
 namespace dialect {

 const std::unordered_set<std::string> LegacyOpList = {
-    "pd.load_combine",
-    "pd.c_concat",
-    "pd.c_broadcast_",
-    "pd.fused_bn_add_activation_",
-    "pd.fused_bn_add_activation_grad",
-    "pd.c_sync_calc_stream_",
-    "pd.c_sync_comm_stream_",
-    "pd.send_v2",
-    "pd.recv_v2",
-    "pd.c_allreduce_sum",
-    "pd.c_allreduce_sum_"};
+    "pd_op.load_combine",
+    "pd_op.c_concat",
+    "pd_op.c_broadcast_",
+    "pd_op.fused_bn_add_activation_",
+    "pd_op.fused_bn_add_activation_grad",
+    "pd_op.c_sync_calc_stream_",
+    "pd_op.c_sync_comm_stream_",
+    "pd_op.send_v2",
+    "pd_op.recv_v2",
+    "pd_op.c_allreduce_sum",
+    "pd_op.c_allreduce_sum_"};

 enum class AttrType {
   UNDEFINED = 0,
@@ -53,20 +53,20 @@ enum class AttrType {
   NUM_ATTR_TYPES,
 };

-static inline AttrType GetAttributeType(const ir::Attribute& attr) {
-  if (attr.isa<ir::BoolAttribute>()) {
+static inline AttrType GetAttributeType(const pir::Attribute& attr) {
+  if (attr.isa<pir::BoolAttribute>()) {
     return AttrType::BOOL;
-  } else if (attr.isa<ir::FloatAttribute>()) {
+  } else if (attr.isa<pir::FloatAttribute>()) {
     return AttrType::FLOAT;
-  } else if (attr.isa<ir::DoubleAttribute>()) {
+  } else if (attr.isa<pir::DoubleAttribute>()) {
     return AttrType::DOUBLE;
-  } else if (attr.isa<ir::Int32Attribute>()) {
+  } else if (attr.isa<pir::Int32Attribute>()) {
     return AttrType::INT32;
-  } else if (attr.isa<ir::Int64Attribute>()) {
+  } else if (attr.isa<pir::Int64Attribute>()) {
     return AttrType::INT64;
-  } else if (attr.isa<ir::ArrayAttribute>()) {
+  } else if (attr.isa<pir::ArrayAttribute>()) {
     return AttrType::ARRAY;
-  } else if (attr.isa<ir::StrAttribute>()) {
+  } else if (attr.isa<pir::StrAttribute>()) {
     return AttrType::STRING;
   } else if (attr.isa<paddle::dialect::IntArrayAttribute>()) {
     return AttrType::INT_ARRAY;
@@ -81,53 +81,54 @@ static inline AttrType GetAttributeType(const ir::Attribute& attr) {
   }
 }

-static std::unordered_map<AttrType,
-                          std::function<VariantType(const ir::Attribute& attr)>>
+static std::unordered_map<
+    AttrType,
+    std::function<VariantType(const pir::Attribute& attr)>>
     kAttrCastMap = {
         {AttrType::BOOL,
-         [](const ir::Attribute& attr) {
-           return VariantType{attr.dyn_cast<ir::BoolAttribute>().data()};
+         [](const pir::Attribute& attr) {
+           return VariantType{attr.dyn_cast<pir::BoolAttribute>().data()};
         }},
        {AttrType::FLOAT,
-         [](const ir::Attribute& attr) {
-           return VariantType{attr.dyn_cast<ir::FloatAttribute>().data()};
+         [](const pir::Attribute& attr) {
+           return VariantType{attr.dyn_cast<pir::FloatAttribute>().data()};
         }},
        {AttrType::DOUBLE,
-         [](const ir::Attribute& attr) {
-           return VariantType{attr.dyn_cast<ir::DoubleAttribute>().data()};
+         [](const pir::Attribute& attr) {
+           return VariantType{attr.dyn_cast<pir::DoubleAttribute>().data()};
         }},
        {AttrType::INT32,
-         [](const ir::Attribute& attr) {
-           return VariantType{attr.dyn_cast<ir::Int32Attribute>().data()};
+         [](const pir::Attribute& attr) {
+           return VariantType{attr.dyn_cast<pir::Int32Attribute>().data()};
         }},
        {AttrType::INT64,
-         [](const ir::Attribute& attr) {
-           return VariantType{attr.dyn_cast<ir::Int64Attribute>().data()};
+         [](const pir::Attribute& attr) {
+           return VariantType{attr.dyn_cast<pir::Int64Attribute>().data()};
         }},
        {AttrType::INT_ARRAY,
-         [](const ir::Attribute& attr) {
+         [](const pir::Attribute& attr) {
           return VariantType{
               attr.dyn_cast<paddle::dialect::IntArrayAttribute>()
                   .data()
                   .GetData()};
         }},
        {AttrType::STRING,
-         [](const ir::Attribute& attr) {
-           return VariantType{attr.dyn_cast<ir::StrAttribute>().AsString()};
+         [](const pir::Attribute& attr) {
+           return VariantType{attr.dyn_cast<pir::StrAttribute>().AsString()};
         }},
        {AttrType::DATA_TYPE,
-         [](const ir::Attribute& attr) {
+         [](const pir::Attribute& attr) {
return VariantType{ attr.dyn_cast().data()}; }}, {AttrType::PLACE, - [](const ir::Attribute& attr) { + [](const pir::Attribute& attr) { return VariantType{ attr.dyn_cast().data()}; }}, {AttrType::ARRAY, - [](const ir::Attribute& attr) { - auto attr_vec = attr.dyn_cast().AsVector(); + [](const pir::Attribute& attr) { + auto attr_vec = attr.dyn_cast().AsVector(); if (attr_vec.size() == 0) { return VariantType{std::vector()}; } @@ -137,35 +138,35 @@ static std::unordered_map vec_bools; for (auto vec_element : attr_vec) { vec_bools.push_back( - vec_element.dyn_cast().data()); + vec_element.dyn_cast().data()); } return VariantType{vec_bools}; } else if (element_type == AttrType::INT32) { std::vector vec_int32; for (auto vec_element : attr_vec) { vec_int32.push_back( - vec_element.dyn_cast().data()); + vec_element.dyn_cast().data()); } return VariantType{vec_int32}; } else if (element_type == AttrType::INT64) { std::vector vec_int64; for (auto vec_element : attr_vec) { vec_int64.push_back( - vec_element.dyn_cast().data()); + vec_element.dyn_cast().data()); } return VariantType{vec_int64}; } else if (element_type == AttrType::FLOAT) { std::vector vec_float; for (auto vec_element : attr_vec) { vec_float.push_back( - vec_element.dyn_cast().data()); + vec_element.dyn_cast().data()); } return VariantType{vec_float}; } else if (element_type == AttrType::DOUBLE) { std::vector vec_double; for (auto vec_element : attr_vec) { vec_double.push_back( - vec_element.dyn_cast().data()); + vec_element.dyn_cast().data()); } return VariantType{vec_double}; } else { @@ -176,7 +177,7 @@ static std::unordered_map()) { +static inline phi::DataType TransToPhiDataType(pir::Type dtype) { + if (dtype.isa()) { return phi::DataType::BFLOAT16; - } else if (dtype.isa()) { + } else if (dtype.isa()) { return phi::DataType::FLOAT16; - } else if (dtype.isa()) { + } else if (dtype.isa()) { return phi::DataType::FLOAT32; - } else if (dtype.isa()) { + } else if (dtype.isa()) { return phi::DataType::FLOAT64; - } else if (dtype.isa()) { + } else if (dtype.isa()) { return phi::DataType::UINT8; - } else if (dtype.isa()) { + } else if (dtype.isa()) { return phi::DataType::INT8; - } else if (dtype.isa()) { + } else if (dtype.isa()) { return phi::DataType::INT16; - } else if (dtype.isa()) { + } else if (dtype.isa()) { return phi::DataType::INT32; - } else if (dtype.isa()) { + } else if (dtype.isa()) { return phi::DataType::INT64; - } else if (dtype.isa()) { + } else if (dtype.isa()) { return phi::DataType::INT32; - } else if (dtype.isa()) { + } else if (dtype.isa()) { return phi::DataType::BOOL; - } else if (dtype.isa()) { + } else if (dtype.isa()) { return phi::DataType::COMPLEX64; - } else if (dtype.isa()) { + } else if (dtype.isa()) { return phi::DataType::COMPLEX128; } else { PADDLE_THROW(phi::errors::Unimplemented( @@ -66,36 +66,36 @@ static inline phi::DataType TransToPhiDataType(ir::Type dtype) { // use phi::DataType::INT32 for IndexType from builtin type to phi::DataType, // but only use INT32 not IndexType from phi::DataType type to builtin type. 
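An aside before the reverse mapping below: `TransToPhiDataType` above and `TransToIrDataType` in the next hunk are meant to be near-inverses, and the comment lines just above spell out the one deliberate asymmetry, namely that `pir::IndexType` lowers to `phi::DataType::INT32`, which maps back to `pir::Int32Type`. A sketch of that round trip; `RoundTripsToSameType` is illustrative and not part of the diff:

```cpp
#include "paddle/fluid/pir/dialect/operator/utils/utils.h"

// True for every builtin element type except pir::IndexType, which
// collapses to INT32 on the way out and comes back as pir::Int32Type.
bool RoundTripsToSameType(pir::Type t) {
  phi::DataType dt = paddle::dialect::TransToPhiDataType(t);
  return paddle::dialect::TransToIrDataType(dt) == t;
}
```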
-static inline ir::Type TransToIrDataType(phi::DataType dtype,
-                                         ir::IrContext* ctx = nullptr) {
+static inline pir::Type TransToIrDataType(phi::DataType dtype,
+                                          pir::IrContext* ctx = nullptr) {
   if (ctx == nullptr) {
-    ctx = ir::IrContext::Instance();
+    ctx = pir::IrContext::Instance();
   }
   switch (dtype) {
     case phi::DataType::BFLOAT16:
-      return ir::BFloat16Type::get(ctx);
+      return pir::BFloat16Type::get(ctx);
     case phi::DataType::FLOAT16:
-      return ir::Float16Type::get(ctx);
+      return pir::Float16Type::get(ctx);
     case phi::DataType::FLOAT32:
-      return ir::Float32Type::get(ctx);
+      return pir::Float32Type::get(ctx);
     case phi::DataType::FLOAT64:
-      return ir::Float64Type::get(ctx);
+      return pir::Float64Type::get(ctx);
     case phi::DataType::UINT8:
-      return ir::UInt8Type::get(ctx);
+      return pir::UInt8Type::get(ctx);
     case phi::DataType::INT8:
-      return ir::Int8Type::get(ctx);
+      return pir::Int8Type::get(ctx);
     case phi::DataType::INT16:
-      return ir::Int16Type::get(ctx);
+      return pir::Int16Type::get(ctx);
     case phi::DataType::INT32:
-      return ir::Int32Type::get(ctx);
+      return pir::Int32Type::get(ctx);
     case phi::DataType::INT64:
-      return ir::Int64Type::get(ctx);
+      return pir::Int64Type::get(ctx);
     case phi::DataType::BOOL:
-      return ir::BoolType::get(ctx);
+      return pir::BoolType::get(ctx);
     case phi::DataType::COMPLEX64:
-      return ir::Complex64Type::get(ctx);
+      return pir::Complex64Type::get(ctx);
     case phi::DataType::COMPLEX128:
-      return ir::Complex128Type::get(ctx);
+      return pir::Complex128Type::get(ctx);
     default:
       PADDLE_THROW(phi::errors::Unimplemented(
           "Unsupported phi data type `%s` when casting it into "
@@ -104,22 +104,22 @@ static inline ir::Type TransToIrDataType(phi::DataType dtype,
   }
 }

-static inline ir::Attribute TransToIrAttribute(phi::Scalar scalar,
-                                               ir::IrContext* ctx = nullptr) {
+static inline pir::Attribute TransToIrAttribute(phi::Scalar scalar,
+                                                pir::IrContext* ctx = nullptr) {
   if (ctx == nullptr) {
-    ctx = ir::IrContext::Instance();
+    ctx = pir::IrContext::Instance();
   }
   switch (scalar.dtype()) {
     case phi::DataType::FLOAT32:
-      return ir::FloatAttribute::get(ctx, scalar.to<float>());
+      return pir::FloatAttribute::get(ctx, scalar.to<float>());
     case phi::DataType::FLOAT64:
-      return ir::DoubleAttribute::get(ctx, scalar.to<double>());
+      return pir::DoubleAttribute::get(ctx, scalar.to<double>());
     case phi::DataType::INT32:
-      return ir::Int32Attribute::get(ctx, scalar.to<int32_t>());
+      return pir::Int32Attribute::get(ctx, scalar.to<int32_t>());
     case phi::DataType::INT64:
-      return ir::Int64Attribute::get(ctx, scalar.to<int64_t>());
+      return pir::Int64Attribute::get(ctx, scalar.to<int64_t>());
     case phi::DataType::BOOL:
-      return ir::BoolAttribute::get(ctx, scalar.to<bool>());
+      return pir::BoolAttribute::get(ctx, scalar.to<bool>());
     default:
       PADDLE_THROW(phi::errors::Unimplemented(
           "Unsupported phi data type `%s` when casting it into "
@@ -166,7 +166,7 @@ inline DataType VarTypeToDataType(
   }
 }

-VariantType GetAttributeData(const ir::Attribute& attr);
+VariantType GetAttributeData(const pir::Attribute& attr);

 bool IsLegacyOp(const std::string& name);
diff --git a/paddle/fluid/ir/phi_kernel_adaptor/CMakeLists.txt b/paddle/fluid/pir/phi_kernel_adaptor/CMakeLists.txt
similarity index 56%
rename from paddle/fluid/ir/phi_kernel_adaptor/CMakeLists.txt
rename to paddle/fluid/pir/phi_kernel_adaptor/CMakeLists.txt
index 1df1cc06db594..e1f8db179be6b 100644
--- a/paddle/fluid/ir/phi_kernel_adaptor/CMakeLists.txt
+++ b/paddle/fluid/pir/phi_kernel_adaptor/CMakeLists.txt
@@ -1,4 +1,4 @@
-# All source files of pd_dialect, except for the source file of op, which is generated in the compilation directory.
+# All source files of pd_op_dialect, except for the source file of op, which is generated in the compilation directory.
 file(GLOB PHI_KERNEL_ADAPTOR_SRCS "*.cc")

 cc_library(
diff --git a/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_adaptor.h b/paddle/fluid/pir/phi_kernel_adaptor/phi_kernel_adaptor.h
similarity index 62%
rename from paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_adaptor.h
rename to paddle/fluid/pir/phi_kernel_adaptor/phi_kernel_adaptor.h
index bb1b284ea1b6c..47c0d39856d2f 100644
--- a/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_adaptor.h
+++ b/paddle/fluid/pir/phi_kernel_adaptor/phi_kernel_adaptor.h
@@ -14,23 +14,23 @@

 #pragma once

-#include "paddle/fluid/ir/dialect/paddle_dialect/interface/infermeta.h"
-#include "paddle/fluid/ir/dialect/paddle_dialect/interface/op_yaml_info.h"
-#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.h"
-#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_op.h"
-#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h"
-#include "paddle/fluid/ir/dialect/paddle_dialect/utils/op_yaml_info_parser.h"
-#include "paddle/fluid/ir/dialect/paddle_dialect/utils/op_yaml_info_util.h"
-#include "paddle/fluid/ir/dialect/paddle_dialect/utils/utils.h"
-#include "paddle/ir/core/builtin_attribute.h"
-#include "paddle/ir/core/builtin_dialect.h"
-#include "paddle/ir/core/builtin_op.h"
-#include "paddle/ir/core/ir_context.h"
-#include "paddle/ir/core/program.h"
-#include "paddle/ir/core/utils.h"
+#include "paddle/fluid/pir/dialect/operator/interface/infermeta.h"
+#include "paddle/fluid/pir/dialect/operator/interface/op_yaml_info.h"
+#include "paddle/fluid/pir/dialect/operator/ir/op_dialect.h"
+#include "paddle/fluid/pir/dialect/operator/ir/op_type.h"
+#include "paddle/fluid/pir/dialect/operator/ir/pd_op.h"
+#include "paddle/fluid/pir/dialect/operator/utils/op_yaml_info_parser.h"
+#include "paddle/fluid/pir/dialect/operator/utils/op_yaml_info_util.h"
+#include "paddle/fluid/pir/dialect/operator/utils/utils.h"
 #include "paddle/phi/core/meta_tensor.h"
 #include "paddle/phi/infermeta/binary.h"
 #include "paddle/phi/kernels/elementwise_add_kernel.h"
+#include "paddle/pir/core/builtin_attribute.h"
+#include "paddle/pir/core/builtin_dialect.h"
+#include "paddle/pir/core/builtin_op.h"
+#include "paddle/pir/core/ir_context.h"
+#include "paddle/pir/core/program.h"
+#include "paddle/pir/core/utils.h"

 #include "paddle/fluid/framework/scope.h"
 #include "paddle/fluid/framework/tensor.h"
@@ -43,19 +43,19 @@

 #include "paddle/fluid/platform/init.h"

-#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_attribute.h"
-#include "paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_attribute.h"
+#include "paddle/fluid/pir/dialect/kernel/ir/kernel_attribute.h"
+#include "paddle/fluid/pir/dialect/operator/ir/op_attribute.h"

 #include "glog/logging.h"

-#include "paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.h"
+#include "paddle/fluid/pir/phi_kernel_adaptor/phi_kernel_util.h"

 class PhiKernelAdaptor {
  public:
   explicit PhiKernelAdaptor(paddle::framework::Scope* scope) : scope_(scope) {}

-  void run_kernel_prog(ir::Program* program) {
+  void run_kernel_prog(pir::Program* program) {
     auto block = program->block();
-    std::unordered_map<ir::Value, std::string> value_2_var_name;
+    std::unordered_map<pir::Value, std::string> value_2_var_name;
     std::unordered_map<const paddle::framework::Variable*, std::string>
         variable_2_var_name;
     std::map<std::string, int> var_name_2_id;
@@ -70,9 +70,9 @@ class PhiKernelAdaptor {
                    &variable_2_var_name,
                    &var_name_2_id,
                    &variable_list);
-    ir::IrContext* ctx = ir::IrContext::Instance();
+    pir::IrContext* ctx = pir::IrContext::Instance();

-    ctx->GetOrRegisterDialect<paddle::dialect::PaddleDialect>();
+    ctx->GetOrRegisterDialect<paddle::dialect::OperatorDialect>();

     auto* dev_ctx = phi::DeviceContextPool::Instance().Get(phi::CPUPlace());
     phi::Place cpu_place(phi::AllocationType::CPU);
@@ -80,9 +80,9 @@ class PhiKernelAdaptor {
       auto attr_map = (*it)->attributes();

       auto op_name =
-          attr_map.at("op_name").dyn_cast<ir::StrAttribute>().AsString();
+          attr_map.at("op_name").dyn_cast<pir::StrAttribute>().AsString();

-      ir::OpInfo op1_info = ctx->GetRegisteredOpInfo(op_name);
+      pir::OpInfo op1_info = ctx->GetRegisteredOpInfo(op_name);

       auto impl =
           op1_info.GetInterfaceImpl<paddle::dialect::OpYamlInfoInterface>();
@@ -96,7 +96,7 @@ class PhiKernelAdaptor {
       phi::InferMetaContext ctx;

       paddle::dialect::OpYamlInfoParser op_yaml_info_parser(yaml_info);
-      ir::BuildPhiContext<
+      pir::BuildPhiContext<
           phi::InferMetaContext,
           phi::MetaTensor,
           phi::MetaTensor,
@@ -108,7 +108,7 @@ class PhiKernelAdaptor {
       infer_meta_impl->infer_meta_(&ctx);

       auto kernel_name =
-          attr_map.at("kernel_name").dyn_cast<ir::StrAttribute>().AsString();
+          attr_map.at("kernel_name").dyn_cast<pir::StrAttribute>().AsString();
       auto kernel_key = attr_map.at("kernel_key")
                             .dyn_cast<paddle::dialect::KernelAttribute>()
                             .data();
@@ -118,17 +118,17 @@ class PhiKernelAdaptor {

       phi::KernelContext kernel_ctx(dev_ctx);

-      ir::BuildPhiContext<phi::KernelContext,
-                          const phi::TensorBase*,
-                          phi::TensorBase*,
-                          paddle::small_vector<const phi::TensorBase*>,
-                          paddle::small_vector<phi::TensorBase*>,
-                          true>((*it),
-                                value_2_var_name,
-                                scope_,
-                                nullptr,
-                                op_yaml_info_parser,
-                                &kernel_ctx);
+      pir::BuildPhiContext<phi::KernelContext,
+                           const phi::TensorBase*,
+                           phi::TensorBase*,
+                           paddle::small_vector<const phi::TensorBase*>,
+                           paddle::small_vector<phi::TensorBase*>,
+                           true>((*it),
+                                 value_2_var_name,
+                                 scope_,
+                                 nullptr,
+                                 op_yaml_info_parser,
+                                 &kernel_ctx);
       kernel_fn(&kernel_ctx);

       auto out_value = (*it)->result(0);
-#include "paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.h" - -#include "paddle/fluid/ir/dialect/paddle_dialect/interface/op_yaml_info.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/utils/op_yaml_info_util.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/utils/utils.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/program.h" -#include "paddle/ir/core/utils.h" +#include "paddle/fluid/pir/phi_kernel_adaptor/phi_kernel_util.h" + +#include "paddle/fluid/pir/dialect/operator/interface/op_yaml_info.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_dialect.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_type.h" +#include "paddle/fluid/pir/dialect/operator/utils/op_yaml_info_util.h" +#include "paddle/fluid/pir/dialect/operator/utils/utils.h" #include "paddle/phi/core/meta_tensor.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/program.h" +#include "paddle/pir/core/utils.h" #include "paddle/fluid/framework/scope.h" #include "paddle/fluid/framework/tensor.h" @@ -33,23 +33,23 @@ #include "paddle/fluid/framework/string_array.h" #include "paddle/fluid/framework/tensor_ref_array.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_attribute.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/utils/op_yaml_info_parser.h" -#include "paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_attribute.h" -#include "paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_type.h" #include "paddle/fluid/ir_adaptor/translator/op_compat_info.h" +#include "paddle/fluid/pir/dialect/kernel/ir/kernel_attribute.h" +#include "paddle/fluid/pir/dialect/kernel/ir/kernel_type.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_attribute.h" +#include "paddle/fluid/pir/dialect/operator/utils/op_yaml_info_parser.h" #include "paddle/phi/core/enforce.h" #include "glog/logging.h" #include "paddle/fluid/framework/op_info.h" #include "paddle/fluid/framework/operator.h" -namespace ir { +namespace pir { -void AddNewData(ir::Value value, +void AddNewData(pir::Value value, std::string name, paddle::framework::Variable* var, - std::unordered_map* value_2_var_name, + std::unordered_map* value_2_var_name, std::unordered_map* variable_2_var_name, std::map* var_name_2_id, @@ -71,10 +71,10 @@ void AddNewData(ir::Value value, "The size of variable_list and var_name_2_id map should be equal")); } -void RenameData(ir::Value value, +void RenameData(pir::Value value, std::string new_name, std::string orig_name, - std::unordered_map* value_2_var_name, + std::unordered_map* value_2_var_name, std::unordered_map* variable_2_var_name, std::map* var_name_2_id) { @@ -104,11 +104,11 @@ using VariableNameMap = std::unordered_map; paddle::framework::Variable* CreateVar( - ir::Value value, + pir::Value value, paddle::framework::Scope* inner_scope, const std::string& var_name_prefix, bool force_persisable, - std::unordered_map* value_2_var_name, + std::unordered_map* value_2_var_name, std::unordered_map* variable_2_var_name, std::map* var_name_2_id, @@ -142,9 +142,9 @@ paddle::framework::Variable* CreateVar( } void CheckInputVars( - ir::Operation* op, + pir::Operation* op, const std::string& op_name, - const std::unordered_map& value_2_var_name) { + const std::unordered_map& value_2_var_name) { size_t input_num = op->num_operands(); if (input_num > 0) { for 
(size_t i = 0; i < input_num; ++i) { @@ -162,10 +162,10 @@ void CheckInputVars( } } -void BuildValue(ir::Value value, +void BuildValue(pir::Value value, paddle::framework::Scope* inner_scope, const std::string& var_name_prefix, - std::unordered_map* value_2_var_name, + std::unordered_map* value_2_var_name, std::unordered_map* variable_2_var_name, std::map* var_name_2_id, @@ -190,12 +190,12 @@ void BuildValue(ir::Value value, var->GetMutable(); } else if (value.type().isa()) { var->GetMutable(); - } else if (value.type().isa()) { + } else if (value.type().isa()) { auto tensor_array = var->GetMutable(); - for (size_t i = 0; i < value.type().dyn_cast().size(); + for (size_t i = 0; i < value.type().dyn_cast().size(); i++) { PADDLE_ENFORCE(value.type() - .dyn_cast()[i] + .dyn_cast()[i] .isa(), paddle::platform::errors::Fatal( "Element of VectorType output only support " @@ -219,10 +219,10 @@ void BuildValue(ir::Value value, } void HandleForSpecialOp( - ir::Operation* op, + pir::Operation* op, paddle::framework::Scope* inner_scope, const std::string& var_name_prefix, - std::unordered_map* value_2_var_name, + std::unordered_map* value_2_var_name, std::unordered_map* variable_2_var_name, std::map* var_name_2_id, @@ -230,13 +230,13 @@ void HandleForSpecialOp( std::string op_name = op->name(); if (op->attributes().count("op_name")) { op_name = - op->attributes().at("op_name").dyn_cast().AsString(); + op->attributes().at("op_name").dyn_cast().AsString(); } - if (op_name == "pd.fetch") { + if (op_name == "pd_op.fetch") { // fetch is a very special op, with no output auto fetch_src_name = - op->attributes().at("name").dyn_cast().AsString(); + op->attributes().at("name").dyn_cast().AsString(); auto fetch_var_name = fetch_src_name + "@fetch"; auto* var = const_cast(inner_scope->root()) @@ -253,13 +253,13 @@ void HandleForSpecialOp( variable_list); } - if (op_name == "pd.feed" || op_name == "pd.data") { + if (op_name == "pd_op.feed" || op_name == "pd_op.data") { VLOG(6) << "Handle for" << op_name; auto value = op->result(0); VLOG(6) << "link feed output to feed in variable" << inner_scope; std::string name = - op->attributes().at("name").dyn_cast().AsString(); + op->attributes().at("name").dyn_cast().AsString(); paddle::framework::Variable* var = inner_scope->FindVar(name); PADDLE_ENFORCE(var, paddle::platform::errors::InvalidArgument( @@ -310,7 +310,7 @@ void HandleForSpecialOp( VLOG(6) << "Handle for builtin.set_parameter:"; auto param_name = op->attributes() .at("parameter_name") - .dyn_cast() + .dyn_cast() .AsString(); auto value = op->operand_source(0); @@ -338,10 +338,10 @@ void HandleForSpecialOp( var_name_2_id); } - if (op_name == "pd.shadow_output") { - VLOG(6) << "Handle for pd.shadow_ouptut"; + if (op_name == "pd_op.shadow_output") { + VLOG(6) << "Handle for pd_op.shadow_ouptut"; auto var_name = - op->attributes().at("name").dyn_cast().AsString(); + op->attributes().at("name").dyn_cast().AsString(); auto value = op->operand_source(0); // change opreand name to param_name @@ -363,7 +363,7 @@ void HandleForSpecialOp( VLOG(6) << "Handle for builtin.get_parameter:"; auto param_name = op->attributes() .at("parameter_name") - .dyn_cast() + .dyn_cast() .AsString(); auto value = op->result(0); @@ -387,7 +387,7 @@ void HandleForSpecialOp( "input of buildin slice not in name map")); int index = - op->attributes().at("index").dyn_cast().data(); + op->attributes().at("index").dyn_cast().data(); auto in_var = inner_scope->FindVar(value_2_var_name->at(in_value)); auto variable_array = in_var->Get(); @@ 
-428,36 +428,36 @@ void HandleForSpecialOp( } void HandleForInplaceOp( - ir::Operation* op, + pir::Operation* op, paddle::framework::Scope* inner_scope, const std::string& var_name_prefix, - std::unordered_map* value_2_var_name, + std::unordered_map* value_2_var_name, std::unordered_map* variable_2_var_name, std::map* var_name_2_id, std::vector* variable_list) { if (op->num_results() < 1) return; - ir::IrContext* ctx = ir::IrContext::Instance(); + pir::IrContext* ctx = pir::IrContext::Instance(); std::string op_name = op->name(); if (op->attributes().count("op_name")) { op_name = - op->attributes().at("op_name").dyn_cast().AsString(); + op->attributes().at("op_name").dyn_cast().AsString(); } - ir::OpInfo op_info = ctx->GetRegisteredOpInfo(op_name); + pir::OpInfo op_info = ctx->GetRegisteredOpInfo(op_name); paddle::dialect::OpYamlInfoParser yaml_parser( op_info.GetInterfaceImpl() ->get_op_info_()); for (size_t i = 0; i < op->num_results(); ++i) { - ir::Value value = op->result(i); + pir::Value value = op->result(i); if (value.type().storage() == nullptr) { continue; } std::string value_name = yaml_parser.OutputNames()[i]; if (yaml_parser.HasInplace(value_name)) { const std::string& inplace_name = yaml_parser.InplaceName(value_name); - ir::Value inplace_value = + pir::Value inplace_value = op->operand_source(yaml_parser.InputName2Id().at(inplace_name)); std::string var_name = value_2_var_name->at(inplace_value); VLOG(4) << "inplace: " << value_name << " -> " << inplace_name @@ -465,7 +465,7 @@ void HandleForInplaceOp( value_2_var_name->emplace(value, var_name); } else if (yaml_parser.HasView(value_name)) { const std::string& view_name = yaml_parser.ViewName(value_name); - ir::Value view_value = + pir::Value view_value = op->operand_source(yaml_parser.InputName2Id().at(view_name)); const std::string& var_name = value_2_var_name->at(view_value); VLOG(4) << "view: " << value_name << " -> " << view_name @@ -485,10 +485,10 @@ void HandleForInplaceOp( // NOTE(zhiqiu): the persistable is created in inner_scope's root, and other is // created in inner_scope. 
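Aside: HandleForInplaceOp above never allocates a fresh variable for an inplace (or view) output. It resolves the aliased input through OpYamlInfoParser::InplaceName (or ViewName) and records the output value under the input's existing variable name. A minimal standalone sketch of that bookkeeping, using plain strings where the pass uses pir::Value, and assuming the extraction-stripped map type here is std::unordered_map<pir::Value, std::string>:

#include <cassert>
#include <string>
#include <unordered_map>

int main() {
  // value_2_var_name analogue: which scope variable each SSA value is bound to.
  std::unordered_map<std::string, std::string> value_2_var_name{
      {"operand:x", "var_12"}};

  // For an op declared inplace{out: x}, yaml_parser.InplaceName("out") == "x",
  // so the output value is recorded under the input's variable name instead
  // of getting a variable of its own.
  value_2_var_name.emplace("result:out", value_2_var_name.at("operand:x"));
  assert(value_2_var_name.at("result:out") == "var_12");
  return 0;
}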
-void BuildScope(const ir::Block& block, +void BuildScope(const pir::Block& block, paddle::framework::Scope* inner_scope, const std::string& var_name_prefix, - std::unordered_map* value_2_var_name, + std::unordered_map* value_2_var_name, std::unordered_map* variable_2_var_name, std::map* var_name_2_id, @@ -503,16 +503,16 @@ void BuildScope(const ir::Block& block, if (op->attributes().count("op_name")) { op_name = op->attributes() .at("op_name") - .dyn_cast() + .dyn_cast() .AsString(); } VLOG(4) << "build op:" << op_name; - if (op_name == "pd.feed" || op_name == "pd.fetch" || + if (op_name == "pd_op.feed" || op_name == "pd_op.fetch" || op_name == "builtin.combine" || op_name == "builtin.set_parameter" || op_name == "builtin.get_parameter" || op_name == "builtin.slice" || - op_name == "builtin.split" || op_name == "pd.data" || - op_name == "pd.shadow_output") { + op_name == "builtin.split" || op_name == "pd_op.data" || + op_name == "pd_op.shadow_output") { HandleForSpecialOp(op, inner_scope, var_name_prefix, @@ -529,7 +529,7 @@ void BuildScope(const ir::Block& block, if (op->attributes().count("is_inplace") != 0 && op->attributes() .at("is_inplace") - .dyn_cast() + .dyn_cast() .data()) { HandleForInplaceOp(op, inner_scope, @@ -559,8 +559,8 @@ void BuildScope(const ir::Block& block, } void BuildRuntimeContext( - ir::Operation* op, - const std::unordered_map& name_map, + pir::Operation* op, + const std::unordered_map& name_map, paddle::framework::Scope* scope, paddle::framework::Scope* local_scope, const paddle::dialect::OpYamlInfoParser& op_yaml_info, @@ -584,7 +584,7 @@ void BuildRuntimeContext( true, phi::errors::NotFound("param [%s] MUST in name2id map", name)); auto index = op_yaml_info.InputName2Id().at(name); - ir::Value ptr = op->operand_source(index); + pir::Value ptr = op->operand_source(index); auto in_var_name = name_map.at(ptr); VLOG(6) << "ctx->EmplaceBackInput: " << name << "\t" << in_var_name; @@ -602,7 +602,7 @@ void BuildRuntimeContext( auto& output_name_list = op_yaml_info.OutputNames(); for (size_t i = 0; i < output_name_list.size(); ++i) { auto name = output_name_list[i]; - ir::Value ptr = op->result(i); + pir::Value ptr = op->result(i); auto in_var_name = name_map.at(ptr); VLOG(6) << "ctx->EmplaceBackOutput: " << name << "\t" << in_var_name; @@ -618,7 +618,7 @@ void BuildRuntimeContext( if (type.isa() || type.isa()) { runtime_ctx->outputs[legacy_arg_name] = {var}; - } else if (type.isa()) { + } else if (type.isa()) { auto var_ref = var->Get(); std::vector vec_tmp; vec_tmp.reserve(var_ref.size()); @@ -629,14 +629,14 @@ void BuildRuntimeContext( } else { PADDLE_THROW(phi::errors::Unimplemented( "only support AllocatedDenseTensor, AllocatedSelectedRowsType and " - "ir::vector type")); + "pir::vector type")); } } } std::shared_ptr BuildOperatorBase( - ir::Operation* op, - const std::unordered_map& name_map, + pir::Operation* op, + const std::unordered_map& name_map, const paddle::dialect::OpYamlInfoParser& op_yaml_info, const std::unordered_map& variable_2_var_name, @@ -658,7 +658,7 @@ std::shared_ptr BuildOperatorBase( true, phi::errors::NotFound("param [%s] MUST in name2id map", name)); auto index = op_yaml_info.InputName2Id().at(name); - ir::Value ptr = op->operand_source(index); + pir::Value ptr = op->operand_source(index); auto in_var_name = name_map.at(ptr); @@ -672,52 +672,52 @@ std::shared_ptr BuildOperatorBase( for (auto& name : attr_name_list) { auto& val = op_attr_map.at(name); - if (val.isa()) { - attr_map[name] = val.dyn_cast().AsString(); - } else if (val.isa()) 
{ - attr_map[name] = val.dyn_cast().data(); - } else if (val.isa()) { - attr_map[name] = val.dyn_cast().data(); - } else if (val.isa()) { - attr_map[name] = val.dyn_cast().data(); - } else if (val.isa()) { - attr_map[name] = val.dyn_cast().data(); - } else if (val.isa()) { - attr_map[name] = val.dyn_cast().data(); - } else if (val.isa()) { - auto array_list = val.dyn_cast().AsVector(); + if (val.isa()) { + attr_map[name] = val.dyn_cast().AsString(); + } else if (val.isa()) { + attr_map[name] = val.dyn_cast().data(); + } else if (val.isa()) { + attr_map[name] = val.dyn_cast().data(); + } else if (val.isa()) { + attr_map[name] = val.dyn_cast().data(); + } else if (val.isa()) { + attr_map[name] = val.dyn_cast().data(); + } else if (val.isa()) { + attr_map[name] = val.dyn_cast().data(); + } else if (val.isa()) { + auto array_list = val.dyn_cast().AsVector(); PADDLE_ENFORCE( array_list.size() > 0, paddle::platform::errors::Fatal("Attribute %s is empty", name)); - if (array_list[0].isa()) { + if (array_list[0].isa()) { std::vector vec_int; for (auto attribute : array_list) { - vec_int.push_back(attribute.dyn_cast().data()); + vec_int.push_back(attribute.dyn_cast().data()); } attr_map[name] = vec_int; - } else if (array_list[0].isa()) { + } else if (array_list[0].isa()) { std::vector vec_int64; for (auto attribute : array_list) { - vec_int64.push_back(attribute.dyn_cast().data()); + vec_int64.push_back(attribute.dyn_cast().data()); } attr_map[name] = vec_int64; - } else if (array_list[0].isa()) { + } else if (array_list[0].isa()) { std::vector vec_bool; for (auto attribute : array_list) { - vec_bool.push_back(attribute.dyn_cast().data()); + vec_bool.push_back(attribute.dyn_cast().data()); } attr_map[name] = vec_bool; - } else if (array_list[0].isa()) { + } else if (array_list[0].isa()) { std::vector vec_float; for (auto attribute : array_list) { - vec_float.push_back(attribute.dyn_cast().data()); + vec_float.push_back(attribute.dyn_cast().data()); } attr_map[name] = vec_float; - } else if (array_list[0].isa()) { + } else if (array_list[0].isa()) { std::vector vec_double; for (auto attribute : array_list) { vec_double.push_back( - attribute.dyn_cast().data()); + attribute.dyn_cast().data()); } attr_map[name] = vec_double; } else { @@ -740,7 +740,7 @@ std::shared_ptr BuildOperatorBase( auto& output_name_list = op_yaml_info.OutputNames(); for (size_t i = 0; i < output_name_list.size(); ++i) { auto name = output_name_list[i]; - ir::Value ptr = op->result(i); + pir::Value ptr = op->result(i); auto out_var_name = name_map.at(ptr); @@ -749,7 +749,7 @@ std::shared_ptr BuildOperatorBase( if (type.isa() || type.isa()) { out_name_map[legacy_arg_name].push_back(out_var_name); - } else if (type.isa()) { + } else if (type.isa()) { auto var = scope->FindVar(out_var_name); auto var_ref = var->Get(); for (size_t k = 0; k < var_ref.size(); ++k) { @@ -761,7 +761,7 @@ std::shared_ptr BuildOperatorBase( } else { PADDLE_THROW(phi::errors::Unimplemented( "only support AllocatedDenseTensor, AllocatedSelectedRowsType and " - "ir::vector type")); + "pir::vector type")); } } @@ -773,4 +773,4 @@ std::shared_ptr BuildOperatorBase( return res; } -} // namespace ir +} // namespace pir diff --git a/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.h b/paddle/fluid/pir/phi_kernel_adaptor/phi_kernel_util.h similarity index 74% rename from paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.h rename to paddle/fluid/pir/phi_kernel_adaptor/phi_kernel_util.h index b1916d5418f77..037674467bc67 100644 --- 
a/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.h +++ b/paddle/fluid/pir/phi_kernel_adaptor/phi_kernel_util.h @@ -14,16 +14,16 @@ #pragma once -#include "paddle/fluid/ir/dialect/paddle_dialect/interface/op_yaml_info.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/utils/op_yaml_info_util.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/utils/utils.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/program.h" -#include "paddle/ir/core/utils.h" +#include "paddle/fluid/pir/dialect/operator/interface/op_yaml_info.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_dialect.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_type.h" +#include "paddle/fluid/pir/dialect/operator/utils/op_yaml_info_util.h" +#include "paddle/fluid/pir/dialect/operator/utils/utils.h" #include "paddle/phi/core/meta_tensor.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/program.h" +#include "paddle/pir/core/utils.h" #include "paddle/fluid/framework/new_executor/interpreter/execution_config.h" #include "paddle/fluid/framework/scope.h" @@ -33,36 +33,36 @@ #include "paddle/phi/core/kernel_context.h" #include "paddle/fluid/framework/operator.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_attribute.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/utils/op_yaml_info_parser.h" -#include "paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_attribute.h" -#include "paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_type.h" -#include "paddle/ir/core/type_name.h" +#include "paddle/fluid/pir/dialect/kernel/ir/kernel_attribute.h" +#include "paddle/fluid/pir/dialect/kernel/ir/kernel_type.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_attribute.h" +#include "paddle/fluid/pir/dialect/operator/utils/op_yaml_info_parser.h" #include "paddle/phi/core/infermeta_utils.h" +#include "paddle/pir/core/type_name.h" #include "glog/logging.h" -namespace ir { -void BuildScope(const ir::Block& block, +namespace pir { +void BuildScope(const pir::Block& block, paddle::framework::Scope* inner_scope, const std::string& var_name_prefix, - std::unordered_map* value_2_var_name, + std::unordered_map* value_2_var_name, std::unordered_map* variable_2_var_name, std::map* var_name_2_id, std::vector* variable_list); void BuildRuntimeContext( - ir::Operation* op, - const std::unordered_map& name_map, + pir::Operation* op, + const std::unordered_map& name_map, paddle::framework::Scope* scope, paddle::framework::Scope* local_scope, const paddle::dialect::OpYamlInfoParser& op_yaml_info, paddle::framework::RuntimeContext* runtime_ctx); std::shared_ptr BuildOperatorBase( - ir::Operation* op, - const std::unordered_map& name_map, + pir::Operation* op, + const std::unordered_map& name_map, const paddle::dialect::OpYamlInfoParser& op_yaml_info, const std::unordered_map& variable_2_var_name, @@ -74,12 +74,13 @@ template -void BuildPhiContext(ir::Operation* op, - const std::unordered_map& name_map, - paddle::framework::Scope* scope, - paddle::framework::Scope* local_scope, - const paddle::dialect::OpYamlInfoParser& op_yaml_info, - Context* ctx) { +void BuildPhiContext( + pir::Operation* op, + const std::unordered_map& name_map, + paddle::framework::Scope* scope, + paddle::framework::Scope* local_scope, + const paddle::dialect::OpYamlInfoParser& op_yaml_info, + 
Context* ctx) { paddle::framework::Scope* inner_scope = local_scope != nullptr ? local_scope : scope; VLOG(6) << "Build " << get_type_name() << " in scope[" << scope @@ -96,7 +97,7 @@ void BuildPhiContext(ir::Operation* op, true, phi::errors::NotFound("param [%s] MUST in name2id map", t)); auto index = op_yaml_info.InputName2Id().at(t); - ir::Value ptr = op->operand_source(index); + pir::Value ptr = op->operand_source(index); if (!ptr) { phi::DenseTensor* ptr = nullptr; OutType in_ptr(ptr); @@ -142,7 +143,7 @@ void BuildPhiContext(ir::Operation* op, for (auto& t : vec_kernel_fn_attr_params) { if (name2id.count(t)) { // tensor attribute, get information from input - ir::Value ptr = op->operand_source(name2id.at(t)); + pir::Value ptr = op->operand_source(name2id.at(t)); auto in_var_name = name_map.at(ptr); @@ -153,7 +154,7 @@ void BuildPhiContext(ir::Operation* op, phi::Attribute attr = phi::TensorRef( &(inner_scope->FindVar(in_var_name)->Get())); ctx->EmplaceBackAttr(attr); - } else if (ptr.type().isa()) { + } else if (ptr.type().isa()) { auto& tensor_array = inner_scope->FindVar(in_var_name) ->Get(); if (tensor_array.size() == 1) { @@ -193,19 +194,20 @@ void BuildPhiContext(ir::Operation* op, } else if (attr_type_name == "paddle::dialect::DataTypeAttribute") { ctx->EmplaceBackAttr( attr_map[t].dyn_cast().data()); - } else if (attr_type_name == "ir::Int32Attribute") { - ctx->EmplaceBackAttr(attr_map[t].dyn_cast().data()); - } else if (attr_type_name == "ir::Int64Attribute") { - ctx->EmplaceBackAttr(attr_map[t].dyn_cast().data()); - } else if (attr_type_name == "ir::FloatAttribute") { - ctx->EmplaceBackAttr(attr_map[t].dyn_cast().data()); - } else if (attr_type_name == "ir::BoolAttribute") { - ctx->EmplaceBackAttr(attr_map[t].dyn_cast().data()); - } else if (attr_type_name == "ir::StrAttribute") { - ctx->EmplaceBackAttr(attr_map[t].dyn_cast().AsString()); + } else if (attr_type_name == "pir::Int32Attribute") { + ctx->EmplaceBackAttr(attr_map[t].dyn_cast().data()); + } else if (attr_type_name == "pir::Int64Attribute") { + ctx->EmplaceBackAttr(attr_map[t].dyn_cast().data()); + } else if (attr_type_name == "pir::FloatAttribute") { + ctx->EmplaceBackAttr(attr_map[t].dyn_cast().data()); + } else if (attr_type_name == "pir::BoolAttribute") { + ctx->EmplaceBackAttr(attr_map[t].dyn_cast().data()); + } else if (attr_type_name == "pir::StrAttribute") { + ctx->EmplaceBackAttr( + attr_map[t].dyn_cast().AsString()); } else if (attr_type_name == - "ir::ArrayAttribute") { - auto array_list = attr_map[t].dyn_cast().AsVector(); + "pir::ArrayAttribute") { + auto array_list = attr_map[t].dyn_cast().AsVector(); std::vector vec_res; if (array_list.size() > 0) { PADDLE_ENFORCE_EQ( @@ -220,29 +222,29 @@ void BuildPhiContext(ir::Operation* op, } } ctx->EmplaceBackAttr(vec_res); - } else if (attr_type_name == "ir::ArrayAttribute") { - auto array_list = attr_map[t].dyn_cast().AsVector(); + } else if (attr_type_name == "pir::ArrayAttribute") { + auto array_list = attr_map[t].dyn_cast().AsVector(); std::vector vec_res; if (array_list.size() > 0) { PADDLE_ENFORCE_EQ( - array_list[0].isa(), + array_list[0].isa(), true, phi::errors::Unimplemented( - "the 0th elementwise MUST be ir::Int32Attribute")); + "the 0th elementwise MUST be pir::Int32Attribute")); for (size_t i = 0; i < array_list.size(); ++i) { vec_res.push_back( - array_list[i].dyn_cast().data()); + array_list[i].dyn_cast().data()); } } ctx->EmplaceBackAttr(vec_res); - } else if (attr_type_name == "ir::ArrayAttribute") { - auto array_list = 
attr_map[t].dyn_cast().AsVector(); + } else if (attr_type_name == "pir::ArrayAttribute") { + auto array_list = attr_map[t].dyn_cast().AsVector(); std::vector vec_res; if (array_list.size() > 0) { - if (array_list[0].isa()) { + if (array_list[0].isa()) { for (size_t i = 0; i < array_list.size(); ++i) { vec_res.push_back( - array_list[i].dyn_cast().data()); + array_list[i].dyn_cast().data()); } } else { @@ -251,37 +253,37 @@ void BuildPhiContext(ir::Operation* op, } } ctx->EmplaceBackAttr(vec_res); - } else if (attr_type_name == "ir::ArrayAttribute") { - auto array_list = attr_map[t].dyn_cast().AsVector(); + } else if (attr_type_name == "pir::ArrayAttribute") { + auto array_list = attr_map[t].dyn_cast().AsVector(); std::vector vec_res; if (array_list.size() > 0) { PADDLE_ENFORCE_EQ( - array_list[0].isa(), + array_list[0].isa(), true, phi::errors::PreconditionNotMet( - "Element in array list MUST be ir::Int64Attribute ")); + "Element in array list MUST be pir::Int64Attribute ")); for (size_t i = 0; i < array_list.size(); ++i) { vec_res.push_back( - array_list[i].dyn_cast().data()); + array_list[i].dyn_cast().data()); } } ctx->EmplaceBackAttr(vec_res); - } else if (attr_type_name == "ir::ArrayAttribute") { - auto array_list = attr_map[t].dyn_cast().AsVector(); + } else if (attr_type_name == "pir::ArrayAttribute") { + auto array_list = attr_map[t].dyn_cast().AsVector(); std::vector vec_res; if (array_list.size() > 0) { PADDLE_ENFORCE_EQ( - array_list[0].isa(), + array_list[0].isa(), true, phi::errors::PreconditionNotMet( - "Element in array list MUST be ir::Int64Attribute ")); + "Element in array list MUST be pir::Int64Attribute ")); for (size_t i = 0; i < array_list.size(); ++i) { vec_res.push_back( - array_list[i].dyn_cast().data()); + array_list[i].dyn_cast().data()); } } ctx->EmplaceBackAttr(vec_res); @@ -300,7 +302,7 @@ void BuildPhiContext(ir::Operation* op, // TODO(phlrain): use var type instead of op name for (size_t i = 0; i < op->num_results(); ++i) { - ir::Value out_ptr = op->result(i); + pir::Value out_ptr = op->result(i); auto out_type = out_ptr.type(); if (out_type) { auto& name = name_map.at(out_ptr); @@ -320,7 +322,7 @@ void BuildPhiContext(ir::Operation* op, ctx->EmplaceBackOutput(OutType(const_cast( &(inner_scope->FindVar(name_map.at(out_ptr)) ->Get())))); - } else if (out_type.isa()) { + } else if (out_type.isa()) { OutListType outputs; auto& variable_array = inner_scope->FindVar(name_map.at(out_ptr)) ->Get(); @@ -348,4 +350,4 @@ void BuildPhiContext(ir::Operation* op, VLOG(6) << "Done build phi context"; } -} // namespace ir +} // namespace pir diff --git a/paddle/fluid/ir/transforms/CMakeLists.txt b/paddle/fluid/pir/transforms/CMakeLists.txt similarity index 71% rename from paddle/fluid/ir/transforms/CMakeLists.txt rename to paddle/fluid/pir/transforms/CMakeLists.txt index 36e06410d338a..ce2cb40f0eba4 100644 --- a/paddle/fluid/ir/transforms/CMakeLists.txt +++ b/paddle/fluid/pir/transforms/CMakeLists.txt @@ -1,12 +1,12 @@ cc_library( transform_general_functions SRCS transform_general_functions.cc - DEPS pd_dialect_core) + DEPS pd_op_dialect_core) cc_library( pd_op_to_kernel_pass SRCS pd_op_to_kernel_pass.cc - DEPS pd_kernel_dialect pd_dialect_core pd_dialect_utils) + DEPS pd_kernel_dialect pd_op_dialect_core pd_op_dialect_utils) cc_library( _constant_folding_pass @@ -16,4 +16,4 @@ cc_library( cc_library( pd_inplace_pass SRCS inplace_pass.cc - DEPS pd_dialect_core op_yaml_info_parser) + DEPS pd_op_dialect_core op_yaml_info_parser) diff --git 
a/paddle/fluid/ir/transforms/constant_folding_pass.cc b/paddle/fluid/pir/transforms/constant_folding_pass.cc similarity index 61% rename from paddle/fluid/ir/transforms/constant_folding_pass.cc rename to paddle/fluid/pir/transforms/constant_folding_pass.cc index 93699e3eae165..d3f78787841f0 100644 --- a/paddle/fluid/ir/transforms/constant_folding_pass.cc +++ b/paddle/fluid/pir/transforms/constant_folding_pass.cc @@ -12,71 +12,74 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/ir/transforms/constant_folding_pass.h" +#include "paddle/fluid/pir/transforms/constant_folding_pass.h" #include #include #include // NOTE(zhangbo9674): File pd_op.h is generated by op_gen.py, see details in -// paddle/fluid/ir/dialect/CMakeLists.txt. -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_op.h" +// paddle/fluid/pir/dialect/CMakeLists.txt. +#include "paddle/fluid/pir/dialect/operator/ir/pd_op.h" #include "paddle/fluid/framework/new_executor/interpretercore.h" #include "paddle/fluid/framework/scope.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h" -#include "paddle/fluid/ir/transforms/pd_op_to_kernel_pass.h" -#include "paddle/fluid/ir/transforms/transform_general_functions.h" -#include "paddle/ir/core/builtin_op.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/operation.h" -#include "paddle/ir/core/parameter.h" -#include "paddle/ir/core/program.h" -#include "paddle/ir/pass/pass.h" -#include "paddle/ir/pattern_rewrite/frozen_rewrite_pattern_set.h" -#include "paddle/ir/pattern_rewrite/pattern_match.h" -#include "paddle/ir/pattern_rewrite/pattern_rewrite_driver.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_dialect.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_type.h" +#include "paddle/fluid/pir/transforms/pd_op_to_kernel_pass.h" +#include "paddle/fluid/pir/transforms/transform_general_functions.h" #include "paddle/phi/common/place.h" #include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/core/enforce.h" +#include "paddle/pir/core/builtin_op.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/operation.h" +#include "paddle/pir/core/parameter.h" +#include "paddle/pir/core/program.h" +#include "paddle/pir/pass/pass.h" +#include "paddle/pir/pattern_rewrite/frozen_rewrite_pattern_set.h" +#include "paddle/pir/pattern_rewrite/pattern_match.h" +#include "paddle/pir/pattern_rewrite/pattern_rewrite_driver.h" namespace { -class ConstantFoldingPattern : public ir::RewritePattern { +class ConstantFoldingPattern : public pir::RewritePattern { public: - ConstantFoldingPattern(ir::IrContext* context, - ir::PatternBenefit benefit = 1, + ConstantFoldingPattern(pir::IrContext* context, + pir::PatternBenefit benefit = 1, const std::vector& generated_names = {}) : RewritePattern(MatchAnyOpTypeTag(), benefit, context, generated_names) { } - bool Match(ir::Operation* op) const override { + bool Match(pir::Operation* op) const override { // TODO(liuyuanle): Use trait to improve robustness. - if (op->dyn_cast() || - op->dyn_cast() || + if (op->dyn_cast() || + op->dyn_cast() || op->dyn_cast()) return false; // Inputs must come from get parameter op. 
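// Editorial aside: the operand check below encodes "every input's defining
// op is a get_parameter op". With the template arguments that extraction
// stripped from this hunk restored, the test reads (assumed reconstruction):
//   pir::GetDefiningOpForInput(op, i)->dyn_cast<pir::GetParameterOp>() != nullptr
// and the three op kinds excluded from folding above are assumed to be
// pir::GetParameterOp, pir::SetParameterOp and paddle::dialect::FetchOp --
// folding any of those would either be circular or drop a program output.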
for (uint32_t i = 0; i < op->num_operands(); ++i) - if (ir::GetDefiningOpForInput(op, i)->dyn_cast() == + if (pir::GetDefiningOpForInput(op, i)->dyn_cast() == nullptr) return false; return true; } - void Rewrite(ir::Operation* op, - ir::PatternRewriter& rewriter) const override { // NOLINT - ir::Program* program = op->GetParentProgram(); + void Rewrite(pir::Operation* op, + pir::PatternRewriter& rewriter) const override { // NOLINT + pir::Program* program = op->GetParentProgram(); auto temp_program = BuildProgramFromOperation(op); std::vector fetch_var_names; auto block = temp_program->block(); for (auto it = block->begin(); it != block->end(); ++it) { - if ((*it)->name() == "pd.fetch") { - size_t index = - (*it)->attributes().at("col").dyn_cast().data(); + if ((*it)->name() == "pd_op.fetch") { + size_t index = (*it) + ->attributes() + .at("col") + .dyn_cast() + .data(); if (fetch_var_names.size() < index + 1) { fetch_var_names.resize(index + 1); @@ -85,7 +88,7 @@ class ConstantFoldingPattern : public ir::RewritePattern { fetch_var_names[index] = (*it) ->attributes() .at("name") - .dyn_cast() + .dyn_cast() .AsString() + "@fetch"; } @@ -104,10 +107,11 @@ class ConstantFoldingPattern : public ir::RewritePattern { // TODO(liuyuanle): Support multiple output. auto out_tensor = PADDLE_GET_CONST(phi::DenseTensor, fetch_list[0]); - std::unique_ptr parameter = std::make_unique( - reinterpret_cast(out_tensor.data()), - out_tensor.numel() * phi::SizeOf(out_tensor.dtype()), - op->result(0).type()); + std::unique_ptr parameter = + std::make_unique( + reinterpret_cast(out_tensor.data()), + out_tensor.numel() * phi::SizeOf(out_tensor.dtype()), + op->result(0).type()); std::string param_name = "@constant_folding_pass@_" + std::to_string(suffix_++); @@ -119,20 +123,20 @@ class ConstantFoldingPattern : public ir::RewritePattern { program->SetParameter(param_name, std::move(parameter)); // rewriter.SetInsertionPoint(op); auto get_parameter_op = - rewriter.Build(param_name, op->result(0).type()); + rewriter.Build(param_name, op->result(0).type()); rewriter.ReplaceAllUsesWith(op->result(0), get_parameter_op->result(0)); rewriter.EraseOp(op); } private: - std::unique_ptr BuildProgramFromOperation( - ir::Operation* op) const { - auto program = std::make_unique(ir_context()); - ir::Builder builder = ir::Builder(ir_context(), program->block()); + std::unique_ptr BuildProgramFromOperation( + pir::Operation* op) const { + auto program = std::make_unique(ir_context()); + pir::Builder builder = pir::Builder(ir_context(), program->block()); // prepare op inputs - std::vector op_inputs; + std::vector op_inputs; for (uint32_t i = 0; i < op->num_operands(); i++) { PADDLE_ENFORCE_EQ( op->operand_source(i).type().isa(), @@ -141,22 +145,22 @@ class ConstantFoldingPattern : public ir::RewritePattern { "Op's input must be a dense tensor type.")); auto [param_name, param] = - ir::GetParameterFromValue(op->operand_source(i)); + pir::GetParameterFromValue(op->operand_source(i)); program->SetParameter(param_name, - std::make_unique(*param)); + std::make_unique(*param)); auto* param_var = scope_.FindVar(param_name); PADDLE_ENFORCE_NOT_NULL( param_var, phi::errors::InvalidArgument("Parameter var not in scope.")); - auto get_parameter_op = builder.Build( + auto get_parameter_op = builder.Build( param_name, op->operand_source(i).type()); op_inputs.push_back(get_parameter_op->result(0)); } // prepare op outputs - std::vector output_types; + std::vector output_types; for (uint32_t i = 0; i < op->num_results(); i++) { 
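// The loop below mirrors each result type of the matched op into the
// temporary program (the stripped element type of output_types is
// pir::Type). Rewrite() above then scans that temporary program for
// pd_op.fetch ops via their "col" attribute, so BuildProgramFromOperation
// presumably appends one fetch per output so the folded tensors can be
// retrieved from the fetch list by name.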
output_types.push_back(op->result(i).type()); } @@ -185,39 +189,39 @@ class ConstantFoldingPattern : public ir::RewritePattern { inline static paddle::framework::interpreter::ExecutionConfig exe_config_{}; }; -class ConstantFoldingPass : public ir::Pass { +class ConstantFoldingPass : public pir::Pass { public: // TODO(liuyuanle): Naming convention for pass. - ConstantFoldingPass() : ir::Pass("ConstantFoldingPass", 1) {} + ConstantFoldingPass() : pir::Pass("ConstantFoldingPass", 1) {} - bool Initialize(ir::IrContext* context) override { - ir::RewritePatternSet ps(context); + bool Initialize(pir::IrContext* context) override { + pir::RewritePatternSet ps(context); ps.Add(context); - patterns_ = ir::FrozenRewritePatternSet(std::move(ps)); + patterns_ = pir::FrozenRewritePatternSet(std::move(ps)); return true; } - void Run(ir::Operation* op) override { - ir::GreedyRewriteConfig cfg; + void Run(pir::Operation* op) override { + pir::GreedyRewriteConfig cfg; cfg.use_top_down_traversal = true; cfg.max_iterations = 10; - ir::ApplyPatternsGreedily(op->region(0), patterns_, cfg); + pir::ApplyPatternsGreedily(op->region(0), patterns_, cfg); } - bool CanApplyOn(ir::Operation* op) const override { + bool CanApplyOn(pir::Operation* op) const override { return op->name() == "builtin.module" && op->num_regions() > 0; } private: - ir::FrozenRewritePatternSet patterns_; + pir::FrozenRewritePatternSet patterns_; }; } // namespace -namespace ir { +namespace pir { std::unique_ptr CreateConstantFoldingPass() { return std::make_unique(); } -} // namespace ir +} // namespace pir diff --git a/paddle/fluid/ir/transforms/constant_folding_pass.h b/paddle/fluid/pir/transforms/constant_folding_pass.h similarity index 90% rename from paddle/fluid/ir/transforms/constant_folding_pass.h rename to paddle/fluid/pir/transforms/constant_folding_pass.h index 0c5ca794ad5bc..b49c9d90493b1 100644 --- a/paddle/fluid/ir/transforms/constant_folding_pass.h +++ b/paddle/fluid/pir/transforms/constant_folding_pass.h @@ -15,12 +15,12 @@ #pragma once #include -#include "paddle/ir/core/dll_decl.h" +#include "paddle/pir/core/dll_decl.h" -namespace ir { +namespace pir { class Pass; IR_API std::unique_ptr CreateConstantFoldingPass(); -} // namespace ir +} // namespace pir diff --git a/paddle/fluid/ir/transforms/inplace_pass.cc b/paddle/fluid/pir/transforms/inplace_pass.cc similarity index 70% rename from paddle/fluid/ir/transforms/inplace_pass.cc rename to paddle/fluid/pir/transforms/inplace_pass.cc index 222abc8344895..adfa5866799b9 100644 --- a/paddle/fluid/ir/transforms/inplace_pass.cc +++ b/paddle/fluid/pir/transforms/inplace_pass.cc @@ -12,25 +12,25 @@ // See the License for the specific language governing permissions and // limitations under the License. 
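Before the inplace pass itself, note that it and ConstantFoldingPass above implement the same pir::Pass contract: a (name, opt_level) constructor, an optional Initialize() hook that builds the frozen pattern set once, Run() for the actual rewriting, and CanApplyOn() gating the pass to the top-level builtin.module operation. A minimal standalone analogue of that contract (all types here are stand-ins, not the pir API; the real base class comes from paddle/pir/pass/pass.h per the includes below):

#include <iostream>
#include <string>
#include <utility>

// Stand-ins for pir::Operation and the pir::Pass interface.
struct Operation {
  std::string name;
  int num_regions = 1;
};

class Pass {
 public:
  Pass(std::string name, int opt_level)
      : name_(std::move(name)), opt_level_(opt_level) {}
  virtual ~Pass() = default;
  virtual bool Initialize() { return true; }  // one-time setup, e.g. patterns
  virtual void Run(Operation* op) = 0;        // rewrite everything under op
  virtual bool CanApplyOn(const Operation& op) const {
    // Same gate both passes above use: only the top-level module qualifies.
    return op.name == "builtin.module" && op.num_regions > 0;
  }

 private:
  std::string name_;
  int opt_level_;
};

class NoopPass : public Pass {
 public:
  NoopPass() : Pass("NoopPass", 1) {}
  void Run(Operation* op) override {
    std::cout << "run on " << op->name << "\n";
  }
};

int main() {
  Operation module{"builtin.module"};
  NoopPass pass;
  if (pass.Initialize() && pass.CanApplyOn(module)) pass.Run(&module);
}

InplacePass below plugs into the same contract at opt level 3 and additionally registers itself through REGISTER_IR_PASS(inplace, InplacePass).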
-#include "paddle/fluid/ir/transforms/inplace_pass.h" - -#include "paddle/fluid/ir/dialect/paddle_dialect/interface/op_yaml_info.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/trait/inplace.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/utils/op_yaml_info_parser.h" -#include "paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_attribute.h" -#include "paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_dialect.h" -#include "paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_type.h" -#include "paddle/ir/core/builtin_op.h" -#include "paddle/ir/core/operation.h" -#include "paddle/ir/pass/pass.h" -#include "paddle/ir/pass/pass_registry.h" +#include "paddle/fluid/pir/transforms/inplace_pass.h" + +#include "paddle/fluid/pir/dialect/kernel/ir/kernel_attribute.h" +#include "paddle/fluid/pir/dialect/kernel/ir/kernel_dialect.h" +#include "paddle/fluid/pir/dialect/kernel/ir/kernel_type.h" +#include "paddle/fluid/pir/dialect/operator/interface/op_yaml_info.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_type.h" +#include "paddle/fluid/pir/dialect/operator/trait/inplace.h" +#include "paddle/fluid/pir/dialect/operator/utils/op_yaml_info_parser.h" +#include "paddle/pir/core/builtin_op.h" +#include "paddle/pir/core/operation.h" +#include "paddle/pir/pass/pass.h" +#include "paddle/pir/pass/pass_registry.h" namespace details { // NOTE(zhangbo): Which kind of value can be deleted? // (1) Value's type needs to be AllocatedDenseTensorType or // AllocatedSelectedRowsType; (2) Value's is not persisable. -static bool CanBeDeleted(ir::Value value) { +static bool CanBeDeleted(pir::Value value) { if (!value.type()) { return false; } @@ -41,17 +41,17 @@ static bool CanBeDeleted(ir::Value value) { if (value.GetDefiningOp()->HasAttribute(kAttrIsPersisable)) { return !(value.GetDefiningOp() ->attribute(kAttrIsPersisable) - .dyn_cast<::ir::ArrayAttribute>() - .AsVector()[value.dyn_cast<::ir::OpResult>().GetResultIndex()] - .dyn_cast<::ir::BoolAttribute>() + .dyn_cast() + .AsVector()[value.dyn_cast().GetResultIndex()] + .dyn_cast() .data()); } return true; } -static bool CanDoInplace(const std::unordered_set& eager_dels, - ir::Value input, - ir::Value output) { +static bool CanDoInplace(const std::unordered_set& eager_dels, + pir::Value input, + pir::Value output) { if (input.type() != output.type()) { VLOG(9) << " -- input's type != output's type, can't do inplace"; return false; @@ -63,16 +63,17 @@ static bool CanDoInplace(const std::unordered_set& eager_dels, return true; } -static bool IsNoNeedBuffer(ir::Operation* op, ir::Value value) { - if (op->dialect()->name().compare( - paddle::dialect::PaddleKernelDialect::name()) != 0) { +static bool IsNoNeedBuffer(pir::Operation* op, pir::Value value) { + if (op->dialect()->name().compare(paddle::dialect::KernelDialect::name()) != + 0) { VLOG(8) << op->name() << "is not a kernel_dialect op, no need buffer is false"; return false; } auto op_name = - op->attributes().at("op_name").dyn_cast<::ir::StrAttribute>().AsString(); - ir::OpInfo op_info = ir::IrContext::Instance()->GetRegisteredOpInfo(op_name); + op->attributes().at("op_name").dyn_cast().AsString(); + pir::OpInfo op_info = + pir::IrContext::Instance()->GetRegisteredOpInfo(op_name); if (op_info) { auto info_interface = op_info.GetInterfaceImpl(); @@ -90,27 +91,26 @@ static bool IsNoNeedBuffer(ir::Operation* op, ir::Value value) { return false; } -// NOTE(zhangbo): pd.feed's output and pd.fetch's input can not be eager +// 
NOTE(zhangbo): pd_op.feed's output and pd_op.fetch's input can not be eager // deleted. -static std::unordered_set GetSkipDeletionValues(ir::Block* block) { - std::unordered_set skip_dels; +static std::unordered_set GetSkipDeletionValues(pir::Block* block) { + std::unordered_set skip_dels; for (auto& op : *block) { - if (op->dialect()->name().compare( - paddle::dialect::PaddleKernelDialect::name()) != 0) { + if (op->dialect()->name().compare(paddle::dialect::KernelDialect::name()) != + 0) { continue; } IR_ENFORCE(op->attributes().count("op_name") > 0, "kernel_dialect op should own an 'op_name' attribute."); - auto upper_op_name = op->attributes() - .at("op_name") - .dyn_cast<::ir::StrAttribute>() - .AsString(); + auto upper_op_name = + op->attributes().at("op_name").dyn_cast().AsString(); - if (upper_op_name == "pd.feed" || upper_op_name == "pd.data") { + if (upper_op_name == "pd_op.feed" || upper_op_name == "pd_op.data") { skip_dels.insert(op->result(0)); continue; } - if (upper_op_name == "pd.fetch" || upper_op_name == "pd.shadow_output") { + if (upper_op_name == "pd_op.fetch" || + upper_op_name == "pd_op.shadow_output") { skip_dels.insert(op->operand_source(0)); continue; } @@ -121,20 +121,20 @@ static std::unordered_set GetSkipDeletionValues(ir::Block* block) { // NOTE(zhangbo): For inplace Pass, currently only the kernel_dialect operator // is supported. Therefore, this function only returns the values in the // kernel_dialect operator that can be eager deleted. -static std::unordered_map> -GetEagerDeletionValues(ir::Block* block) { - std::unordered_set skip_dels = GetSkipDeletionValues(block); +static std::unordered_map> +GetEagerDeletionValues(pir::Block* block) { + std::unordered_set skip_dels = GetSkipDeletionValues(block); - std::unordered_map del_value_2_op; + std::unordered_map del_value_2_op; for (auto& op : *block) { std::string upper_op_name = op->name(); - if (op->dialect()->name().compare( - paddle::dialect::PaddleKernelDialect::name()) == 0) { + if (op->dialect()->name().compare(paddle::dialect::KernelDialect::name()) == + 0) { IR_ENFORCE(op->attributes().count("op_name") > 0, "kernel_dialect op should own an 'op_name' attribute."); upper_op_name = op->attributes() .at("op_name") - .dyn_cast<::ir::StrAttribute>() + .dyn_cast() .AsString(); } @@ -154,14 +154,15 @@ GetEagerDeletionValues(ir::Block* block) { } for (size_t i = 0; i < op->num_results(); ++i) { - ir::Value output = op->result(i); + pir::Value output = op->result(i); if (output && CanBeDeleted(output)) { del_value_2_op[output] = op; } } } - std::unordered_map> eager_dels; + std::unordered_map> + eager_dels; for (auto& kv : del_value_2_op) { eager_dels[kv.second].insert(kv.first); } @@ -169,23 +170,23 @@ GetEagerDeletionValues(ir::Block* block) { return eager_dels; } -static std::unordered_map GetInplaceOps( - ir::Block* block) { +static std::unordered_map GetInplaceOps( + pir::Block* block) { const auto eager_dels = GetEagerDeletionValues(block); - std::unordered_map inplace_ops; + std::unordered_map inplace_ops; - std::unordered_set visited_values; - std::unordered_set reused_input_values; - std::unordered_set reused_output_values; + std::unordered_set visited_values; + std::unordered_set reused_input_values; + std::unordered_set reused_output_values; for (auto& op : *block) { for (size_t i = 0; i < op->num_operands(); ++i) { visited_values.insert(op->operand_source(i)); } - if (op->dialect()->name().compare( - paddle::dialect::PaddleKernelDialect::name()) != 0) { + if 
(op->dialect()->name().compare(paddle::dialect::KernelDialect::name()) != + 0) { VLOG(6) << op->name() << "is not a kernel_dialect op, inplace only support " "kernel_dialect operators"; @@ -197,13 +198,13 @@ static std::unordered_map GetInplaceOps( auto upper_op_attrs = op->attributes(); auto upper_op_name = - upper_op_attrs.at("op_name").dyn_cast<::ir::StrAttribute>().AsString(); + upper_op_attrs.at("op_name").dyn_cast().AsString(); VLOG(6) << "analyse op: " << upper_op_name; // NOTE(zhangbo): add_grad cpu kernel can't do inplace, for the reason shown // in the function: CommonElementwiseBroadcastBackward // (paddle/phi/kernels/funcs/elementwise_grad_base.h) - if ((upper_op_name == "pd.add_grad") && + if ((upper_op_name == "pd_op.add_grad") && (upper_op_attrs.at("kernel_key") .dyn_cast() .data() @@ -215,7 +216,7 @@ static std::unordered_map GetInplaceOps( } if (upper_op_attrs.count("is_inplace") != 0 && - upper_op_attrs.at("is_inplace").dyn_cast().data()) { + upper_op_attrs.at("is_inplace").dyn_cast().data()) { VLOG(6) << upper_op_name << " is already an inplace op."; for (size_t i = 0; i < op->num_operands(); ++i) { reused_input_values.insert(op->operand_source(i)); @@ -227,8 +228,8 @@ static std::unordered_map GetInplaceOps( continue; } - ir::OpInfo upper_inplace_op_info = - ir::IrContext::Instance()->GetRegisteredOpInfo(upper_op_name + "_"); + pir::OpInfo upper_inplace_op_info = + pir::IrContext::Instance()->GetRegisteredOpInfo(upper_op_name + "_"); if (eager_dels.count(op) == 0 || (!upper_inplace_op_info)) { VLOG(6) << upper_op_name @@ -300,12 +301,12 @@ static std::unordered_map GetInplaceOps( } } // namespace details -class InplacePass : public ir::Pass { +class InplacePass : public pir::Pass { public: - InplacePass() : ir::Pass("InplacePass", 3) {} + InplacePass() : pir::Pass("InplacePass", 3) {} - void Run(ir::Operation* op) override { - auto module_op = op->dyn_cast(); + void Run(pir::Operation* op) override { + auto module_op = op->dyn_cast(); IR_ENFORCE(module_op, "InplacePass should run on module op."); auto* block = module_op.block(); @@ -315,9 +316,9 @@ class InplacePass : public ir::Pass { VLOG(6) << "Do inplace for: " << kv.first->attributes() .at("op_name") - .dyn_cast<::ir::StrAttribute>() + .dyn_cast() .AsString(); - ir::Block::iterator insert_pos = + pir::Block::iterator insert_pos = std::find(block->begin(), block->end(), kv.first); IR_ENFORCE(insert_pos != block->end(), "Operator %s not found in block.", @@ -325,26 +326,26 @@ class InplacePass : public ir::Pass { kv.first->set_attribute( "op_name", - ir::StrAttribute::get(ir::IrContext::Instance(), kv.second)); + pir::StrAttribute::get(pir::IrContext::Instance(), kv.second)); kv.first->set_attribute( "is_inplace", - ir::BoolAttribute::get(ir::IrContext::Instance(), true)); + pir::BoolAttribute::get(pir::IrContext::Instance(), true)); } LOG_FIRST_N(INFO, 1) << "Apply inplace pass on lowering ::ir::Program to Kernel Dialect."; } - bool CanApplyOn(ir::Operation* op) const override { + bool CanApplyOn(pir::Operation* op) const override { return op->name() == "builtin.module" && op->num_regions() > 0; } }; -namespace ir { +namespace pir { -std::unique_ptr CreateInplacePass() { +std::unique_ptr CreateInplacePass() { return std::make_unique(); } -} // namespace ir +} // namespace pir REGISTER_IR_PASS(inplace, InplacePass); diff --git a/paddle/fluid/ir/transforms/inplace_pass.h b/paddle/fluid/pir/transforms/inplace_pass.h similarity index 90% rename from paddle/fluid/ir/transforms/inplace_pass.h rename to 
paddle/fluid/pir/transforms/inplace_pass.h index 028d6a9eb94e8..c6d540243edc9 100644 --- a/paddle/fluid/ir/transforms/inplace_pass.h +++ b/paddle/fluid/pir/transforms/inplace_pass.h @@ -15,12 +15,12 @@ #pragma once #include -#include "paddle/ir/core/dll_decl.h" +#include "paddle/pir/core/dll_decl.h" -namespace ir { +namespace pir { class Pass; std::unique_ptr CreateInplacePass(); -} // namespace ir +} // namespace pir diff --git a/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc b/paddle/fluid/pir/transforms/pd_op_to_kernel_pass.cc similarity index 83% rename from paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc rename to paddle/fluid/pir/transforms/pd_op_to_kernel_pass.cc index 3555ebe354ab7..29b5c07f1dab9 100644 --- a/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc +++ b/paddle/fluid/pir/transforms/pd_op_to_kernel_pass.cc @@ -14,19 +14,19 @@ #include -#include "paddle/fluid/ir/transforms/pd_op_to_kernel_pass.h" - -#include "paddle/fluid/ir/dialect/paddle_dialect/interface/op_yaml_info.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_attribute.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/trait/inplace.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/utils/op_yaml_info_parser.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/utils/op_yaml_info_util.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/utils/utils.h" -#include "paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_attribute.h" -#include "paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_dialect.h" -#include "paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_op.h" -#include "paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_type.h" +#include "paddle/fluid/pir/transforms/pd_op_to_kernel_pass.h" + +#include "paddle/fluid/pir/dialect/kernel/ir/kernel_attribute.h" +#include "paddle/fluid/pir/dialect/kernel/ir/kernel_dialect.h" +#include "paddle/fluid/pir/dialect/kernel/ir/kernel_op.h" +#include "paddle/fluid/pir/dialect/kernel/ir/kernel_type.h" +#include "paddle/fluid/pir/dialect/operator/interface/op_yaml_info.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_attribute.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_dialect.h" +#include "paddle/fluid/pir/dialect/operator/trait/inplace.h" +#include "paddle/fluid/pir/dialect/operator/utils/op_yaml_info_parser.h" +#include "paddle/fluid/pir/dialect/operator/utils/op_yaml_info_util.h" +#include "paddle/fluid/pir/dialect/operator/utils/utils.h" #include "paddle/fluid/platform/place.h" #include "paddle/phi/api/lib/data_transform.h" #include "paddle/phi/api/lib/kernel_dispatch.h" @@ -51,22 +51,22 @@ std::unordered_map Str2PhiDataType = { }; const std::unordered_set UnchangeOutputOps = { - "pd.data", + "pd_op.data", "builtin.combine", "builtin.slice", "builtin.split", - "pd.feed", - "pd.fetch", + "pd_op.feed", + "pd_op.fetch", "builtin.set_parameter", "builtin.get_parameter", - "pd.shadow_output"}; + "pd_op.shadow_output"}; const std::unordered_set SpecialOpList = { "builtin.combine", "builtin.slice", "builtin.split"}; -ir::OpResult GetNewInput( - const ir::Value cur_in, - const std::unordered_map& map_value_pair, +pir::OpResult GetNewInput( + const pir::Value cur_in, + const std::unordered_map& map_value_pair, const int index, const std::string op_name) { PADDLE_ENFORCE_EQ( @@ -79,16 +79,16 @@ ir::OpResult GetNewInput( } void DealWithSpecialBuiltinOps( - ir::Operation* op_item, - ir::Program* program, - std::unordered_map* map_op_pair, - std::unordered_map* 
map_value_pair, - ir::IrContext* ctx) { + pir::Operation* op_item, + pir::Program* program, + std::unordered_map* map_op_pair, + std::unordered_map* map_value_pair, + pir::IrContext* ctx) { if (op_item->name() == "builtin.combine") { std::vector out_places; // Copy op inputs - std::vector vec_inputs; - std::vector vec_inner_types; + std::vector vec_inputs; + std::vector vec_inner_types; if (op_item->num_operands() > 0) { for (size_t i = 0; i < op_item->num_operands(); ++i) { auto cur_in = op_item->operand_source(i); @@ -117,14 +117,14 @@ void DealWithSpecialBuiltinOps( } } // Copy op output type - std::vector op_output_types; - ir::Type t1 = ir::VectorType::get(ctx, vec_inner_types); + std::vector op_output_types; + pir::Type t1 = pir::VectorType::get(ctx, vec_inner_types); op_output_types.push_back(t1); // Get op info - ir::OpInfo op_info = ctx->GetRegisteredOpInfo(op_item->name()); + pir::OpInfo op_info = ctx->GetRegisteredOpInfo(op_item->name()); // Generate new op - ir::Operation* op = ir::Operation::Create( + pir::Operation* op = pir::Operation::Create( vec_inputs, op_item->attributes(), op_output_types, op_info); program->block()->push_back(op); (*map_op_pair)[op_item] = op; @@ -137,8 +137,8 @@ void DealWithSpecialBuiltinOps( } if (op_item->name() == "builtin.slice") { - std::vector vec_inputs; - std::vector op_output_types; + std::vector vec_inputs; + std::vector op_output_types; if (op_item->num_operands() > 0) { for (size_t i = 0; i < op_item->num_operands(); ++i) { auto cur_in = op_item->operand_source(i); @@ -148,11 +148,11 @@ void DealWithSpecialBuiltinOps( } auto new_in = GetNewInput(cur_in, *map_value_pair, i, op_item->name()); vec_inputs.push_back(new_in); - if (new_in.type().isa()) { - auto vec_types = new_in.type().dyn_cast().data(); + if (new_in.type().isa()) { + auto vec_types = new_in.type().dyn_cast().data(); auto index = op_item->attributes() .at("index") - .dyn_cast() + .dyn_cast() .data(); op_output_types.push_back(vec_types[index]); } else { @@ -163,9 +163,9 @@ void DealWithSpecialBuiltinOps( } // Get op info - ir::OpInfo op_info = ctx->GetRegisteredOpInfo(op_item->name()); + pir::OpInfo op_info = ctx->GetRegisteredOpInfo(op_item->name()); // Generate new op - ir::Operation* op = ir::Operation::Create( + pir::Operation* op = pir::Operation::Create( vec_inputs, op_item->attributes(), op_output_types, op_info); program->block()->push_back(op); (*map_op_pair)[op_item] = op; @@ -180,8 +180,8 @@ void DealWithSpecialBuiltinOps( if (op_item->name() == "builtin.split") { std::vector out_places(op_item->num_results()); // Copy op inputs - std::vector vec_inputs; - std::vector op_output_types; + std::vector vec_inputs; + std::vector op_output_types; if (op_item->num_operands() > 0) { for (size_t i = 0; i < op_item->num_operands(); ++i) { auto cur_in = op_item->operand_source(i); @@ -192,8 +192,8 @@ void DealWithSpecialBuiltinOps( auto new_in = GetNewInput(cur_in, *map_value_pair, i, op_item->name()); vec_inputs.push_back(new_in); - if (new_in.type().isa()) { - auto vec_types = new_in.type().dyn_cast().data(); + if (new_in.type().isa()) { + auto vec_types = new_in.type().dyn_cast().data(); for (uint64_t idx = 0; idx < vec_types.size(); idx++) { op_output_types.push_back(vec_types[idx]); } @@ -205,9 +205,9 @@ void DealWithSpecialBuiltinOps( } // Get op info - ir::OpInfo op_info = ctx->GetRegisteredOpInfo(op_item->name()); + pir::OpInfo op_info = ctx->GetRegisteredOpInfo(op_item->name()); // Generate new op - ir::Operation* op = ir::Operation::Create( + pir::Operation* op = 
pir::Operation::Create( vec_inputs, op_item->attributes(), op_output_types, op_info); program->block()->push_back(op); (*map_op_pair)[op_item] = op; @@ -221,7 +221,7 @@ void DealWithSpecialBuiltinOps( VLOG(6) << "Deep copy a new builtin op: " << op_item->name(); } -bool NeedFallBackCpu(const ir::Operation* op, +bool NeedFallBackCpu(const pir::Operation* op, const std::string& kernel_fn_name, const phi::KernelKey& kernel_key) { if (UnchangeOutputOps.count(op->name())) { @@ -275,14 +275,16 @@ phi::Backend GetDstBackend(const std::string& op_name, return dst_backend; } -bool NeedFallBackFromGPUDNN2GPU(ir::Operation* op, +bool NeedFallBackFromGPUDNN2GPU(pir::Operation* op, const phi::KernelKey kernel_key) { // NOTE(phlrain): keep the same kernel select strategy with // GetExepectKernelKey - if (op->name() == "pd.pool2d" || op->name() == "pd.pool2d_grad") { + if (op->name() == "pd_op.pool2d" || op->name() == "pd_op.pool2d_grad") { if (kernel_key.backend() == phi::Backend::GPUDNN && - (op->attributes().at("adaptive").dyn_cast().data() == - true)) { + (op->attributes() + .at("adaptive") + .dyn_cast() + .data() == true)) { return true; } } @@ -290,26 +292,26 @@ bool NeedFallBackFromGPUDNN2GPU(ir::Operation* op, return false; } -std::set GetSkipFeedNames(ir::Block* block) { +std::set GetSkipFeedNames(pir::Block* block) { std::set data_op_names; for (auto op_item : *block) { - if (op_item->name() == "pd.data") { + if (op_item->name() == "pd_op.data") { data_op_names.insert(op_item->attributes() .at("name") - .dyn_cast() + .dyn_cast() .AsString()); } } return data_op_names; } -bool SkipFeedOp(ir::Operation* op, const std::set& feed_names) { +bool SkipFeedOp(pir::Operation* op, const std::set& feed_names) { return feed_names.count( - op->attributes().at("name").dyn_cast().AsString()); + op->attributes().at("name").dyn_cast().AsString()); } std::vector> GetFakeTensorList( - ir::Value new_input_tmp) { + pir::Value new_input_tmp) { std::vector> vec_res; auto input_type = new_input_tmp.type(); @@ -356,8 +358,8 @@ std::vector> GetFakeTensorList( } else if (input_type.isa()) { vec_res.push_back(build_fake_selected_rows( input_type.dyn_cast())); - } else if (input_type.isa()) { - auto vec_inner_types = input_type.dyn_cast().data(); + } else if (input_type.isa()) { + auto vec_inner_types = input_type.dyn_cast().data(); for (size_t i = 0; i < vec_inner_types.size(); ++i) { if (vec_inner_types[i].isa()) { vec_res.push_back(build_fake_dense_tensor( @@ -372,29 +374,29 @@ std::vector> GetFakeTensorList( return vec_res; } -ir::OpResult AddPlaceTransferOp(ir::OpResult in, - ir::Type out_type, - const phi::Place& src_place, - const phi::Place& dst_place, - const phi::KernelKey& kernel_key, - ir::Program* program) { - ir::IrContext* ctx = ir::IrContext::Instance(); +pir::OpResult AddPlaceTransferOp(pir::OpResult in, + pir::Type out_type, + const phi::Place& src_place, + const phi::Place& dst_place, + const phi::KernelKey& kernel_key, + pir::Program* program) { + pir::IrContext* ctx = pir::IrContext::Instance(); std::string op_name = paddle::dialect::PhiKernelOp::name(); - ir::OpInfo op_info = ctx->GetRegisteredOpInfo(op_name); + pir::OpInfo op_info = ctx->GetRegisteredOpInfo(op_name); if ((src_place.GetType() == phi::AllocationType::CPU) && (dst_place.GetType() == phi::AllocationType::GPU)) { auto copy_kernel_key = kernel_key; copy_kernel_key.set_backend(phi::Backend::GPU); - std::unordered_map op_attribute{ - {"op_name", ir::StrAttribute::get(ctx, "pd.memcpy_h2d")}, - {"kernel_name", ir::StrAttribute::get(ctx, 
"memcpy_h2d")}, + std::unordered_map op_attribute{ + {"op_name", pir::StrAttribute::get(ctx, "pd_op.memcpy_h2d")}, + {"kernel_name", pir::StrAttribute::get(ctx, "memcpy_h2d")}, {"kernel_key", dialect::KernelAttribute::get(ctx, copy_kernel_key)}, - {"dst_place_type", ir::Int32Attribute::get(ctx, 1)}}; + {"dst_place_type", pir::Int32Attribute::get(ctx, 1)}}; - ir::Operation* op = - ir::Operation::Create({in}, op_attribute, {out_type}, op_info); + pir::Operation* op = + pir::Operation::Create({in}, op_attribute, {out_type}, op_info); if (in.GetDefiningOp()->HasAttribute(kAttrIsPersisable)) { op->set_attribute(kAttrIsPersisable, @@ -409,14 +411,14 @@ ir::OpResult AddPlaceTransferOp(ir::OpResult in, (dst_place.GetType() == phi::AllocationType::CPU)) { auto copy_kernel_key = kernel_key; copy_kernel_key.set_backend(phi::Backend::GPU); - std::unordered_map op_attribute{ - {"op_name", ir::StrAttribute::get(ctx, "pd.memcpy_d2h")}, - {"kernel_name", ir::StrAttribute::get(ctx, "memcpy_d2h")}, + std::unordered_map op_attribute{ + {"op_name", pir::StrAttribute::get(ctx, "pd_op.memcpy_d2h")}, + {"kernel_name", pir::StrAttribute::get(ctx, "memcpy_d2h")}, {"kernel_key", dialect::KernelAttribute::get(ctx, copy_kernel_key)}, - {"dst_place_type", ir::Int32Attribute::get(ctx, 0)}}; + {"dst_place_type", pir::Int32Attribute::get(ctx, 0)}}; - ir::Operation* op = - ir::Operation::Create({in}, op_attribute, {out_type}, op_info); + pir::Operation* op = + pir::Operation::Create({in}, op_attribute, {out_type}, op_info); program->block()->push_back(op); @@ -428,10 +430,10 @@ ir::OpResult AddPlaceTransferOp(ir::OpResult in, } } -ir::Type BuildOutputType(ir::Type type, - const phi::Place& place, - phi::DataType data_type, - ir::IrContext* ctx) { +pir::Type BuildOutputType(pir::Type type, + const phi::Place& place, + phi::DataType data_type, + pir::IrContext* ctx) { if (type.isa()) { auto dense_tensor_type = type.dyn_cast(); auto out_dtype = dense_tensor_type.dtype(); @@ -473,8 +475,8 @@ ir::Type BuildOutputType(ir::Type type, } phi::DataType GetKernelDataTypeByYamlInfo( - const ir::Operation* op, - const std::unordered_map& map_value_pair, + const pir::Operation* op, + const std::unordered_map& map_value_pair, const dialect::OpYamlInfoParser* op_info_parser) { auto& attr_map = op->attributes(); auto& data_type_info = op_info_parser->OpRuntimeInfo().kernel_key_dtype; @@ -495,8 +497,8 @@ phi::DataType GetKernelDataTypeByYamlInfo( if (type.isa()) { kernel_data_type = TransToPhiDataType( type.dyn_cast().dtype()); - } else if (type.isa()) { - auto vec_data = type.dyn_cast().data(); + } else if (type.isa()) { + auto vec_data = type.dyn_cast().data(); if (vec_data.empty()) { kernel_data_type = phi::DataType::UNDEFINED; } else { @@ -547,8 +549,8 @@ phi::DataType GetKernelDataTypeByYamlInfo( } phi::Backend GetKernelBackendByYamlInfo( - const ir::Operation* op, - const std::unordered_map& map_value_pair, + const pir::Operation* op, + const std::unordered_map& map_value_pair, const dialect::OpYamlInfoParser* op_info_parser) { auto& attr_map = op->attributes(); auto& backend_info = op_info_parser->OpRuntimeInfo().kernel_key_backend; @@ -565,8 +567,8 @@ phi::Backend GetKernelBackendByYamlInfo( if (type.isa()) { kernel_backend = paddle::experimental::ParseBackend( type.dyn_cast().place()); - } else if (type.isa()) { - auto vec_data = type.dyn_cast().data(); + } else if (type.isa()) { + auto vec_data = type.dyn_cast().data(); if (vec_data.empty()) { kernel_backend = phi::Backend::UNDEFINED; } else { @@ -617,11 +619,11 @@ phi::Backend 
GetKernelBackendByYamlInfo( } phi::KernelKey GetKernelKey( - ir::Operation* op, + pir::Operation* op, const phi::Place& place, - const std::unordered_map& map_value_pair, + const std::unordered_map& map_value_pair, dialect::OpYamlInfoParser* op_info_parser = nullptr) { - if (op->name() == "pd.feed") { + if (op->name() == "pd_op.feed") { // NOTE, for now feed op don't need a kernel, so the data type from Op // Result the next op use base program datatype return {phi::Backend::CPU, @@ -630,7 +632,7 @@ phi::KernelKey GetKernelKey( op->result(0).type().dyn_cast().dtype())}; } - if (op->name() == "pd.data") { + if (op->name() == "pd_op.data") { // NOTE, for now feed op don't need a kernel, so the data type from Op // Result the next op use base program datatype auto data_place = @@ -659,14 +661,14 @@ phi::KernelKey GetKernelKey( GetKernelBackendByYamlInfo(op, map_value_pair, op_info_parser); // parse all the input tensor - if (tensor_input_number == 0 || op->name() == "pd.full_") { + if (tensor_input_number == 0 || op->name() == "pd_op.full_") { // all the information have to get from attribute and context - if (op->name() == "pd.uniform") { + if (op->name() == "pd_op.uniform") { // try to process uniform, use shape to determin backend // TODO(phlrain): shuold support other initilize op auto define_op = op->operand_source(0).GetDefiningOp(); - if (define_op->name() == "pd.full_int_array") { + if (define_op->name() == "pd_op.full_int_array") { auto shape = define_op->attributes() .at("value") .dyn_cast() @@ -714,7 +716,7 @@ phi::KernelKey GetKernelKey( // don't know how to select the kernel in the next of op that // uses data op outout as inputs. So, we need set kernel backend // manually. - if (op->operand_source(i).GetDefiningOp()->name() == "pd.data") { + if (op->operand_source(i).GetDefiningOp()->name() == "pd_op.data") { auto data_op = op->operand_source(i).GetDefiningOp(); auto data_place = data_op->attributes() .at("place") @@ -733,7 +735,7 @@ phi::KernelKey GetKernelKey( auto combine_op = op->operand_source(i).GetDefiningOp(); for (size_t j = 0; j < combine_op->num_operands(); ++j) { if (combine_op->operand_source(j).GetDefiningOp()->name() == - "pd.data") { + "pd_op.data") { auto data_op = combine_op->operand_source(j).GetDefiningOp(); auto data_place = data_op->attributes() .at("place") @@ -777,36 +779,36 @@ phi::KernelKey GetKernelKey( return res; } -std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog, - phi::Place place) { +std::unique_ptr PdOpLowerToKernelPass(pir::Program* prog, + phi::Place place) { if (VLOG_IS_ON(2)) { std::stringstream ss; prog->Print(ss); VLOG(2) << "Program after lowering to kernel pass : " << ss.str(); } - auto program = std::make_unique(ir::IrContext::Instance()); + auto program = std::make_unique(pir::IrContext::Instance()); auto block = prog->block(); - ir::IrContext* ctx = ir::IrContext::Instance(); - ctx->GetOrRegisterDialect(); - ctx->GetOrRegisterDialect(); + pir::IrContext* ctx = pir::IrContext::Instance(); + ctx->GetOrRegisterDialect(); + ctx->GetOrRegisterDialect(); - std::unordered_map map_op_pair; - std::unordered_map map_value_pair; + std::unordered_map map_op_pair; + std::unordered_map map_value_pair; std::string phi_kernel_op_name = paddle::dialect::PhiKernelOp::name(); - ir::OpInfo phi_kernel_op_info = ctx->GetRegisteredOpInfo(phi_kernel_op_name); + pir::OpInfo phi_kernel_op_info = ctx->GetRegisteredOpInfo(phi_kernel_op_name); std::string legacy_kernel_op_name = paddle::dialect::LegacyKernelOp::name(); - ir::OpInfo legacy_kernel_op_info = 
+ pir::OpInfo legacy_kernel_op_info = ctx->GetRegisteredOpInfo(legacy_kernel_op_name); auto skip_feed_names = GetSkipFeedNames(block); for (auto op_item : *block) { VLOG(6) << "op name " << op_item->name(); - if ((op_item->name() == "pd.feed") && + if ((op_item->name() == "pd_op.feed") && SkipFeedOp(op_item, skip_feed_names)) { continue; } @@ -817,7 +819,7 @@ std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog, continue; } - // Lower from PaddleDialect to KernelDialect + // Lower from OperatorDialect to KernelDialect paddle::dialect::OpYamlInfoInterface op_info_interface = op_item->dyn_cast(); @@ -832,8 +834,8 @@ std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog, kernel_fn_str = op_info_parser->OpRuntimeInfo().kernel_func[0]; } - if (op_item->name() == "pd.add_n_" || - op_item->name() == "pd.add_n_with_kernel") { + if (op_item->name() == "pd_op.add_n_" || + op_item->name() == "pd_op.add_n_with_kernel") { if (op_item->result(0).type().isa()) { kernel_fn_str = "add_n_sr"; } @@ -843,7 +845,7 @@ std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog, GetKernelKey(op_item, place, map_value_pair, op_info_parser.get()); VLOG(6) << "kernel type " << kernel_key; - if (op_item->name() == "pd.load_combine") { + if (op_item->name() == "pd_op.load_combine") { kernel_key.set_dtype(phi::DataType::FLOAT32); } if (NeedFallBackCpu((op_item), kernel_fn_str, kernel_key)) { @@ -857,7 +859,7 @@ std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog, // only for single output // need to update the new kernel key layout and data type - std::vector op_output_types; + std::vector op_output_types; if (op_item->num_results() > 0) { auto phi_kernel = phi::KernelFactory::Instance().SelectKernelWithGPUDNN( kernel_fn_str, kernel_key); @@ -890,9 +892,9 @@ std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog, result_type.isa()) { op_output_types.push_back( BuildOutputType(result_type, out_place, out_phi_dtype, ctx)); - } else if (result_type.isa()) { - std::vector vec_inner_types; - auto base_types = result_type.dyn_cast().data(); + } else if (result_type.isa()) { + std::vector vec_inner_types; + auto base_types = result_type.dyn_cast().data(); for (auto& base_type : base_types) { if (base_type) { if (base_type.isa()) { @@ -904,7 +906,7 @@ std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog, } } else { // NOTE(phlrain): kernels do not support a nullptr in the output - ir::Type fp32_dtype = ir::Float32Type::get(ctx); + pir::Type fp32_dtype = pir::Float32Type::get(ctx); phi::DDim dims = {}; phi::DataLayout data_layout = phi::DataLayout::NCHW; phi::LoD lod = {{}}; @@ -918,7 +920,7 @@ std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog, } } - ir::Type t1 = ir::VectorType::get(ctx, vec_inner_types); + pir::Type t1 = pir::VectorType::get(ctx, vec_inner_types); op_output_types.push_back(t1); } else { PADDLE_THROW(phi::errors::Unimplemented( @@ -929,7 +931,7 @@ std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog, } // construct input - std::vector vec_inputs; + std::vector vec_inputs; if (op_item->num_operands() > 0) { for (size_t i = 0; i < op_item->num_operands(); ++i) { auto cur_in = op_item->operand_source(i); @@ -997,14 +999,14 @@ std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog, kernel_key, program.get()); } - } else if (new_in_type.isa()) { + } else if (new_in_type.isa()) { // [todo: need update here, support combine data transformer] // deal with pre combine op auto pre_define_op = cur_in.GetDefiningOp(); if (pre_define_op->name() == "builtin.combine") { - std::vector inner_inputs; - std::vector
types_in_vec; + std::vector inner_inputs; + std::vector types_in_vec; bool is_trans = false; for (size_t j = 0; j < pre_define_op->num_operands(); ++j) { auto in_i = map_value_pair.at(pre_define_op->operand_source(j)); @@ -1043,7 +1045,7 @@ std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog, auto out_place = phi::TransToPhiPlace(kernel.InputAt(i).backend); - ir::Type out_type; + pir::Type out_type; if (in_i_type.isa()) { out_type = dialect::AllocatedDenseTensorType::get( ctx, @@ -1081,12 +1083,12 @@ std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog, } if (is_trans) { // Add combine op - std::string combine_op_name(ir::CombineOp::name()); - ir::OpInfo op_info = ctx->GetRegisteredOpInfo(combine_op_name); + std::string combine_op_name(pir::CombineOp::name()); + pir::OpInfo op_info = ctx->GetRegisteredOpInfo(combine_op_name); - ir::Type target_vec_type = - ir::VectorType::get(ctx, types_in_vec); - ir::Operation* operation = ir::Operation::Create( + pir::Type target_vec_type = + pir::VectorType::get(ctx, types_in_vec); + pir::Operation* operation = pir::Operation::Create( inner_inputs, {}, {target_vec_type}, op_info); new_in = operation->result(0); @@ -1105,9 +1107,9 @@ std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog, } } - std::unordered_map op_attribute{ - {"op_name", ir::StrAttribute::get(ctx, op_item->name())}, - {"kernel_name", ir::StrAttribute::get(ctx, kernel_fn_str)}, + std::unordered_map op_attribute{ + {"op_name", pir::StrAttribute::get(ctx, op_item->name())}, + {"kernel_name", pir::StrAttribute::get(ctx, kernel_fn_str)}, {"kernel_key", dialect::KernelAttribute::get(ctx, kernel_key)}}; auto op_attr_map = op_item->attributes(); @@ -1116,15 +1118,15 @@ std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog, } if (op_item->HasTrait()) { - op_attribute.emplace("is_inplace", ir::BoolAttribute::get(ctx, true)); + op_attribute.emplace("is_inplace", pir::BoolAttribute::get(ctx, true)); } - ir::Operation* op; + pir::Operation* op; if (dialect::IsLegacyOp(op_item->name())) { - op = ir::Operation::Create( + op = pir::Operation::Create( vec_inputs, op_attribute, op_output_types, legacy_kernel_op_info); } else { - op = ir::Operation::Create( + op = pir::Operation::Create( vec_inputs, op_attribute, op_output_types, phi_kernel_op_info); } @@ -1139,8 +1141,8 @@ std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog, program->block()->push_back(op); bool feed_op_add_shadow_feed = - (op_item->name() == "pd.feed") && platform::is_gpu_place(place); - bool data_op_add_shadow_feed = (op_item->name() == "pd.data") && + (op_item->name() == "pd_op.feed") && platform::is_gpu_place(place); + bool data_op_add_shadow_feed = (op_item->name() == "pd_op.data") && platform::is_gpu_place(place) && (op->attributes() .at("place") @@ -1155,9 +1157,9 @@ std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog, phi::DataLayout::ANY, TransToPhiDataType( op_item->result(0).type().dyn_cast().dtype())}; - std::unordered_map attr_map{ - {"op_name", ir::StrAttribute::get(ctx, "pd.shadow_feed")}, - {"kernel_name", ir::StrAttribute::get(ctx, "shadow_feed")}, + std::unordered_map attr_map{ + {"op_name", pir::StrAttribute::get(ctx, "pd_op.shadow_feed")}, + {"kernel_name", pir::StrAttribute::get(ctx, "shadow_feed")}, {"kernel_key", dialect::KernelAttribute::get(ctx, shadow_key)}}; auto out_type = paddle::dialect::AllocatedDenseTensorType::get( @@ -1165,7 +1167,7 @@ std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog, phi::TransToPhiPlace(shadow_key.backend()), op_item->result(0).type().dyn_cast()); - 
ir::Operation* shadow_op = ir::Operation::Create( + pir::Operation* shadow_op = pir::Operation::Create( {op->result(0)}, attr_map, {out_type}, phi_kernel_op_info); map_op_pair[op_item] = shadow_op; diff --git a/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.h b/paddle/fluid/pir/transforms/pd_op_to_kernel_pass.h similarity index 83% rename from paddle/fluid/ir/transforms/pd_op_to_kernel_pass.h rename to paddle/fluid/pir/transforms/pd_op_to_kernel_pass.h index 3e4848720f4ce..acf839391b8c5 100644 --- a/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.h +++ b/paddle/fluid/pir/transforms/pd_op_to_kernel_pass.h @@ -13,14 +13,14 @@ // limitations under the License. #pragma once -#include "paddle/ir/core/program.h" #include "paddle/phi/common/place.h" +#include "paddle/pir/core/program.h" namespace paddle { namespace dialect { -std::unique_ptr PdOpLowerToKernelPass( - ir::Program* prog, phi::Place place = phi::CPUPlace()); +std::unique_ptr PdOpLowerToKernelPass( + pir::Program* prog, phi::Place place = phi::CPUPlace()); } // namespace dialect } // namespace paddle diff --git a/paddle/fluid/ir/transforms/transform_general_functions.cc b/paddle/fluid/pir/transforms/transform_general_functions.cc similarity index 74% rename from paddle/fluid/ir/transforms/transform_general_functions.cc rename to paddle/fluid/pir/transforms/transform_general_functions.cc index 587c0cdaacd1d..6da131ee5e0c0 100644 --- a/paddle/fluid/ir/transforms/transform_general_functions.cc +++ b/paddle/fluid/pir/transforms/transform_general_functions.cc @@ -12,36 +12,38 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/ir/transforms/transform_general_functions.h" +#include "paddle/fluid/pir/transforms/transform_general_functions.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h" -#include "paddle/ir/core/builtin_op.h" -#include "paddle/ir/core/parameter.h" -#include "paddle/ir/core/program.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_dialect.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_type.h" +#include "paddle/pir/core/builtin_op.h" +#include "paddle/pir/core/parameter.h" +#include "paddle/pir/core/program.h" -namespace ir { +namespace pir { -std::pair GetParameterFromValue(ir::Value value) { - ir::GetParameterOp op = value.GetDefiningOp()->dyn_cast(); +std::pair GetParameterFromValue( + pir::Value value) { + pir::GetParameterOp op = + value.GetDefiningOp()->dyn_cast(); PADDLE_ENFORCE_NOT_NULL( op, phi::errors::InvalidArgument( "Value must be a weight from a GetParameter op.")); - ir::Program* program = op->GetParentProgram(); + pir::Program* program = op->GetParentProgram(); PADDLE_ENFORCE_NOT_NULL( program, phi::errors::InvalidArgument("Program should not be null.")); std::string name = op->attributes() .at(op.attributes_name[0]) - .dyn_cast() + .dyn_cast() .AsString(); - ir::Parameter* param = program->GetParameter(name); + pir::Parameter* param = program->GetParameter(name); PADDLE_ENFORCE_NOT_NULL( param, phi::errors::InvalidArgument("Parameter should not be null.")); return {name, param}; } -const phi::DDim& GetShapeFromValue(ir::Value value) { +const phi::DDim& GetShapeFromValue(pir::Value value) { // TODO(dev): Support other types like DenseTensor. 
PADDLE_ENFORCE_EQ( value.type().isa(), @@ -50,7 +52,7 @@ const phi::DDim& GetShapeFromValue(ir::Value value) { return value.type().dyn_cast().dims(); } -ir::Type GetDataTypeFromValue(ir::Value value) { +pir::Type GetDataTypeFromValue(pir::Value value) { // TODO(dev): Support other types like DenseTensor. PADDLE_ENFORCE_EQ( value.type().isa(), @@ -75,4 +77,4 @@ Operation* GetFirstUseOperationForOutput(Operation* op, uint32_t index) { return op->result(index).first_use().owner(); } -} // namespace ir +} // namespace pir diff --git a/paddle/fluid/ir/transforms/transform_general_functions.h b/paddle/fluid/pir/transforms/transform_general_functions.h similarity index 76% rename from paddle/fluid/ir/transforms/transform_general_functions.h rename to paddle/fluid/pir/transforms/transform_general_functions.h index b086af090f7a1..77c790235b832 100644 --- a/paddle/fluid/ir/transforms/transform_general_functions.h +++ b/paddle/fluid/pir/transforms/transform_general_functions.h @@ -14,45 +14,45 @@ #pragma once -#include "paddle/ir/core/operation.h" -#include "paddle/ir/core/parameter.h" -#include "paddle/ir/core/type.h" -#include "paddle/ir/core/value.h" #include "paddle/phi/core/ddim.h" #include "paddle/phi/core/enforce.h" #include "paddle/phi/core/errors.h" +#include "paddle/pir/core/operation.h" +#include "paddle/pir/core/parameter.h" +#include "paddle/pir/core/type.h" +#include "paddle/pir/core/value.h" -namespace ir { +namespace pir { /** * @brief Get the [name, parameter] pair of a parameter from a value. * * @note The value must be an output of a GetParameterOp. * - * @param ir::Value + * @param pir::Value * - * @return std::pair + * @return std::pair */ -std::pair GetParameterFromValue(ir::Value value); +std::pair GetParameterFromValue(pir::Value value); /** * @brief Get tensor's shape from a value. * - * @param ir::Value + * @param pir::Value * * @return const phi::DDim& */ -const phi::DDim& GetShapeFromValue(ir::Value value); +const phi::DDim& GetShapeFromValue(pir::Value value); /** * @brief Get tensor's data type from a value. * - * @param ir::Value + * @param pir::Value * - * @return ir::Type + * @return pir::Type */ -ir::Type GetDataTypeFromValue(ir::Value value); +pir::Type GetDataTypeFromValue(pir::Value value); /** * @brief Get an operation that defines the specific input of the operation. @@ -75,4 +75,4 @@ Operation* GetDefiningOpForInput(Operation* op, uint32_t index); */ Operation* GetFirstUseOperationForOutput(Operation* op, uint32_t index); -} // namespace ir +} // namespace pir diff --git a/paddle/fluid/primitive/backend/CMakeLists.txt b/paddle/fluid/primitive/backend/CMakeLists.txt index deabc1f19d9b5..d352880871121 100644 --- a/paddle/fluid/primitive/backend/CMakeLists.txt +++ b/paddle/fluid/primitive/backend/CMakeLists.txt @@ -12,4 +12,4 @@ set(static_backend_files cc_library( primitive_backend_static_experimental SRCS ${static_backend_files} - DEPS pd_dialect_api) + DEPS pd_op_dialect_api) diff --git a/paddle/fluid/primitive/backend/manual/manual_static_backend.cc b/paddle/fluid/primitive/backend/manual/manual_static_backend.cc index de39a58473337..7d96b4ddfecc2 100644 --- a/paddle/fluid/primitive/backend/manual/manual_static_backend.cc +++ b/paddle/fluid/primitive/backend/manual/manual_static_backend.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License.
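The three value helpers declared in transform_general_functions.h above form the small query surface that transform passes lean on. A minimal usage sketch, not part of the patch: `value` and `get_parameter_op` are hypothetical, and each call assumes its documented precondition (a GetParameterOp result for the first call, a DenseTensor-typed value for the other two).

pir::Value value = get_parameter_op->result(0);                // hypothetical defining op
auto [param_name, param] = pir::GetParameterFromValue(value);  // {name, pir::Parameter*}
const phi::DDim& shape = pir::GetShapeFromValue(value);        // requires DenseTensorType
pir::Type dtype = pir::GetDataTypeFromValue(value);            // element type as pir::Type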
-#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_api.h" +#include "paddle/fluid/pir/dialect/operator/ir/pd_api.h" #include "paddle/fluid/primitive/backend/manual/manual_backend.h" #include "paddle/fluid/primitive/primitive/primitive.h" #include "paddle/fluid/primitive/type/lazy_tensor.h" diff --git a/paddle/fluid/primitive/codegen/gen.py b/paddle/fluid/primitive/codegen/gen.py index 6c0dd9d214889..cf346c82db1f9 100644 --- a/paddle/fluid/primitive/codegen/gen.py +++ b/paddle/fluid/primitive/codegen/gen.py @@ -29,7 +29,7 @@ import tests_utils as op_gen_tests from parse_utils import to_named_dict -# import from paddle/fluid/ir/dialect/op_generator/api_gen.py +# import from paddle/fluid/pir/dialect/op_generator/api_gen.py sys.path.append( str(pathlib.Path(__file__).resolve().parents[2] / 'ir/dialect/op_generator') ) diff --git a/paddle/fluid/primitive/codegen/templates/backend/generated/generated_static_backend.cc.j2 b/paddle/fluid/primitive/codegen/templates/backend/generated/generated_static_backend.cc.j2 index 3a0be475485f9..48292d27243e6 100644 --- a/paddle/fluid/primitive/codegen/templates/backend/generated/generated_static_backend.cc.j2 +++ b/paddle/fluid/primitive/codegen/templates/backend/generated/generated_static_backend.cc.j2 @@ -2,7 +2,7 @@ // Auto Generated, DO NOT EDIT! #include "paddle/fluid/primitive/backend/generated/generated_backend.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_api.h" +#include "paddle/fluid/pir/dialect/operator/ir/pd_api.h" #include "paddle/fluid/primitive/primitive/primitive.h" #include "paddle/fluid/primitive/type/lazy_tensor.h" @@ -20,23 +20,23 @@ template <> {%- macro prepare_ir_api_inputs(inputs)-%} {%- for input in inputs -%} {% if input.typename=='Tensor[]' and not input.optional %} -std::vector {{input.name}}_res({{input.name}}.size()); +std::vector {{input.name}}_res({{input.name}}.size()); std::transform({{input.name}}.begin(), {{input.name}}.end(), {{input.name}}_res.begin(), [](const Tensor& t) { - return std::static_pointer_cast(t.impl())->value().dyn_cast(); + return std::static_pointer_cast(t.impl())->value().dyn_cast(); }); {% elif input.typename=='Tensor[]' and input.optional %} -std::vector {{input.name}}_res({{input.name}}.size()); +std::vector {{input.name}}_res({{input.name}}.size()); if({{input.name}}) { std::transform({{input.name}}.get().begin(), {{input.name}}.get().end(), {{input.name}}_res.begin(), [](const Tensor& t) { - return std::static_pointer_cast(t.impl())->value().dyn_cast(); + return std::static_pointer_cast(t.impl())->value().dyn_cast(); }); } {% elif input.typename=='Tensor' and not input.optional %} -ir::OpResult {{input.name}}_res = std::static_pointer_cast({{input.name}}.impl())->value().dyn_cast(); +pir::OpResult {{input.name}}_res = std::static_pointer_cast({{input.name}}.impl())->value().dyn_cast(); {% else %} -ir::OpResult {{input.name}}_res; +pir::OpResult {{input.name}}_res; if({{input.name}}) { - {{input.name}}_res = std::static_pointer_cast({{input.name}}.get().impl())->value().dyn_cast(); + {{input.name}}_res = std::static_pointer_cast({{input.name}}.get().impl())->value().dyn_cast(); } {% endif %} {% endfor %} @@ -49,7 +49,7 @@ Tensor {{outputs[0].name}}(std::make_shared(op_res)); return {{outputs[0].name}}; {%- elif outputs[0].typename == 'Tensor[]' -%} std::vector {{outputs[0].name}}(op_res.size()); -std::transform(op_res.begin(), op_res.end(), {{outputs[0].name}}.begin(), [](const ir::OpResult& res) { +std::transform(op_res.begin(), op_res.end(), {{outputs[0].name}}.begin(), [](const 
pir::OpResult& res) { return Tensor(std::make_shared(res)); }); return {{outputs[0].name}}; @@ -62,7 +62,7 @@ auto op_res_{{i}} = std::get<{{i}}>(op_res); Tensor {{outputs[i].name}}(std::make_shared(op_res_{{i}})); {% elif outputs[i].typename == 'Tensor[]' %} std::vector {{outputs[i].name}}(op_res_{{i}}.size()); -std::transform(op_res_{{i}}.begin(), op_res_{{i}}.end(), {{outputs[i].name}}.begin(), [](const ir::OpResult& res) { +std::transform(op_res_{{i}}.begin(), op_res_{{i}}.end(), {{outputs[i].name}}.begin(), [](const pir::OpResult& res) { return Tensor(std::make_shared(res)); }); {% else %} {#- render nothing -#} @@ -79,7 +79,7 @@ return std::make_tuple({%- for i in range(outputs|length) -%}{{outputs[i].name}} {{prepare_ir_api_inputs(inputs)}} {%- for attr in attrs %} {% if mutable_attribute_as_inputs and attr is mutable_attribute %} -ir::OpResult {{attr.name}}_res = std::static_pointer_cast({{attr.name~'_'}}.impl())->value().dyn_cast(); +pir::OpResult {{attr.name}}_res = std::static_pointer_cast({{attr.name~'_'}}.impl())->value().dyn_cast(); {% endif %} {% endfor %} {%- set input_names = [] -%} diff --git a/paddle/fluid/primitive/codegen/templates/rule/vjp/generated/generated_vjp.cc.j2 b/paddle/fluid/primitive/codegen/templates/rule/vjp/generated/generated_vjp.cc.j2 index 10e382a5b2350..67485bdd5a5cd 100644 --- a/paddle/fluid/primitive/codegen/templates/rule/vjp/generated/generated_vjp.cc.j2 +++ b/paddle/fluid/primitive/codegen/templates/rule/vjp/generated/generated_vjp.cc.j2 @@ -2,14 +2,14 @@ // Auto Generated, DO NOT EDIT! #include "paddle/fluid/primitive/rule/vjp/generated/generated_vjp.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_api.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_attribute.h" +#include "paddle/fluid/pir/dialect/operator/ir/pd_api.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_attribute.h" #include "paddle/fluid/prim/utils/static/static_global_utils.h" #include "paddle/fluid/primitive/backend/backend.h" #include "paddle/fluid/primitive/rule/vjp/details.h" #include "paddle/fluid/primitive/type/lazy_tensor.h" #include "paddle/fluid/primitive/utils/utils.h" -#include "paddle/ir/core/operation.h" +#include "paddle/pir/core/operation.h" #include "paddle/phi/core/flags.h" #include "paddle/utils/optional.h" @@ -41,16 +41,16 @@ return vjp_res; {% macro get_mutable_attribute(attrs, api_name) %} {% for i in attrs %} {%- if i is mutable_attribute -%} -auto* {{i.name}}_define_op = std::static_pointer_cast({{i.name~'_'}}.impl())->value().dyn_cast().GetDefiningOp(); +auto* {{i.name}}_define_op = std::static_pointer_cast({{i.name~'_'}}.impl())->value().dyn_cast().GetDefiningOp(); {% if i.typename is scalar %} -if({{i.name}}_define_op->name() != "pd.full") { +if({{i.name}}_define_op->name() != "pd_op.full") { PADDLE_THROW(platform::errors::Unimplemented( "We don't support dynamic tensors attribute {{i.name}} for {{api_name}} composite " "for now. ")); } auto {{i.name}} = {{i.name}}_define_op->attribute("value").dyn_cast().data(); {% elif i.typename is intarray %} -if({{i.name}}_define_op->name() != "pd.full_int_array"){ +if({{i.name}}_define_op->name() != "pd_op.full_int_array"){ PADDLE_THROW(platform::errors::Unimplemented( "We don't support dynamic tensors attribute {{i.name}} for {{api_name}} composite " "for now. 
")); diff --git a/paddle/fluid/primitive/codegen/templates/rule/vjp/generated/generated_vjp.h.j2 b/paddle/fluid/primitive/codegen/templates/rule/vjp/generated/generated_vjp.h.j2 index b9e758aaa73ff..7f403661fea05 100644 --- a/paddle/fluid/primitive/codegen/templates/rule/vjp/generated/generated_vjp.h.j2 +++ b/paddle/fluid/primitive/codegen/templates/rule/vjp/generated/generated_vjp.h.j2 @@ -4,7 +4,7 @@ #pragma once #include "paddle/fluid/primitive/primitive/primitive.h" -#include "paddle/ir/core/value.h" +#include "paddle/pir/core/value.h" #include "paddle/phi/api/include/tensor.h" #include "paddle/phi/common/int_array.h" diff --git a/paddle/fluid/primitive/rule/vjp/CMakeLists.txt b/paddle/fluid/primitive/rule/vjp/CMakeLists.txt index 3d6906bb33ca5..4b790fd07900b 100644 --- a/paddle/fluid/primitive/rule/vjp/CMakeLists.txt +++ b/paddle/fluid/primitive/rule/vjp/CMakeLists.txt @@ -5,4 +5,4 @@ cc_library( primitive_vjp_experimental SRCS ${VJP_SRCS} DEPS primitive_backend_static_experimental static_global_utils - primitive_static_utils_experimental pd_dialect_core) + primitive_static_utils_experimental pd_op_dialect_core) diff --git a/paddle/fluid/primitive/rule/vjp/manual/manual_vjp.cc b/paddle/fluid/primitive/rule/vjp/manual/manual_vjp.cc index c56ac5c5f79ab..a882f78c52018 100644 --- a/paddle/fluid/primitive/rule/vjp/manual/manual_vjp.cc +++ b/paddle/fluid/primitive/rule/vjp/manual/manual_vjp.cc @@ -15,13 +15,13 @@ // Auto Generated, DO NOT EDIT! #include "paddle/fluid/primitive/rule/vjp/manual/manual_vjp.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_api.h" +#include "paddle/fluid/pir/dialect/operator/ir/pd_api.h" #include "paddle/fluid/prim/utils/static/static_global_utils.h" #include "paddle/fluid/primitive/backend/backend.h" #include "paddle/fluid/primitive/rule/vjp/details.h" #include "paddle/fluid/primitive/type/lazy_tensor.h" #include "paddle/fluid/primitive/utils/utils.h" -#include "paddle/ir/core/operation.h" +#include "paddle/pir/core/operation.h" namespace paddle { namespace primitive {} // namespace primitive diff --git a/paddle/fluid/primitive/rule/vjp/manual/manual_vjp.h b/paddle/fluid/primitive/rule/vjp/manual/manual_vjp.h index 0fffd6ba31a4c..35810f6d652ca 100644 --- a/paddle/fluid/primitive/rule/vjp/manual/manual_vjp.h +++ b/paddle/fluid/primitive/rule/vjp/manual/manual_vjp.h @@ -15,9 +15,9 @@ #pragma once #include "paddle/fluid/primitive/primitive/primitive.h" -#include "paddle/ir/core/value.h" #include "paddle/phi/api/include/tensor.h" #include "paddle/phi/common/int_array.h" +#include "paddle/pir/core/value.h" namespace paddle { namespace primitive { diff --git a/paddle/fluid/primitive/type/lazy_tensor.h b/paddle/fluid/primitive/type/lazy_tensor.h index b716d7ce73356..cde6ece54b163 100644 --- a/paddle/fluid/primitive/type/lazy_tensor.h +++ b/paddle/fluid/primitive/type/lazy_tensor.h @@ -13,12 +13,12 @@ // limitations under the License. 
#pragma once -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/utils/utils.h" -#include "paddle/ir/core/value.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_type.h" +#include "paddle/fluid/pir/dialect/operator/utils/utils.h" #include "paddle/phi/core/ddim.h" #include "paddle/phi/core/extended_tensor.h" #include "paddle/phi/core/utils/data_type.h" +#include "paddle/pir/core/value.h" namespace paddle { namespace primitive { @@ -26,7 +26,7 @@ namespace primitive { class LazyTensor : public phi::ExtendedTensor, public phi::TypeInfoTraits { public: - explicit LazyTensor(ir::Value value) + explicit LazyTensor(pir::Value value) : value_(value), dims_(value.type().dyn_cast().dims()) {} @@ -41,16 +41,16 @@ class LazyTensor : public phi::ExtendedTensor, value_.type().dyn_cast().dtype()); } - ir::Value value() const { return value_; } + pir::Value value() const { return value_; } const phi::Place& place() const override { return place_; } bool initialized() const override { return value_.impl() != nullptr; } - void set_empty_type() { value_.set_type(ir::Type()); } + void set_empty_type() { value_.set_type(pir::Type()); } private: - ir::Value value_; + pir::Value value_; mutable phi::DDim dims_; phi::Place place_; }; diff --git a/paddle/fluid/pybind/CMakeLists.txt b/paddle/fluid/pybind/CMakeLists.txt index 30cb90a5d2042..6c0c0fb4f81f2 100755 --- a/paddle/fluid/pybind/CMakeLists.txt +++ b/paddle/fluid/pybind/CMakeLists.txt @@ -39,10 +39,10 @@ set(PYBIND_DEPS phi_utils phi phi_kernel_adaptor - pd_dialect + pd_op_dialect program_translator pd_inplace_pass - ir + pir new_profiler jit_layer jit_property @@ -344,7 +344,7 @@ if(WITH_PYTHON) add_custom_command( OUTPUT ${op_impl_path}/ir.dll COMMAND ${CMAKE_COMMAND} -E copy ${IR_LIB} ${op_impl_path} - DEPENDS ir) + DEPENDS pir) list(APPEND EAGER_OP_IMPL_DEPS ${op_impl_path}/ir.dll) endif() diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc index 95d86f544c4bf..90bde68961c96 100644 --- a/paddle/fluid/pybind/eager_utils.cc +++ b/paddle/fluid/pybind/eager_utils.cc @@ -11,7 +11,7 @@ limitations under the License. */ #include "paddle/fluid/pybind/eager_utils.h" #include -#include "paddle/ir/core/value.h" +#include "paddle/pir/core/value.h" // Avoid a problem with copysign defined in pyconfig.h on Windows. 
#ifdef copysign #undef copysign @@ -888,13 +888,13 @@ PyObject* ToPyObject(const phi::DenseTensor* value) { return obj.ptr(); } -PyObject* ToPyObject(const ir::OpResult& value) { +PyObject* ToPyObject(const pir::OpResult& value) { auto obj = ::pybind11::cast(value); obj.inc_ref(); return obj.ptr(); } -PyObject* ToPyObject(const std::vector& value) { +PyObject* ToPyObject(const std::vector& value) { PyObject* result = PyList_New((Py_ssize_t)value.size()); for (size_t i = 0; i < value.size(); i++) { @@ -1485,13 +1485,13 @@ paddle::experimental::Scalar CastNumpy2Scalar(PyObject* obj, } } -ir::OpResult CastPyArg2OpResult(PyObject* obj, - const std::string& op_type, - size_t arg_pos) { +pir::OpResult CastPyArg2OpResult(PyObject* obj, + const std::string& op_type, + size_t arg_pos) { if (PyObject_TypeCheck(obj, g_ir_opresult_pytype)) { - return ::pybind11::handle(obj).cast(); + return ::pybind11::handle(obj).cast(); } else if (obj == nullptr || obj == Py_None) { - return ir::OpResult(); + return pir::OpResult(); } else { PADDLE_THROW(platform::errors::InvalidArgument( "%s(): argument (position %d) must be " @@ -1502,17 +1502,17 @@ ir::OpResult CastPyArg2OpResult(PyObject* obj, } } -std::vector CastPyArg2VectorOfOpResult(PyObject* obj, - const std::string& op_type, - size_t arg_pos) { - std::vector result_list; +std::vector CastPyArg2VectorOfOpResult( + PyObject* obj, const std::string& op_type, size_t arg_pos) { + std::vector result_list; if (PyList_Check(obj)) { Py_ssize_t len = PyList_Size(obj); PyObject* item = nullptr; for (Py_ssize_t i = 0; i < len; i++) { item = PyList_GetItem(obj, i); if (PyObject_TypeCheck(item, g_ir_opresult_pytype)) { - result_list.emplace_back(::pybind11::handle(item).cast()); + result_list.emplace_back( + ::pybind11::handle(item).cast()); } else if (item == Py_None) { continue; } else { @@ -1531,7 +1531,8 @@ std::vector CastPyArg2VectorOfOpResult(PyObject* obj, for (Py_ssize_t i = 0; i < len; i++) { item = PyTuple_GetItem(obj, i); if (PyObject_TypeCheck(item, g_ir_opresult_pytype)) { - result_list.emplace_back(::pybind11::handle(item).cast()); + result_list.emplace_back( + ::pybind11::handle(item).cast()); } else if (item == Py_None) { continue; } else { @@ -1545,7 +1546,7 @@ std::vector CastPyArg2VectorOfOpResult(PyObject* obj, } } } else if (PyObject_TypeCheck(obj, g_ir_opresult_pytype)) { - return {::pybind11::handle(obj).cast()}; + return {::pybind11::handle(obj).cast()}; } else if (obj == Py_None) { return {}; } else { diff --git a/paddle/fluid/pybind/eager_utils.h b/paddle/fluid/pybind/eager_utils.h index ad7ec2d42c437..64c578c81cd13 100644 --- a/paddle/fluid/pybind/eager_utils.h +++ b/paddle/fluid/pybind/eager_utils.h @@ -29,7 +29,6 @@ typedef SSIZE_T ssize_t; #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/jit/function.h" #include "paddle/fluid/platform/place.h" -#include "paddle/ir/core/value.h" #include "paddle/phi/common/backend.h" #include "paddle/phi/common/data_type.h" #include "paddle/phi/common/int_array.h" @@ -38,6 +37,7 @@ typedef SSIZE_T ssize_t; #include "paddle/phi/core/distributed/auto_parallel/dist_attr.h" #include "paddle/phi/core/distributed/auto_parallel/dist_tensor.h" #include "paddle/phi/core/selected_rows.h" +#include "paddle/pir/core/value.h" #include "paddle/utils/pybind.h" #include "pybind11/pybind11.h" #include "pybind11/stl.h" @@ -75,12 +75,11 @@ std::vector CastPyArg2VectorOfInt(PyObject* obj, size_t arg_pos); std::vector CastPyArg2VectorOfInt64(PyObject* obj, size_t arg_pos); std::vector 
CastPyArg2VectorOfSize_t(PyObject* obj, size_t arg_pos); std::vector CastPyArg2VectorOfFloat(PyObject* obj, size_t arg_pos); -ir::OpResult CastPyArg2OpResult(PyObject* obj, - const std::string& op_type, - size_t arg_pos); -std::vector CastPyArg2VectorOfOpResult(PyObject* obj, - const std::string& op_type, - size_t arg_pos); +pir::OpResult CastPyArg2OpResult(PyObject* obj, + const std::string& op_type, + size_t arg_pos); +std::vector CastPyArg2VectorOfOpResult( + PyObject* obj, const std::string& op_type, size_t arg_pos); std::vector> CastPyArg2VectorOfVectorOfSize_t( PyObject* obj, size_t arg_pos); framework::proto::VarType::Type CastPyArg2ProtoType(PyObject* obj, @@ -131,8 +130,8 @@ PyObject* ToPyObject(const paddle::framework::Vocab& value); PyObject* ToPyObject(std::shared_ptr grad_node); -PyObject* ToPyObject(const ir::OpResult& value); -PyObject* ToPyObject(const std::vector& value); +PyObject* ToPyObject(const pir::OpResult& value); +PyObject* ToPyObject(const std::vector& value); class PyTensorHook : public egr::TensorHook { public: diff --git a/paddle/fluid/pybind/ir.cc b/paddle/fluid/pybind/ir.cc index 4dc36fe785ecc..8d17d2ed68db4 100644 --- a/paddle/fluid/pybind/ir.cc +++ b/paddle/fluid/pybind/ir.cc @@ -25,38 +25,38 @@ #include "paddle/fluid/pybind/pybind_variant_caster.h" #include "paddle/fluid/framework/program_desc.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/interface/op_yaml_info.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/api_builder.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/utils/utils.h" -#include "paddle/fluid/ir/transforms/inplace_pass.h" #include "paddle/fluid/ir_adaptor/translator/translate.h" #include "paddle/fluid/ir_adaptor/translator/utils.h" -#include "paddle/ir/core/block.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/program.h" -#include "paddle/ir/core/type.h" -#include "paddle/ir/core/value.h" -#include "paddle/ir/pass/pass.h" -#include "paddle/ir/pass/pass_manager.h" -#include "paddle/ir/pass/pass_registry.h" -#include "paddle/ir/transforms/dead_code_elimination_pass.h" +#include "paddle/fluid/pir/dialect/operator/interface/op_yaml_info.h" +#include "paddle/fluid/pir/dialect/operator/ir/api_builder.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_dialect.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_type.h" +#include "paddle/fluid/pir/dialect/operator/utils/utils.h" +#include "paddle/fluid/pir/transforms/inplace_pass.h" #include "paddle/phi/core/enforce.h" +#include "paddle/pir/core/block.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/program.h" +#include "paddle/pir/core/type.h" +#include "paddle/pir/core/value.h" +#include "paddle/pir/pass/pass.h" +#include "paddle/pir/pass/pass_manager.h" +#include "paddle/pir/pass/pass_registry.h" +#include "paddle/pir/transforms/dead_code_elimination_pass.h" #include "pybind11/stl.h" namespace py = pybind11; -using ir::Block; -using ir::Operation; -using ir::OpOperand; -using ir::OpResult; -using ir::Pass; -using ir::PassManager; -using ir::Program; -using ir::Type; -using ir::Value; using paddle::dialect::APIBuilder; using paddle::dialect::DenseTensorType; +using pir::Block; +using pir::Operation; +using pir::OpOperand; +using pir::OpResult; +using pir::Pass; +using pir::PassManager; +using pir::Program; +using pir::Type; +using pir::Value; using pybind11::return_value_policy; 
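The eager_utils.h declarations above pair two directions of conversion: ToPyObject marshals a pir::OpResult into a Python object, and CastPyArg2OpResult recovers it. A rough round-trip sketch, assuming a call site inside the paddle::pybind namespace; the variable names are hypothetical and error handling is omitted:

// pir::OpResult -> Python -> pir::OpResult with the renamed casters.
pir::OpResult res = some_op->result(0);   // hypothetical producing op
PyObject* py_res = ToPyObject(res);       // wraps and inc-refs the handle
pir::OpResult back = CastPyArg2OpResult(py_res, "my_api", /*arg_pos=*/0);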
USE_PASS(dead_code_elimination); @@ -111,9 +111,10 @@ void BindProgram(py::module *m) { print("start up program is: {}".format(startup_program)) )DOC"); program - .def( - "__init__", - [](Program &self) { new (&self) Program(ir::IrContext::Instance()); }) + .def("__init__", + [](Program &self) { + new (&self) Program(pir::IrContext::Instance()); + }) .def("__str__", [](const std::shared_ptr &self) { std::ostringstream print_stream; @@ -287,7 +288,7 @@ void BindValue(py::module *m) { return self.impl() == other.value_impl(); }) .def("__hash__", - [](const Value &self) { return std::hash{}(self); }); + [](const Value &self) { return std::hash{}(self); }); } void BindOpOperand(py::module *m) { @@ -315,10 +316,10 @@ bool GetStopGradient(const OpResult &self) { auto *defining_op = self.owner(); if (defining_op->HasAttribute(kAttrStopGradients)) { auto stop_gradients = defining_op->attribute(kAttrStopGradients) - .dyn_cast() + .dyn_cast() .AsVector(); return stop_gradients[self.GetResultIndex()] - .dyn_cast() + .dyn_cast() .data(); } else { return false; @@ -327,21 +328,21 @@ bool GetStopGradient(const OpResult &self) { void SetStopGradient(const OpResult &self, bool stop_gradient) { auto *defining_op = self.owner(); - std::vector stop_gradients; + std::vector stop_gradients; if (defining_op->HasAttribute(kAttrStopGradients)) { stop_gradients = defining_op->attribute(kAttrStopGradients) - .dyn_cast() + .dyn_cast() .AsVector(); } else { - stop_gradients = std::vector( + stop_gradients = std::vector( defining_op->num_results(), - ir::BoolAttribute::get(ir::IrContext::Instance(), false)); + pir::BoolAttribute::get(pir::IrContext::Instance(), false)); } stop_gradients[self.GetResultIndex()] = - ir::BoolAttribute::get(ir::IrContext::Instance(), stop_gradient); + pir::BoolAttribute::get(pir::IrContext::Instance(), stop_gradient); defining_op->set_attribute( kAttrStopGradients, - ir::ArrayAttribute::get(ir::IrContext::Instance(), stop_gradients)); + pir::ArrayAttribute::get(pir::IrContext::Instance(), stop_gradients)); } void BindOpResult(py::module *m) { @@ -360,7 +361,7 @@ void BindOpResult(py::module *m) { }) .def("__hash__", [](OpResult &self) { - return std::hash{}(self.dyn_cast()); + return std::hash{}(self.dyn_cast()); }) .def("get_defining_op", &OpResult::GetDefiningOp, @@ -427,8 +428,8 @@ void BindUtils(pybind11::module *m) { m->def("reset_insertion_point_to_end", []() { APIBuilder::Instance().ResetInsertionPointToEnd(); }); m->def("register_paddle_dialect", []() { - ir::IrContext::Instance() - ->GetOrRegisterDialect(); + pir::IrContext::Instance() + ->GetOrRegisterDialect(); }); m->def( "translate_to_new_ir", @@ -476,7 +477,7 @@ void BindUtils(pybind11::module *m) { m->def( "check_unregistered_ops", [](const framework::ProgramDesc &legacy_program) { - ir::IrContext *ctx = ir::IrContext::Instance(); + pir::IrContext *ctx = pir::IrContext::Instance(); return paddle::translator::CheckUnregisteredOperation(ctx, legacy_program); }, @@ -516,13 +517,13 @@ void BindPassManager(pybind11::module *m) { .def( "__init__", [](PassManager &self, uint8_t opt_level) { - new (&self) PassManager(ir::IrContext::Instance(), opt_level); + new (&self) PassManager(pir::IrContext::Instance(), opt_level); }, py::arg("opt_level") = 2) .def("add_pass", [](PassManager &self, const std::string &pass_name) { self.AddPass( - std::move(ir::PassRegistry::Instance().Get(pass_name))); + std::move(pir::PassRegistry::Instance().Get(pass_name))); }) .def("passes", [](PassManager &self) { diff --git a/paddle/fluid/pybind/pybind.cc 
b/paddle/fluid/pybind/pybind.cc index 056c4b0daadfc..122d2428449ae 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -195,17 +195,17 @@ limitations under the License. */ #include "paddle/fluid/eager/api/utils/global_utils.h" #include "paddle/fluid/eager/nan_inf_utils.h" #include "paddle/fluid/imperative/layout_autotune.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/interface/vjp.h" +#include "paddle/fluid/pir/dialect/operator/interface/vjp.h" #include "paddle/fluid/prim/utils/eager/eager_tensor_operants.h" #include "paddle/fluid/prim/utils/static/static_tensor_operants.h" #include "paddle/fluid/pybind/eager_utils.h" -#include "paddle/ir/core/program.h" #include "paddle/phi/api/ext/op_meta_info.h" #include "paddle/phi/api/include/operants_manager.h" #include "paddle/phi/api/include/tensor_operants.h" #include "paddle/phi/core/flags.h" #include "paddle/phi/kernels/autotune/cache.h" #include "paddle/phi/kernels/autotune/switch_autotune.h" +#include "paddle/pir/core/program.h" #include "pybind11/stl.h" PHI_DECLARE_bool(use_mkldnn); @@ -692,19 +692,19 @@ static int GetNCCLVersion() { void BindVjp(pybind11::module *m) { m->def( "call_vjp", - [](ir::Operation &fwd_op, - const std::vector> &out_grads, + [](pir::Operation &fwd_op, + const std::vector> &out_grads, const std::vector> &stop_gradients) { py::list res; - ir::IrContext *ctx = ir::IrContext::Instance(); - ir::OpInfo fwd_op_info = ctx->GetRegisteredOpInfo(fwd_op.name()); + pir::IrContext *ctx = pir::IrContext::Instance(); + pir::OpInfo fwd_op_info = ctx->GetRegisteredOpInfo(fwd_op.name()); auto vjp_interface_impl = fwd_op_info.GetInterfaceImpl(); if (vjp_interface_impl == nullptr) { PADDLE_THROW(phi::errors::InvalidArgument( "The vjp function is not registered in %s op ", fwd_op.name())); } - std::vector> vjp_res = + std::vector> vjp_res = vjp_interface_impl->vjp_(&fwd_op, out_grads, stop_gradients); PADDLE_ENFORCE_EQ( stop_gradients.size(), @@ -743,9 +743,9 @@ void BindVjp(pybind11::module *m) { return res; }); - m->def("has_vjp", [](ir::Operation &fwd_op) { - ir::IrContext *ctx = ir::IrContext::Instance(); - ir::OpInfo fwd_op_info = ctx->GetRegisteredOpInfo(fwd_op.name()); + m->def("has_vjp", [](pir::Operation &fwd_op) { + pir::IrContext *ctx = pir::IrContext::Instance(); + pir::OpInfo fwd_op_info = ctx->GetRegisteredOpInfo(fwd_op.name()); auto vjp_interface_impl = fwd_op_info.GetInterfaceImpl(); if (vjp_interface_impl == nullptr) return false; @@ -1983,7 +1983,7 @@ All parameter, weight, gradient are variables in Paddle. 
py::init< const std::vector> &, const std::unordered_map> &>(), + std::shared_ptr<::pir::Program>> &>(), py::arg("job_list"), py::arg("type_to_ir_program")) .def("job_list", &framework::interpreter::Plan::JobList) diff --git a/paddle/ir/core/CMakeLists.txt b/paddle/ir/core/CMakeLists.txt deleted file mode 100644 index 138b102fcbd89..0000000000000 --- a/paddle/ir/core/CMakeLists.txt +++ /dev/null @@ -1,10 +0,0 @@ -set(NEWIR_SOURCE_DIR "${PADDLE_SOURCE_DIR}/paddle/ir") -set(NEWIR_BINARY_DIR "${PADDLE_BINARY_DIR}/paddle/ir") - -file(GLOB IR_SRCS "*.cc") - -file(GLOB IR_PARSER_SRCS "parser/*.cc") - -list(APPEND IR_SRCS ${IR_PARSER_SRCS}) - -ir_library(ir_core SRCS ${IR_SRCS} DEPS ddim) diff --git a/paddle/ir/dialect/control_flow/CMakeLists.txt b/paddle/ir/dialect/control_flow/CMakeLists.txt deleted file mode 100644 index 5a693ba156ccd..0000000000000 --- a/paddle/ir/dialect/control_flow/CMakeLists.txt +++ /dev/null @@ -1,2 +0,0 @@ -file(GLOB_RECURSE CONTROL_FLOW_SRCS "*.cc") -ir_library(ir_control_flow SRCS ${CONTROL_FLOW_SRCS} DEPS ir_core) diff --git a/paddle/ir/dialect/shape/CMakeLists.txt b/paddle/ir/dialect/shape/CMakeLists.txt deleted file mode 100644 index 62d7c0d42c85c..0000000000000 --- a/paddle/ir/dialect/shape/CMakeLists.txt +++ /dev/null @@ -1,2 +0,0 @@ -file(GLOB_RECURSE SHAPE_SRCS "*.cc") -ir_library(ir_shape SRCS ${SHAPE_SRCS} DEPS ir_core) diff --git a/paddle/ir/pass/CMakeLists.txt b/paddle/ir/pass/CMakeLists.txt deleted file mode 100644 index b4a1d99ab5fcd..0000000000000 --- a/paddle/ir/pass/CMakeLists.txt +++ /dev/null @@ -1,3 +0,0 @@ -file(GLOB NEW_PASS_SRCS "*.cc") - -ir_library(ir_pass SRCS ${NEW_PASS_SRCS} DEPS ir_core) diff --git a/paddle/ir/pattern_rewrite/CMakeLists.txt b/paddle/ir/pattern_rewrite/CMakeLists.txt deleted file mode 100644 index e99611a4ca050..0000000000000 --- a/paddle/ir/pattern_rewrite/CMakeLists.txt +++ /dev/null @@ -1,3 +0,0 @@ -file(GLOB PATTERN_SRCS "*.cc") - -ir_library(ir_pattern_rewrite SRCS ${PATTERN_SRCS} DEPS ir_core) diff --git a/paddle/phi/core/flags.cc b/paddle/phi/core/flags.cc index a7df7f3203734..930afa2e3e924 100644 --- a/paddle/phi/core/flags.cc +++ b/paddle/phi/core/flags.cc @@ -1312,7 +1312,7 @@ PHI_DEFINE_EXPORTED_bool(enable_new_ir_in_executor_trace_run, PHI_DEFINE_EXPORTED_bool(new_ir_apply_inplace_pass, true, "Whether to apply inplace pass on lowering " - "::ir::Program to Kernel Dialect"); + "::pir::Program to Kernel Dialect"); PHI_DEFINE_EXPORTED_bool(enable_record_memory, false, "Enable memory recorder"); diff --git a/paddle/phi/core/meta_tensor.cc b/paddle/phi/core/meta_tensor.cc index 9b9df5c1ff4aa..8c1dddfeafa21 100644 --- a/paddle/phi/core/meta_tensor.cc +++ b/paddle/phi/core/meta_tensor.cc @@ -16,7 +16,7 @@ limitations under the License. */ #include "glog/logging.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_meta_tensor.h" +#include "paddle/fluid/pir/dialect/operator/ir/meta_tensor.h" #include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/core/distributed/auto_parallel/dist_tensor.h" #include "paddle/phi/core/enforce.h" diff --git a/paddle/phi/core/tensor_meta.cc b/paddle/phi/core/tensor_meta.cc index 59926ed0b8c25..600f815b1af01 100644 --- a/paddle/phi/core/tensor_meta.cc +++ b/paddle/phi/core/tensor_meta.cc @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/phi/core/tensor_meta.h" -#include "paddle/ir/core/enforce.h" +#include "paddle/pir/core/enforce.h" namespace phi { diff --git a/paddle/phi/core/utils/type_info.cc b/paddle/phi/core/utils/type_info.cc index 99b134b6e7960..2cb903fde7310 100644 --- a/paddle/phi/core/utils/type_info.cc +++ b/paddle/phi/core/utils/type_info.cc @@ -14,7 +14,7 @@ limitations under the License. */ #include -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_meta_tensor.h" +#include "paddle/fluid/pir/dialect/operator/ir/meta_tensor.h" #include "paddle/phi/backends/cpu/cpu_context.h" #include "paddle/phi/backends/custom/custom_context.h" #include "paddle/phi/backends/gpu/gpu_context.h" diff --git a/paddle/ir/CMakeLists.txt b/paddle/pir/CMakeLists.txt similarity index 91% rename from paddle/ir/CMakeLists.txt rename to paddle/pir/CMakeLists.txt index 5a778466b4c19..1f87a16ff36a6 100644 --- a/paddle/ir/CMakeLists.txt +++ b/paddle/pir/CMakeLists.txt @@ -43,31 +43,31 @@ add_subdirectory(dialect) if(WIN32) if(WITH_SHARED_IR) set(IR_NAME - ir.dll + pir.dll CACHE INTERNAL "" FORCE) else() set(IR_NAME - ir.lib + pir.lib CACHE INTERNAL "" FORCE) endif() elseif(APPLE) if(WITH_SHARED_IR) set(IR_NAME - libir.dylib + libpir.dylib CACHE INTERNAL "" FORCE) else() set(IR_NAME - libir.a + libpir.a CACHE INTERNAL "" FORCE) endif() else() if(WITH_SHARED_IR) set(IR_NAME - libir.so + libpir.so CACHE INTERNAL "" FORCE) else() set(IR_NAME - libir.a + libpir.a CACHE INTERNAL "" FORCE) endif() endif() @@ -78,7 +78,7 @@ set(IR_LIB get_property(ir_modules GLOBAL PROPERTY IR_MODULES) if(WITH_SHARED_IR) - add_library(ir SHARED ${ir_modules}) + add_library(pir SHARED ${ir_modules}) else() - add_library(ir STATIC ${ir_modules}) + add_library(pir STATIC ${ir_modules}) endif() diff --git a/paddle/pir/core/CMakeLists.txt b/paddle/pir/core/CMakeLists.txt new file mode 100644 index 0000000000000..0fffc4285e376 --- /dev/null +++ b/paddle/pir/core/CMakeLists.txt @@ -0,0 +1,9 @@ +set(NEWIR_SOURCE_DIR "${PADDLE_SOURCE_DIR}/paddle/pir") +set(NEWIR_BINARY_DIR "${PADDLE_BINARY_DIR}/paddle/pir") + +file(GLOB IR_SRCS "*.cc") + +file(GLOB IR_PARSER_SRCS "parser/*.cc") +list(APPEND IR_SRCS ${IR_PARSER_SRCS}) + +ir_library(pir_core SRCS ${IR_SRCS} DEPS ddim) diff --git a/paddle/ir/core/attribute.cc b/paddle/pir/core/attribute.cc similarity index 86% rename from paddle/ir/core/attribute.cc rename to paddle/pir/core/attribute.cc index 0eff9964292df..993076880fdda 100644 --- a/paddle/ir/core/attribute.cc +++ b/paddle/pir/core/attribute.cc @@ -12,11 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "paddle/ir/core/attribute.h" -#include "paddle/ir/core/attribute_base.h" -#include "paddle/ir/core/dialect.h" +#include "paddle/pir/core/attribute.h" +#include "paddle/pir/core/attribute_base.h" +#include "paddle/pir/core/dialect.h" -namespace ir { +namespace pir { IrContext *Attribute::ir_context() const { return dialect().ir_context(); } TypeId Attribute::type_id() { return storage_->abstract_attribute().type_id(); } @@ -29,4 +29,4 @@ const Dialect &Attribute::dialect() const { return storage_->abstract_attribute().dialect(); } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/attribute.h b/paddle/pir/core/attribute.h similarity index 87% rename from paddle/ir/core/attribute.h rename to paddle/pir/core/attribute.h index d83ea3b3c6045..86d6a62ceddfd 100644 --- a/paddle/ir/core/attribute.h +++ b/paddle/pir/core/attribute.h @@ -14,13 +14,13 @@ #pragma once -#include "paddle/ir/core/cast_utils.h" -#include "paddle/ir/core/type_id.h" +#include "paddle/pir/core/cast_utils.h" +#include "paddle/pir/core/type_id.h" constexpr char kAttrStopGradients[] = "stop_gradient"; constexpr char kAttrIsPersisable[] = "is_persisable"; -namespace ir { +namespace pir { class AttributeStorage; class AbstractAttribute; class IrContext; @@ -77,12 +77,12 @@ class IR_API Attribute { template bool isa() const { - return ir::isa(*this); + return pir::isa(*this); } template U dyn_cast() const { - return ir::dyn_cast(*this); + return pir::dyn_cast(*this); } friend struct std::hash; @@ -92,13 +92,13 @@ class IR_API Attribute { }; IR_API std::ostream &operator<<(std::ostream &os, Attribute attr); -} // namespace ir +} // namespace pir namespace std { template <> -struct hash { - std::size_t operator()(const ir::Attribute &obj) const { - return std::hash()(obj.storage_); +struct hash { + std::size_t operator()(const pir::Attribute &obj) const { + return std::hash()(obj.storage_); } }; } // namespace std diff --git a/paddle/ir/core/attribute_base.h b/paddle/pir/core/attribute_base.h similarity index 91% rename from paddle/ir/core/attribute_base.h rename to paddle/pir/core/attribute_base.h index daa3fed14f8a3..e0cbb0253700a 100644 --- a/paddle/ir/core/attribute_base.h +++ b/paddle/pir/core/attribute_base.h @@ -14,11 +14,11 @@ #pragma once -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/storage_manager.h" -#include "paddle/ir/core/type_id.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/storage_manager.h" +#include "paddle/pir/core/type_id.h" -namespace ir { +namespace pir { class Dialect; /// @@ -155,7 +155,7 @@ struct IR_API AttributeManager { template static T get(IrContext *ctx, Args &&...args) { return get( - ctx, ir::TypeId::get(), std::forward(args)...); + ctx, pir::TypeId::get(), std::forward(args)...); } /// @@ -204,7 +204,7 @@ struct IR_API AttributeManager { /// template static void RegisterAttribute(IrContext *ctx) { - RegisterAttribute(ctx, ir::TypeId::get()); + RegisterAttribute(ctx, pir::TypeId::get()); } /// @@ -242,25 +242,25 @@ struct IR_API AttributeManager { /// /// \brief Add some necessary functions to the custom Attribute class. 
/// -#define DECLARE_ATTRIBUTE_UTILITY_FUNCTOR(concrete_attribute, storage_type) \ - using Storage = storage_type; \ - \ - const Storage *storage() const { \ - return static_cast(this->storage_); \ - } \ - \ - static ir::TypeId type_id() { \ - return ir::TypeId::get(); \ - } \ - \ - template \ - static bool classof(T val) { \ - return val.type_id() == type_id(); \ - } \ - \ - template \ - static concrete_attribute get(ir::IrContext *ctx, Args... args) { \ - return ir::AttributeManager::template get(ctx, \ - args...); \ +#define DECLARE_ATTRIBUTE_UTILITY_FUNCTOR(concrete_attribute, storage_type) \ + using Storage = storage_type; \ + \ + const Storage *storage() const { \ + return static_cast(this->storage_); \ + } \ + \ + static pir::TypeId type_id() { \ + return pir::TypeId::get(); \ + } \ + \ + template \ + static bool classof(T val) { \ + return val.type_id() == type_id(); \ + } \ + \ + template \ + static concrete_attribute get(pir::IrContext *ctx, Args... args) { \ + return pir::AttributeManager::template get(ctx, \ + args...); \ } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/block.cc b/paddle/pir/core/block.cc similarity index 93% rename from paddle/ir/core/block.cc rename to paddle/pir/core/block.cc index 04d59e2582ebe..f92d532298150 100644 --- a/paddle/ir/core/block.cc +++ b/paddle/pir/core/block.cc @@ -12,15 +12,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/ir/core/block.h" +#include "paddle/pir/core/block.h" #include -#include "paddle/ir/core/enforce.h" -#include "paddle/ir/core/operation.h" -#include "paddle/ir/core/region.h" +#include "paddle/pir/core/enforce.h" +#include "paddle/pir/core/operation.h" +#include "paddle/pir/core/region.h" -namespace ir { +namespace pir { Block::~Block() { assert(use_empty() && "block destroyed still has uses."); clear(); @@ -93,4 +93,4 @@ bool Block::TopoOrderCheck(const OpListType &op_list) { return true; } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/block.h b/paddle/pir/core/block.h similarity index 93% rename from paddle/ir/core/block.h rename to paddle/pir/core/block.h index 7e612d6318d36..3a8b4fafc345d 100644 --- a/paddle/ir/core/block.h +++ b/paddle/pir/core/block.h @@ -17,12 +17,12 @@ #include #include -#include "paddle/ir/core/block_operand.h" -#include "paddle/ir/core/dll_decl.h" -#include "paddle/ir/core/region.h" -#include "paddle/ir/core/use_iterator.h" +#include "paddle/pir/core/block_operand.h" +#include "paddle/pir/core/dll_decl.h" +#include "paddle/pir/core/region.h" +#include "paddle/pir/core/use_iterator.h" -namespace ir { +namespace pir { class Operation; class IR_API Block { @@ -89,4 +89,4 @@ class IR_API Block { Region::iterator position_; BlockOperand first_use_; }; -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/block_operand.cc b/paddle/pir/core/block_operand.cc similarity index 91% rename from paddle/ir/core/block_operand.cc rename to paddle/pir/core/block_operand.cc index f64a07fd50dfe..78dd9c0b5d14e 100644 --- a/paddle/ir/core/block_operand.cc +++ b/paddle/pir/core/block_operand.cc @@ -12,12 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. 
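The DECLARE_ATTRIBUTE_UTILITY_FUNCTOR macro reworked in attribute_base.h above is what hands every concrete attribute its storage() accessor, TypeId, classof check, and get() factory. A hedged sketch of a dialect attribute picking these up; MyAttribute and MyAttributeStorage are hypothetical stand-ins for the usual storage boilerplate:

class MyAttribute : public pir::Attribute {
 public:
  using Attribute::Attribute;
  DECLARE_ATTRIBUTE_UTILITY_FUNCTOR(MyAttribute, MyAttributeStorage);
};

// Construction then routes through the generated factory, e.g.
//   MyAttribute attr = MyAttribute::get(pir::IrContext::Instance(), /*args*/);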
-#include "paddle/ir/core/block_operand.h" -#include "paddle/ir/core/block.h" -#include "paddle/ir/core/block_operand_impl.h" -#include "paddle/ir/core/enforce.h" +#include "paddle/pir/core/block_operand.h" +#include "paddle/pir/core/block.h" +#include "paddle/pir/core/block_operand_impl.h" +#include "paddle/pir/core/enforce.h" -namespace ir { +namespace pir { #define CHECK_BLOCKOPEREND_NULL_IMPL(func_name) \ IR_ENFORCE(impl_, \ @@ -75,7 +75,7 @@ void BlockOperandImpl::set_source(Block *source) { InsertToUdChain(); } -BlockOperandImpl::BlockOperandImpl(Block *source, ir::Operation *owner) +BlockOperandImpl::BlockOperandImpl(Block *source, pir::Operation *owner) : source_(source), owner_(owner) { if (!source) { return; @@ -110,4 +110,4 @@ void BlockOperandImpl::RemoveFromUdChain() { BlockOperandImpl::~BlockOperandImpl() { RemoveFromUdChain(); } } // namespace detail -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/block_operand.h b/paddle/pir/core/block_operand.h similarity index 93% rename from paddle/ir/core/block_operand.h rename to paddle/pir/core/block_operand.h index ec55a90a1c65d..9895af86e7ed7 100644 --- a/paddle/ir/core/block_operand.h +++ b/paddle/pir/core/block_operand.h @@ -14,10 +14,10 @@ #pragma once -#include "paddle/ir/core/cast_utils.h" -#include "paddle/ir/core/type.h" +#include "paddle/pir/core/cast_utils.h" +#include "paddle/pir/core/type.h" -namespace ir { +namespace pir { class Operation; class Value; class Block; @@ -70,4 +70,4 @@ class IR_API BlockOperand { detail::BlockOperandImpl *impl_{nullptr}; }; -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/block_operand_impl.h b/paddle/pir/core/block_operand_impl.h similarity index 94% rename from paddle/ir/core/block_operand_impl.h rename to paddle/pir/core/block_operand_impl.h index 53d8257c10032..1e0f8659a9c10 100644 --- a/paddle/ir/core/block_operand_impl.h +++ b/paddle/pir/core/block_operand_impl.h @@ -14,9 +14,9 @@ #pragma once -#include "paddle/ir/core/block_operand.h" +#include "paddle/pir/core/block_operand.h" -namespace ir { +namespace pir { class Operation; class Block; @@ -58,4 +58,4 @@ class BlockOperandImpl { }; } // namespace detail -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/builder.cc b/paddle/pir/core/builder.cc similarity index 92% rename from paddle/ir/core/builder.cc rename to paddle/pir/core/builder.cc index 1bfbd2e2a8ca8..a91428ba99080 100644 --- a/paddle/ir/core/builder.cc +++ b/paddle/pir/core/builder.cc @@ -12,13 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/ir/core/builder.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/builtin_type.h" -#include "paddle/ir/core/region.h" -#include "paddle/ir/core/value.h" +#include "paddle/pir/core/builder.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/builtin_type.h" +#include "paddle/pir/core/region.h" +#include "paddle/pir/core/value.h" -namespace ir { +namespace pir { /// Create an operation given the fields represented as an OperationState. 
Operation *Builder::Build(OperationArgument &&argument) { return Insert(Operation::Create(std::move(argument))); @@ -81,4 +81,4 @@ PointerAttribute Builder::pointer_attr(void *value) { return PointerAttribute::get(context_, value); } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/builder.h b/paddle/pir/core/builder.h similarity index 92% rename from paddle/ir/core/builder.h rename to paddle/pir/core/builder.h index f3ae837ea9723..acb621e7808e7 100644 --- a/paddle/ir/core/builder.h +++ b/paddle/pir/core/builder.h @@ -16,11 +16,11 @@ #include -#include "paddle/ir/core/block.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/operation.h" +#include "paddle/pir/core/block.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/operation.h" -namespace ir { +namespace pir { class Type; class UInt8Type; class Int8Type; @@ -97,10 +97,10 @@ class Builder { IR_API Operation *Build(OperationArgument &&argument); /// Creates an operation with the given fields. - IR_API Operation *Build(const std::vector &inputs, + IR_API Operation *Build(const std::vector &inputs, const AttributeMap &attribute, - const std::vector &output_types, - ir::OpInfo op_info); + const std::vector &output_types, + pir::OpInfo op_info); /// Create an operation of specific op type at the current insertion point. template @@ -141,4 +141,4 @@ class Builder { Block::iterator insert_point_; }; -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/builtin_attribute.cc b/paddle/pir/core/builtin_attribute.cc similarity index 81% rename from paddle/ir/core/builtin_attribute.cc rename to paddle/pir/core/builtin_attribute.cc index 38ca80cb1f9d7..e14a424c32c8e 100644 --- a/paddle/ir/core/builtin_attribute.cc +++ b/paddle/pir/core/builtin_attribute.cc @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "paddle/ir/core/builtin_attribute.h" +#include "paddle/pir/core/builtin_attribute.h" -namespace ir { +namespace pir { bool BoolAttribute::data() const { return storage()->data(); } @@ -37,7 +37,7 @@ std::string StrAttribute::AsString() const { return storage()->AsString(); } size_t StrAttribute::size() const { return storage()->size(); } -StrAttribute StrAttribute::get(ir::IrContext* ctx, const std::string& value) { +StrAttribute StrAttribute::get(pir::IrContext* ctx, const std::string& value) { return AttributeManager::get(ctx, value); } @@ -79,14 +79,14 @@ ArrayAttributeStorage::~ArrayAttributeStorage() { } } -} // namespace ir - -IR_DEFINE_EXPLICIT_TYPE_ID(ir::StrAttribute) -IR_DEFINE_EXPLICIT_TYPE_ID(ir::BoolAttribute) -IR_DEFINE_EXPLICIT_TYPE_ID(ir::FloatAttribute) -IR_DEFINE_EXPLICIT_TYPE_ID(ir::DoubleAttribute) -IR_DEFINE_EXPLICIT_TYPE_ID(ir::Int32Attribute) -IR_DEFINE_EXPLICIT_TYPE_ID(ir::Int64Attribute) -IR_DEFINE_EXPLICIT_TYPE_ID(ir::ArrayAttribute) -IR_DEFINE_EXPLICIT_TYPE_ID(ir::PointerAttribute) -IR_DEFINE_EXPLICIT_TYPE_ID(ir::TypeAttribute) +} // namespace pir + +IR_DEFINE_EXPLICIT_TYPE_ID(pir::StrAttribute) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::BoolAttribute) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::FloatAttribute) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::DoubleAttribute) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::Int32Attribute) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::Int64Attribute) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::ArrayAttribute) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::PointerAttribute) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::TypeAttribute) diff --git a/paddle/ir/core/builtin_attribute.h b/paddle/pir/core/builtin_attribute.h similarity index 80% rename from paddle/ir/core/builtin_attribute.h rename to paddle/pir/core/builtin_attribute.h index 3969d962e1f4e..7d3f86144915c 100644 --- a/paddle/ir/core/builtin_attribute.h +++ b/paddle/pir/core/builtin_attribute.h @@ -14,11 +14,11 @@ #pragma once -#include "paddle/ir/core/attribute.h" -#include "paddle/ir/core/builtin_attribute_storage.h" -#include "paddle/ir/core/utils.h" +#include "paddle/pir/core/attribute.h" +#include "paddle/pir/core/builtin_attribute_storage.h" +#include "paddle/pir/core/utils.h" -namespace ir { +namespace pir { class IR_API BoolAttribute : public Attribute { public: using Attribute::Attribute; @@ -115,14 +115,14 @@ class IR_API ArrayAttribute : public Attribute { const std::vector& value); }; -} // namespace ir - -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::StrAttribute) -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::BoolAttribute) -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::FloatAttribute) -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::DoubleAttribute) -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::Int32Attribute) -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::Int64Attribute) -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::ArrayAttribute) -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::PointerAttribute) -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::TypeAttribute) +} // namespace pir + +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::StrAttribute) +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::BoolAttribute) +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::FloatAttribute) +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::DoubleAttribute) +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::Int32Attribute) +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::Int64Attribute) +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::ArrayAttribute) +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::PointerAttribute) +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::TypeAttribute) diff --git a/paddle/ir/core/builtin_attribute_storage.h b/paddle/pir/core/builtin_attribute_storage.h similarity 
index 95% rename from paddle/ir/core/builtin_attribute_storage.h rename to paddle/pir/core/builtin_attribute_storage.h index 624abaf004718..fd9dd6eb87128 100644 --- a/paddle/ir/core/builtin_attribute_storage.h +++ b/paddle/pir/core/builtin_attribute_storage.h @@ -18,13 +18,13 @@ #include #include -#include "paddle/ir/core/attribute.h" -#include "paddle/ir/core/attribute_base.h" -#include "paddle/ir/core/enforce.h" -#include "paddle/ir/core/type.h" -#include "paddle/ir/core/utils.h" +#include "paddle/pir/core/attribute.h" +#include "paddle/pir/core/attribute_base.h" +#include "paddle/pir/core/enforce.h" +#include "paddle/pir/core/type.h" +#include "paddle/pir/core/utils.h" -namespace ir { +namespace pir { #define DECLARE_BASE_TYPE_ATTRIBUTE_STORAGE(ConcreteStorage, BaseType) \ struct ConcreteStorage : public AttributeStorage { \ @@ -147,4 +147,4 @@ struct ArrayAttributeStorage : public AttributeStorage { const size_t size_; }; -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/builtin_dialect.cc b/paddle/pir/core/builtin_dialect.cc similarity index 87% rename from paddle/ir/core/builtin_dialect.cc rename to paddle/pir/core/builtin_dialect.cc index 375bf90d2b8fd..23ba43c3d292e 100644 --- a/paddle/ir/core/builtin_dialect.cc +++ b/paddle/pir/core/builtin_dialect.cc @@ -12,12 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/ir/core/builtin_dialect.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/builtin_op.h" -#include "paddle/ir/core/builtin_type.h" +#include "paddle/pir/core/builtin_dialect.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/builtin_op.h" +#include "paddle/pir/core/builtin_type.h" -namespace ir { +namespace pir { BuiltinDialect::BuiltinDialect(IrContext *context) : Dialect(name(), context, TypeId::get()) { initialize(); @@ -59,6 +59,6 @@ void BuiltinDialect::initialize() { ConstantOp>(); } -} // namespace ir +} // namespace pir -IR_DEFINE_EXPLICIT_TYPE_ID(ir::BuiltinDialect) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::BuiltinDialect) diff --git a/paddle/ir/core/builtin_dialect.h b/paddle/pir/core/builtin_dialect.h similarity index 82% rename from paddle/ir/core/builtin_dialect.h rename to paddle/pir/core/builtin_dialect.h index c5872f8142e7b..13e669102d8cc 100644 --- a/paddle/ir/core/builtin_dialect.h +++ b/paddle/pir/core/builtin_dialect.h @@ -14,17 +14,17 @@ #pragma once -#include "paddle/ir/core/dialect.h" +#include "paddle/pir/core/dialect.h" -namespace ir { +namespace pir { /// /// \brief Built-in Dialect: automatically registered into global IrContext, /// all built-in types defined in builtin_type.h will be registered in this /// Dialect. /// -class IR_API BuiltinDialect : public ir::Dialect { +class IR_API BuiltinDialect : public pir::Dialect { public: - explicit BuiltinDialect(ir::IrContext *context); + explicit BuiltinDialect(pir::IrContext *context); /// /// \brief Each Dialect needs to provide a name function to return the name of /// the Dialect. 
@@ -37,6 +37,6 @@ class IR_API BuiltinDialect : public ir::Dialect { void initialize(); }; -} // namespace ir +} // namespace pir -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::BuiltinDialect) +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::BuiltinDialect) diff --git a/paddle/ir/core/builtin_op.cc b/paddle/pir/core/builtin_op.cc similarity index 85% rename from paddle/ir/core/builtin_op.cc rename to paddle/pir/core/builtin_op.cc index 1feb4d691d99b..af4909ff2f6a2 100644 --- a/paddle/ir/core/builtin_op.cc +++ b/paddle/pir/core/builtin_op.cc @@ -12,13 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/ir/core/builtin_op.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/builtin_type.h" -#include "paddle/ir/core/enforce.h" +#include "paddle/pir/core/builtin_op.h" #include "paddle/phi/core/enforce.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/builtin_type.h" +#include "paddle/pir/core/enforce.h" -namespace ir { +namespace pir { const char *ModuleOp::attributes_name[attributes_num] = {"program"}; // NOLINT @@ -38,7 +38,7 @@ Block *ModuleOp::block() { } ModuleOp ModuleOp::Create(IrContext *context, Program *pointer) { - ir::OpInfo info = context->GetRegisteredOpInfo(name()); + pir::OpInfo info = context->GetRegisteredOpInfo(name()); OperationArgument argument(info); argument.num_regions = 1; argument.AddAttribute("program", PointerAttribute::get(context, pointer)); @@ -77,7 +77,7 @@ void GetParameterOp::Build(Builder &builder, const std::string &name, Type type) { argument.attributes[attributes_name[0]] = - ir::StrAttribute::get(builder.ir_context(), name); + pir::StrAttribute::get(builder.ir_context(), name); argument.output_types.emplace_back(type); } @@ -105,7 +105,7 @@ void SetParameterOp::Build(Builder &builder, // NOLINT const std::string &name) { argument.AddOperand(parameter); argument.AddAttribute(attributes_name[0], - ir::StrAttribute::get(builder.ir_context(), name)); + pir::StrAttribute::get(builder.ir_context(), name)); } void SetParameterOp::Verify() const { VLOG(4) << "Verifying inputs, outputs and attributes for: SetParameterOp."; @@ -124,14 +124,14 @@ void SetParameterOp::Verify() const { void CombineOp::Build(Builder &builder, OperationArgument &argument, - const std::vector &inputs) { + const std::vector &inputs) { argument.inputs = inputs; - std::vector inputs_type(inputs.size()); + std::vector inputs_type(inputs.size()); for (size_t idx = 0; idx < inputs.size(); ++idx) { inputs_type[idx] = inputs[idx].type(); } argument.output_types.emplace_back( - ir::VectorType::get(builder.ir_context(), inputs_type)); + pir::VectorType::get(builder.ir_context(), inputs_type)); } void CombineOp::Verify() const { @@ -167,11 +167,11 @@ const char *SliceOp::attributes_name[attributes_num] = {"index"}; // NOLINT void SliceOp::Build(Builder &builder, OperationArgument &argument, - const ir::OpResult &input, + const pir::OpResult &input, int index) { argument.inputs = {input}; argument.output_types.emplace_back(input.type() - .dyn_cast() + .dyn_cast() .data()[static_cast(index)]); } @@ -182,7 +182,7 @@ void SliceOp::Verify() const { input_size == 1, "The size %d of inputs must be equal to 1.", input_size); // inputs[0].type == Vector - auto input_type = (*this)->operand(0).type().dyn_cast(); + auto input_type = (*this)->operand(0).type().dyn_cast(); IR_ENFORCE(input_type, "The type %s of inputs[0] must be equal to VectorType.", input_type); @@ -197,10 +197,10 @@ void 
SliceOp::Verify() const { auto &attributes = this->attributes(); IR_ENFORCE(attributes.count("index") != 0, "The attributes must contains index."); - const ir::Attribute &attr = attributes.at("index"); - IR_ENFORCE(attr.isa(), + const pir::Attribute &attr = attributes.at("index"); + IR_ENFORCE(attr.isa(), "The attribute index must be INT32."); - auto index = attr.dyn_cast().data(); + auto index = attr.dyn_cast().data(); // index >= 0 and < inputs[0].size() IR_ENFORCE( @@ -222,12 +222,12 @@ void SliceOp::Verify() const { void SplitOp::Build(Builder &builder, OperationArgument &argument, - const ir::OpResult &input) { + const pir::OpResult &input) { argument.inputs = {input}; - for (size_t idx = 0; idx < input.type().dyn_cast().size(); + for (size_t idx = 0; idx < input.type().dyn_cast().size(); ++idx) { argument.output_types.emplace_back( - input.type().dyn_cast().data()[idx]); + input.type().dyn_cast().data()[idx]); } } @@ -277,13 +277,13 @@ void ConstantOp::Verify() const { Attribute ConstantOp::value() const { return attributes().at("value"); } -} // namespace ir +} // namespace pir -IR_DEFINE_EXPLICIT_TYPE_ID(ir::ModuleOp) -IR_DEFINE_EXPLICIT_TYPE_ID(ir::GetParameterOp) -IR_DEFINE_EXPLICIT_TYPE_ID(ir::SetParameterOp) -IR_DEFINE_EXPLICIT_TYPE_ID(ir::CombineOp) -IR_DEFINE_EXPLICIT_TYPE_ID(ir::SliceOp) -IR_DEFINE_EXPLICIT_TYPE_ID(ir::SplitOp) -IR_DEFINE_EXPLICIT_TYPE_ID(ir::ConstantLikeTrait) -IR_DEFINE_EXPLICIT_TYPE_ID(ir::ConstantOp) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::ModuleOp) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::GetParameterOp) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::SetParameterOp) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::CombineOp) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::SliceOp) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::SplitOp) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::ConstantLikeTrait) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::ConstantOp) diff --git a/paddle/ir/core/builtin_op.h b/paddle/pir/core/builtin_op.h similarity index 76% rename from paddle/ir/core/builtin_op.h rename to paddle/pir/core/builtin_op.h index ab2d0cb9efba6..fee0ca406a741 100644 --- a/paddle/ir/core/builtin_op.h +++ b/paddle/pir/core/builtin_op.h @@ -14,17 +14,17 @@ #pragma once -#include "paddle/ir/core/builder.h" -#include "paddle/ir/core/op_base.h" +#include "paddle/pir/core/builder.h" +#include "paddle/pir/core/op_base.h" -namespace ir { +namespace pir { class Program; class Block; /// /// \brief ModuleOp /// -class IR_API ModuleOp : public ir::Op { +class IR_API ModuleOp : public pir::Op { public: using Op::Op; static const char *name() { return "builtin.module"; } @@ -45,7 +45,7 @@ class IR_API ModuleOp : public ir::Op { /// \brief GetParameterOp: OpResult = GetParameterOp({StrAttribute, /// StrAttribute}) /// -class IR_API GetParameterOp : public ir::Op { +class IR_API GetParameterOp : public pir::Op { public: using Op::Op; static const char *name() { return "builtin.get_parameter"; } @@ -62,7 +62,7 @@ class IR_API GetParameterOp : public ir::Op { /// \brief SetParameterOp: SetParameterOp(OpOperand, {StrAttribute, /// StrAttribute}) /// -class IR_API SetParameterOp : public ir::Op { +class IR_API SetParameterOp : public pir::Op { public: using Op::Op; static const char *name() { return "builtin.set_parameter"; } @@ -78,7 +78,7 @@ class IR_API SetParameterOp : public ir::Op { /// /// \brief CombineOp: CombineOp(OpOperand) /// -class IR_API CombineOp : public ir::Op { +class IR_API CombineOp : public pir::Op { public: using Op::Op; @@ -90,23 +90,23 @@ class IR_API CombineOp : public ir::Op { static void Build(Builder &builder, // NOLINT OperationArgument 
&argument, // NOLINT - const std::vector &inputs); + const std::vector &inputs); void Verify() const; - std::vector inputs() { - std::vector inputs; + std::vector inputs() { + std::vector inputs; for (uint32_t idx = 0; idx < num_operands(); idx++) { inputs.push_back(operand_source(static_cast(idx))); } return inputs; } - ir::OpResult out() { return result(0); } + pir::OpResult out() { return result(0); } }; /// /// \brief SliceOp: SliceOp(OpOperand) /// -class IR_API SliceOp : public ir::Op { +class IR_API SliceOp : public pir::Op { public: using Op::Op; @@ -118,17 +118,17 @@ class IR_API SliceOp : public ir::Op { static void Build(Builder &builder, // NOLINT OperationArgument &argument, // NOLINT - const ir::OpResult &input, + const pir::OpResult &input, int index); void Verify() const; - ir::Value input() { return operand_source(0); } + pir::Value input() { return operand_source(0); } }; /// /// \brief SplitOp: SplitOp(OpOperand) /// -class IR_API SplitOp : public ir::Op { +class IR_API SplitOp : public pir::Op { public: using Op::Op; @@ -140,12 +140,12 @@ class IR_API SplitOp : public ir::Op { static void Build(Builder &builder, // NOLINT OperationArgument &argument, // NOLINT - const ir::OpResult &input); + const pir::OpResult &input); void Verify() const; - ir::Value input() { return operand_source(0); } - std::vector outputs() { - std::vector outputs; + pir::Value input() { return operand_source(0); } + std::vector outputs() { + std::vector outputs; for (uint32_t idx = 0; idx < num_results(); idx++) { outputs.push_back(result(static_cast(idx))); } @@ -180,13 +180,13 @@ class IR_API ConstantOp : public Op { Attribute value() const; }; -} // namespace ir +} // namespace pir -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::ModuleOp) -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::GetParameterOp) -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::SetParameterOp) -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::CombineOp) -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::SliceOp) -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::SplitOp) -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::ConstantLikeTrait) -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::ConstantOp) +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::ModuleOp) +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::GetParameterOp) +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::SetParameterOp) +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::CombineOp) +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::SliceOp) +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::SplitOp) +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::ConstantLikeTrait) +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::ConstantOp) diff --git a/paddle/ir/core/builtin_type.cc b/paddle/pir/core/builtin_type.cc similarity index 56% rename from paddle/ir/core/builtin_type.cc rename to paddle/pir/core/builtin_type.cc index 49a15484466b2..8d7de683e086a 100644 --- a/paddle/ir/core/builtin_type.cc +++ b/paddle/pir/core/builtin_type.cc @@ -12,12 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. 
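As a reading aid, a sketch of the Combine/Slice pairing declared above: CombineOp packs values into one VectorType-typed value, SliceOp pulls one element back out. The helper below is an assumption for illustration, not part of this patch.

// Hypothetical usage sketch, not part of the patch.
pir::OpResult PackAndPick(pir::Builder &builder,
                          pir::OpResult a,
                          pir::OpResult b) {
  pir::CombineOp combine =
      builder.Build<pir::CombineOp>(std::vector<pir::OpResult>{a, b});
  // Slice element 1 (the value that was `b`) back out of the vector value.
  pir::SliceOp slice = builder.Build<pir::SliceOp>(combine.out(), /*index=*/1);
  return slice.result(0);
}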
-#include "paddle/ir/core/builtin_type.h" +#include "paddle/pir/core/builtin_type.h" -namespace ir { +namespace pir { std::vector VectorType::data() const { return storage()->GetAsKey(); } -const ir::Type& DenseTensorType::dtype() const { return storage()->dtype_; } +const pir::Type& DenseTensorType::dtype() const { return storage()->dtype_; } const DenseTensorTypeStorage::Dim& DenseTensorType::dims() const { return storage()->dims_; @@ -32,20 +32,20 @@ const DenseTensorTypeStorage::LoD& DenseTensorType::lod() const { } const size_t& DenseTensorType::offset() const { return storage()->offset_; } -} // namespace ir - -IR_DEFINE_EXPLICIT_TYPE_ID(ir::UInt8Type) -IR_DEFINE_EXPLICIT_TYPE_ID(ir::Int8Type) -IR_DEFINE_EXPLICIT_TYPE_ID(ir::VectorType) -IR_DEFINE_EXPLICIT_TYPE_ID(ir::BFloat16Type) -IR_DEFINE_EXPLICIT_TYPE_ID(ir::Float16Type) -IR_DEFINE_EXPLICIT_TYPE_ID(ir::Float32Type) -IR_DEFINE_EXPLICIT_TYPE_ID(ir::Float64Type) -IR_DEFINE_EXPLICIT_TYPE_ID(ir::Int16Type) -IR_DEFINE_EXPLICIT_TYPE_ID(ir::Int32Type) -IR_DEFINE_EXPLICIT_TYPE_ID(ir::Int64Type) -IR_DEFINE_EXPLICIT_TYPE_ID(ir::IndexType) -IR_DEFINE_EXPLICIT_TYPE_ID(ir::BoolType) -IR_DEFINE_EXPLICIT_TYPE_ID(ir::Complex64Type) -IR_DEFINE_EXPLICIT_TYPE_ID(ir::Complex128Type) -IR_DEFINE_EXPLICIT_TYPE_ID(ir::DenseTensorType) +} // namespace pir + +IR_DEFINE_EXPLICIT_TYPE_ID(pir::UInt8Type) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::Int8Type) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::VectorType) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::BFloat16Type) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::Float16Type) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::Float32Type) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::Float64Type) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::Int16Type) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::Int32Type) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::Int64Type) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::IndexType) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::BoolType) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::Complex64Type) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::Complex128Type) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::DenseTensorType) diff --git a/paddle/ir/core/builtin_type.h b/paddle/pir/core/builtin_type.h similarity index 74% rename from paddle/ir/core/builtin_type.h rename to paddle/pir/core/builtin_type.h index a660f065376b2..29c99f382ff52 100644 --- a/paddle/ir/core/builtin_type.h +++ b/paddle/pir/core/builtin_type.h @@ -15,10 +15,10 @@ #pragma once -#include "paddle/ir/core/builtin_type_storage.h" -#include "paddle/ir/core/type.h" +#include "paddle/pir/core/builtin_type_storage.h" +#include "paddle/pir/core/type.h" -namespace ir { +namespace pir { /// /// \brief Define built-in parameterless types. 
Please add the necessary /// interface functions for built-in types through the macro @@ -31,7 +31,7 @@ namespace ir { /// /// The built-in type object get method is as follows: /// \code{cpp} -/// ir::IrContext *ctx = ir::IrContext::Instance(); +/// pir::IrContext *ctx = pir::IrContext::Instance(); /// Type fp32 = Float32Type::get(ctx); /// \endcode /// @@ -54,13 +54,13 @@ class IR_API VectorType : public Type { Type operator[](size_t index) const { return data()[index]; } }; -class DenseTensorType : public ir::Type { +class DenseTensorType : public pir::Type { public: using Type::Type; DECLARE_TYPE_UTILITY_FUNCTOR(DenseTensorType, DenseTensorTypeStorage); - const ir::Type &dtype() const; + const pir::Type &dtype() const; const DenseTensorTypeStorage::Dim &dims() const; @@ -101,20 +101,20 @@ FOREACH_BUILTIN_TYPE(DECLARE_BUILTIN_TYPE) #undef FOREACH_BUILTIN_TYPE #undef DECLARE_BUILTIN_TYPE -} // namespace ir - -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::UInt8Type) -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::Int8Type) -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::VectorType) -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::BFloat16Type) -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::Float16Type) -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::Float32Type) -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::Float64Type) -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::Int16Type) -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::Int32Type) -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::Int64Type) -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::BoolType) -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::IndexType) -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::Complex64Type) -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::Complex128Type) -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::DenseTensorType) +} // namespace pir + +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::UInt8Type) +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::Int8Type) +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::VectorType) +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::BFloat16Type) +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::Float16Type) +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::Float32Type) +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::Float64Type) +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::Int16Type) +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::Int32Type) +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::Int64Type) +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::BoolType) +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::IndexType) +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::Complex64Type) +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::Complex128Type) +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::DenseTensorType) diff --git a/paddle/ir/core/builtin_type_storage.h b/paddle/pir/core/builtin_type_storage.h similarity index 87% rename from paddle/ir/core/builtin_type_storage.h rename to paddle/pir/core/builtin_type_storage.h index 4488b28b07fa2..b8b18d09ddd26 100644 --- a/paddle/ir/core/builtin_type_storage.h +++ b/paddle/pir/core/builtin_type_storage.h @@ -14,11 +14,11 @@ #pragma once -#include "paddle/ir/core/type.h" -#include "paddle/ir/core/type_base.h" -#include "paddle/ir/core/utils.h" #include "paddle/phi/common/layout.h" #include "paddle/phi/core/ddim.h" +#include "paddle/pir/core/type.h" +#include "paddle/pir/core/type_base.h" +#include "paddle/pir/core/utils.h" namespace std { /// @@ -37,7 +37,7 @@ struct hash> { } // namespace std -namespace ir { +namespace pir { /// /// \brief Define Parametric TypeStorage for DenseTensorType. /// @@ -46,16 +46,16 @@ namespace ir { /// (3)define HashValue method, (4)overload operator==. 
/// -struct DenseTensorTypeStorage : public ir::TypeStorage { +struct DenseTensorTypeStorage : public pir::TypeStorage { /// /// \brief Declare ParamKey according to parameter type. /// using DataLayout = phi::DataLayout; using Dim = phi::DDim; using LoD = std::vector>; - using ParamKey = std::tuple; + using ParamKey = std::tuple; - DenseTensorTypeStorage(const ir::Type& dtype, + DenseTensorTypeStorage(const pir::Type& dtype, const Dim& dims, const DataLayout& layout, const LoD& lod, @@ -85,22 +85,22 @@ struct DenseTensorTypeStorage : public ir::TypeStorage { std::size_t hash_value = 0; // hash dtype hash_value = - ir::hash_combine(hash_value, std::hash()(std::get<0>(key))); + pir::hash_combine(hash_value, std::hash()(std::get<0>(key))); // hash dims hash_value = - ir::hash_combine(hash_value, std::hash()(std::get<1>(key))); + pir::hash_combine(hash_value, std::hash()(std::get<1>(key))); // hash layout - hash_value = ir::hash_combine( + hash_value = pir::hash_combine( hash_value, std::hash::type>()( static_cast::type>( std::get<2>(key)))); // hash lod hash_value = - ir::hash_combine(hash_value, std::hash()(std::get<3>(key))); + pir::hash_combine(hash_value, std::hash()(std::get<3>(key))); // hash offset hash_value = - ir::hash_combine(hash_value, std::hash()(std::get<4>(key))); + pir::hash_combine(hash_value, std::hash()(std::get<4>(key))); return hash_value; } @@ -119,7 +119,7 @@ struct DenseTensorTypeStorage : public ir::TypeStorage { /// \brief DenseTensorTypeStorage include five parameters: dims, dtype, /// layout, lod, offset. /// - ir::Type dtype_; + pir::Type dtype_; Dim dims_; DataLayout layout_; LoD lod_; @@ -183,4 +183,4 @@ struct VectorTypeStorage : public TypeStorage { size_t size_; }; -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/cast_utils.h b/paddle/pir/core/cast_utils.h similarity index 99% rename from paddle/ir/core/cast_utils.h rename to paddle/pir/core/cast_utils.h index dcc4b89fe8b04..3cc6e9abd09c4 100644 --- a/paddle/ir/core/cast_utils.h +++ b/paddle/pir/core/cast_utils.h @@ -16,7 +16,7 @@ #include -namespace ir { +namespace pir { /// /// \brief The template function actually called by isa_wrap. /// @@ -154,4 +154,4 @@ inline typename ReturnTypeDuduction::type dyn_cast(From *Val) { return isa(Val) ? cast(Val) : nullptr; } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/dialect.cc b/paddle/pir/core/dialect.cc similarity index 88% rename from paddle/ir/core/dialect.cc rename to paddle/pir/core/dialect.cc index 0a4a6cc3b3854..e6831e977fa31 100644 --- a/paddle/ir/core/dialect.cc +++ b/paddle/pir/core/dialect.cc @@ -12,10 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. 
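The isa/cast/dyn_cast helpers moved above keep their MLIR-style usage; a small sketch under that assumption, combining them with the builtin types from builtin_type.h:

// Hypothetical usage sketch, not part of the patch.
bool IsF32DenseTensor(pir::Type type) {
  // dyn_cast yields a null handle when the cast fails, so it doubles as a test.
  auto tensor_type = type.dyn_cast<pir::DenseTensorType>();
  return tensor_type && tensor_type.dtype().isa<pir::Float32Type>();
}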
-#include "paddle/ir/core/dialect.h" +#include "paddle/pir/core/dialect.h" -namespace ir { -Dialect::Dialect(std::string name, ir::IrContext *context, ir::TypeId id) +namespace pir { +Dialect::Dialect(std::string name, pir::IrContext *context, pir::TypeId id) : name_(std::move(name)), context_(context), id_(id) {} Dialect::~Dialect() = default; @@ -32,4 +32,4 @@ IrContext *DialectInterface::ir_context() const { return dialect_->ir_context(); } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/dialect.h b/paddle/pir/core/dialect.h similarity index 94% rename from paddle/ir/core/dialect.h rename to paddle/pir/core/dialect.h index f07a4242f362c..07debaf196041 100644 --- a/paddle/ir/core/dialect.h +++ b/paddle/pir/core/dialect.h @@ -17,15 +17,15 @@ #include #include -#include "paddle/ir/core/attribute.h" -#include "paddle/ir/core/attribute_base.h" -#include "paddle/ir/core/dialect_interface.h" -#include "paddle/ir/core/enforce.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/op_base.h" -#include "paddle/ir/core/type_base.h" +#include "paddle/pir/core/attribute.h" +#include "paddle/pir/core/attribute_base.h" +#include "paddle/pir/core/dialect_interface.h" +#include "paddle/pir/core/enforce.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/op_base.h" +#include "paddle/pir/core/type_base.h" -namespace ir { +namespace pir { class Operation; class IrPrinter; @@ -174,4 +174,4 @@ class IR_API Dialect { std::unordered_map> registered_interfaces_; }; -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/dialect_interface.h b/paddle/pir/core/dialect_interface.h similarity index 96% rename from paddle/ir/core/dialect_interface.h rename to paddle/pir/core/dialect_interface.h index e24b3481f4ef4..7cb2b89de03eb 100644 --- a/paddle/ir/core/dialect_interface.h +++ b/paddle/pir/core/dialect_interface.h @@ -14,9 +14,9 @@ #pragma once -#include "paddle/ir/core/type_id.h" +#include "paddle/pir/core/type_id.h" -namespace ir { +namespace pir { class Dialect; class IrContext; /// @@ -64,4 +64,4 @@ class IR_API DialectInterface { TypeId interface_id_; }; -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/dll_decl.h b/paddle/pir/core/dll_decl.h similarity index 100% rename from paddle/ir/core/dll_decl.h rename to paddle/pir/core/dll_decl.h diff --git a/paddle/ir/core/enforce.h b/paddle/pir/core/enforce.h similarity index 95% rename from paddle/ir/core/enforce.h rename to paddle/pir/core/enforce.h index 10735297f305d..a3b1401b64d25 100644 --- a/paddle/ir/core/enforce.h +++ b/paddle/pir/core/enforce.h @@ -30,7 +30,7 @@ inline bool is_error(const T& stat) { return !stat; } -namespace ir { +namespace pir { class IrNotMetException : public std::exception { public: explicit IrNotMetException(const std::string& str) : err_str_(str) {} @@ -44,7 +44,7 @@ class IrNotMetException : public std::exception { #define IR_THROW(...) 
\ do { \ try { \ - throw ir::IrNotMetException( \ + throw pir::IrNotMetException( \ paddle::string::Sprintf("Error occured at: %s:%d :\n%s", \ __FILE__, \ __LINE__, \ @@ -60,7 +60,7 @@ class IrNotMetException : public std::exception { bool __cond__(COND); \ if (UNLIKELY(is_error(__cond__))) { \ try { \ - throw ir::IrNotMetException( \ + throw pir::IrNotMetException( \ paddle::string::Sprintf("Error occured at: %s:%d :\n%s", \ __FILE__, \ __LINE__, \ @@ -72,4 +72,4 @@ class IrNotMetException : public std::exception { } \ } while (0) -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/ir_context.cc b/paddle/pir/core/ir_context.cc similarity index 85% rename from paddle/ir/core/ir_context.cc rename to paddle/pir/core/ir_context.cc index 9fe79ac84b6a4..b7aca14e8f60b 100644 --- a/paddle/ir/core/ir_context.cc +++ b/paddle/pir/core/ir_context.cc @@ -12,19 +12,19 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/ir/core/ir_context.h" +#include "paddle/pir/core/ir_context.h" #include -#include "paddle/ir/core/attribute_base.h" -#include "paddle/ir/core/builtin_dialect.h" -#include "paddle/ir/core/builtin_type.h" -#include "paddle/ir/core/dialect.h" -#include "paddle/ir/core/op_info_impl.h" -#include "paddle/ir/core/spin_lock.h" -#include "paddle/ir/core/type_base.h" +#include "paddle/pir/core/attribute_base.h" +#include "paddle/pir/core/builtin_dialect.h" +#include "paddle/pir/core/builtin_type.h" +#include "paddle/pir/core/dialect.h" +#include "paddle/pir/core/op_info_impl.h" +#include "paddle/pir/core/spin_lock.h" +#include "paddle/pir/core/type_base.h" -namespace ir { +namespace pir { // The implementation class of the IrContext class, cache registered // AbstractType, TypeStorage, AbstractAttribute, AttributeStorage, Dialect. 
class IrContextImpl { @@ -32,7 +32,7 @@ class IrContextImpl { IrContextImpl() = default; ~IrContextImpl() { - std::lock_guard guard(destructor_lock_); + std::lock_guard guard(destructor_lock_); for (auto &abstract_type_map : registed_abstract_types_) { delete abstract_type_map.second; } @@ -54,48 +54,48 @@ class IrContextImpl { registed_op_infos_.clear(); } - void RegisterAbstractType(ir::TypeId type_id, AbstractType *abstract_type) { - std::lock_guard guard(registed_abstract_types_lock_); + void RegisterAbstractType(pir::TypeId type_id, AbstractType *abstract_type) { + std::lock_guard guard(registed_abstract_types_lock_); VLOG(6) << "Register an abstract_type of: [TypeId_hash=" - << std::hash()(type_id) + << std::hash()(type_id) << ", AbstractType_ptr=" << abstract_type << "]."; registed_abstract_types_.emplace(type_id, abstract_type); } - AbstractType *GetAbstractType(ir::TypeId type_id) { - std::lock_guard guard(registed_abstract_types_lock_); + AbstractType *GetAbstractType(pir::TypeId type_id) { + std::lock_guard guard(registed_abstract_types_lock_); auto iter = registed_abstract_types_.find(type_id); if (iter != registed_abstract_types_.end()) { VLOG(6) << "Found a cached abstract_type of: [TypeId_hash=" - << std::hash()(type_id) + << std::hash()(type_id) << ", AbstractType_ptr=" << iter->second << "]."; return iter->second; } LOG(WARNING) << "No cache found abstract_type of: [TypeId_hash=" - << std::hash()(type_id) << "]."; + << std::hash()(type_id) << "]."; return nullptr; } - void RegisterAbstractAttribute(ir::TypeId type_id, + void RegisterAbstractAttribute(pir::TypeId type_id, AbstractAttribute *abstract_attribute) { - std::lock_guard guard(registed_abstract_attributes_lock_); + std::lock_guard guard(registed_abstract_attributes_lock_); VLOG(6) << "Register an abstract_attribute of: [TypeId_hash=" - << std::hash()(type_id) + << std::hash()(type_id) << ", AbstractAttribute_ptr=" << abstract_attribute << "]."; registed_abstract_attributes_.emplace(type_id, abstract_attribute); } - AbstractAttribute *GetAbstractAttribute(ir::TypeId type_id) { - std::lock_guard guard(registed_abstract_attributes_lock_); + AbstractAttribute *GetAbstractAttribute(pir::TypeId type_id) { + std::lock_guard guard(registed_abstract_attributes_lock_); auto iter = registed_abstract_attributes_.find(type_id); if (iter != registed_abstract_attributes_.end()) { VLOG(4) << "Found a cached abstract_attribute of: [TypeId_hash=" - << std::hash()(type_id) + << std::hash()(type_id) << ", AbstractAttribute_ptr=" << iter->second << "]."; return iter->second; } LOG(WARNING) << "No cache found abstract_attribute of: [TypeId_hash=" - << std::hash()(type_id) << "]."; + << std::hash()(type_id) << "]."; return nullptr; } @@ -104,14 +104,14 @@ class IrContextImpl { } void RegisterOpInfo(const std::string &name, OpInfo info) { - std::lock_guard guard(registed_op_infos_lock_); + std::lock_guard guard(registed_op_infos_lock_); VLOG(6) << "Register an operation of: [Name=" << name << ", OpInfo ptr=" << info.AsOpaquePointer() << "]."; registed_op_infos_.emplace(name, info); } OpInfo GetOpInfo(const std::string &name) { - std::lock_guard guard(registed_op_infos_lock_); + std::lock_guard guard(registed_op_infos_lock_); auto iter = registed_op_infos_.find(name); if (iter != registed_op_infos_.end()) { VLOG(8) << "Found a cached OpInfo of: [name=" << name @@ -124,7 +124,7 @@ class IrContextImpl { const OpInfoMap ®istered_op_info_map() { return registed_op_infos_; } void RegisterDialect(std::string name, Dialect *dialect) { - 
std::lock_guard guard(registed_dialect_lock_); + std::lock_guard guard(registed_dialect_lock_); VLOG(6) << "Register a dialect of: [name=" << name << ", dialect_ptr=" << dialect << "]."; registed_dialect_.emplace(name, dialect); @@ -135,7 +135,7 @@ class IrContextImpl { } Dialect *GetDialect(const std::string &name) { - std::lock_guard guard(registed_dialect_lock_); + std::lock_guard guard(registed_dialect_lock_); auto iter = registed_dialect_.find(name); if (iter != registed_dialect_.end()) { VLOG(6) << "Found a cached dialect of: [name=" << name @@ -148,7 +148,7 @@ class IrContextImpl { // Cached AbstractType instances. std::unordered_map registed_abstract_types_; - ir::SpinLock registed_abstract_types_lock_; + pir::SpinLock registed_abstract_types_lock_; // TypeStorage uniquer and cache instances. StorageManager registed_type_storage_manager_; // Cache some built-in type objects. @@ -168,19 +168,19 @@ class IrContextImpl { // Cached AbstractAttribute instances. std::unordered_map registed_abstract_attributes_; - ir::SpinLock registed_abstract_attributes_lock_; + pir::SpinLock registed_abstract_attributes_lock_; // AttributeStorage uniquer and cache instances. StorageManager registed_attribute_storage_manager_; // The dialect registered in the context. std::unordered_map registed_dialect_; - ir::SpinLock registed_dialect_lock_; + pir::SpinLock registed_dialect_lock_; // The Op registered in the context. OpInfoMap registed_op_infos_; - ir::SpinLock registed_op_infos_lock_; + pir::SpinLock registed_op_infos_lock_; - ir::SpinLock destructor_lock_; + pir::SpinLock destructor_lock_; }; IrContext *IrContext::Instance() { @@ -223,7 +223,7 @@ AbstractType *IrContext::GetRegisteredAbstractType(TypeId id) { } void IrContext::RegisterAbstractAttribute( - ir::TypeId type_id, AbstractAttribute &&abstract_attribute) { + pir::TypeId type_id, AbstractAttribute &&abstract_attribute) { if (GetRegisteredAbstractAttribute(type_id) == nullptr) { impl().RegisterAbstractAttribute( type_id, new AbstractAttribute(std::move(abstract_attribute))); @@ -274,7 +274,7 @@ Dialect *IrContext::GetRegisteredDialect(const std::string &dialect_name) { return nullptr; } -void IrContext::RegisterAbstractType(ir::TypeId type_id, +void IrContext::RegisterAbstractType(pir::TypeId type_id, AbstractType &&abstract_type) { if (GetRegisteredAbstractType(type_id) == nullptr) { impl().RegisterAbstractType(type_id, @@ -361,4 +361,4 @@ Complex128Type Complex128Type::get(IrContext *ctx) { return ctx->impl().complex128_type; } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/ir_context.h b/paddle/pir/core/ir_context.h similarity index 97% rename from paddle/ir/core/ir_context.h rename to paddle/pir/core/ir_context.h index ebec8d202ceb5..a9b45d2cb8292 100644 --- a/paddle/ir/core/ir_context.h +++ b/paddle/pir/core/ir_context.h @@ -18,9 +18,9 @@ #include #include -#include "paddle/ir/core/dll_decl.h" +#include "paddle/pir/core/dll_decl.h" -namespace ir { +namespace pir { class IrContextImpl; class StorageManager; class AbstractType; @@ -86,7 +86,7 @@ class IR_API IrContext { /// \param type_id The type id of the AbstractAttribute. /// \param abstract_attribute AbstractAttribute provided by user. 
/// - void RegisterAbstractAttribute(ir::TypeId type_id, + void RegisterAbstractAttribute(pir::TypeId type_id, AbstractAttribute &&abstract_attribute); /// @@ -190,4 +190,4 @@ class IR_API IrContext { IrContextImpl *impl_; }; -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/ir_parser.h b/paddle/pir/core/ir_parser.h similarity index 82% rename from paddle/ir/core/ir_parser.h rename to paddle/pir/core/ir_parser.h index dbba3e2aaba80..07aa74e1d5691 100644 --- a/paddle/ir/core/ir_parser.h +++ b/paddle/pir/core/ir_parser.h @@ -13,16 +13,16 @@ // limitations under the License. #pragma once -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/operation.h" -#include "paddle/ir/core/parser/lexer.h" -#include "paddle/ir/core/program.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/operation.h" +#include "paddle/pir/core/parser/lexer.h" +#include "paddle/pir/core/program.h" -using OpResultMap = std::map; -using AttributeMap = std::unordered_map; +using OpResultMap = std::map; +using AttributeMap = std::unordered_map; using OpAttributeInfoMap = std::map; -namespace ir { +namespace pir { class IrParser { public: std::unique_ptr lexer; @@ -68,4 +68,4 @@ class IrParser { void ConsumeAToken(std::string expect_token_val); }; -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/ir_printer.cc b/paddle/pir/core/ir_printer.cc similarity index 95% rename from paddle/ir/core/ir_printer.cc rename to paddle/pir/core/ir_printer.cc index 0d0ce64f679de..7fa8e076ad147 100644 --- a/paddle/ir/core/ir_printer.cc +++ b/paddle/pir/core/ir_printer.cc @@ -17,17 +17,17 @@ #include #include -#include "paddle/ir/core/block.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/builtin_type.h" -#include "paddle/ir/core/dialect.h" -#include "paddle/ir/core/ir_printer.h" -#include "paddle/ir/core/operation.h" -#include "paddle/ir/core/program.h" -#include "paddle/ir/core/utils.h" -#include "paddle/ir/core/value.h" - -namespace ir { +#include "paddle/pir/core/block.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/builtin_type.h" +#include "paddle/pir/core/dialect.h" +#include "paddle/pir/core/ir_printer.h" +#include "paddle/pir/core/operation.h" +#include "paddle/pir/core/program.h" +#include "paddle/pir/core/utils.h" +#include "paddle/pir/core/value.h" + +namespace pir { namespace { constexpr char newline[] = "\n"; // NOLINT @@ -334,4 +334,4 @@ std::ostream& operator<<(std::ostream& os, const Program& prog) { return os; } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/ir_printer.h b/paddle/pir/core/ir_printer.h similarity index 86% rename from paddle/ir/core/ir_printer.h rename to paddle/pir/core/ir_printer.h index c393d2dfbe90a..a845bec52490c 100644 --- a/paddle/ir/core/ir_printer.h +++ b/paddle/pir/core/ir_printer.h @@ -18,15 +18,15 @@ #include #include -#include "paddle/ir/core/attribute.h" -#include "paddle/ir/core/block.h" -#include "paddle/ir/core/operation.h" -#include "paddle/ir/core/program.h" -#include "paddle/ir/core/region.h" -#include "paddle/ir/core/type.h" -#include "paddle/ir/core/value.h" +#include "paddle/pir/core/attribute.h" +#include "paddle/pir/core/block.h" +#include "paddle/pir/core/operation.h" +#include "paddle/pir/core/program.h" +#include "paddle/pir/core/region.h" +#include "paddle/pir/core/type.h" +#include "paddle/pir/core/value.h" -namespace ir { +namespace pir { class BasicIrPrinter { public: @@ -75,4 +75,4 @@ class IR_API IrPrinter : public BasicIrPrinter { 
std::unordered_map aliases_; }; -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/macros.h b/paddle/pir/core/macros.h similarity index 97% rename from paddle/ir/core/macros.h rename to paddle/pir/core/macros.h index 962ca6d4107f3..25d6dd5a812ab 100644 --- a/paddle/ir/core/macros.h +++ b/paddle/pir/core/macros.h @@ -13,7 +13,7 @@ // limitations under the License. #pragma once -namespace ir { +namespace pir { // TODO(Aurelius84): We also has DISABLE_COPY_AND_ASSIGN in phi/core/maros.h, // howere it's not recommended to use it in ir namspace. So we define this again // here. @@ -28,4 +28,4 @@ namespace ir { classname& operator=(classname&&) = delete #endif -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/op_base.cc b/paddle/pir/core/op_base.cc similarity index 92% rename from paddle/ir/core/op_base.cc rename to paddle/pir/core/op_base.cc index 6f6dca0cdc125..a7ebd9febe973 100644 --- a/paddle/ir/core/op_base.cc +++ b/paddle/pir/core/op_base.cc @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/ir/core/op_base.h" -namespace ir { +#include "paddle/pir/core/op_base.h" +namespace pir { InterfaceValue::~InterfaceValue() { if (model_) free(model_); } @@ -28,4 +28,4 @@ InterfaceValue& InterfaceValue::operator=(InterfaceValue&& val) noexcept { swap(std::move(val)); return *this; } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/op_base.h b/paddle/pir/core/op_base.h similarity index 96% rename from paddle/ir/core/op_base.h rename to paddle/pir/core/op_base.h index 0a491795d4eed..a19f906cbe8ca 100644 --- a/paddle/ir/core/op_base.h +++ b/paddle/pir/core/op_base.h @@ -15,11 +15,11 @@ #pragma once #include -#include "paddle/ir/core/enforce.h" -#include "paddle/ir/core/operation.h" -#include "paddle/ir/core/utils.h" +#include "paddle/pir/core/enforce.h" +#include "paddle/pir/core/operation.h" +#include "paddle/pir/core/utils.h" -namespace ir { +namespace pir { class IR_API InterfaceValue { public: @@ -94,7 +94,7 @@ class IR_API OpBase { OpResult result(uint32_t index) const { return operation()->result(index); } - ir::Attribute attribute(const std::string &name) { + pir::Attribute attribute(const std::string &name) { return operation()->attribute(name); } @@ -174,7 +174,7 @@ class ConstructInterfacesOrTraits { /// Placement new trait. template - static void PlacementConstrctTrait(ir::TypeId *&p_trait) { // NOLINT + static void PlacementConstrctTrait(pir::TypeId *&p_trait) { // NOLINT *p_trait = TypeId::get(); VLOG(6) << "New a trait: id[" << p_trait->AsOpaquePointer() << "]."; ++p_trait; @@ -246,4 +246,4 @@ class Op : public OpBase { } }; -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/op_info.cc b/paddle/pir/core/op_info.cc similarity index 87% rename from paddle/ir/core/op_info.cc rename to paddle/pir/core/op_info.cc index 6c9b62f56e63f..b018bec30448d 100644 --- a/paddle/ir/core/op_info.cc +++ b/paddle/pir/core/op_info.cc @@ -12,12 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. 
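To make the CRTP base above concrete, here is a sketch of what a downstream op definition looks like against the renamed pir::Op; the op name, dialect prefix, and class are invented for illustration and are not part of this patch.

// Hypothetical op definition sketch, not part of the patch.
class MyReluOp : public pir::Op<MyReluOp> {
 public:
  using Op::Op;
  static const char *name() { return "my_dialect.relu"; }
  static constexpr uint32_t attributes_num = 0;
  static constexpr const char **attributes_name = nullptr;
  static void Build(pir::Builder &builder,             // NOLINT
                    pir::OperationArgument &argument,  // NOLINT
                    pir::OpResult input) {
    // Single input, output type mirrors the input type.
    argument.inputs = {input};
    argument.output_types.emplace_back(input.type());
  }
  void Verify() const {}
};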
-#include "paddle/ir/core/op_info.h" -#include "paddle/ir/core/dialect.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/op_info_impl.h" +#include "paddle/pir/core/op_info.h" +#include "paddle/pir/core/dialect.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/op_info_impl.h" -namespace ir { +namespace pir { bool OpInfo::HasTrait(TypeId trait_id) const { return impl_ && impl_->HasTrait(trait_id); } @@ -40,4 +40,4 @@ void OpInfo::Verify(Operation *operation) const { impl_->verify()(operation); } void *OpInfo::GetInterfaceImpl(TypeId interface_id) const { return impl_ ? impl_->GetInterfaceImpl(interface_id) : nullptr; } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/op_info.h b/paddle/pir/core/op_info.h similarity index 91% rename from paddle/ir/core/op_info.h rename to paddle/pir/core/op_info.h index f92d37d4b33e0..322229d027c34 100644 --- a/paddle/ir/core/op_info.h +++ b/paddle/pir/core/op_info.h @@ -16,9 +16,9 @@ #include #include -#include "paddle/ir/core/type_id.h" +#include "paddle/pir/core/type_id.h" -namespace ir { +namespace pir { class OpInfoImpl; class IrContext; class OpResult; @@ -93,13 +93,13 @@ typename Interface::Concept *OpInfo::GetInterfaceImpl() const { return reinterpret_cast(model); } -} // namespace ir +} // namespace pir namespace std { template <> -struct hash { - std::size_t operator()(const ir::OpInfo &obj) const { - return std::hash()(obj.impl_); +struct hash { + std::size_t operator()(const pir::OpInfo &obj) const { + return std::hash()(obj.impl_); } }; } // namespace std diff --git a/paddle/ir/core/op_info_impl.cc b/paddle/pir/core/op_info_impl.cc similarity index 93% rename from paddle/ir/core/op_info_impl.cc rename to paddle/pir/core/op_info_impl.cc index 90469f1731be9..e77bf4342f586 100644 --- a/paddle/ir/core/op_info_impl.cc +++ b/paddle/pir/core/op_info_impl.cc @@ -12,10 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/ir/core/op_info_impl.h" -#include "paddle/ir/core/dialect.h" +#include "paddle/pir/core/op_info_impl.h" +#include "paddle/pir/core/dialect.h" -namespace ir { +namespace pir { OpInfo OpInfoImpl::Create(Dialect *dialect, TypeId op_id, const char *op_name, @@ -69,7 +69,7 @@ void OpInfoImpl::Destroy(OpInfo info) { } } -ir::IrContext *OpInfoImpl::ir_context() const { +pir::IrContext *OpInfoImpl::ir_context() const { return dialect_ ? 
dialect_->ir_context() : nullptr; } @@ -77,7 +77,7 @@ bool OpInfoImpl::HasTrait(TypeId trait_id) const { if (num_traits_ > 0) { const TypeId *p_first_trait = reinterpret_cast(reinterpret_cast(this) - - sizeof(ir::TypeId) * num_traits_); + sizeof(pir::TypeId) * num_traits_); return std::binary_search( p_first_trait, p_first_trait + num_traits_, trait_id); } @@ -89,7 +89,7 @@ bool OpInfoImpl::HasInterface(TypeId interface_id) const { const InterfaceValue *p_first_interface = reinterpret_cast( reinterpret_cast(this) - - sizeof(ir::TypeId) * num_traits_ - + sizeof(pir::TypeId) * num_traits_ - sizeof(InterfaceValue) * num_interfaces_); return std::binary_search(p_first_interface, p_first_interface + num_interfaces_, @@ -124,7 +124,7 @@ void OpInfoImpl::Destroy() { VLOG(10) << "Destroy op_info impl at " << this; // (1) free interfaces char *base_ptr = reinterpret_cast(this) - - sizeof(ir::TypeId) * num_traits_ - + sizeof(pir::TypeId) * num_traits_ - sizeof(InterfaceValue) * num_interfaces_; if (num_interfaces_ > 0) { InterfaceValue *p_interface_val = @@ -138,4 +138,4 @@ void OpInfoImpl::Destroy() { free(base_ptr); } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/op_info_impl.h b/paddle/pir/core/op_info_impl.h similarity index 94% rename from paddle/ir/core/op_info_impl.h rename to paddle/pir/core/op_info_impl.h index 52666f1b377c8..cc63a52d40064 100644 --- a/paddle/ir/core/op_info_impl.h +++ b/paddle/pir/core/op_info_impl.h @@ -19,11 +19,11 @@ #include #include -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/op_base.h" -#include "paddle/ir/core/type.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/op_base.h" +#include "paddle/pir/core/type.h" -namespace ir { +namespace pir { class Dialect; /// @@ -69,7 +69,7 @@ class OpInfoImpl { } private: - OpInfoImpl(ir::Dialect *dialect, + OpInfoImpl(pir::Dialect *dialect, TypeId op_id, const char *op_name, uint32_t num_interfaces, @@ -111,4 +111,4 @@ class OpInfoImpl { VerifyPtr verify_{nullptr}; }; -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/operation.cc b/paddle/pir/core/operation.cc similarity index 93% rename from paddle/ir/core/operation.cc rename to paddle/pir/core/operation.cc index 3d316847d9fc1..d08bb607c40f9 100644 --- a/paddle/ir/core/operation.cc +++ b/paddle/pir/core/operation.cc @@ -14,18 +14,18 @@ #include -#include "paddle/ir/core/block.h" -#include "paddle/ir/core/block_operand_impl.h" -#include "paddle/ir/core/dialect.h" -#include "paddle/ir/core/enforce.h" -#include "paddle/ir/core/op_info.h" -#include "paddle/ir/core/operation.h" -#include "paddle/ir/core/program.h" -#include "paddle/ir/core/region.h" -#include "paddle/ir/core/utils.h" -#include "paddle/ir/core/value_impl.h" - -namespace ir { +#include "paddle/pir/core/block.h" +#include "paddle/pir/core/block_operand_impl.h" +#include "paddle/pir/core/dialect.h" +#include "paddle/pir/core/enforce.h" +#include "paddle/pir/core/op_info.h" +#include "paddle/pir/core/operation.h" +#include "paddle/pir/core/program.h" +#include "paddle/pir/core/region.h" +#include "paddle/pir/core/utils.h" +#include "paddle/pir/core/value_impl.h" + +namespace pir { Operation *Operation::Create(OperationArgument &&argument) { return Create(argument.inputs, argument.attributes, @@ -38,10 +38,10 @@ Operation *Operation::Create(OperationArgument &&argument) { // Allocate the required memory based on the size and number of inputs, outputs, // and operators, and construct it in the order of: OpOutlineResult, // 
OpInlineResult, Operation, operand. -Operation *Operation::Create(const std::vector &inputs, +Operation *Operation::Create(const std::vector &inputs, const AttributeMap &attributes, const std::vector &output_types, - ir::OpInfo op_info, + pir::OpInfo op_info, size_t num_regions, const std::vector &successors) { // 1. Calculate the required memory size for OpResults + Operation + @@ -179,7 +179,7 @@ IrContext *Operation::ir_context() const { return info_.ir_context(); } Dialect *Operation::dialect() const { return info_.dialect(); } Operation::Operation(const AttributeMap &attributes, - ir::OpInfo op_info, + pir::OpInfo op_info, uint32_t num_results, uint32_t num_operands, uint32_t num_regions, @@ -191,7 +191,7 @@ Operation::Operation(const AttributeMap &attributes, num_regions_(num_regions), num_successors_(num_successors) {} -ir::OpResult Operation::result(uint32_t index) const { +pir::OpResult Operation::result(uint32_t index) const { if (index >= num_results_) { IR_THROW("index exceeds OP output range."); } @@ -204,10 +204,10 @@ ir::OpResult Operation::result(uint32_t index) const { : reinterpret_cast(this) - (index + 1) * sizeof(detail::OpInlineResultImpl); if (index > max_inline_idx) { - return ir::OpResult( + return pir::OpResult( reinterpret_cast(ptr)); } else { - return ir::OpResult( + return pir::OpResult( reinterpret_cast(ptr)); } } @@ -318,4 +318,4 @@ std::vector Operation::results() const { return res; } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/operation.h b/paddle/pir/core/operation.h similarity index 90% rename from paddle/ir/core/operation.h rename to paddle/pir/core/operation.h index 961e4a5fccc50..28c0b42671c96 100644 --- a/paddle/ir/core/operation.h +++ b/paddle/pir/core/operation.h @@ -16,14 +16,14 @@ #include #include -#include "paddle/ir/core/block.h" -#include "paddle/ir/core/enforce.h" -#include "paddle/ir/core/macros.h" -#include "paddle/ir/core/op_info.h" -#include "paddle/ir/core/operation_utils.h" -#include "paddle/ir/core/type.h" - -namespace ir { +#include "paddle/pir/core/block.h" +#include "paddle/pir/core/enforce.h" +#include "paddle/pir/core/macros.h" +#include "paddle/pir/core/op_info.h" +#include "paddle/pir/core/operation_utils.h" +#include "paddle/pir/core/type.h" + +namespace pir { class OpBase; class Program; class OpOperand; @@ -41,10 +41,10 @@ class IR_API alignas(8) Operation final { /// NOTE: Similar to new and delete, the destroy() and the create() need to be /// used in conjunction. 
/// - static Operation *Create(const std::vector &inputs, + static Operation *Create(const std::vector &inputs, const AttributeMap &attributes, - const std::vector &output_types, - ir::OpInfo op_info, + const std::vector &output_types, + pir::OpInfo op_info, size_t num_regions = 0, const std::vector &successors = {}); static Operation *Create(OperationArgument &&op_argument); @@ -96,7 +96,7 @@ class IR_API alignas(8) Operation final { return attributes_.find(key) != attributes_.end(); } - ir::OpInfo info() const { return info_; } + pir::OpInfo info() const { return info_; } uint32_t num_results() const { return num_results_; } @@ -164,7 +164,7 @@ class IR_API alignas(8) Operation final { private: DISABLE_COPY_AND_ASSIGN(Operation); Operation(const AttributeMap &attribute, - ir::OpInfo op_info, + pir::OpInfo op_info, uint32_t num_results, uint32_t num_operands, uint32_t num_regions, @@ -203,4 +203,4 @@ class IR_API alignas(8) Operation final { Block::iterator position_; }; -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/operation_utils.cc b/paddle/pir/core/operation_utils.cc similarity index 83% rename from paddle/ir/core/operation_utils.cc rename to paddle/pir/core/operation_utils.cc index f975de0c82807..a8eedcfcb8c48 100644 --- a/paddle/ir/core/operation_utils.cc +++ b/paddle/pir/core/operation_utils.cc @@ -12,13 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/ir/core/operation_utils.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/region.h" +#include "paddle/pir/core/operation_utils.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/region.h" -namespace ir { +namespace pir { OperationArgument::OperationArgument(IrContext* ir_context, const std::string& name) { info = ir_context->GetRegisteredOpInfo(name); } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/operation_utils.h b/paddle/pir/core/operation_utils.h similarity index 93% rename from paddle/ir/core/operation_utils.h rename to paddle/pir/core/operation_utils.h index 9e317a6510f59..7fbd1c710a860 100644 --- a/paddle/ir/core/operation_utils.h +++ b/paddle/pir/core/operation_utils.h @@ -15,13 +15,13 @@ #pragma once #include -#include "paddle/ir/core/attribute.h" -#include "paddle/ir/core/op_info.h" -#include "paddle/ir/core/region.h" -#include "paddle/ir/core/type.h" -#include "paddle/ir/core/value.h" +#include "paddle/pir/core/attribute.h" +#include "paddle/pir/core/op_info.h" +#include "paddle/pir/core/region.h" +#include "paddle/pir/core/type.h" +#include "paddle/pir/core/value.h" -namespace ir { +namespace pir { class Block; using AttributeMap = std::unordered_map; @@ -100,4 +100,4 @@ void OperationArgument::AddAttributes(InputIt first, InputIt last) { } } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/parameter.h b/paddle/pir/core/parameter.h similarity index 92% rename from paddle/ir/core/parameter.h rename to paddle/pir/core/parameter.h index 3dbe48935b09a..332ef23322e01 100644 --- a/paddle/ir/core/parameter.h +++ b/paddle/pir/core/parameter.h @@ -14,15 +14,15 @@ #pragma once -#include "paddle/ir/core/type.h" +#include "paddle/pir/core/type.h" -namespace ir { +namespace pir { /// /// \brief Parameter represents the weight in the calculation graph. 
/// class IR_API Parameter { public: - Parameter(void* data, size_t size, ir::Type type) { + Parameter(void* data, size_t size, pir::Type type) { data_ = malloc(size); memcpy(data_, data, size); size_ = size; @@ -67,4 +67,4 @@ class IR_API Parameter { bool is_mutable_ = false; }; -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/parser/ir_parser.cc b/paddle/pir/core/parser/ir_parser.cc similarity index 98% rename from paddle/ir/core/parser/ir_parser.cc rename to paddle/pir/core/parser/ir_parser.cc index 8d7e437635165..cab3be87bddeb 100644 --- a/paddle/ir/core/parser/ir_parser.cc +++ b/paddle/pir/core/parser/ir_parser.cc @@ -12,12 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/ir/core/ir_parser.h" +#include "paddle/pir/core/ir_parser.h" -#include "paddle/ir/core/builtin_dialect.h" -#include "paddle/ir/core/builtin_type.h" +#include "paddle/pir/core/builtin_dialect.h" +#include "paddle/pir/core/builtin_type.h" -namespace ir { +namespace pir { IrParser::IrParser(IrContext* ctx, std::istream& is) { lexer.reset(new Lexer{is}); this->ctx = ctx; @@ -218,7 +218,7 @@ Operation* IrParser::ParseOperation() { std::vector inputs = ParseOpRandList(); - ir::AttributeMap attributeMap = ParseAttributeMap(); + pir::AttributeMap attributeMap = ParseAttributeMap(); ConsumeAToken(":"); ConsumeAToken("("); @@ -348,4 +348,4 @@ std::unique_ptr Program::Parse(std::istream& is, IrContext* ctx) { return parser.ParseProgram(); } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/parser/lexer.cc b/paddle/pir/core/parser/lexer.cc similarity index 99% rename from paddle/ir/core/parser/lexer.cc rename to paddle/pir/core/parser/lexer.cc index af1530a5b2961..c7f037de9927d 100644 --- a/paddle/ir/core/parser/lexer.cc +++ b/paddle/pir/core/parser/lexer.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/ir/core/parser/lexer.h" +#include "paddle/pir/core/parser/lexer.h" Token Lexer::ConsumeToken() { SkipWhitespace(); diff --git a/paddle/ir/core/parser/lexer.h b/paddle/pir/core/parser/lexer.h similarity index 96% rename from paddle/ir/core/parser/lexer.h rename to paddle/pir/core/parser/lexer.h index 0561e1f60caa8..24694eb761317 100644 --- a/paddle/ir/core/parser/lexer.h +++ b/paddle/pir/core/parser/lexer.h @@ -16,7 +16,7 @@ #include #include -#include "paddle/ir/core/parser/token.h" +#include "paddle/pir/core/parser/token.h" class Lexer { private: diff --git a/paddle/ir/core/parser/token.h b/paddle/pir/core/parser/token.h similarity index 100% rename from paddle/ir/core/parser/token.h rename to paddle/pir/core/parser/token.h diff --git a/paddle/ir/core/program.cc b/paddle/pir/core/program.cc similarity index 90% rename from paddle/ir/core/program.cc rename to paddle/pir/core/program.cc index baf6a3cbdd57c..d4197a4a9bc4b 100644 --- a/paddle/ir/core/program.cc +++ b/paddle/pir/core/program.cc @@ -12,10 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. 
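A sketch of round-tripping through the parser relocated above; the textual input is a placeholder, not real pir syntax, and the whole snippet is an assumption for illustration rather than part of this patch.

// Hypothetical round-trip sketch, not part of the patch.
#include <memory>
#include <sstream>

void ParseSketch() {
  pir::IrContext *ctx = pir::IrContext::Instance();
  std::stringstream ss("...");  // textual IR would go here
  // Program::Parse(std::istream&, IrContext*) as declared in ir_parser.cc above.
  std::unique_ptr<pir::Program> program = pir::Program::Parse(ss, ctx);
}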
-#include "paddle/ir/core/program.h" -#include "paddle/ir/core/ir_context.h" +#include "paddle/pir/core/program.h" +#include "paddle/pir/core/ir_context.h" -namespace ir { +namespace pir { Program::Program(IrContext* context) { module_ = ModuleOp::Create(context, this); @@ -39,4 +39,4 @@ void Program::SetParameter(const std::string& name, parameters_[name].reset(parameter.release()); } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/program.h b/paddle/pir/core/program.h similarity index 89% rename from paddle/ir/core/program.h rename to paddle/pir/core/program.h index bf9c37210967e..8756b3aa70e1c 100644 --- a/paddle/ir/core/program.h +++ b/paddle/pir/core/program.h @@ -18,14 +18,14 @@ #include #include -#include "paddle/ir/core/attribute.h" -#include "paddle/ir/core/block.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/builtin_op.h" -#include "paddle/ir/core/operation.h" -#include "paddle/ir/core/parameter.h" +#include "paddle/pir/core/attribute.h" +#include "paddle/pir/core/block.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/builtin_op.h" +#include "paddle/pir/core/operation.h" +#include "paddle/pir/core/parameter.h" -namespace ir { +namespace pir { class IrContext; /// @@ -75,4 +75,4 @@ class IR_API Program { std::ostream& operator<<(std::ostream& os, const Program& prog); -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/region.cc b/paddle/pir/core/region.cc similarity index 91% rename from paddle/ir/core/region.cc rename to paddle/pir/core/region.cc index e9fdb91758219..0f02e3d19e04e 100644 --- a/paddle/ir/core/region.cc +++ b/paddle/pir/core/region.cc @@ -12,12 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "paddle/ir/core/region.h" -#include "paddle/ir/core/block.h" -#include "paddle/ir/core/enforce.h" -#include "paddle/ir/core/operation.h" +#include "paddle/pir/core/region.h" +#include "paddle/pir/core/block.h" +#include "paddle/pir/core/enforce.h" +#include "paddle/pir/core/operation.h" -namespace ir { +namespace pir { Region::~Region() { clear(); } void Region::push_back(Block *block) { insert(blocks_.end(), block); } @@ -61,4 +61,4 @@ IrContext *Region::ir_context() const { IR_ENFORCE(parent_, "Region is not attached to a container."); return parent_->ir_context(); } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/region.h b/paddle/pir/core/region.h similarity index 96% rename from paddle/ir/core/region.h rename to paddle/pir/core/region.h index cc1c1ab791df5..06272f82a4378 100644 --- a/paddle/ir/core/region.h +++ b/paddle/pir/core/region.h @@ -17,9 +17,9 @@ #include #include -#include "paddle/ir/core/dll_decl.h" +#include "paddle/pir/core/dll_decl.h" -namespace ir { +namespace pir { class Block; class Operation; @@ -68,4 +68,4 @@ class IR_API Region { Operation *parent_{nullptr}; // not owned std::list blocks_; // owned }; -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/spin_lock.h b/paddle/pir/core/spin_lock.h similarity index 97% rename from paddle/ir/core/spin_lock.h rename to paddle/pir/core/spin_lock.h index 4150f419c3159..5cba96823a817 100644 --- a/paddle/ir/core/spin_lock.h +++ b/paddle/pir/core/spin_lock.h @@ -23,7 +23,7 @@ #include #include -namespace ir { +namespace pir { static inline void CpuRelax() { #if defined(__PADDLE_x86__) _mm_pause(); @@ -63,4 +63,4 @@ class SpinLock { std::atomic mlock_; }; -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/storage_manager.cc b/paddle/pir/core/storage_manager.cc similarity index 86% rename from paddle/ir/core/storage_manager.cc rename to paddle/pir/core/storage_manager.cc index 0dcc7ca0ad855..07cc4e07cce2c 100644 --- a/paddle/ir/core/storage_manager.cc +++ b/paddle/pir/core/storage_manager.cc @@ -12,14 +12,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/ir/core/storage_manager.h" +#include "paddle/pir/core/storage_manager.h" #include #include -#include "paddle/ir/core/enforce.h" +#include "paddle/pir/core/enforce.h" -namespace ir { +namespace pir { // This is a structure for creating, caching, and looking up Storage of // parametric types. 
struct ParametricStorageManager { @@ -75,9 +75,9 @@ StorageManager::StorageBase *StorageManager::GetParametricStorageImpl( std::size_t hash_value, std::function<bool(const StorageBase *)> equal_func, std::function<StorageBase *()> constructor) { - std::lock_guard<ir::SpinLock> guard(parametric_instance_lock_); + std::lock_guard<pir::SpinLock> guard(parametric_instance_lock_); VLOG(6) << "Try to get a parametric storage of: [TypeId_hash=" - << std::hash<ir::TypeId>()(type_id) << ", param_hash=" << hash_value + << std::hash<pir::TypeId>()(type_id) << ", param_hash=" << hash_value << "]."; if (parametric_instance_.find(type_id) == parametric_instance_.end()) { IR_THROW("The input data pointer is null."); @@ -88,9 +88,9 @@ StorageManager::StorageBase *StorageManager::GetParametricStorageImpl( StorageManager::StorageBase *StorageManager::GetParameterlessStorageImpl( TypeId type_id) { - std::lock_guard<ir::SpinLock> guard(parameterless_instance_lock_); + std::lock_guard<pir::SpinLock> guard(parameterless_instance_lock_); VLOG(6) << "Try to get a parameterless storage of: [TypeId_hash=" - << std::hash<ir::TypeId>()(type_id) << "]."; + << std::hash<pir::TypeId>()(type_id) << "]."; if (parameterless_instance_.find(type_id) == parameterless_instance_.end()) IR_THROW("TypeId not found in IrContext."); StorageBase *parameterless_instance = parameterless_instance_[type_id]; @@ -99,21 +99,21 @@ StorageManager::StorageBase *StorageManager::GetParameterlessStorageImpl( void StorageManager::RegisterParametricStorageImpl( TypeId type_id, std::function<void(StorageBase *)> destroy) { - std::lock_guard<ir::SpinLock> guard(parametric_instance_lock_); + std::lock_guard<pir::SpinLock> guard(parametric_instance_lock_); VLOG(6) << "Register a parametric storage of: [TypeId_hash=" - << std::hash<ir::TypeId>()(type_id) << "]."; + << std::hash<pir::TypeId>()(type_id) << "]."; parametric_instance_.emplace( type_id, std::make_unique<ParametricStorageManager>(destroy)); } void StorageManager::RegisterParameterlessStorageImpl( TypeId type_id, std::function<StorageBase *()> constructor) { - std::lock_guard<ir::SpinLock> guard(parameterless_instance_lock_); + std::lock_guard<pir::SpinLock> guard(parameterless_instance_lock_); VLOG(6) << "Register a parameterless storage of: [TypeId_hash=" - << std::hash<ir::TypeId>()(type_id) << "]."; + << std::hash<pir::TypeId>()(type_id) << "]."; if (parameterless_instance_.find(type_id) != parameterless_instance_.end()) IR_THROW("storage class already registered"); parameterless_instance_.emplace(type_id, constructor()); } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/storage_manager.h b/paddle/pir/core/storage_manager.h similarity index 96% rename from paddle/ir/core/storage_manager.h rename to paddle/pir/core/storage_manager.h index f2cda194ce215..1853207f5953f 100644 --- a/paddle/ir/core/storage_manager.h +++ b/paddle/pir/core/storage_manager.h @@ -18,10 +18,10 @@ #include #include -#include "paddle/ir/core/spin_lock.h" -#include "paddle/ir/core/type_id.h" +#include "paddle/pir/core/spin_lock.h" +#include "paddle/pir/core/type_id.h" -namespace ir { +namespace pir { /// /// \brief The implementation of the class StorageManager. /// @@ -141,12 +141,12 @@ class IR_API StorageManager { std::unordered_map<TypeId, std::unique_ptr<ParametricStorageManager>> parametric_instance_; - ir::SpinLock parametric_instance_lock_; + pir::SpinLock parametric_instance_lock_; // This map is a mapping between type id and parameterless type storage.
std::unordered_map parameterless_instance_; - ir::SpinLock parameterless_instance_lock_; + pir::SpinLock parameterless_instance_lock_; }; -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/type.cc b/paddle/pir/core/type.cc similarity index 86% rename from paddle/ir/core/type.cc rename to paddle/pir/core/type.cc index 16713290d393d..fef0eb9c1a443 100644 --- a/paddle/ir/core/type.cc +++ b/paddle/pir/core/type.cc @@ -12,11 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/ir/core/type.h" -#include "paddle/ir/core/dialect.h" -#include "paddle/ir/core/type_base.h" +#include "paddle/pir/core/type.h" +#include "paddle/pir/core/dialect.h" +#include "paddle/pir/core/type_base.h" -namespace ir { +namespace pir { IrContext *Type::ir_context() const { return dialect().ir_context(); } TypeId Type::type_id() { return storage_->abstract_type().type_id(); } @@ -24,4 +24,4 @@ TypeId Type::type_id() { return storage_->abstract_type().type_id(); } const AbstractType &Type::abstract_type() { return storage_->abstract_type(); } Dialect &Type::dialect() const { return storage_->abstract_type().dialect(); } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/type.h b/paddle/pir/core/type.h similarity index 85% rename from paddle/ir/core/type.h rename to paddle/pir/core/type.h index f27503b3731f4..a78831f65b4b8 100644 --- a/paddle/ir/core/type.h +++ b/paddle/pir/core/type.h @@ -16,10 +16,10 @@ #include -#include "paddle/ir/core/cast_utils.h" -#include "paddle/ir/core/type_id.h" +#include "paddle/pir/core/cast_utils.h" +#include "paddle/pir/core/type_id.h" -namespace ir { +namespace pir { class TypeStorage; class AbstractType; class IrContext; @@ -74,12 +74,12 @@ class IR_API Type { template bool isa() const { - return ir::isa(*this); + return pir::isa(*this); } template U dyn_cast() const { - return ir::dyn_cast(*this); + return pir::dyn_cast(*this); } void Print(std::ostream &os) const; @@ -97,17 +97,17 @@ class IR_API Type { IR_API std::ostream &operator<<(std::ostream &os, Type type); -} // namespace ir +} // namespace pir /// /// \brief This class represents the base of a type interface. /// // template -// class TypeInterface : public ir::DialectInterface { +// class TypeInterface : public pir::DialectInterface { // public: // using Base = TypeInterface; -// using DialectInterfaceBase = ir::DialectInterface; +// using DialectInterfaceBase = pir::DialectInterface; // using DialectInterfaceBase::Base; // private: @@ -125,9 +125,9 @@ namespace std { /// \brief Enable hashing Type. 
/// template <> -struct hash { - std::size_t operator()(const ir::Type &obj) const { - return std::hash()(obj.storage_); +struct hash { + std::size_t operator()(const pir::Type &obj) const { + return std::hash()(obj.storage_); } }; } // namespace std diff --git a/paddle/ir/core/type_base.h b/paddle/pir/core/type_base.h similarity index 92% rename from paddle/ir/core/type_base.h rename to paddle/pir/core/type_base.h index 5ff4618dab773..6c8334b3af663 100644 --- a/paddle/ir/core/type_base.h +++ b/paddle/pir/core/type_base.h @@ -14,11 +14,11 @@ #pragma once -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/storage_manager.h" -#include "paddle/ir/core/type_id.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/storage_manager.h" +#include "paddle/pir/core/type_id.h" -namespace ir { +namespace pir { class Dialect; /// @@ -155,7 +155,7 @@ struct IR_API TypeManager { template static T get(IrContext *ctx, Args &&...args) { return get( - ctx, ir::TypeId::get(), std::forward(args)...); + ctx, pir::TypeId::get(), std::forward(args)...); } /// @@ -204,7 +204,7 @@ struct IR_API TypeManager { /// template static void RegisterType(IrContext *ctx) { - RegisterType(ctx, ir::TypeId::get()); + RegisterType(ctx, pir::TypeId::get()); } /// @@ -241,22 +241,22 @@ struct IR_API TypeManager { /// \brief This macro definition is used to add some necessary functions to the /// custom Type class. /// -#define DECLARE_TYPE_UTILITY_FUNCTOR(concrete_type, storage_type) \ - using Storage = storage_type; \ - \ - const Storage *storage() const { \ - return static_cast(this->storage_); \ - } \ - \ - static ir::TypeId type_id() { return ir::TypeId::get(); } \ - \ - template \ - static bool classof(T val) { \ - return val.type_id() == type_id(); \ - } \ - \ - template \ - static concrete_type get(ir::IrContext *ctx, Args... args) { \ - return ir::TypeManager::template get(ctx, args...); \ +#define DECLARE_TYPE_UTILITY_FUNCTOR(concrete_type, storage_type) \ + using Storage = storage_type; \ + \ + const Storage *storage() const { \ + return static_cast(this->storage_); \ + } \ + \ + static pir::TypeId type_id() { return pir::TypeId::get(); } \ + \ + template \ + static bool classof(T val) { \ + return val.type_id() == type_id(); \ + } \ + \ + template \ + static concrete_type get(pir::IrContext *ctx, Args... 
args) { \ + return pir::TypeManager::template get(ctx, args...); \ } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/type_id.h b/paddle/pir/core/type_id.h similarity index 89% rename from paddle/ir/core/type_id.h rename to paddle/pir/core/type_id.h index aec6ecf343433..b6cb798785e6d 100644 --- a/paddle/ir/core/type_id.h +++ b/paddle/pir/core/type_id.h @@ -17,9 +17,9 @@ #include #include -#include "paddle/ir/core/dll_decl.h" +#include "paddle/pir/core/dll_decl.h" -namespace ir { +namespace pir { /// /// \brief TypeId is the unique identification of Type, each Type corresponds to @@ -108,7 +108,7 @@ TypeId TypeId::get() { } #define IR_DECLARE_EXPLICIT_TYPE_ID(TYPE_CLASS) \ - namespace ir { \ + namespace pir { \ namespace detail { \ template <> \ class TypeIdResolver { \ @@ -117,10 +117,10 @@ TypeId TypeId::get() { static UniqueingId id_; \ }; \ } \ - } // namespace ir + } // namespace pir #define IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(TYPE_CLASS) \ - namespace ir { \ + namespace pir { \ namespace detail { \ template <> \ class IR_API TypeIdResolver { \ @@ -129,25 +129,25 @@ TypeId TypeId::get() { static UniqueingId id_; \ }; \ } \ - } // namespace ir + } // namespace pir #define IR_DEFINE_EXPLICIT_TYPE_ID(TYPE_CLASS) \ - namespace ir { \ + namespace pir { \ namespace detail { \ UniqueingId TypeIdResolver::id_ = {}; \ } \ - } // namespace ir + } // namespace pir -} // namespace ir +} // namespace pir namespace std { /// /// \brief Enable hashing TypeId instances. /// template <> -struct hash { - std::size_t operator()(const ir::TypeId &obj) const { - return std::hash()(obj.storage_); +struct hash { + std::size_t operator()(const pir::TypeId &obj) const { + return std::hash()(obj.storage_); } }; } // namespace std diff --git a/paddle/ir/core/type_name.h b/paddle/pir/core/type_name.h similarity index 98% rename from paddle/ir/core/type_name.h rename to paddle/pir/core/type_name.h index d7143d2d754b0..4eecde030a9a4 100644 --- a/paddle/ir/core/type_name.h +++ b/paddle/pir/core/type_name.h @@ -17,7 +17,7 @@ #include #include -namespace ir { +namespace pir { template inline std::string get_type_name() { @@ -56,4 +56,4 @@ inline std::string get_type_name() { #endif } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/use_iterator.h b/paddle/pir/core/use_iterator.h similarity index 97% rename from paddle/ir/core/use_iterator.h rename to paddle/pir/core/use_iterator.h index d7ef2a675649f..42705162d93e5 100644 --- a/paddle/ir/core/use_iterator.h +++ b/paddle/pir/core/use_iterator.h @@ -13,7 +13,7 @@ // limitations under the License. #pragma once -namespace ir { +namespace pir { class Operation; /// @@ -52,4 +52,4 @@ class ValueUseIterator { OperandType current_; }; -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/utils.cc b/paddle/pir/core/utils.cc similarity index 96% rename from paddle/ir/core/utils.cc rename to paddle/pir/core/utils.cc index eec502ee10b1f..51c26bd379cfe 100644 --- a/paddle/ir/core/utils.cc +++ b/paddle/pir/core/utils.cc @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
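The explicit TypeId macros in type_id.h above are meant to be split across compilation boundaries: the DECLARE form (or its IR_API-exporting variant) sits in a header next to the class, and the DEFINE form instantiates the unique id storage in exactly one translation unit. A sketch with a hypothetical type; my_dialect::MyType is illustrative only:

    // my_type.h — after the definition of my_dialect::MyType
    IR_DECLARE_EXPLICIT_TYPE_ID(my_dialect::MyType)

    // my_type.cc — exactly one definition in the whole binary
    IR_DEFINE_EXPLICIT_TYPE_ID(my_dialect::MyType)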
-#include "paddle/ir/core/utils.h" +#include "paddle/pir/core/utils.h" -namespace ir { +namespace pir { std::size_t hash_combine(std::size_t lhs, std::size_t rhs) { return lhs ^= rhs + 0x9e3779b9 + (lhs << 6) + (lhs >> 2); } @@ -55,4 +55,4 @@ void aligned_free(void *mem_ptr) { #endif } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/utils.h b/paddle/pir/core/utils.h similarity index 98% rename from paddle/ir/core/utils.h rename to paddle/pir/core/utils.h index e7ddd5f26eadf..94ff80b27bbe5 100644 --- a/paddle/ir/core/utils.h +++ b/paddle/pir/core/utils.h @@ -20,9 +20,9 @@ #include #include -#include "paddle/ir/core/dll_decl.h" +#include "paddle/pir/core/dll_decl.h" -namespace ir { +namespace pir { /// /// \brief Equivalent to boost::hash_combine. /// @@ -136,4 +136,4 @@ void PrintInterleave(ForwardIterator begin, } } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/value.cc b/paddle/pir/core/value.cc similarity index 88% rename from paddle/ir/core/value.cc rename to paddle/pir/core/value.cc index c652ef23a6dde..a2f493f7857a5 100644 --- a/paddle/ir/core/value.cc +++ b/paddle/pir/core/value.cc @@ -12,13 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/ir/core/value.h" +#include "paddle/pir/core/value.h" #include -#include "paddle/ir/core/enforce.h" -#include "paddle/ir/core/operation.h" -#include "paddle/ir/core/value_impl.h" +#include "paddle/pir/core/enforce.h" +#include "paddle/pir/core/operation.h" +#include "paddle/pir/core/value_impl.h" #define CHECK_NULL_IMPL(class_name, func_name) \ IR_ENFORCE(impl_, \ @@ -31,7 +31,7 @@ #define CHECK_VALUE_NULL_IMPL(func_name) CHECK_NULL_IMPL(Value, func_name) #define CHECK_OPRESULT_NULL_IMPL(func_name) CHECK_NULL_IMPL(OpResult, func_name) -namespace ir { +namespace pir { // Operand OpOperand::OpOperand(const detail::OpOperandImpl *impl) @@ -93,12 +93,12 @@ bool Value::operator!() const { return impl_ == nullptr; } Value::operator bool() const { return impl_; } -ir::Type Value::type() const { +pir::Type Value::type() const { CHECK_VALUE_NULL_IMPL(type); return impl_->type(); } -void Value::set_type(ir::Type type) { +void Value::set_type(pir::Type type) { CHECK_VALUE_NULL_IMPL(set_type); impl_->set_type(type); } @@ -114,7 +114,7 @@ std::string Value::PrintUdChain() { } Value::UseIterator Value::use_begin() const { - return ir::OpOperand(first_use()); + return pir::OpOperand(first_use()); } Value::UseIterator Value::use_end() const { return Value::UseIterator(); } @@ -155,7 +155,7 @@ void Value::ReplaceAllUsesWith(Value new_value) const { // OpResult bool OpResult::classof(Value value) { - return value && ir::isa(value.impl()); + return value && pir::isa(value.impl()); } Operation *OpResult::owner() const { @@ -183,17 +183,17 @@ detail::ValueImpl *OpResult::value_impl() const { uint32_t OpResult::GetValidInlineIndex(uint32_t index) { uint32_t max_inline_index = - ir::detail::OpResultImpl::GetMaxInlineResultIndex(); + pir::detail::OpResultImpl::GetMaxInlineResultIndex(); return index <= max_inline_index ? 
index : max_inline_index; } // details namespace detail { -ir::Operation *OpOperandImpl::owner() const { return owner_; } +pir::Operation *OpOperandImpl::owner() const { return owner_; } -ir::detail::OpOperandImpl *OpOperandImpl::next_use() { return next_use_; } +pir::detail::OpOperandImpl *OpOperandImpl::next_use() { return next_use_; } -ir::Value OpOperandImpl::source() const { return source_; } +pir::Value OpOperandImpl::source() const { return source_; } void OpOperandImpl::set_source(Value source) { RemoveFromUdChain(); @@ -204,7 +204,7 @@ void OpOperandImpl::set_source(Value source) { InsertToUdChain(); } -OpOperandImpl::OpOperandImpl(ir::Value source, ir::Operation *owner) +OpOperandImpl::OpOperandImpl(pir::Value source, pir::Operation *owner) : source_(source), owner_(owner) { if (!source) { return; @@ -267,17 +267,17 @@ std::string ValueImpl::PrintUdChain() { } uint32_t OpResultImpl::GetResultIndex() const { - if (const auto *outline_result = ir::dyn_cast(this)) { + if (const auto *outline_result = pir::dyn_cast(this)) { return outline_result->GetResultIndex(); } - return ir::dyn_cast(this)->GetResultIndex(); + return pir::dyn_cast(this)->GetResultIndex(); } OpResultImpl::~OpResultImpl() { assert(use_empty()); } -ir::Operation *OpResultImpl::owner() const { +pir::Operation *OpResultImpl::owner() const { // For inline result, pointer offset index to obtain the address of op. - if (const auto *result = ir::dyn_cast(this)) { + if (const auto *result = pir::dyn_cast(this)) { result += result->GetResultIndex() + 1; return reinterpret_cast( const_cast(result)); @@ -297,4 +297,4 @@ ir::Operation *OpResultImpl::owner() const { const_cast(inline_result)); } } // namespace detail -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/value.h b/paddle/pir/core/value.h similarity index 90% rename from paddle/ir/core/value.h rename to paddle/pir/core/value.h index 3bbeaa4e55a58..39641c2fb3b8d 100644 --- a/paddle/ir/core/value.h +++ b/paddle/pir/core/value.h @@ -14,11 +14,11 @@ #pragma once -#include "paddle/ir/core/cast_utils.h" -#include "paddle/ir/core/type.h" -#include "paddle/ir/core/use_iterator.h" +#include "paddle/pir/core/cast_utils.h" +#include "paddle/pir/core/type.h" +#include "paddle/pir/core/use_iterator.h" -namespace ir { +namespace pir { class Operation; class Value; @@ -92,12 +92,12 @@ class IR_API Value { template bool isa() const { - return ir::isa(*this); + return pir::isa(*this); } template U dyn_cast() const { - return ir::dyn_cast(*this); + return pir::dyn_cast(*this); } Type type() const; @@ -165,13 +165,13 @@ class IR_API OpResult : public Value { static uint32_t GetValidInlineIndex(uint32_t index); }; -} // namespace ir +} // namespace pir namespace std { template <> -struct hash { - std::size_t operator()(const ir::Value &obj) const { - return std::hash()(obj.impl_); +struct hash { + std::size_t operator()(const pir::Value &obj) const { + return std::hash()(obj.impl_); } }; diff --git a/paddle/ir/core/value_impl.h b/paddle/pir/core/value_impl.h similarity index 86% rename from paddle/ir/core/value_impl.h rename to paddle/pir/core/value_impl.h index 14a7b4d63f5d3..0b6fb9e3effce 100644 --- a/paddle/ir/core/value_impl.h +++ b/paddle/pir/core/value_impl.h @@ -14,9 +14,9 @@ #pragma once -#include "paddle/ir/core/value.h" +#include "paddle/pir/core/value.h" -namespace ir { +namespace pir { static const uint32_t OUTLINE_OP_RESULT_INDEX = 6; class Operation; @@ -27,11 +27,11 @@ namespace detail { /// class OpOperandImpl { public: - ir::Operation *owner() const; + 
pir::Operation *owner() const; - ir::detail::OpOperandImpl *next_use(); + pir::detail::OpOperandImpl *next_use(); - ir::Value source() const; + pir::Value source() const; void set_source(Value value); @@ -40,29 +40,29 @@ class OpOperandImpl { ~OpOperandImpl(); - friend ir::Operation; + friend pir::Operation; private: - OpOperandImpl(ir::Value source, ir::Operation *owner); + OpOperandImpl(pir::Value source, pir::Operation *owner); // Insert self into the UD chain held by source_; // It is not safe, so it is kept private. void InsertToUdChain(); - ir::detail::OpOperandImpl *next_use_ = nullptr; + pir::detail::OpOperandImpl *next_use_ = nullptr; - ir::detail::OpOperandImpl **prev_use_addr_ = nullptr; + pir::detail::OpOperandImpl **prev_use_addr_ = nullptr; - ir::Value source_; + pir::Value source_; - ir::Operation *const owner_ = nullptr; + pir::Operation *const owner_ = nullptr; }; /// /// \brief ValueImpl is the base class of all derived Value classes such as /// OpResultImpl. This class defines all the information and usage interface in /// the IR Value. Each Value includes three attributes: -/// (1) type: ir::Type; (2) UD-chain of value: OpOperandImpl*, first op_operand +/// (1) type: pir::Type; (2) UD-chain of value: OpOperandImpl*, first op_operand /// address with offset of this value; (3) index: the position in the output /// list of the parent operation. /// @@ -71,9 +71,9 @@ class alignas(8) ValueImpl { /// /// \brief Interface functions of "type_" attribute. /// - ir::Type type() const { return type_; } + pir::Type type() const { return type_; } - void set_type(ir::Type type) { type_ = type; } + void set_type(pir::Type type) { type_ = type; } /// /// \brief Interface functions of "first_use_offseted_by_index_" attribute. @@ -108,7 +108,7 @@ class alignas(8) ValueImpl { /// /// \brief Can only be constructed by derived classes such as OpResultImpl. /// - explicit ValueImpl(ir::Type type, uint32_t index) { + explicit ValueImpl(pir::Type type, uint32_t index) { if (index > OUTLINE_OP_RESULT_INDEX) { throw("The value of index must not exceed 6"); } @@ -123,7 +123,7 @@ class alignas(8) ValueImpl { /// /// \brief Attribute1: Type of value. /// - ir::Type type_; + pir::Type type_; /// /// \brief Attribute2/3: Record the UD-chain of value and index. @@ -150,7 +150,7 @@ class alignas(8) OpResultImpl : public ValueImpl { /// \brief Get the parent operation of this result. (op_ptr = value_ptr + /// index) /// - ir::Operation *owner() const; + pir::Operation *owner() const; /// /// \brief Get the result index of the operation result.
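Since every OpOperandImpl links itself into its source value's use-def chain through next_use_ and prev_use_addr_, the use_begin()/use_end() API from value.cc above is a plain linked-list walk. A minimal sketch, assuming v and replacement are live pir::Value handles and that pir::OpOperand exposes owner() the way its impl does (both assumptions):

    void DumpUsersAndReplace(pir::Value v, pir::Value replacement) {
      for (auto it = v.use_begin(); it != v.use_end(); ++it) {
        pir::Operation *user = it->owner();  // the op holding this operand
        (void)user;  // inspect or collect users here
      }
      v.ReplaceAllUsesWith(replacement);  // rewrites every operand in the chain
    }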
@@ -173,7 +173,7 @@ class alignas(8) OpResultImpl : public ValueImpl { /// class OpInlineResultImpl : public OpResultImpl { public: - OpInlineResultImpl(ir::Type type, uint32_t result_index) + OpInlineResultImpl(pir::Type type, uint32_t result_index) : OpResultImpl(type, result_index) { if (result_index > GetMaxInlineResultIndex()) { throw("Inline result index should not exceed MaxInlineResultIndex(5)"); @@ -193,7 +193,7 @@ class OpInlineResultImpl : public OpResultImpl { /// class OpOutlineResultImpl : public OpResultImpl { public: - OpOutlineResultImpl(ir::Type type, uint32_t outline_index) + OpOutlineResultImpl(pir::Type type, uint32_t outline_index) : OpResultImpl(type, OUTLINE_OP_RESULT_INDEX), outline_index_(outline_index) {} @@ -207,4 +207,4 @@ class OpOutlineResultImpl : public OpResultImpl { }; } // namespace detail -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/verify.cc b/paddle/pir/core/verify.cc similarity index 89% rename from paddle/ir/core/verify.cc rename to paddle/pir/core/verify.cc index 39248ec085f92..2d3485324a6ba 100644 --- a/paddle/ir/core/verify.cc +++ b/paddle/pir/core/verify.cc @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/ir/core/verify.h" -#include "paddle/ir/core/operation.h" -namespace ir { +#include "paddle/pir/core/verify.h" +#include "paddle/pir/core/operation.h" +namespace pir { void Verify(Operation *op, bool verify_recursively) { op->Verify(); if (!verify_recursively) return; @@ -27,4 +27,4 @@ void Verify(Operation *op, bool verify_recursively) { } } } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/core/verify.h b/paddle/pir/core/verify.h similarity index 93% rename from paddle/ir/core/verify.h rename to paddle/pir/core/verify.h index 92fe66054497e..de413c95e7657 100644 --- a/paddle/ir/core/verify.h +++ b/paddle/pir/core/verify.h @@ -13,9 +13,9 @@ // limitations under the License. #pragma once -#include "paddle/ir/core/dll_decl.h" +#include "paddle/pir/core/dll_decl.h" -namespace ir { +namespace pir { class Operation; @@ -26,4 +26,4 @@ class Operation; /// invoke the verifier on nested operations. IR_API void Verify(Operation *op, bool verifyRecursively = true); -} // namespace ir +} // namespace pir diff --git a/paddle/ir/dialect/CMakeLists.txt b/paddle/pir/dialect/CMakeLists.txt similarity index 100% rename from paddle/ir/dialect/CMakeLists.txt rename to paddle/pir/dialect/CMakeLists.txt diff --git a/paddle/pir/dialect/control_flow/CMakeLists.txt b/paddle/pir/dialect/control_flow/CMakeLists.txt new file mode 100644 index 0000000000000..b30eb7fa567d7 --- /dev/null +++ b/paddle/pir/dialect/control_flow/CMakeLists.txt @@ -0,0 +1,2 @@ +file(GLOB_RECURSE CONTROL_FLOW_SRCS "*.cc") +ir_library(pir_control_flow SRCS ${CONTROL_FLOW_SRCS} DEPS pir_core) diff --git a/paddle/ir/dialect/control_flow/ir/cf_dialect.cc b/paddle/pir/dialect/control_flow/ir/cf_dialect.cc similarity index 77% rename from paddle/ir/dialect/control_flow/ir/cf_dialect.cc rename to paddle/pir/dialect/control_flow/ir/cf_dialect.cc index 8d26f862b562b..ed36c0c81cca6 100644 --- a/paddle/ir/dialect/control_flow/ir/cf_dialect.cc +++ b/paddle/pir/dialect/control_flow/ir/cf_dialect.cc @@ -11,10 +11,10 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
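Verify above walks regions, blocks, and nested operations unless recursion is switched off, so the two common call shapes are:

    pir::Verify(op);                               // op and everything nested in it
    pir::Verify(op, /*verifyRecursively=*/false);  // this operation only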
-#include "paddle/ir/dialect/control_flow/ir/cf_dialect.h" -#include "paddle/ir/dialect/control_flow/ir/cf_ops.h" +#include "paddle/pir/dialect/control_flow/ir/cf_dialect.h" +#include "paddle/pir/dialect/control_flow/ir/cf_ops.h" -namespace ir { +namespace pir { void ControlFlowDialect::initialize() { RegisterOps(); } -} // namespace ir -IR_DEFINE_EXPLICIT_TYPE_ID(ir::ControlFlowDialect) +} // namespace pir +IR_DEFINE_EXPLICIT_TYPE_ID(pir::ControlFlowDialect) diff --git a/paddle/ir/dialect/control_flow/ir/cf_dialect.h b/paddle/pir/dialect/control_flow/ir/cf_dialect.h similarity index 87% rename from paddle/ir/dialect/control_flow/ir/cf_dialect.h rename to paddle/pir/dialect/control_flow/ir/cf_dialect.h index 867290cdd5bab..c195ba9638984 100644 --- a/paddle/ir/dialect/control_flow/ir/cf_dialect.h +++ b/paddle/pir/dialect/control_flow/ir/cf_dialect.h @@ -14,9 +14,9 @@ #pragma once -#include "paddle/ir/core/dialect.h" +#include "paddle/pir/core/dialect.h" -namespace ir { +namespace pir { class ControlFlowDialect : public Dialect { public: explicit ControlFlowDialect(IrContext *context) @@ -29,5 +29,5 @@ class ControlFlowDialect : public Dialect { void initialize(); }; -} // namespace ir -IR_DECLARE_EXPLICIT_TYPE_ID(ir::ControlFlowDialect) +} // namespace pir +IR_DECLARE_EXPLICIT_TYPE_ID(pir::ControlFlowDialect) diff --git a/paddle/ir/dialect/control_flow/ir/cf_ops.cc b/paddle/pir/dialect/control_flow/ir/cf_ops.cc similarity index 86% rename from paddle/ir/dialect/control_flow/ir/cf_ops.cc rename to paddle/pir/dialect/control_flow/ir/cf_ops.cc index dc5491d1ad5d3..7dd72ea12551e 100644 --- a/paddle/ir/dialect/control_flow/ir/cf_ops.cc +++ b/paddle/pir/dialect/control_flow/ir/cf_ops.cc @@ -12,15 +12,15 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "paddle/ir/dialect/control_flow/ir/cf_ops.h" +#include "paddle/pir/dialect/control_flow/ir/cf_ops.h" -namespace ir { +namespace pir { void YieldOp::Build(Builder &builder, OperationArgument &argument, std::vector &&inputs) { argument.AddOperands(inputs.begin(), inputs.end()); } -} // namespace ir +} // namespace pir -IR_DEFINE_EXPLICIT_TYPE_ID(ir::YieldOp) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::YieldOp) diff --git a/paddle/ir/dialect/control_flow/ir/cf_ops.h b/paddle/pir/dialect/control_flow/ir/cf_ops.h similarity index 86% rename from paddle/ir/dialect/control_flow/ir/cf_ops.h rename to paddle/pir/dialect/control_flow/ir/cf_ops.h index d58e717136ae2..2f69aa9147224 100644 --- a/paddle/ir/dialect/control_flow/ir/cf_ops.h +++ b/paddle/pir/dialect/control_flow/ir/cf_ops.h @@ -14,10 +14,10 @@ #pragma once -#include "paddle/ir/core/builder.h" -#include "paddle/ir/core/op_base.h" +#include "paddle/pir/core/builder.h" +#include "paddle/pir/core/op_base.h" -namespace ir { +namespace pir { class IR_API YieldOp : public Op { public: using Op::Op; @@ -30,6 +30,6 @@ class IR_API YieldOp : public Op { std::vector &&inputs); void Verify() {} }; -} // namespace ir +} // namespace pir -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::YieldOp); +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::YieldOp); diff --git a/paddle/pir/dialect/shape/CMakeLists.txt b/paddle/pir/dialect/shape/CMakeLists.txt new file mode 100644 index 0000000000000..0798e78f2b15a --- /dev/null +++ b/paddle/pir/dialect/shape/CMakeLists.txt @@ -0,0 +1,2 @@ +file(GLOB_RECURSE SHAPE_SRCS "*.cc") +ir_library(pir_shape SRCS ${SHAPE_SRCS} DEPS pir_core) diff --git a/paddle/ir/dialect/shape/ir/shape_dialect.cc b/paddle/pir/dialect/shape/ir/shape_dialect.cc similarity index 81% rename from paddle/ir/dialect/shape/ir/shape_dialect.cc rename to paddle/pir/dialect/shape/ir/shape_dialect.cc index d058924511bcd..7638e635be631 100644 --- a/paddle/ir/dialect/shape/ir/shape_dialect.cc +++ b/paddle/pir/dialect/shape/ir/shape_dialect.cc @@ -12,10 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "paddle/ir/dialect/shape/ir/shape_dialect.h" -#include "paddle/ir/dialect/shape/ir/shape_op.h" +#include "paddle/pir/dialect/shape/ir/shape_dialect.h" +#include "paddle/pir/dialect/shape/ir/shape_op.h" -namespace ir { +namespace pir { namespace dialect { ShapeDialect::ShapeDialect(IrContext *context) : Dialect(name(), context, TypeId::get()) { @@ -27,6 +27,6 @@ void ShapeDialect::initialize() { } } // namespace dialect -} // namespace ir +} // namespace pir -IR_DEFINE_EXPLICIT_TYPE_ID(ir::dialect::ShapeDialect) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::dialect::ShapeDialect) diff --git a/paddle/ir/dialect/shape/ir/shape_dialect.h b/paddle/pir/dialect/shape/ir/shape_dialect.h similarity index 80% rename from paddle/ir/dialect/shape/ir/shape_dialect.h rename to paddle/pir/dialect/shape/ir/shape_dialect.h index eb47aa1345f28..16d5d2ea68e07 100644 --- a/paddle/ir/dialect/shape/ir/shape_dialect.h +++ b/paddle/pir/dialect/shape/ir/shape_dialect.h @@ -14,16 +14,16 @@ #pragma once -#include "paddle/ir/core/dialect.h" +#include "paddle/pir/core/dialect.h" -namespace ir { +namespace pir { namespace dialect { /// /// \brief Shape Dialect: /// -class IR_API ShapeDialect : public ir::Dialect { +class IR_API ShapeDialect : public pir::Dialect { public: - explicit ShapeDialect(ir::IrContext *context); + explicit ShapeDialect(pir::IrContext *context); /// /// \brief Each Dialect needs to provide a name function to return the name of /// the Dialect. @@ -37,6 +37,6 @@ class IR_API ShapeDialect : public ir::Dialect { }; } // namespace dialect -} // namespace ir +} // namespace pir -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::dialect::ShapeDialect) +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::dialect::ShapeDialect) diff --git a/paddle/ir/dialect/shape/ir/shape_op.cc b/paddle/pir/dialect/shape/ir/shape_op.cc similarity index 64% rename from paddle/ir/dialect/shape/ir/shape_op.cc rename to paddle/pir/dialect/shape/ir/shape_op.cc index 776503ea269e3..be7d378c7fe8a 100644 --- a/paddle/ir/dialect/shape/ir/shape_op.cc +++ b/paddle/pir/dialect/shape/ir/shape_op.cc @@ -12,11 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "paddle/ir/dialect/shape/ir/shape_op.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/builtin_type.h" +#include "paddle/pir/dialect/shape/ir/shape_op.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/builtin_type.h" -namespace ir { +namespace pir { namespace dialect { const char *SymbolicDim::attributes_name[attributes_num] = {"knownNegativeOne", @@ -35,73 +35,74 @@ void SymbolicDim::Build( bool knownNegativeOne, bool knownNonSizeOne, bool knownNonSizeZero) { - ir::Attribute attr_sym_name = - ir::StrAttribute::get(ir::IrContext::Instance(), sym_name); + pir::Attribute attr_sym_name = + pir::StrAttribute::get(pir::IrContext::Instance(), sym_name); argument.AddAttribute("sym_name", attr_sym_name); - ir::Attribute attr_value = - ir::Int64Attribute::get(ir::IrContext::Instance(), value); + pir::Attribute attr_value = + pir::Int64Attribute::get(pir::IrContext::Instance(), value); argument.AddAttribute("value", attr_value); - ir::Attribute attr_knownNonNegative = - ir::BoolAttribute::get(ir::IrContext::Instance(), knownNonNegative); + pir::Attribute attr_knownNonNegative = + pir::BoolAttribute::get(pir::IrContext::Instance(), knownNonNegative); argument.AddAttribute("knownNonNegative", attr_knownNonNegative); - ir::Attribute attr_knownNegativeOne = - ir::BoolAttribute::get(ir::IrContext::Instance(), knownNegativeOne); + pir::Attribute attr_knownNegativeOne = + pir::BoolAttribute::get(pir::IrContext::Instance(), knownNegativeOne); argument.AddAttribute("knownNegativeOne", attr_knownNegativeOne); - ir::Attribute attr_knownNonSizeOne = - ir::BoolAttribute::get(ir::IrContext::Instance(), knownNonSizeOne); + pir::Attribute attr_knownNonSizeOne = + pir::BoolAttribute::get(pir::IrContext::Instance(), knownNonSizeOne); argument.AddAttribute("knownNonSizeOne", attr_knownNonSizeOne); - ir::Attribute attr_knownNonSizeZero = - ir::BoolAttribute::get(ir::IrContext::Instance(), knownNonSizeZero); + pir::Attribute attr_knownNonSizeZero = + pir::BoolAttribute::get(pir::IrContext::Instance(), knownNonSizeZero); argument.AddAttribute("knownNonSizeZero", attr_knownNonSizeZero); } const std::string SymbolicDim::getSymName() { - return attribute("sym_name").AsString(); + return attribute("sym_name").AsString(); } int64_t SymbolicDim::getValue() { - return attribute("value").data(); + return attribute("value").data(); } bool SymbolicDim::getKnownNonNegative() { - return attribute("knownNonNegative").data(); + return attribute("knownNonNegative").data(); } bool SymbolicDim::getKnownNegativeOne() { - return attribute("knownNegativeOne").data(); + return attribute("knownNegativeOne").data(); } bool SymbolicDim::getKnownNonSizeOne() { - return attribute("knownNonSizeOne").data(); + return attribute("knownNonSizeOne").data(); } bool SymbolicDim::getKnownNonSizeZero() { - return attribute("knownNonSizeZero").data(); + return attribute("knownNonSizeZero").data(); } void SymbolicDim::updateSymName(std::string attrValue) { operation()->set_attribute( - "sym_name", ir::StrAttribute::get(ir::IrContext::Instance(), attrValue)); + "sym_name", + pir::StrAttribute::get(pir::IrContext::Instance(), attrValue)); } void SymbolicDim::updateValue(int64_t attrValue) { operation()->set_attribute( - "value", ir::Int64Attribute::get(ir::IrContext::Instance(), attrValue)); + "value", pir::Int64Attribute::get(pir::IrContext::Instance(), attrValue)); } void SymbolicDim::updateKnownNonNegative(bool attrValue) { operation()->set_attribute( "knownNonNegative", - 
ir::BoolAttribute::get(ir::IrContext::Instance(), attrValue)); + pir::BoolAttribute::get(pir::IrContext::Instance(), attrValue)); } void SymbolicDim::updateKnownNegativeOne(bool attrValue) { operation()->set_attribute( "knownNegativeOne", - ir::BoolAttribute::get(ir::IrContext::Instance(), attrValue)); + pir::BoolAttribute::get(pir::IrContext::Instance(), attrValue)); } void SymbolicDim::updateKnownNonSizeOne(bool attrValue) { operation()->set_attribute( "knownNonSizeOne", - ir::BoolAttribute::get(ir::IrContext::Instance(), attrValue)); + pir::BoolAttribute::get(pir::IrContext::Instance(), attrValue)); } void SymbolicDim::updateKnownNonSizeZero(bool attrValue) { operation()->set_attribute( "knownNonSizeZero", - ir::BoolAttribute::get(ir::IrContext::Instance(), attrValue)); + pir::BoolAttribute::get(pir::IrContext::Instance(), attrValue)); } bool SymbolicDim::isDynamic() { @@ -139,20 +140,20 @@ const char *DimOp::attributes_name[attributes_num] = {"name"}; // NOLINT void DimOp::Build(Builder &builder, OperationArgument &argument, const std::string &name) { - ir::Attribute attr_name = - ir::StrAttribute::get(ir::IrContext::Instance(), name); + pir::Attribute attr_name = + pir::StrAttribute::get(pir::IrContext::Instance(), name); argument.AddAttribute("name", attr_name); argument.output_types.emplace_back( - ir::IndexType::get(ir::IrContext::Instance())); + pir::IndexType::get(pir::IrContext::Instance())); } const std::string DimOp::getName() { - return attribute("name").AsString(); + return attribute("name").AsString(); } void DimOp::setName(std::string attrName) { operation()->set_attribute( - "name", ir::StrAttribute::get(ir::IrContext::Instance(), attrName)); + "name", pir::StrAttribute::get(pir::IrContext::Instance(), attrName)); } const char *TieProductEqualOp::attributes_name[attributes_num] = { @@ -162,28 +163,28 @@ void TieProductEqualOp::Build(Builder &builder, OperationArgument &argument, int64_t lhs_len, int64_t rhs_len, - const std::vector &inputs) { - ir::Attribute attr_lhs_len = - ir::Int64Attribute::get(ir::IrContext::Instance(), lhs_len); + const std::vector &inputs) { + pir::Attribute attr_lhs_len = + pir::Int64Attribute::get(pir::IrContext::Instance(), lhs_len); argument.AddAttribute("lhs_len", attr_lhs_len); - ir::Attribute attr_rhs_len = - ir::Int64Attribute::get(ir::IrContext::Instance(), rhs_len); + pir::Attribute attr_rhs_len = + pir::Int64Attribute::get(pir::IrContext::Instance(), rhs_len); argument.AddAttribute("rhs_len", attr_rhs_len); argument.inputs = inputs; } -std::vector TieProductEqualOp::getLhs() { - int64_t lhs_len = attribute("lhs_len").data(); - std::vector res; +std::vector TieProductEqualOp::getLhs() { + int64_t lhs_len = attribute("lhs_len").data(); + std::vector res; for (uint32_t idx = 0; idx < lhs_len; idx++) { res.push_back(operand_source(idx)); } return res; } -std::vector TieProductEqualOp::getRhs() { - int64_t lhs_len = attribute("lhs_len").data(); - int64_t rhs_len = attribute("rhs_len").data(); - std::vector res; +std::vector TieProductEqualOp::getRhs() { + int64_t lhs_len = attribute("lhs_len").data(); + int64_t rhs_len = attribute("rhs_len").data(); + std::vector res; for (uint32_t idx = 0; idx < rhs_len; idx++) { res.push_back(operand_source(lhs_len + idx)); } @@ -191,8 +192,8 @@ std::vector TieProductEqualOp::getRhs() { } } // namespace dialect -} // namespace ir +} // namespace pir -IR_DEFINE_EXPLICIT_TYPE_ID(ir::dialect::SymbolicDim) -IR_DEFINE_EXPLICIT_TYPE_ID(ir::dialect::DimOp) 
-IR_DEFINE_EXPLICIT_TYPE_ID(ir::dialect::TieProductEqualOp) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::dialect::SymbolicDim) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::dialect::DimOp) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::dialect::TieProductEqualOp) diff --git a/paddle/ir/dialect/shape/ir/shape_op.h b/paddle/pir/dialect/shape/ir/shape_op.h similarity index 85% rename from paddle/ir/dialect/shape/ir/shape_op.h rename to paddle/pir/dialect/shape/ir/shape_op.h index af61393a24c9b..4df90213cd616 100644 --- a/paddle/ir/dialect/shape/ir/shape_op.h +++ b/paddle/pir/dialect/shape/ir/shape_op.h @@ -14,10 +14,10 @@ #pragma once -#include "paddle/ir/core/builder.h" -#include "paddle/ir/core/op_base.h" +#include "paddle/pir/core/builder.h" +#include "paddle/pir/core/op_base.h" -namespace ir { +namespace pir { namespace dialect { class IR_API SymbolicDim : public Op { @@ -71,7 +71,7 @@ class IR_API DimOp : public Op { const std::string getName(); void setName(std::string attrValue); - ir::OpResult out() { return result(0); } + pir::OpResult out() { return result(0); } void Verify() {} }; @@ -87,15 +87,15 @@ class IR_API TieProductEqualOp : public Op { OperationArgument &argument, // NOLINT int64_t lhs_len, int64_t rhs_len, - const std::vector &inputs); - std::vector getLhs(); - std::vector getRhs(); + const std::vector &inputs); + std::vector getLhs(); + std::vector getRhs(); void Verify() {} }; } // namespace dialect -} // namespace ir +} // namespace pir -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::dialect::SymbolicDim); -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::dialect::DimOp); -IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(ir::dialect::TieProductEqualOp); +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::dialect::SymbolicDim); +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::dialect::DimOp); +IR_EXPORT_DECLARE_EXPLICIT_TYPE_ID(pir::dialect::TieProductEqualOp); diff --git a/paddle/ir/dialect/shape/utils/shape_utils.cc b/paddle/pir/dialect/shape/utils/shape_utils.cc similarity index 94% rename from paddle/ir/dialect/shape/utils/shape_utils.cc rename to paddle/pir/dialect/shape/utils/shape_utils.cc index f9d78a63184cb..be42eb68c20e4 100644 --- a/paddle/ir/dialect/shape/utils/shape_utils.cc +++ b/paddle/pir/dialect/shape/utils/shape_utils.cc @@ -12,10 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/ir/dialect/shape/utils/shape_utils.h" +#include "paddle/pir/dialect/shape/utils/shape_utils.h" #include -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h" -namespace ir { +#include "paddle/fluid/pir/dialect/operator/ir/op_type.h" +namespace pir { bool compareSymbolicDimNames(const std::string& lhs, const std::string& rhs) { if (lhs.size() < 1 || (lhs[0] != 'S' && lhs[0] != 'C')) return lhs < rhs; @@ -30,7 +30,7 @@ bool compareSymbolicDimNames(const std::string& lhs, const std::string& rhs) { return (lhs[0] < rhs[0]) || (lhs[0] == rhs[0] && lhsIdx < rhsIdx); } -const std::string SymbolTable::insert(ir::Operation* symbol) { +const std::string SymbolTable::insert(pir::Operation* symbol) { std::string name; if (symbol->name() == "shape.SymbolicDim") { name = symbol->dyn_cast().getSymName(); @@ -61,18 +61,19 @@ bool SymbolicDimMgr::loadShapeConstraintGraph() { // TODO(liujinnan): add more constraint function. currently, only support // tie_product_equal. 
auto constraint_vec = - symbolTable_.lookup<ir::dialect::TieProductEqualOp>("tie_product_equal"); + symbolTable_.lookup<pir::dialect::TieProductEqualOp>("tie_product_equal"); if (!constraint_vec.size()) return true; - auto build_sym_product = [&](std::vector<ir::Value> range, + auto build_sym_product = [&](std::vector<pir::Value> range, SymbolicDimProduct& product) { for (Value v : range) { auto definingOp = v.GetDefiningOp(); - if (auto constOp = definingOp->dyn_cast<ir::ConstantOp>()) { - product.factor *= constOp.value().dyn_cast<ir::Int64Attribute>().data(); + if (auto constOp = definingOp->dyn_cast<pir::ConstantOp>()) { + product.factor *= + constOp.value().dyn_cast<pir::Int64Attribute>().data(); continue; - } else if (auto dimOp = definingOp->dyn_cast<ir::dialect::DimOp>()) { + } else if (auto dimOp = definingOp->dyn_cast<pir::dialect::DimOp>()) { auto sym = symbolTable_.lookup<SymbolicDim>(dimOp.getName()); if (!sym) return false; product.symbols.push_back(sym); @@ -202,11 +203,11 @@ const std::string SymbolicDimMgr::getNextName() { return name; } -SymbolicDimMgr::SymbolicDimMgr(ir::ModuleOp m) : m_(m), symbolTable_(m_) {} +SymbolicDimMgr::SymbolicDimMgr(pir::ModuleOp m) : m_(m), symbolTable_(m_) {} SymbolicDim SymbolicDimMgr::newSymbolicDim(const std::string& name) { - ::ir::Builder builder = ::ir::Builder(m_.ir_context(), m_.block()); - ir::dialect::SymbolicDim symbol = builder.Build<SymbolicDim>( + ::pir::Builder builder = ::pir::Builder(m_.ir_context(), m_.block()); + pir::dialect::SymbolicDim symbol = builder.Build<SymbolicDim>( name.empty() ? getNextName() : name); symbolDimUnionSet_[symbol] = symbol; symbolTable_.insert(symbol); @@ -226,7 +227,7 @@ SymbolicDim SymbolicDimMgr::newConstantSymbolicDim(int64_t val) { } std::vector<SymbolicDim> SymbolicDimMgr::createSymbolicDimsForRankedValue( - ir::Value value) { + pir::Value value) { std::vector<SymbolicDim> symbols; auto dims = value.type().dyn_cast<paddle::dialect::DenseTensorType>().dims(); for (int idx = 0; idx < dims.size(); ++idx) { @@ -421,4 +422,4 @@ bool SymbolicDimMgr::isSymbolicDimProductEqual(const SymbolicDimProduct& lhs, IR_ENFORCE(updateProductEqualityMap(), "Update product equality map failed."); return isMultipleOfKnownSymbolicDimProductEqualPair(newLhs, newRhs); } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/dialect/shape/utils/shape_utils.h b/paddle/pir/dialect/shape/utils/shape_utils.h similarity index 85% rename from paddle/ir/dialect/shape/utils/shape_utils.h rename to paddle/pir/dialect/shape/utils/shape_utils.h index 8d5fab1a1c811..86a7a64bf72e9 100644 --- a/paddle/ir/dialect/shape/utils/shape_utils.h +++ b/paddle/pir/dialect/shape/utils/shape_utils.h @@ -18,14 +18,14 @@ #include #include #include -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/builtin_op.h" -#include "paddle/ir/core/utils.h" -#include "paddle/ir/dialect/shape/ir/shape_op.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/builtin_op.h" +#include "paddle/pir/core/utils.h" +#include "paddle/pir/dialect/shape/ir/shape_op.h" -namespace ir { +namespace pir { -using ir::dialect::SymbolicDim; +using pir::dialect::SymbolicDim; struct SymbolicDimProduct { std::vector<SymbolicDim> symbols; @@ -44,7 +44,7 @@ struct SymbolicDimProduct { class SymbolTable { public: - explicit SymbolTable(ir::Operation* symbolTableOp) + explicit SymbolTable(pir::Operation* symbolTableOp) : symbolTableOp_(symbolTableOp) {} template <typename T> typename std::enable_if<std::is_same<T, SymbolicDim>::value, @@ -69,22 +69,22 @@ } const std::string insert(Operation* symbol); - ir::Operation* getOp() const { return symbolTableOp_; } + pir::Operation* getOp() const { return symbolTableOp_; } private: - ir::Operation* symbolTableOp_; - std::unordered_map<std::string, ir::Operation*> symbolTableMap_; - std::unordered_map<std::string, std::vector<ir::Operation*>> symbolFuncMap_; + pir::Operation*
symbolTableOp_; + std::unordered_map symbolTableMap_; + std::unordered_map> symbolFuncMap_; }; struct SymDimHasher { - size_t operator()(const ir::dialect::SymbolicDim& symbol) const noexcept { - return std::hash{}(symbol.operation()); + size_t operator()(const pir::dialect::SymbolicDim& symbol) const noexcept { + return std::hash{}(symbol.operation()); } }; struct SymProductHasher { - size_t operator()(const ir::SymbolicDimProduct& symProd) const noexcept { + size_t operator()(const pir::SymbolicDimProduct& symProd) const noexcept { size_t hash = std::hash{}(symProd.symbols.size()); for (auto& symbol : symProd.symbols) { hash = hash_combine(hash, SymDimHasher{}(symbol)); // NOLINT @@ -96,7 +96,7 @@ struct SymProductHasher { class SymbolicDimMgr { public: - explicit SymbolicDimMgr(ir::ModuleOp m); + explicit SymbolicDimMgr(pir::ModuleOp m); bool load(); SymbolicDim newSymbolicDim(const std::string& name = {}); SymbolicDim newConstantSymbolicDim(int64_t val); @@ -129,7 +129,7 @@ class SymbolicDimMgr { bool loadShapeConstraintGraph(); private: - ir::ModuleOp m_; + pir::ModuleOp m_; SymbolTable symbolTable_; @@ -149,4 +149,4 @@ class SymbolicDimMgr { SymbolicDimProductMap productEqualityMap_; bool productEqualityMapUpdated_ = true; }; -} // namespace ir +} // namespace pir diff --git a/paddle/pir/pass/CMakeLists.txt b/paddle/pir/pass/CMakeLists.txt new file mode 100644 index 0000000000000..92f7de3531cf4 --- /dev/null +++ b/paddle/pir/pass/CMakeLists.txt @@ -0,0 +1,3 @@ +file(GLOB NEW_PASS_SRCS "*.cc") + +ir_library(pir_pass SRCS ${NEW_PASS_SRCS} DEPS pir_core) diff --git a/paddle/ir/pass/analysis_manager.h b/paddle/pir/pass/analysis_manager.h similarity index 94% rename from paddle/ir/pass/analysis_manager.h rename to paddle/pir/pass/analysis_manager.h index 417d9026b88d1..e21a0b948b42c 100644 --- a/paddle/ir/pass/analysis_manager.h +++ b/paddle/pir/pass/analysis_manager.h @@ -21,13 +21,13 @@ #include #include -#include "paddle/ir/core/cast_utils.h" -#include "paddle/ir/core/type_id.h" -#include "paddle/ir/core/type_name.h" -#include "paddle/ir/pass/pass_instrumentation.h" -#include "paddle/ir/pass/utils.h" +#include "paddle/pir/core/cast_utils.h" +#include "paddle/pir/core/type_id.h" +#include "paddle/pir/core/type_name.h" +#include "paddle/pir/pass/pass_instrumentation.h" +#include "paddle/pir/pass/utils.h" -namespace ir { +namespace pir { class Operation; class AnalysisManager; @@ -74,7 +74,7 @@ class PreservedAnalyses { preserved_ids_.erase(TypeId::get()); } - friend ir::detail::TypeIdResolver; + friend pir::detail::TypeIdResolver; private: template @@ -145,7 +145,7 @@ class AnalysisMap { std::is_constructible::value, AnalysisT&> GetAnalysis(PassInstrumentor* pi, AnalysisManager& am) { // NOLINT - return GetAnalysisImpl(pi, ir::cast(ir_), am); + return GetAnalysisImpl(pi, pir::cast(ir_), am); } template @@ -177,7 +177,7 @@ class AnalysisMap { private: template static std::string GetAnalysisName() { - std::string name = ir::get_type_name(); + std::string name = pir::get_type_name(); auto pos = name.rfind("::"); if (pos != std::string::npos) { name = name.substr(pos + 2); @@ -303,6 +303,6 @@ class AnalysisManagerHolder { PassInstrumentor* pi_; }; -} // namespace ir +} // namespace pir -IR_DECLARE_EXPLICIT_TYPE_ID(ir::detail::PreservedAnalyses::AllAnalysesType) +IR_DECLARE_EXPLICIT_TYPE_ID(pir::detail::PreservedAnalyses::AllAnalysesType) diff --git a/paddle/ir/pass/ir_printing.cc b/paddle/pir/pass/ir_printing.cc similarity index 91% rename from paddle/ir/pass/ir_printing.cc rename to 
paddle/pir/pass/ir_printing.cc index 87e0af4831f57..6171b71c090fc 100644 --- a/paddle/ir/pass/ir_printing.cc +++ b/paddle/pir/pass/ir_printing.cc @@ -16,13 +16,13 @@ #include #include -#include "paddle/ir/core/operation.h" -#include "paddle/ir/pass/pass.h" -#include "paddle/ir/pass/pass_instrumentation.h" -#include "paddle/ir/pass/pass_manager.h" -#include "paddle/ir/pass/utils.h" +#include "paddle/pir/core/operation.h" +#include "paddle/pir/pass/pass.h" +#include "paddle/pir/pass/pass_instrumentation.h" +#include "paddle/pir/pass/pass_manager.h" +#include "paddle/pir/pass/utils.h" -namespace ir { +namespace pir { namespace { void PrintIR(Operation *op, bool print_module, std::ostream &os) { @@ -85,4 +85,4 @@ void PassManager::EnableIRPrinting(std::unique_ptr option) { AddInstrumentation(std::make_unique(std::move(option))); } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/pass/pass.cc b/paddle/pir/pass/pass.cc similarity index 93% rename from paddle/ir/pass/pass.cc rename to paddle/pir/pass/pass.cc index bab98bdbd39e2..d0e3f5d3927a7 100644 --- a/paddle/ir/pass/pass.cc +++ b/paddle/pir/pass/pass.cc @@ -12,18 +12,18 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/ir/pass/pass.h" +#include "paddle/pir/pass/pass.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/operation.h" -#include "paddle/ir/core/program.h" -#include "paddle/ir/core/region.h" -#include "paddle/ir/core/verify.h" -#include "paddle/ir/pass/pass_adaptor.h" -#include "paddle/ir/pass/pass_instrumentation.h" -#include "paddle/ir/pass/pass_manager.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/operation.h" +#include "paddle/pir/core/program.h" +#include "paddle/pir/core/region.h" +#include "paddle/pir/core/verify.h" +#include "paddle/pir/pass/pass_adaptor.h" +#include "paddle/pir/pass/pass_instrumentation.h" +#include "paddle/pir/pass/pass_manager.h" -namespace ir { +namespace pir { //===----------------------------------------------------------------------===// // Pass @@ -110,7 +110,7 @@ bool detail::PassAdaptor::RunPass(Pass* pass, if (!pass_failed && verify) { bool verify_recursively = !dynamic_cast(pass); - ir::Verify(op, verify_recursively); + pir::Verify(op, verify_recursively); } return !pass_failed; @@ -224,6 +224,6 @@ void PassInstrumentor::AddInstrumentation( impl_->instrumentations.emplace_back(std::move(pi)); } -} // namespace ir +} // namespace pir -IR_DEFINE_EXPLICIT_TYPE_ID(ir::detail::PreservedAnalyses::AllAnalysesType) +IR_DEFINE_EXPLICIT_TYPE_ID(pir::detail::PreservedAnalyses::AllAnalysesType) diff --git a/paddle/ir/pass/pass.h b/paddle/pir/pass/pass.h similarity index 94% rename from paddle/ir/pass/pass.h rename to paddle/pir/pass/pass.h index 5499f2172f294..f916fcbb1e354 100644 --- a/paddle/ir/pass/pass.h +++ b/paddle/pir/pass/pass.h @@ -18,12 +18,12 @@ #include #include -#include "paddle/ir/core/enforce.h" -#include "paddle/ir/pass/analysis_manager.h" -#include "paddle/ir/pass/pass_registry.h" #include "paddle/phi/core/enforce.h" +#include "paddle/pir/core/enforce.h" +#include "paddle/pir/pass/analysis_manager.h" +#include "paddle/pir/pass/pass_registry.h" -namespace ir { +namespace pir { class IrContext; class Operation; @@ -107,4 +107,4 @@ class IR_API Pass { friend class detail::PassAdaptor; }; -} // namespace ir +} // namespace pir diff --git a/paddle/ir/pass/pass_adaptor.h b/paddle/pir/pass/pass_adaptor.h similarity index 93% rename from paddle/ir/pass/pass_adaptor.h 
rename to paddle/pir/pass/pass_adaptor.h index 4b81e8362a7e2..631d6d8b398f9 100644 --- a/paddle/ir/pass/pass_adaptor.h +++ b/paddle/pir/pass/pass_adaptor.h @@ -14,9 +14,9 @@ #pragma once -#include "paddle/ir/pass/pass.h" +#include "paddle/pir/pass/pass.h" -namespace ir { +namespace pir { class Operation; class PassManager; @@ -50,8 +50,8 @@ class PassAdaptor final : public Pass { PassManager* pm_; // For accessing RunPipeline. - friend class ir::PassManager; + friend class pir::PassManager; }; } // namespace detail -} // namespace ir +} // namespace pir diff --git a/paddle/ir/pass/pass_instrumentation.h b/paddle/pir/pass/pass_instrumentation.h similarity index 97% rename from paddle/ir/pass/pass_instrumentation.h rename to paddle/pir/pass/pass_instrumentation.h index 1c80682fc43c7..8d49819596765 100644 --- a/paddle/ir/pass/pass_instrumentation.h +++ b/paddle/pir/pass/pass_instrumentation.h @@ -16,9 +16,9 @@ #include -#include "paddle/ir/core/type_id.h" +#include "paddle/pir/core/type_id.h" -namespace ir { +namespace pir { class Operation; class Pass; @@ -84,4 +84,4 @@ class IR_API PassInstrumentor { std::unique_ptr impl_; }; -} // namespace ir +} // namespace pir diff --git a/paddle/ir/pass/pass_manager.h b/paddle/pir/pass/pass_manager.h similarity index 98% rename from paddle/ir/pass/pass_manager.h rename to paddle/pir/pass/pass_manager.h index 67ac2d1ba3435..f606be139c42f 100644 --- a/paddle/ir/pass/pass_manager.h +++ b/paddle/pir/pass/pass_manager.h @@ -19,9 +19,9 @@ #include #include -#include "paddle/ir/core/program.h" +#include "paddle/pir/core/program.h" -namespace ir { +namespace pir { class IrContext; class Operation; @@ -139,4 +139,4 @@ class IR_API PassManager { friend class detail::PassAdaptor; }; -} // namespace ir +} // namespace pir diff --git a/paddle/ir/pass/pass_registry.cc b/paddle/pir/pass/pass_registry.cc similarity index 90% rename from paddle/ir/pass/pass_registry.cc rename to paddle/pir/pass/pass_registry.cc index a0239219a694d..7ff08499a0222 100644 --- a/paddle/ir/pass/pass_registry.cc +++ b/paddle/pir/pass/pass_registry.cc @@ -12,12 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/ir/pass/pass_registry.h" +#include "paddle/pir/pass/pass_registry.h" -namespace ir { +namespace pir { PassRegistry &PassRegistry::Instance() { static PassRegistry g_pass_info_map; return g_pass_info_map; } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/pass/pass_registry.h b/paddle/pir/pass/pass_registry.h similarity index 87% rename from paddle/ir/pass/pass_registry.h rename to paddle/pir/pass/pass_registry.h index a7d52edca027f..71140810b0324 100644 --- a/paddle/ir/pass/pass_registry.h +++ b/paddle/pir/pass/pass_registry.h @@ -18,11 +18,10 @@ #include #include -#include "paddle/ir/core/enforce.h" -#include "paddle/ir/core/macros.h" -#include "paddle/ir/pass/pass.h" - -namespace ir { +#include "paddle/pir/core/enforce.h" +#include "paddle/pir/core/macros.h" +#include "paddle/pir/pass/pass.h" +namespace pir { class Pass; @@ -80,18 +79,19 @@ class PassRegistrar { msg) // Register a new pass that can be applied on the IR. 
-#define REGISTER_IR_PASS(pass_type, pass_class) \ - STATIC_ASSERT_PASS_GLOBAL_NAMESPACE( \ - __reg_pass__##pass_type, \ - "REGISTER_IR_PASS must be called in global namespace"); \ - static ::ir::PassRegistrar __pass_registrar_##pass_type##__( \ - #pass_type); \ - int TouchPassRegistrar_##pass_type() { \ - __pass_registrar_##pass_type##__.Touch(); \ - return 0; \ - } \ - static ::ir::PassRegistrar &__pass_tmp_registrar_##pass_type##__ \ - UNUSED = __pass_registrar_##pass_type##__ +#define REGISTER_IR_PASS(pass_type, pass_class) \ + STATIC_ASSERT_PASS_GLOBAL_NAMESPACE( \ + __reg_pass__##pass_type, \ + "REGISTER_IR_PASS must be called in global namespace"); \ + static ::pir::PassRegistrar __pass_registrar_##pass_type##__( \ + #pass_type); \ + int TouchPassRegistrar_##pass_type() { \ + __pass_registrar_##pass_type##__.Touch(); \ + return 0; \ + } \ + static ::pir::PassRegistrar \ + &__pass_tmp_registrar_##pass_type##__ UNUSED = \ + __pass_registrar_##pass_type##__ #define USE_PASS(pass_type) \ STATIC_ASSERT_PASS_GLOBAL_NAMESPACE( \ @@ -101,4 +101,4 @@ class PassRegistrar { static int use_pass_itself_##pass_type##_ UNUSED = \ TouchPassRegistrar_##pass_type() -} // namespace ir +} // namespace pir diff --git a/paddle/ir/pass/pass_timing.cc b/paddle/pir/pass/pass_timing.cc similarity index 92% rename from paddle/ir/pass/pass_timing.cc rename to paddle/pir/pass/pass_timing.cc index 595320308dce3..9492a37fb6914 100644 --- a/paddle/ir/pass/pass_timing.cc +++ b/paddle/pir/pass/pass_timing.cc @@ -18,13 +18,13 @@ #include #include -#include "paddle/ir/core/operation.h" -#include "paddle/ir/pass/pass.h" -#include "paddle/ir/pass/pass_instrumentation.h" -#include "paddle/ir/pass/pass_manager.h" -#include "paddle/ir/pass/utils.h" +#include "paddle/pir/core/operation.h" +#include "paddle/pir/pass/pass.h" +#include "paddle/pir/pass/pass_instrumentation.h" +#include "paddle/pir/pass/pass_manager.h" +#include "paddle/pir/pass/utils.h" -namespace ir { +namespace pir { namespace { class Timer { public: @@ -53,7 +53,7 @@ class PassTimer : public PassInstrumentation { explicit PassTimer(bool print_module) : print_module_(print_module) {} ~PassTimer() override = default; - void RunBeforePipeline(ir::Operation* op) override { + void RunBeforePipeline(pir::Operation* op) override { pipeline_timers_[op] = Timer(); pipeline_timers_[op].Start(); } @@ -121,4 +121,4 @@ void PassManager::EnablePassTiming(bool print_module) { AddInstrumentation(std::make_unique(print_module)); } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/pass/utils.cc b/paddle/pir/pass/utils.cc similarity index 92% rename from paddle/ir/pass/utils.cc rename to paddle/pir/pass/utils.cc index 8c890943420d9..91d5975a07b5d 100644 --- a/paddle/ir/pass/utils.cc +++ b/paddle/pir/pass/utils.cc @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
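REGISTER_IR_PASS above plants a global registrar plus a TouchPassRegistrar_* function, and USE_PASS references that function from a consuming translation unit so the linker cannot strip the registration object. A sketch around a hypothetical pass; MyPass, its constructor, and the Run override are illustrative only:

    // my_pass.cc
    class MyPass : public pir::Pass {
      // constructor and Run(...) as required by pir::Pass (elided here)
    };
    REGISTER_IR_PASS(my_pass, MyPass);

    // consumer.cc — forces my_pass's registrar to be linked in
    USE_PASS(my_pass);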
-#include "paddle/ir/pass/utils.h" +#include "paddle/pir/pass/utils.h" -namespace ir { +namespace pir { namespace detail { void PrintHeader(const std::string &header, std::ostream &os) { @@ -25,4 +25,4 @@ void PrintHeader(const std::string &header, std::ostream &os) { } } // namespace detail -} // namespace ir +} // namespace pir diff --git a/paddle/ir/pass/utils.h b/paddle/pir/pass/utils.h similarity index 97% rename from paddle/ir/pass/utils.h rename to paddle/pir/pass/utils.h index 61ee43037e852..a08c77c1791bb 100644 --- a/paddle/ir/pass/utils.h +++ b/paddle/pir/pass/utils.h @@ -18,7 +18,7 @@ #include #include -namespace ir { +namespace pir { namespace detail { template @@ -48,4 +48,4 @@ using is_detected = typename detector::value_t; void PrintHeader(const std::string &header, std::ostream &os); } // namespace detail -} // namespace ir +} // namespace pir diff --git a/paddle/pir/pattern_rewrite/CMakeLists.txt b/paddle/pir/pattern_rewrite/CMakeLists.txt new file mode 100644 index 0000000000000..27e939f5d05b9 --- /dev/null +++ b/paddle/pir/pattern_rewrite/CMakeLists.txt @@ -0,0 +1,3 @@ +file(GLOB PATTERN_SRCS "*.cc") + +ir_library(pir_pattern_rewrite SRCS ${PATTERN_SRCS} DEPS pir_core) diff --git a/paddle/ir/pattern_rewrite/frozen_rewrite_pattern_set.cc b/paddle/pir/pattern_rewrite/frozen_rewrite_pattern_set.cc similarity index 95% rename from paddle/ir/pattern_rewrite/frozen_rewrite_pattern_set.cc rename to paddle/pir/pattern_rewrite/frozen_rewrite_pattern_set.cc index 363595b91a988..546b9d5bd5034 100644 --- a/paddle/ir/pattern_rewrite/frozen_rewrite_pattern_set.cc +++ b/paddle/pir/pattern_rewrite/frozen_rewrite_pattern_set.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "paddle/ir/pattern_rewrite/frozen_rewrite_pattern_set.h" +#include "paddle/pir/pattern_rewrite/frozen_rewrite_pattern_set.h" #include #include @@ -20,9 +20,9 @@ #include #include -#include "paddle/ir/core/op_info.h" +#include "paddle/pir/core/op_info.h" -namespace ir { +namespace pir { FrozenRewritePatternSet::FrozenRewritePatternSet() : impl_(std::make_shared()) {} @@ -38,7 +38,7 @@ FrozenRewritePatternSet::FrozenRewritePatternSet( enabled_patterns.insert(enabled_pattern_labels.begin(), enabled_pattern_labels.end()); - ir::OpInfoMap op_info_map; + pir::OpInfoMap op_info_map; auto AddToOpsWhen = [&](std::unique_ptr& pattern, std::function callback) { if (op_info_map.empty()) @@ -97,4 +97,4 @@ FrozenRewritePatternSet::FrozenRewritePatternSet( } } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/pattern_rewrite/frozen_rewrite_pattern_set.h b/paddle/pir/pattern_rewrite/frozen_rewrite_pattern_set.h similarity index 93% rename from paddle/ir/pattern_rewrite/frozen_rewrite_pattern_set.h rename to paddle/pir/pattern_rewrite/frozen_rewrite_pattern_set.h index 59d7e2a8e8141..a8a7e97c390f8 100644 --- a/paddle/ir/pattern_rewrite/frozen_rewrite_pattern_set.h +++ b/paddle/pir/pattern_rewrite/frozen_rewrite_pattern_set.h @@ -21,11 +21,11 @@ #include #include -#include "paddle/ir/core/dll_decl.h" -#include "paddle/ir/core/op_info.h" -#include "paddle/ir/pattern_rewrite/pattern_match.h" +#include "paddle/pir/core/dll_decl.h" +#include "paddle/pir/core/op_info.h" +#include "paddle/pir/pattern_rewrite/pattern_match.h" -namespace ir { +namespace pir { class IR_API FrozenRewritePatternSet { using NativePatternListT = std::vector>; @@ -71,4 +71,4 @@ class IR_API FrozenRewritePatternSet { std::shared_ptr impl_; }; -} // namespace ir +} // namespace pir diff --git a/paddle/ir/pattern_rewrite/pattern_applicator.cc b/paddle/pir/pattern_rewrite/pattern_applicator.cc similarity index 96% rename from paddle/ir/pattern_rewrite/pattern_applicator.cc rename to paddle/pir/pattern_rewrite/pattern_applicator.cc index 7087efa9ac64f..c9ce27c0a4384 100644 --- a/paddle/ir/pattern_rewrite/pattern_applicator.cc +++ b/paddle/pir/pattern_rewrite/pattern_applicator.cc @@ -14,11 +14,11 @@ #include -#include "paddle/ir/pattern_rewrite/pattern_applicator.h" +#include "paddle/pir/pattern_rewrite/pattern_applicator.h" -#include "paddle/ir/pattern_rewrite/pattern_match.h" +#include "paddle/pir/pattern_rewrite/pattern_match.h" -namespace ir { +namespace pir { PatternApplicator::PatternApplicator( const FrozenRewritePatternSet& frozen_pattern_list) @@ -117,4 +117,4 @@ bool PatternApplicator::MatchAndRewrite( return result; } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/pattern_rewrite/pattern_applicator.h b/paddle/pir/pattern_rewrite/pattern_applicator.h similarity index 88% rename from paddle/ir/pattern_rewrite/pattern_applicator.h rename to paddle/pir/pattern_rewrite/pattern_applicator.h index d0eb4bce1acab..a0fdf58fd57e0 100644 --- a/paddle/ir/pattern_rewrite/pattern_applicator.h +++ b/paddle/pir/pattern_rewrite/pattern_applicator.h @@ -19,12 +19,12 @@ #include #include -#include "paddle/ir/core/op_info.h" -#include "paddle/ir/core/operation.h" -#include "paddle/ir/pattern_rewrite/frozen_rewrite_pattern_set.h" -#include "paddle/ir/pattern_rewrite/pattern_match.h" +#include "paddle/pir/core/op_info.h" +#include "paddle/pir/core/operation.h" +#include "paddle/pir/pattern_rewrite/frozen_rewrite_pattern_set.h" +#include "paddle/pir/pattern_rewrite/pattern_match.h" -namespace ir { +namespace pir { 
class PatternApplicator { public: @@ -53,4 +53,4 @@ class PatternApplicator { std::vector any_op_patterns_; }; -} // namespace ir +} // namespace pir diff --git a/paddle/ir/pattern_rewrite/pattern_match.cc b/paddle/pir/pattern_rewrite/pattern_match.cc similarity index 97% rename from paddle/ir/pattern_rewrite/pattern_match.cc rename to paddle/pir/pattern_rewrite/pattern_match.cc index 1f465809be37c..eccaf66cca9ce 100644 --- a/paddle/ir/pattern_rewrite/pattern_match.cc +++ b/paddle/pir/pattern_rewrite/pattern_match.cc @@ -12,15 +12,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/ir/pattern_rewrite/pattern_match.h" +#include "paddle/pir/pattern_rewrite/pattern_match.h" #include #include -#include "paddle/ir/core/enforce.h" -#include "paddle/ir/core/operation.h" +#include "paddle/pir/core/enforce.h" +#include "paddle/pir/core/operation.h" -namespace ir { +namespace pir { //===----------------------------------------------------------------------===// // Pattern @@ -162,4 +162,4 @@ void RewriterBase::ReplaceOpWithResultsOfAnotherOp(Operation* op, // new_op->result(0)); return ReplaceOp(op, new_op->GetResults()); } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/pattern_rewrite/pattern_match.h b/paddle/pir/pattern_rewrite/pattern_match.h similarity index 96% rename from paddle/ir/pattern_rewrite/pattern_match.h rename to paddle/pir/pattern_rewrite/pattern_match.h index 8b3bbaa5b1cbd..0a91c226c519b 100644 --- a/paddle/ir/pattern_rewrite/pattern_match.h +++ b/paddle/pir/pattern_rewrite/pattern_match.h @@ -24,17 +24,17 @@ #include #include -#include "paddle/ir/core/builder.h" -#include "paddle/ir/core/dll_decl.h" -#include "paddle/ir/core/enforce.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/op_info.h" -#include "paddle/ir/core/operation.h" -#include "paddle/ir/core/type_id.h" -#include "paddle/ir/core/type_name.h" -#include "paddle/ir/core/value.h" - -namespace ir { +#include "paddle/pir/core/builder.h" +#include "paddle/pir/core/dll_decl.h" +#include "paddle/pir/core/enforce.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/op_info.h" +#include "paddle/pir/core/operation.h" +#include "paddle/pir/core/type_id.h" +#include "paddle/pir/core/type_name.h" +#include "paddle/pir/core/value.h" + +namespace pir { // This class reprensents the benefit of a pattern. The most common // unit to use is the `numver of operations` in the pattern. @@ -193,7 +193,7 @@ class IR_API RewritePattern : public Pattern { pattern->Initialize(); if (pattern->debug_name().empty()) - pattern->SetDebugName(ir::get_type_name()); + pattern->SetDebugName(pir::get_type_name()); return pattern; } @@ -388,4 +388,4 @@ class RewritePatternSet { NativePatternListT native_patterns_; }; -} // namespace ir +} // namespace pir diff --git a/paddle/ir/pattern_rewrite/pattern_rewrite_driver.cc b/paddle/pir/pattern_rewrite/pattern_rewrite_driver.cc similarity index 72% rename from paddle/ir/pattern_rewrite/pattern_rewrite_driver.cc rename to paddle/pir/pattern_rewrite/pattern_rewrite_driver.cc index f574ed24afe27..1d8bafc2bfae0 100644 --- a/paddle/ir/pattern_rewrite/pattern_rewrite_driver.cc +++ b/paddle/pir/pattern_rewrite/pattern_rewrite_driver.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
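Downstream pattern definitions only need the ir:: -> pir:: prefix swap. A minimal sketch under that assumption follows; Conv2dNoopPattern is invented, and the OpRewritePattern<OpT> helper and RewritePatternSet::Add are assumed to keep their pre-rename signatures.

#include "paddle/fluid/pir/dialect/operator/ir/pd_op.h"
#include "paddle/pir/pattern_rewrite/pattern_match.h"

// Hypothetical pattern; does nothing, to keep the sketch self-contained.
class Conv2dNoopPattern
    : public pir::OpRewritePattern<paddle::dialect::Conv2dOp> {
 public:
  using pir::OpRewritePattern<paddle::dialect::Conv2dOp>::OpRewritePattern;

  bool MatchAndRewrite(paddle::dialect::Conv2dOp op,
                       pir::PatternRewriter &rewriter) const override {
    return false;  // match/rewrite logic elided in this sketch
  }
};

pir::RewritePatternSet BuildPatterns(pir::IrContext *ctx) {
  pir::RewritePatternSet ps(ctx);
  ps.Add<Conv2dNoopPattern>(ctx);
  return ps;
}
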
-#include "paddle/ir/pattern_rewrite/pattern_rewrite_driver.h" +#include "paddle/pir/pattern_rewrite/pattern_rewrite_driver.h" #include #include @@ -22,30 +22,30 @@ #include #include -#include "paddle/ir/core/block.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/operation.h" -#include "paddle/ir/core/region.h" -#include "paddle/ir/core/value.h" -#include "paddle/ir/pattern_rewrite/frozen_rewrite_pattern_set.h" -#include "paddle/ir/pattern_rewrite/pattern_applicator.h" -#include "paddle/ir/pattern_rewrite/pattern_match.h" +#include "paddle/pir/core/block.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/operation.h" +#include "paddle/pir/core/region.h" +#include "paddle/pir/core/value.h" +#include "paddle/pir/pattern_rewrite/frozen_rewrite_pattern_set.h" +#include "paddle/pir/pattern_rewrite/pattern_applicator.h" +#include "paddle/pir/pattern_rewrite/pattern_match.h" namespace { -class GreedyPatternRewriteDriver : public ir::PatternRewriter { +class GreedyPatternRewriteDriver : public pir::PatternRewriter { public: explicit GreedyPatternRewriteDriver( - ir::IrContext* ctx, - const ir::FrozenRewritePatternSet& patterns, - const ir::GreedyRewriteConfig& config) - : ir::PatternRewriter(ctx), + pir::IrContext* ctx, + const pir::FrozenRewritePatternSet& patterns, + const pir::GreedyRewriteConfig& config) + : pir::PatternRewriter(ctx), config_(config), region_(*config.region), matcher_(patterns) { worklist_.reserve(128); matcher_.ApplyDefaultCostModel(); - if (config.strict_mode != ir::GreedyRewriteStrictness::AnyOp) { + if (config.strict_mode != pir::GreedyRewriteStrictness::AnyOp) { for (auto& block : region_) { for (auto& op_item : *block) { strict_mode_filtered_ops_.insert(op_item); @@ -60,7 +60,7 @@ class GreedyPatternRewriteDriver : public ir::PatternRewriter { do { // Check if the iteration limit was reached. if (iteration++ >= config_.max_iterations && - config_.max_iterations != ir::GreedyRewriteConfig::kNoLimit) + config_.max_iterations != pir::GreedyRewriteConfig::kNoLimit) break; VLOG(6) << "Iteration[" << iteration << "] for PatternRewrite"; worklist_.clear(); @@ -95,7 +95,7 @@ class GreedyPatternRewriteDriver : public ir::PatternRewriter { while (!worklist_.empty() && (num_rewrites < config_.max_num_rewrites || - config_.max_num_rewrites == ir::GreedyRewriteConfig::kNoLimit)) { + config_.max_num_rewrites == pir::GreedyRewriteConfig::kNoLimit)) { auto* op = PopFromWorklist(); if (op == nullptr) continue; VLOG(6) << "PopFromWorklist, get op: " << op->name(); @@ -117,17 +117,17 @@ class GreedyPatternRewriteDriver : public ir::PatternRewriter { } // TODO(wilber): OpResult support GetUsers method. 
- void NotifyRootReplaced(ir::Operation* op, - const std::vector& replacement) override { + void NotifyRootReplaced(pir::Operation* op, + const std::vector& replacement) override { // for (uint32_t i = 0; i < op->num_results(); ++i) { // auto res = op->GetResultByIndex(i); // } // } } - void FinalizeRootUpdate(ir::Operation* op) override { AddToWorklist(op); } + void FinalizeRootUpdate(pir::Operation* op) override { AddToWorklist(op); } - void NotifyOperationRemoved(ir::Operation* op) override { + void NotifyOperationRemoved(pir::Operation* op) override { for (uint32_t i = 0; i < op->num_operands(); ++i) { AddOperandToWorklist(op->operand_source(i)); } @@ -144,20 +144,20 @@ class GreedyPatternRewriteDriver : public ir::PatternRewriter { } } - if (config_.strict_mode != ir::GreedyRewriteStrictness::AnyOp) { + if (config_.strict_mode != pir::GreedyRewriteStrictness::AnyOp) { strict_mode_filtered_ops_.erase(op); } } - void NotifyOperationInserted(ir::Operation* op) override { - if (config_.strict_mode == ir::GreedyRewriteStrictness::ExistingAndNewOps) + void NotifyOperationInserted(pir::Operation* op) override { + if (config_.strict_mode == pir::GreedyRewriteStrictness::ExistingAndNewOps) strict_mode_filtered_ops_.insert(op); AddToWorklist(op); } /// Add the given operation to the worklist. - void AddToWorklist(ir::Operation* op) { - if (config_.strict_mode == ir::GreedyRewriteStrictness::AnyOp || + void AddToWorklist(pir::Operation* op) { + if (config_.strict_mode == pir::GreedyRewriteStrictness::AnyOp || strict_mode_filtered_ops_.count(op)) { if (worklist_map_.count(op)) return; @@ -166,7 +166,7 @@ class GreedyPatternRewriteDriver : public ir::PatternRewriter { } } - void AddOperandToWorklist(ir::Value operand) { + void AddOperandToWorklist(pir::Value operand) { // If the use count of this operand is now < 2, we re-add the defining // operation to the worklist. // This is based on the fact that zero use operations may be deleted, and @@ -176,14 +176,14 @@ class GreedyPatternRewriteDriver : public ir::PatternRewriter { if (auto* def_op = operand.GetDefiningOp()) AddToWorklist(def_op); } - void AddOperandsToWorklist(const std::vector operands) { + void AddOperandsToWorklist(const std::vector operands) { for (auto& v : operands) { AddOperandToWorklist(v); } } /// Pop the next operation from the worklist - ir::Operation* PopFromWorklist() { + pir::Operation* PopFromWorklist() { auto* op = worklist_.back(); worklist_.pop_back(); if (op) worklist_map_.erase(op); @@ -191,7 +191,7 @@ class GreedyPatternRewriteDriver : public ir::PatternRewriter { } /// If the specified operation is in the worklist, remove it. 
- void RemoveFromWorklist(ir::Operation* op) { + void RemoveFromWorklist(pir::Operation* op) { auto it = worklist_map_.find(op); if (it != worklist_map_.end()) { worklist_[it->second] = nullptr; @@ -200,17 +200,17 @@ class GreedyPatternRewriteDriver : public ir::PatternRewriter { } private: - std::vector worklist_; - std::unordered_map worklist_map_; - ir::GreedyRewriteConfig config_; - std::unordered_set strict_mode_filtered_ops_; - ir::Region& region_; - ir::PatternApplicator matcher_; + std::vector worklist_; + std::unordered_map worklist_map_; + pir::GreedyRewriteConfig config_; + std::unordered_set strict_mode_filtered_ops_; + pir::Region& region_; + pir::PatternApplicator matcher_; }; } // namespace -namespace ir { +namespace pir { bool ApplyPatternsGreedily(Region& region, // NOLINT const FrozenRewritePatternSet& patterns, @@ -226,4 +226,4 @@ bool ApplyPatternsGreedily(Region& region, // NOLINT return converged; } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/pattern_rewrite/pattern_rewrite_driver.h b/paddle/pir/pattern_rewrite/pattern_rewrite_driver.h similarity index 92% rename from paddle/ir/pattern_rewrite/pattern_rewrite_driver.h rename to paddle/pir/pattern_rewrite/pattern_rewrite_driver.h index 2e87eac5fef0b..94f30f1b4ac52 100644 --- a/paddle/ir/pattern_rewrite/pattern_rewrite_driver.h +++ b/paddle/pir/pattern_rewrite/pattern_rewrite_driver.h @@ -14,12 +14,12 @@ #pragma once -#include "paddle/ir/core/dll_decl.h" -#include "paddle/ir/core/region.h" -#include "paddle/ir/pattern_rewrite/frozen_rewrite_pattern_set.h" -#include "paddle/ir/pattern_rewrite/pattern_match.h" +#include "paddle/pir/core/dll_decl.h" +#include "paddle/pir/core/region.h" +#include "paddle/pir/pattern_rewrite/frozen_rewrite_pattern_set.h" +#include "paddle/pir/pattern_rewrite/pattern_match.h" -namespace ir { +namespace pir { /// This enum will control which ops will be added to the worklist during the /// match rewrite process @@ -83,4 +83,4 @@ inline IR_API bool ApplyPatternsGreedily( return !failed; } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/transforms/CMakeLists.txt b/paddle/pir/transforms/CMakeLists.txt similarity index 52% rename from paddle/ir/transforms/CMakeLists.txt rename to paddle/pir/transforms/CMakeLists.txt index 2b9f63a64d4f9..4f9f0fa196e9a 100644 --- a/paddle/ir/transforms/CMakeLists.txt +++ b/paddle/pir/transforms/CMakeLists.txt @@ -1,10 +1,10 @@ file(GLOB PATTERN_SRCS "*.cc") ir_library( - ir_builtin_transforms + pir_builtin_transforms SRCS ${PATTERN_SRCS} DEPS - ir_core - ir_pattern_rewrite - ir_pass) + pir_core + pir_pattern_rewrite + pir_pass) diff --git a/paddle/ir/transforms/dead_code_elimination_pass.cc b/paddle/pir/transforms/dead_code_elimination_pass.cc similarity index 69% rename from paddle/ir/transforms/dead_code_elimination_pass.cc rename to paddle/pir/transforms/dead_code_elimination_pass.cc index c74d71ea34569..152cca23fd5b0 100644 --- a/paddle/ir/transforms/dead_code_elimination_pass.cc +++ b/paddle/pir/transforms/dead_code_elimination_pass.cc @@ -12,27 +12,27 @@ // See the License for the specific language governing permissions and // limitations under the License. 
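Correspondingly, a caller of the greedy driver would now look roughly like the sketch below; it assumes the Region& overload of ApplyPatternsGreedily fills in config.region itself, as the driver code above suggests.

#include "paddle/pir/core/operation.h"
#include "paddle/pir/pattern_rewrite/frozen_rewrite_pattern_set.h"
#include "paddle/pir/pattern_rewrite/pattern_rewrite_driver.h"

// Drive the renamed greedy rewriter over an op's first region.
bool RunGreedyRewrite(pir::Operation *op,
                      const pir::FrozenRewritePatternSet &patterns) {
  pir::GreedyRewriteConfig config;
  config.max_iterations = 10;  // GreedyRewriteConfig::kNoLimit lifts the cap
  config.strict_mode = pir::GreedyRewriteStrictness::AnyOp;
  return pir::ApplyPatternsGreedily(op->region(0), patterns, config);
}
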
-#include "paddle/ir/transforms/dead_code_elimination_pass.h" +#include "paddle/pir/transforms/dead_code_elimination_pass.h" -#include "paddle/ir/core/builtin_op.h" -#include "paddle/ir/core/program.h" -#include "paddle/ir/pass/pass.h" -#include "paddle/ir/pass/pass_registry.h" +#include "paddle/pir/core/builtin_op.h" +#include "paddle/pir/core/program.h" +#include "paddle/pir/pass/pass.h" +#include "paddle/pir/pass/pass_registry.h" namespace { // TODO(wilber): After support SideEffectTrait, Only NoSideEffectTrait op can be // removed by dce pass. // Now just a naive implementation. -class DeadCodeEliminationPass : public ir::Pass { +class DeadCodeEliminationPass : public pir::Pass { public: - DeadCodeEliminationPass() : ir::Pass("dead_code_elimination", 0) {} + DeadCodeEliminationPass() : pir::Pass("dead_code_elimination", 0) {} - void Run(ir::Operation *op) override { - auto module_op = op->dyn_cast(); + void Run(pir::Operation *op) override { + auto module_op = op->dyn_cast(); IR_ENFORCE(module_op, "DcePass should run on module op."); auto *block = module_op.block(); - std::vector erased_op; + std::vector erased_op; for (auto &op : *block) { // TODO(wilber): Support NoSideEffect trait. // if (!op->HasTrait()) continue; @@ -42,39 +42,39 @@ class DeadCodeEliminationPass : public ir::Pass { use_empty &= op->result(i).use_empty(); } // TODO(wilber): Support Terminator trait. - if (use_empty && op->name() != "pd.fetch") { + if (use_empty && op->name() != "pd_op.fetch") { erased_op.push_back(op); } } for (auto *op : erased_op) { - if (op->dyn_cast()) { + if (op->dyn_cast()) { // Delete parameter from program. - ir::GetParameterOp get_parameter_op = - op->dyn_cast(); + pir::GetParameterOp get_parameter_op = + op->dyn_cast(); get_parameter_op->GetParentProgram()->parameters().erase( get_parameter_op->attributes() .at(get_parameter_op.attributes_name[0]) - .dyn_cast() + .dyn_cast() .AsString()); } block->erase(*op); } } - bool CanApplyOn(ir::Operation *op) const override { + bool CanApplyOn(pir::Operation *op) const override { return op->name() == "builtin.module" && op->num_regions() > 0; } }; } // namespace -namespace ir { +namespace pir { std::unique_ptr CreateDeadCodeEliminationPass() { return std::make_unique(); } -} // namespace ir +} // namespace pir REGISTER_IR_PASS(dead_code_elimination, DeadCodeEliminationPass); diff --git a/paddle/ir/transforms/dead_code_elimination_pass.h b/paddle/pir/transforms/dead_code_elimination_pass.h similarity index 90% rename from paddle/ir/transforms/dead_code_elimination_pass.h rename to paddle/pir/transforms/dead_code_elimination_pass.h index f03c024ae1d17..d0c86105662d1 100644 --- a/paddle/ir/transforms/dead_code_elimination_pass.h +++ b/paddle/pir/transforms/dead_code_elimination_pass.h @@ -15,12 +15,12 @@ #pragma once #include -#include "paddle/ir/core/dll_decl.h" +#include "paddle/pir/core/dll_decl.h" -namespace ir { +namespace pir { class Pass; IR_API std::unique_ptr CreateDeadCodeEliminationPass(); -} // namespace ir +} // namespace pir diff --git a/paddle/ir/transforms/reorder_block_ops_pass.cc b/paddle/pir/transforms/reorder_block_ops_pass.cc similarity index 79% rename from paddle/ir/transforms/reorder_block_ops_pass.cc rename to paddle/pir/transforms/reorder_block_ops_pass.cc index 91b4b52229f10..db2d29fe9b0a7 100644 --- a/paddle/ir/transforms/reorder_block_ops_pass.cc +++ b/paddle/pir/transforms/reorder_block_ops_pass.cc @@ -12,33 +12,33 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "paddle/ir/transforms/reorder_block_ops_pass.h" +#include "paddle/pir/transforms/reorder_block_ops_pass.h" #include -#include "paddle/ir/core/builtin_op.h" -#include "paddle/ir/core/program.h" -#include "paddle/ir/pass/pass.h" +#include "paddle/pir/core/builtin_op.h" +#include "paddle/pir/core/program.h" +#include "paddle/pir/pass/pass.h" namespace { -class ReorderBlockOpsPass : public ir::Pass { +class ReorderBlockOpsPass : public pir::Pass { public: - ReorderBlockOpsPass() : ir::Pass("ReorderBlockOpsPass", 0) {} + ReorderBlockOpsPass() : pir::Pass("ReorderBlockOpsPass", 0) {} - void Run(ir::Operation *op) override { + void Run(pir::Operation *op) override { IR_ENFORCE(op->num_regions() > 0, "ReorderBlockOpsPass should run on Operation which regions " "number greater than 0."); for (size_t i = 0; i < op->num_regions(); ++i) { for (auto *block : op->region(i)) { - std::list res_op_list; - std::unordered_map + std::list res_op_list; + std::unordered_map reorder_op_dep_cnt; // op -> dependent input count - std::unordered_set visited_values; - std::queue op_que; + std::unordered_set visited_values; + std::queue op_que; - auto update_op_que = [&](ir::Operation *op) { + auto update_op_que = [&](pir::Operation *op) { for (size_t i = 0; i < op->results().size(); ++i) { auto result = op->result(i); visited_values.insert(result); @@ -86,17 +86,17 @@ class ReorderBlockOpsPass : public ir::Pass { } } - bool CanApplyOn(ir::Operation *op) const override { + bool CanApplyOn(pir::Operation *op) const override { return op->num_regions() > 0; } }; } // namespace -namespace ir { +namespace pir { std::unique_ptr CreateReorderBlockOpsPass() { return std::make_unique(); } -} // namespace ir +} // namespace pir diff --git a/paddle/ir/transforms/reorder_block_ops_pass.h b/paddle/pir/transforms/reorder_block_ops_pass.h similarity index 90% rename from paddle/ir/transforms/reorder_block_ops_pass.h rename to paddle/pir/transforms/reorder_block_ops_pass.h index f668471fc9e04..51ab110bb3ac0 100644 --- a/paddle/ir/transforms/reorder_block_ops_pass.h +++ b/paddle/pir/transforms/reorder_block_ops_pass.h @@ -15,12 +15,12 @@ #pragma once #include -#include "paddle/ir/core/dll_decl.h" +#include "paddle/pir/core/dll_decl.h" -namespace ir { +namespace pir { class Pass; IR_API std::unique_ptr CreateReorderBlockOpsPass(); -} // namespace ir +} // namespace pir diff --git a/python/paddle/base/executor.py b/python/paddle/base/executor.py index 42abbd1b3b717..b82f23335f61b 100755 --- a/python/paddle/base/executor.py +++ b/python/paddle/base/executor.py @@ -409,7 +409,7 @@ def has_fetch_operators( def has_fetch_operations( - block, fetch_targets, fetch_holder_name, fetch_op='pd.fetch' + block, fetch_targets, fetch_holder_name, fetch_op='pd_op.fetch' ): """Check whether the block already has fetch operation. 
@@ -514,7 +514,7 @@ def _add_new_ir_fetch_ops(program, fetch_list, fetch_var_name): import paddle global_block = program.block() - fetch_op = "pd.fetch" + fetch_op = "pd_op.fetch" if not has_fetch_operations( global_block, fetch_list, fetch_var_name, fetch_op ): @@ -1249,7 +1249,7 @@ def _new_ir_feed_data(self, program, feed, scope): # feed var to framework global_block = program.block() for op in global_block.ops: - if op.name() == 'pd.data': + if op.name() == 'pd_op.data': feed_target_name = op.attrs()["name"] var_type = paddle_type_to_proto_type[op.attrs()["dtype"]] var_shape = op.attrs()["shape"] diff --git a/python/paddle/decomposition/rules.py b/python/paddle/decomposition/rules.py index ef225ce461382..f18efb67ecad4 100644 --- a/python/paddle/decomposition/rules.py +++ b/python/paddle/decomposition/rules.py @@ -18,7 +18,7 @@ from .register import register_decomp -@register_decomp('pd.mean') +@register_decomp('pd_op.mean') def mean(x, axis, keepdim): """define composite rule of op mean""" x_shape = x.shape @@ -38,7 +38,7 @@ def mean(x, axis, keepdim): return res -@register_decomp('pd.gelu') +@register_decomp('pd_op.gelu') def gelu_composite(x, approximate): """define composite rule of op gelu""" M_SQRT1_2 = ( diff --git a/python/setup.py.in b/python/setup.py.in index 0ff5f3d1814bb..39dffbb240556 100644 --- a/python/setup.py.in +++ b/python/setup.py.in @@ -548,7 +548,7 @@ if('${WITH_SHARED_PHI}' == 'ON'): shutil.copy('${PHI_LIB}', libs_path) if('${WITH_SHARED_IR}' == 'ON'): - package_data['paddle.libs'] += [('libir' if os.name != 'nt' else 'ir') + ext_name] + package_data['paddle.libs'] += [('libpir' if os.name != 'nt' else 'pir') + ext_name] shutil.copy('${IR_LIB}', libs_path) package_data['paddle.libs']+=[ diff --git a/setup.py b/setup.py index 34d68f3efe370..3096814be8900 100644 --- a/setup.py +++ b/setup.py @@ -927,7 +927,7 @@ def get_package_data_and_package_dir(): if env_dict.get("WITH_SHARED_IR") == "ON": package_data['paddle.libs'] += [ - ('libir' if os.name != 'nt' else 'ir') + ext_suffix + ('libpir' if os.name != 'nt' else 'pir') + ext_suffix ] shutil.copy(env_dict.get("IR_LIB"), libs_path) diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 4c2fb218fc605..19d6a6171c116 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -181,7 +181,7 @@ if(${len} GREATER_EQUAL 1) target_link_libraries(${test_name} $) endif() if(WITH_SHARED_IR) - target_link_libraries(${test_name} $) + target_link_libraries(${test_name} $) endif() add_dependencies(${test_name} ${paddle_lib} paddle_gtest_main_new) if(WITH_GPU) @@ -194,7 +194,7 @@ if(${len} GREATER_EQUAL 1) if(APPLE) target_link_libraries( ${test_name} - "-Wl,-rpath,$ -Wl,-rpath,$ -Wl,-rpath,$" + "-Wl,-rpath,$ -Wl,-rpath,$ -Wl,-rpath,$" ) endif() if(NOT ((NOT WITH_PYTHON) AND ON_INFER)) diff --git a/test/cpp/CMakeLists.txt b/test/cpp/CMakeLists.txt index 030431cddc284..879449512c57d 100644 --- a/test/cpp/CMakeLists.txt +++ b/test/cpp/CMakeLists.txt @@ -4,7 +4,7 @@ add_subdirectory(jit) add_subdirectory(new_executor) add_subdirectory(prim) add_subdirectory(imperative) -add_subdirectory(ir) +add_subdirectory(pir) add_subdirectory(inference) add_subdirectory(eager) add_subdirectory(fluid) diff --git a/test/cpp/fluid/cinn/cinn_launch_context_test.cc b/test/cpp/fluid/cinn/cinn_launch_context_test.cc index dcb9d6f25c497..5e7fbea5d876f 100644 --- a/test/cpp/fluid/cinn/cinn_launch_context_test.cc +++ b/test/cpp/fluid/cinn/cinn_launch_context_test.cc @@ -35,9 +35,9 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/framework/scope.h" #include "paddle/fluid/operators/cinn/cinn_op_helper.h" -#include "paddle/ir/core/program.h" -#include "paddle/ir/core/value.h" #include "paddle/phi/core/ddim.h" +#include "paddle/pir/core/program.h" +#include "paddle/pir/core/value.h" USE_OP_ITSELF(cinn_instruction_run); PD_DECLARE_KERNEL(cinn_instruction_run, CPU, ALL_LAYOUT); diff --git a/test/cpp/ir/core/TestParserText.txt b/test/cpp/ir/core/TestParserText.txt deleted file mode 100644 index e90248086ebda..0000000000000 --- a/test/cpp/ir/core/TestParserText.txt +++ /dev/null @@ -1,43 +0,0 @@ - -//CHECK attribute -(String)sdfgs.sdsd - -//CHECK type -f32 - -//CHECK type -pd.tensor<256xf32> - -//CHECK program -{ - (%0) = "builtin.get_parameter" () {parameter_name:(String)conv2d_0.w_0} : () -> pd.tensor<64x3x7x7xf32> - (%1) = "pd.feed" () {col:(Int32)0,is_persisable:(Array)[false],name:(String)data,stop_gradient:(Array)[true]} : () -> pd.tensor<-1x3x224x224xf32> - (%2) = "pd.conv2d" (%1, %0) {data_format:(String)NCHW,dilations:(Array)[(Int32)1,(Int32)1],groups:(Int32)1,is_persisable:(Array)[false],padding_algorithm:(String)EXPLICIT,paddings:(Array)[(Int32)3,(Int32)3],stop_gradient:(Array)[false],strides:(Array)[(Int32)2,(Int32)2]} : (pd.tensor<-1x3x224x224xf32>, pd.tensor<64x3x7x7xf32>) -> pd.tensor<-1x64x112x112xf32> -} - -//CHECK attribute -(Array)[(pd.DataType)bool,(pd.DataType)float32,(pd.DataType)float64, -(pd.DataType)complex64,(pd.DataType)complex128,(pd.DataType)Undefined, -(pd.DataType)Undefined,(pd.DataType)Undefined,(pd.DataType)Undefined, -(pd.DataType)bfloat16,(pd.DataType)uint8,(pd.DataType)uint32,(pd.DataType)int8, -(pd.DataType)uint16,(pd.DataType)int16,(pd.DataType)int32,(pd.DataType)uint64,(pd.DataType)int64] - - -//CHECK attribute -(Array)[(pd.Place)Place(gpu:0),(pd.Place)Place(gpu_pinned),(pd.Place)Place(gpu_pinned), -(pd.Place)Place(xpu:0),(pd.Place)Place(ipu:0),(pd.Place)Place(:0),(pd.Place)Place(cpu)] - - -//CHECK attribute -(Array)[(pd.DataLayout)NHWC,(pd.DataLayout)STRIDED,(pd.DataLayout)NCHW,(pd.DataLayout)Undefined(AnyLayout), -(pd.DataLayout)ONEDNN,(pd.DataLayout)SPARSE_COO,(pd.DataLayout)SPARSE_CSR,(pd.DataLayout)NDHWC,(pd.DataLayout)NCDHW, -(pd.DataLayout)PSTRING_UNION] - -//CHECK attribute -(Array)[(Double)1,(Int64)0,(String)1] - -//CHECK type -vec[bf16,f64,b,i8,u8,i16,c64,c128] - -//CHECK attribute -(String)1 diff --git a/test/cpp/ir/core/ir_builder_test.cc b/test/cpp/ir/core/ir_builder_test.cc deleted file mode 100644 index 863bac72da9c2..0000000000000 --- a/test/cpp/ir/core/ir_builder_test.cc +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include -#include - -#include "paddle/ir/core/builder.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/builtin_type.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/program.h" - -TEST(builder_test, type_api) { - ir::IrContext ctx; - ir::Builder builder(&ctx); - EXPECT_EQ(ir::UInt8Type::get(&ctx), builder.uint8_type()); - EXPECT_EQ(ir::Int8Type::get(&ctx), builder.int8_type()); - EXPECT_EQ(ir::VectorType::get(&ctx, std::vector()), - builder.vec_type({})); - EXPECT_EQ(ir::BFloat16Type::get(&ctx), builder.bfloat16_type()); - EXPECT_EQ(ir::Float32Type::get(&ctx), builder.float32_type()); - EXPECT_EQ(ir::Float64Type::get(&ctx), builder.float64_type()); - EXPECT_EQ(ir::IndexType::get(&ctx), builder.index_type()); - EXPECT_EQ(ir::Int16Type::get(&ctx), builder.int16_type()); - EXPECT_EQ(ir::BoolType::get(&ctx), builder.bool_type()); - EXPECT_EQ(ir::Complex64Type::get(&ctx), builder.complex64_type()); - EXPECT_EQ(ir::Complex128Type::get(&ctx), builder.complex128_type()); -} - -TEST(builder_test, attribute_api) { - ir::IrContext ctx; - ir::Builder builder(&ctx); - EXPECT_EQ(ir::StrAttribute::get(&ctx, "test"), builder.str_attr("test")); - EXPECT_EQ(ir::BoolAttribute::get(&ctx, true), builder.bool_attr(true)); - EXPECT_EQ(ir::FloatAttribute::get(&ctx, 0.2f), builder.float_attr(0.2f)); - EXPECT_EQ(ir::DoubleAttribute::get(&ctx, 2.0), builder.double_attr(2.0)); - EXPECT_EQ(ir::Int32Attribute::get(&ctx, 2), builder.int32_attr(2)); - EXPECT_EQ(ir::Int64Attribute::get(&ctx, 2), builder.int64_attr(2)); - EXPECT_EQ(ir::ArrayAttribute::get(&ctx, std::vector()), - builder.array_attr({})); - EXPECT_EQ(ir::PointerAttribute::get(&ctx, nullptr), - builder.pointer_attr(nullptr)); -} diff --git a/test/cpp/ir/shape_dialect/symbolic_op_test.cc b/test/cpp/ir/shape_dialect/symbolic_op_test.cc deleted file mode 100644 index 138e5e5b0d8c9..0000000000000 --- a/test/cpp/ir/shape_dialect/symbolic_op_test.cc +++ /dev/null @@ -1,366 +0,0 @@ -// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include -#include -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h" -#include "paddle/ir/core/block.h" -#include "paddle/ir/core/builder.h" -#include "paddle/ir/core/builtin_type.h" -#include "paddle/ir/core/dialect.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/program.h" -#include "paddle/ir/dialect/shape/ir/shape_dialect.h" -#include "paddle/ir/dialect/shape/ir/shape_op.h" -#include "paddle/ir/dialect/shape/utils/shape_utils.h" - -TEST(assist_struct_test, symbolic_dim) { - ir::IrContext *ctx = ir::IrContext::Instance(); - ir::Program program(ctx); - ctx->GetOrRegisterDialect(); - ir::Builder builder = ir::Builder(ctx, program.block()); - ir::dialect::SymbolicDim symDim = builder.Build( - "S0", 10, false, false, false, false); - ir::dialect::SymbolicDim symDim_ = builder.Build( - "S1", 10, false, false, false, false); - EXPECT_EQ(symDim.getValue(), 10); - EXPECT_EQ(symDim.getSymName(), "S0"); - EXPECT_FALSE(symDim.getKnownNegativeOne()); - EXPECT_FALSE(symDim.getKnownNonSizeOne()); - EXPECT_FALSE(symDim.getKnownNonSizeZero()); - EXPECT_FALSE(symDim.getKnownNonNegative()); - - EXPECT_FALSE(symDim.isDynamic()); - EXPECT_TRUE(symDim.merge(symDim_)); - - symDim.updateValue(20); - symDim.updateSymName("S2"); - symDim.updateKnownNegativeOne(true); - symDim.updateKnownNonSizeOne(true); - symDim.updateKnownNonSizeZero(true); - symDim.updateKnownNonNegative(true); - - EXPECT_FALSE(symDim.merge(symDim_)); - - EXPECT_EQ(symDim.getValue(), 20); - EXPECT_EQ(symDim.getSymName(), "S2"); - EXPECT_TRUE(symDim.getKnownNegativeOne()); - EXPECT_TRUE(symDim.getKnownNonSizeOne()); - EXPECT_TRUE(symDim.getKnownNonSizeZero()); - EXPECT_TRUE(symDim.getKnownNonNegative()); -} - -TEST(assist_struct_test, symbolic_dim_product) { - ir::IrContext *ctx = ir::IrContext::Instance(); - ir::Program program(ctx); - ctx->GetOrRegisterDialect(); - ir::Builder builder = ir::Builder(ctx, program.block()); - ir::dialect::SymbolicDim symDim = builder.Build( - "S0", -100000, false, false, false, false); - ir::SymbolicDimProduct symDimProduct; - ir::SymbolicDimProduct symDimProduct_; - symDimProduct.symbols.push_back(symDim); - symDimProduct.factor *= 10; - EXPECT_EQ(symDimProduct.factor, 10); - EXPECT_NE(symDimProduct, symDimProduct_); - EXPECT_FALSE(symDimProduct.empty()); -} - -TEST(assist_struct_test, symbolic_dim_table) { - ir::IrContext *ctx = ir::IrContext::Instance(); - ir::Program program(ctx); - ctx->GetOrRegisterDialect(); - ir::Builder builder = ir::Builder(ctx, program.block()); - ir::dialect::SymbolicDim symDim = builder.Build( - "S0", 10, false, false, false, false); - - ir::SymbolTable symbolTable(program.module_op()); - EXPECT_EQ(symbolTable.insert(symDim), "S0"); - EXPECT_EQ(symbolTable.lookup("S0"), symDim); - EXPECT_EQ(symbolTable.getOp(), program.module_op()); - EXPECT_FALSE(symbolTable.lookup("S1")); -} - -TEST(assist_struct_test, symbolic_dim_mgr_simple) { - /******************************************************/ - /* Mgr simple version, only SymbolicDim related func. 
*/ - /******************************************************/ - ir::IrContext *ctx = ir::IrContext::Instance(); - ir::Program program(ctx); - ctx->GetOrRegisterDialect(); - ctx->GetOrRegisterDialect(); - - ir::SymbolicDimMgr symDimMgr(program.module_op()); - ir::dialect::SymbolicDim symDimS0 = symDimMgr.newSymbolicDim(); - ir::dialect::SymbolicDim symDimS1 = symDimMgr.newSymbolicDim(); - ir::dialect::SymbolicDim symDimC10 = symDimMgr.newConstantSymbolicDim(10); - symDimMgr.mapSymbolicDimEqual(symDimS0, symDimS1); - - ir::Attribute attr_value = ir::StrAttribute::get(ctx, "op_attr"); - ir::AttributeMap attr_map; - attr_map.insert(std::pair("op", attr_value)); - std::vector op_inputs = {}; - - ir::Type fp32_dtype = ir::Float32Type::get(ctx); - phi::DDim dims = {-100000, 2}; - phi::DataLayout data_layout = phi::DataLayout::NCHW; - phi::LoD lod = {{0, 1, 2}}; - size_t offset = 0; - std::vector op_output_types = { - paddle::dialect::DenseTensorType::get( - ctx, fp32_dtype, dims, data_layout, lod, offset)}; - ir::Operation *op = - ir::Operation::Create(op_inputs, attr_map, op_output_types, ir::OpInfo()); - ir::Value res = op->result(0); - - std::vector symDimVec = - symDimMgr.createSymbolicDimsForRankedValue(res); - - EXPECT_EQ(symDimS0.getSymName(), "S0"); - EXPECT_EQ(symDimS1.getSymName(), "S1"); - EXPECT_EQ(symDimS1.getValue(), -100000); - EXPECT_EQ(symDimC10.getSymName(), "C10"); - EXPECT_EQ(symDimC10.getValue(), 10); - EXPECT_EQ(symDimVec[0].getSymName(), "S2"); - EXPECT_EQ(symDimVec[1].getSymName(), "C2"); - EXPECT_EQ(symDimMgr.symbolTable().lookup("S0"), - symDimS0); - EXPECT_EQ(symDimMgr.symbolTable().lookup("C10"), - symDimC10); - EXPECT_EQ(symDimMgr.getRootSymbolicDim(symDimS1), symDimS0); - EXPECT_TRUE(symDimMgr.isSymbolicDimEqual(symDimS0, symDimS1)); - EXPECT_FALSE(symDimMgr.isSymbolicDimEqual(symDimS0, symDimC10)); -} - -TEST(assist_struct_test, symbolic_dim_mgr_complex) { - /***************************************************************/ - /* Mgr with constraintOp, and SymbolicDimProduct related func. 
*/ - /***************************************************************/ - ir::IrContext *ctx = ir::IrContext::Instance(); - ir::Program program(ctx); - ctx->GetOrRegisterDialect(); - ctx->GetOrRegisterDialect(); - ir::Builder builder = ir::Builder(ctx, program.block()); - - ir::dialect::SymbolicDim symDimS0 = builder.Build( - "S0", -100000, false, false, true, true); - ir::dialect::SymbolicDim symDimS1 = builder.Build( - "S1", -100000, false, false, true, true); - ir::dialect::SymbolicDim symDimS2 = builder.Build( - "S2", -100000, false, false, true, true); - ir::dialect::SymbolicDim symDimS3 = builder.Build( - "S3", -100000, false, false, true, true); - ir::dialect::SymbolicDim symDimS4 = builder.Build( - "S4", -100000, false, false, true, true); - ir::dialect::SymbolicDim symDimS5 = builder.Build( - "S5", -100000, false, false, true, true); - ir::dialect::SymbolicDim symDimS6 = builder.Build( - "S6", -100000, false, false, true, true); - ir::dialect::SymbolicDim symDimS7 = builder.Build( - "S7", -100000, false, false, true, true); - ir::dialect::SymbolicDim symDimS8 = builder.Build( - "S8", -100000, false, false, true, true); - ir::dialect::SymbolicDim symDimS9 = builder.Build( - "S9", -100000, false, false, true, true); - ir::dialect::SymbolicDim symDimS10 = builder.Build( - "S10", -100000, false, false, true, true); - ir::dialect::SymbolicDim symDimS11 = builder.Build( - "S11", -100000, false, false, true, true); - ir::dialect::SymbolicDim symDimS12 = builder.Build( - "S12", -100000, false, false, true, false); - ir::dialect::SymbolicDim symDimC10 = builder.Build( - "C10", 10, true, false, true, true); - ir::dialect::SymbolicDim symDimC20 = builder.Build( - "C20", 20, true, false, true, true); - - ir::OpResult dimOpS0 = builder.Build("S0").out(); - ir::OpResult dimOpS1 = builder.Build("S1").out(); - ir::OpResult dimOpS2 = builder.Build("S2").out(); - ir::OpResult dimOpS3 = builder.Build("S3").out(); - ir::OpResult dimOpS4 = builder.Build("S4").out(); - ir::OpResult dimOpS5 = builder.Build("S5").out(); - ir::OpResult dimOpS6 = builder.Build("S6").out(); - ir::OpResult dimOpS7 = builder.Build("S7").out(); - ir::OpResult dimOpS8 = builder.Build("S8").out(); - ir::OpResult dimOpS9 = builder.Build("S9").out(); - ir::OpResult dimOpS10 = builder.Build("S10").out(); - ir::OpResult dimOpS11 = builder.Build("S11").out(); - ir::OpResult dimOpC10 = builder.Build("C10").out(); - ir::OpResult dimOpC20 = builder.Build("C20").out(); - ir::OpResult constant = - builder - .Build(ir::Int32Attribute::get(ctx, 2), - ir::Int32Type::get(ctx)) - ->result(0); - - // Mark S1 == S2. - builder.Build( - 2, 2, std::vector{constant, dimOpS1, dimOpS2, constant}); - // Mark S0 * S1 == S2 * S3, For check S0 == S3. - builder.Build( - 2, 2, std::vector{dimOpS0, dimOpS1, dimOpS2, dimOpS3}); - // Mark S4 * S0 * S1 == S2 * S3 * S5, For check S4 == S5. - builder.Build( - 3, - 3, - std::vector{ - dimOpS4, dimOpS0, dimOpS1, dimOpS2, dimOpS3, dimOpS5}); - // For check S6 == C10 * C20. - builder.Build( - 1, 2, std::vector{dimOpS6, dimOpC10, dimOpC20}); - // Mark C10 * S0 * S1 == S2 * S3 * S7, for check C10 == S7. 
- builder.Build( - 3, - 3, - std::vector{ - dimOpC10, dimOpS0, dimOpS1, dimOpS2, dimOpS3, dimOpS7}); - - // Mark S8 * S9 == S10 * S11, for unsimplify product case - builder.Build( - 2, 2, std::vector{dimOpS8, dimOpS9, dimOpS10, dimOpS11}); - - ir::SymbolicDimMgr symDimMgr(program.module_op()); - - symDimMgr.load(); - - // For check indirect equality: S1 * S4 == S2 * S5 - ir::SymbolicDimProduct symDimProductLhs; - ir::SymbolicDimProduct symDimProductRhs; - - symDimProductLhs.symbols.push_back(symDimS1); - symDimProductLhs.symbols.push_back(symDimS4); - - symDimProductRhs.symbols.push_back(symDimS2); - symDimProductRhs.symbols.push_back(symDimS5); - - // For uncompletely simplied product check: S8 * S9 * S12 == S10 * S11 * S12 - ir::SymbolicDimProduct symDimProductLhs_; - ir::SymbolicDimProduct symDimProductRhs_; - - symDimProductLhs_.symbols.push_back(symDimS8); - symDimProductLhs_.symbols.push_back(symDimS9); - symDimProductLhs_.symbols.push_back(symDimS12); - - symDimProductRhs_.symbols.push_back(symDimS10); - symDimProductRhs_.symbols.push_back(symDimS11); - symDimProductRhs_.symbols.push_back(symDimS12); - - // For check simplifySymbolicDimProduct, {factor = 1, Sym = {S7}} => {factor = - // 10} - ir::SymbolicDimProduct symDimProductS7; - symDimProductS7.symbols.push_back(symDimS7); - ir::SymbolicDimProduct simplifiedProductS7 = - symDimMgr.simplifySymbolicDimProduct(symDimProductS7); - - // For check simplifySymbolicDimProductPair, X * Y * Y, Y * Y * Z => X, Z - ir::SymbolicDimProduct symDimProductPairLhs; - ir::SymbolicDimProduct symDimProductPairRhs; - ir::SymbolicDimProduct newLhs, newRhs; - symDimProductPairLhs.symbols.push_back(symDimS4); - symDimProductPairLhs.symbols.push_back(symDimS1); - symDimProductPairLhs.symbols.push_back(symDimS2); - symDimProductPairRhs.symbols.push_back(symDimS1); - symDimProductPairRhs.symbols.push_back(symDimS2); - symDimProductPairRhs.symbols.push_back(symDimS3); - - std::tie(newLhs, newRhs) = symDimMgr.simplifySymbolicDimProductPair( - symDimProductPairLhs, symDimProductPairRhs); - - // For check symbolicDimProductDivide, {S4 * S1 * C20} / {S1 * C10} => {factor - // = 2 Sym = {S4}} - ir::SymbolicDimProduct symDimProductDivLhs; - ir::SymbolicDimProduct symDimProductDivRhs; - symDimProductDivLhs.symbols.push_back(symDimS4); - symDimProductDivLhs.symbols.push_back(symDimS1); - symDimProductDivLhs.symbols.push_back(symDimC20); - symDimProductDivRhs.symbols.push_back(symDimS1); - symDimProductDivRhs.symbols.push_back(symDimC10); - - ir::SymbolicDimProduct *divRes = symDimMgr.symbolicDimProductDivide( - symDimProductDivLhs, symDimProductDivRhs); - - EXPECT_TRUE(symDimMgr.isSymbolicDimEqual(symDimS1, symDimS2)); - EXPECT_TRUE(symDimMgr.isSymbolicDimEqual(symDimS0, symDimS3)); - EXPECT_TRUE(symDimMgr.isSymbolicDimEqual(symDimS4, symDimS5)); - EXPECT_EQ(symDimS6.getValue(), 200); - EXPECT_EQ(symDimMgr.symbolTable().lookup("C20"), - symDimC20); - EXPECT_EQ(symDimS7.getValue(), symDimC10.getValue()); - EXPECT_EQ(simplifiedProductS7.factor, 10); - EXPECT_EQ(simplifiedProductS7.symbols.size(), static_cast(0)); - EXPECT_EQ(newLhs.symbols.size(), static_cast(1)); - EXPECT_EQ(newRhs.symbols.size(), static_cast(1)); - EXPECT_EQ(newLhs.symbols[0], symDimMgr.getRootSymbolicDim(symDimS4)); - EXPECT_EQ(newRhs.symbols[0], symDimMgr.getRootSymbolicDim(symDimS3)); - EXPECT_EQ(divRes->factor, 2); - EXPECT_EQ(divRes->symbols.size(), static_cast(1)); - EXPECT_EQ(divRes->symbols[0], symDimMgr.getRootSymbolicDim(symDimS4)); - EXPECT_TRUE( - 
symDimMgr.isSymbolicDimProductEqual(symDimProductLhs, symDimProductRhs)); - EXPECT_TRUE(symDimMgr.isSymbolicDimProductEqual(symDimProductLhs_, - symDimProductRhs_)); -} - -TEST(assist_struct_test, dim) { - ir::IrContext *ctx = ir::IrContext::Instance(); - ir::Program program(ctx); - ctx->GetOrRegisterDialect(); - ir::Builder builder = ir::Builder(ctx, program.block()); - - ir::dialect::DimOp dimOp = builder.Build("S0"); - ir::OpResult res = dimOp.out(); - EXPECT_EQ(dimOp.getName(), "S0"); - dimOp.setName("S1"); - EXPECT_EQ(dimOp.getName(), "S1"); - EXPECT_EQ(res.GetDefiningOp(), dimOp.operation()); - EXPECT_EQ(res.type(), ir::IndexType::get(ctx)); -} - -TEST(assist_struct_test, tie_product_equal) { - ir::IrContext *ctx = ir::IrContext::Instance(); - ir::Program program(ctx); - ctx->GetOrRegisterDialect(); - ir::Builder builder = ir::Builder(ctx, program.block()); - ir::SymbolTable symbolTable(program.module_op()); - - ir::OpResult dimOp0 = builder.Build("S0").out(); - ir::OpResult dimOp1 = builder.Build("S1").out(); - ir::OpResult dimOp2 = builder.Build("S2").out(); - ir::OpResult dimOp3 = builder.Build("S3").out(); - ir::OpResult dimOp4 = builder.Build("S4").out(); - - ir::dialect::TieProductEqualOp tie_product_equal = - builder.Build( - 2, - 3, - std::vector{dimOp0, dimOp1, dimOp2, dimOp3, dimOp4}); - - std::vector lhs = tie_product_equal.getLhs(); - std::vector rhs = tie_product_equal.getRhs(); - - std::vector lhs_ref{dimOp0, dimOp1}; - std::vector rhs_ref{dimOp2, dimOp3, dimOp4}; - - EXPECT_EQ(symbolTable.insert(tie_product_equal), "tie_product_equal"); - EXPECT_EQ( - symbolTable.lookup("tie_product_equal") - .size(), - static_cast(1)); - EXPECT_EQ(symbolTable.lookup( - "tie_product_equal")[0], - tie_product_equal); - EXPECT_EQ(lhs, lhs_ref); - EXPECT_EQ(rhs, rhs_ref); -} diff --git a/test/cpp/new_executor/CMakeLists.txt b/test/cpp/new_executor/CMakeLists.txt index 6ce941d701b4a..d6d9b37f5bb14 100644 --- a/test/cpp/new_executor/CMakeLists.txt +++ b/test/cpp/new_executor/CMakeLists.txt @@ -4,7 +4,7 @@ if(NOT WIN32) cc_test( standalone_executor_new_ir_test SRCS standalone_executor_new_ir_test.cc - DEPS phi_kernel_adaptor pd_dialect pd_kernel_dialect ir) + DEPS phi_kernel_adaptor pd_op_dialect pd_kernel_dialect pir) endif() set(OPS diff --git a/test/cpp/new_executor/standalone_executor_new_ir_test.cc b/test/cpp/new_executor/standalone_executor_new_ir_test.cc index 9ef1d7f3215fb..b865dc61d1c4a 100644 --- a/test/cpp/new_executor/standalone_executor_new_ir_test.cc +++ b/test/cpp/new_executor/standalone_executor_new_ir_test.cc @@ -23,14 +23,14 @@ #include "paddle/phi/core/kernel_registry.h" #include "paddle/fluid/framework/new_executor/new_ir_interpreter.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_op.h" -#include "paddle/fluid/ir/transforms/pd_op_to_kernel_pass.h" -#include "paddle/ir/core/builder.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/program.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_dialect.h" +#include "paddle/fluid/pir/dialect/operator/ir/pd_op.h" +#include "paddle/fluid/pir/transforms/pd_op_to_kernel_pass.h" +#include "paddle/pir/core/builder.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/program.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_type.h" #include "paddle/fluid/platform/init_phi.h" @@ -48,12 +48,12 @@ namespace paddle { namespace framework { 
TEST(StandaloneExecutor, run) { - ir::IrContext* ctx = ir::IrContext::Instance(); - ir::Program program((ctx)); + pir::IrContext* ctx = pir::IrContext::Instance(); + pir::Program program((ctx)); - ctx->GetOrRegisterDialect(); + ctx->GetOrRegisterDialect(); - ir::Builder builder = ir::Builder(ctx, program.block()); + pir::Builder builder = pir::Builder(ctx, program.block()); paddle::dialect::FullOp op1 = builder.Build( std::vector{2, 2}, 1.0, phi::DataType::FLOAT32, phi::CPUPlace()); @@ -96,10 +96,10 @@ TEST(StandaloneExecutor, run) { } TEST(StandaloneExecutor, run_inplace_sqrt) { - ir::IrContext* ctx = ir::IrContext::Instance(); - ir::Program program((ctx)); - ctx->GetOrRegisterDialect(); - ir::Builder builder = ir::Builder(ctx, program.block()); + pir::IrContext* ctx = pir::IrContext::Instance(); + pir::Program program((ctx)); + ctx->GetOrRegisterDialect(); + pir::Builder builder = pir::Builder(ctx, program.block()); paddle::dialect::FullOp full = builder.Build( std::vector{2, 2}, 4.0, phi::DataType::FLOAT32, phi::CPUPlace()); diff --git a/test/cpp/ir/CMakeLists.txt b/test/cpp/pir/CMakeLists.txt similarity index 100% rename from test/cpp/ir/CMakeLists.txt rename to test/cpp/pir/CMakeLists.txt diff --git a/test/cpp/ir/cinn/CMakeLists.txt b/test/cpp/pir/cinn/CMakeLists.txt similarity index 93% rename from test/cpp/ir/cinn/CMakeLists.txt rename to test/cpp/pir/cinn/CMakeLists.txt index 360b6fc29ac81..a3b51965f0459 100644 --- a/test/cpp/ir/cinn/CMakeLists.txt +++ b/test/cpp/pir/cinn/CMakeLists.txt @@ -6,8 +6,8 @@ if(WITH_TESTING AND WITH_CINN) DEPS new_ir_compiler convert_to_dialect - runtime_dialect - ir + cinn_runtime_dialect + pir phi gtest glog) diff --git a/test/cpp/ir/cinn/jit_instruction_test.cc b/test/cpp/pir/cinn/jit_instruction_test.cc similarity index 77% rename from test/cpp/ir/cinn/jit_instruction_test.cc rename to test/cpp/pir/cinn/jit_instruction_test.cc index 9b6f5b2c20800..456b1719b2f65 100644 --- a/test/cpp/ir/cinn/jit_instruction_test.cc +++ b/test/cpp/pir/cinn/jit_instruction_test.cc @@ -22,22 +22,22 @@ #include "paddle/fluid/framework/new_executor/interpretercore.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_op.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/program.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_dialect.h" +#include "paddle/fluid/pir/dialect/operator/ir/pd_op.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/program.h" -#include "paddle/cinn/hlir/dialect/runtime_dialect/ir/jit_kernel_op.h" -#include "paddle/cinn/hlir/dialect/runtime_dialect/ir/runtime_dialect.h" +#include "paddle/cinn/hlir/dialect/runtime/ir/jit_kernel_op.h" +#include "paddle/cinn/hlir/dialect/runtime/ir/runtime_dialect.h" #include "paddle/cinn/hlir/framework/convert_to_dialect.h" #include "paddle/cinn/hlir/framework/new_ir_compiler.h" #include "paddle/cinn/utils/data_util.h" -std::unique_ptr<::ir::Program> BuildProgram() { - ::ir::IrContext* ctx = ::ir::IrContext::Instance(); - ctx->GetOrRegisterDialect(); - auto program = std::make_unique<::ir::Program>(ctx); - ::ir::Builder builder = ::ir::Builder(ctx, program->block()); +std::unique_ptr<::pir::Program> BuildProgram() { + ::pir::IrContext* ctx = ::pir::IrContext::Instance(); + ctx->GetOrRegisterDialect(); + auto program = std::make_unique<::pir::Program>(ctx); + ::pir::Builder builder = ::pir::Builder(ctx, program->block()); const float value = 2.0; auto full_op_x = @@ -58,11 +58,11 @@ namespace paddle { 
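For reference, the construction flow used by these tests reads as below once the Build template argument (stripped in this extract) is restored from the declared result type; the GetOrRegisterDialect call is omitted because its template argument is likewise elided above.

#include "paddle/fluid/pir/dialect/operator/ir/pd_op.h"
#include "paddle/pir/core/builder.h"
#include "paddle/pir/core/ir_context.h"
#include "paddle/pir/core/program.h"

// Mirrors the test setup: a program, a builder on its block, and a FullOp.
void BuildFullProgram() {
  pir::IrContext *ctx = pir::IrContext::Instance();
  // ctx->GetOrRegisterDialect<...>() must run first, as in the test; the
  // dialect class name is elided in the extract, so it is not guessed here.
  pir::Program program(ctx);
  pir::Builder builder(ctx, program.block());
  paddle::dialect::FullOp full_op = builder.Build<paddle::dialect::FullOp>(
      std::vector<int64_t>{2, 2}, 1.0, phi::DataType::FLOAT32,
      phi::CPUPlace());
  (void)full_op;
}
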
namespace framework { TEST(CinnJitInstruction, Run) { - // Step 1: Construct ir::Program - std::unique_ptr<::ir::Program> program = BuildProgram(); + // Step 1: Construct pir::Program + std::unique_ptr<::pir::Program> program = BuildProgram(); EXPECT_EQ(program->block()->size(), 2u); - // Step 2: Compiler New ir::Program into Runtime Program + // Step 2: Compiler New pir::Program into Runtime Program auto target = cinn::common::DefaultNVGPUTarget(); auto scope = cinn::hlir::framework::BuildScope(target, *program); ASSERT_EQ(scope->var_names().size(), 2); @@ -71,7 +71,7 @@ TEST(CinnJitInstruction, Run) { auto runtime_program = ir_compiler.Build(); // Step 3: Convert into cinn::dialect::RuntimeDialect - std::unique_ptr<::ir::Program> ir_runtime_program = + std::unique_ptr<::pir::Program> ir_runtime_program = cinn::hlir::framework::ConvertToRuntimeDialect(*runtime_program); std::set out_names; diff --git a/test/cpp/ir/cinn/new_ir_compiler_test.cc b/test/cpp/pir/cinn/new_ir_compiler_test.cc similarity index 77% rename from test/cpp/ir/cinn/new_ir_compiler_test.cc rename to test/cpp/pir/cinn/new_ir_compiler_test.cc index 91fb7cb13cc09..283e415a38130 100644 --- a/test/cpp/ir/cinn/new_ir_compiler_test.cc +++ b/test/cpp/pir/cinn/new_ir_compiler_test.cc @@ -20,15 +20,15 @@ #include #include -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_op.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/program.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_dialect.h" +#include "paddle/fluid/pir/dialect/operator/ir/pd_op.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/program.h" #include "paddle/cinn/utils/data_util.h" -#include "paddle/cinn/hlir/dialect/runtime_dialect/ir/jit_kernel_op.h" -#include "paddle/cinn/hlir/dialect/runtime_dialect/ir/runtime_dialect.h" +#include "paddle/cinn/hlir/dialect/runtime/ir/jit_kernel_op.h" +#include "paddle/cinn/hlir/dialect/runtime/ir/runtime_dialect.h" #include "paddle/cinn/hlir/framework/convert_to_dialect.h" #include "paddle/cinn/hlir/framework/new_ir_compiler.h" @@ -36,12 +36,12 @@ using cinn::hlir::framework::newir::Group; using cinn::hlir::framework::newir::GroupPtr; using ProgramInfo = - std::tuple, std::vector>; + std::tuple, std::vector>; ProgramInfo BuildProgram() { - ::ir::IrContext* ctx = ::ir::IrContext::Instance(); - ctx->GetOrRegisterDialect(); - auto program = std::make_shared<::ir::Program>(ctx); - ::ir::Builder builder = ::ir::Builder(ctx, program->block()); + ::pir::IrContext* ctx = ::pir::IrContext::Instance(); + ctx->GetOrRegisterDialect(); + auto program = std::make_shared<::pir::Program>(ctx); + ::pir::Builder builder = ::pir::Builder(ctx, program->block()); const float value_one = 1.0; // relu(tan(1.)) = 1.5; const float value_two = 2.0; // relu(tan(2.)) = 0. 
@@ -64,23 +64,23 @@ ProgramInfo BuildProgram() { std::vector groups; groups.emplace_back( - std::make_shared(std::initializer_list<::ir::Operation*>( + std::make_shared(std::initializer_list<::pir::Operation*>( {full_op_x.operation()}))); // For coverage groups.emplace_back(std::make_shared( - std::initializer_list<::ir::Operation*>({full_op_y.operation()}))); + std::initializer_list<::pir::Operation*>({full_op_y.operation()}))); groups.emplace_back(std::make_shared( - std::vector<::ir::Operation*>({tan_op_x.operation(), - relu_op_x.operation(), - tan_op_y.operation(), - relu_op_y.operation()}))); + std::vector<::pir::Operation*>({tan_op_x.operation(), + relu_op_x.operation(), + tan_op_y.operation(), + relu_op_y.operation()}))); return {program, groups}; } TEST(NewIRCompier, CompilerAndRun) { - // Step 1: Construct ir::Program + // Step 1: Construct pir::Program auto prog_info = BuildProgram(); - std::shared_ptr<::ir::Program> program = std::get<0>(prog_info); + std::shared_ptr<::pir::Program> program = std::get<0>(prog_info); EXPECT_EQ(program->block()->size(), 6u); LOG(INFO) << program->block()->size(); @@ -88,7 +88,7 @@ TEST(NewIRCompier, CompilerAndRun) { program->Print(ss); LOG(INFO) << ss.str(); - // Step 2: Compiler New ir::Program into Runtime Program + // Step 2: Compiler New pir::Program into Runtime Program auto target = cinn::common::DefaultNVGPUTarget(); auto scope = cinn::hlir::framework::BuildScope(target, *program); ASSERT_EQ(scope->var_names().size(), 6); @@ -109,9 +109,9 @@ TEST(NewIRCompier, CompilerAndRun) { } TEST(NewIRCompier, CompileGroupOps) { - // Step 1: Construct ir::Program + // Step 1: Construct pir::Program auto prog_info = BuildProgram(); - std::shared_ptr<::ir::Program> program = std::get<0>(prog_info); + std::shared_ptr<::pir::Program> program = std::get<0>(prog_info); std::vector groups = std::get<1>(prog_info); EXPECT_EQ(program->block()->size(), 6u); LOG(INFO) << program->block()->size(); @@ -120,7 +120,7 @@ TEST(NewIRCompier, CompileGroupOps) { program->Print(ss); LOG(INFO) << ss.str(); - // Step 2: Compiler New ir::Program into Runtime Program + // Step 2: Compiler New pir::Program into Runtime Program auto target = cinn::common::DefaultNVGPUTarget(); auto scope = cinn::hlir::framework::BuildScope(target, *program); ASSERT_EQ(scope->var_names().size(), 6); @@ -141,12 +141,12 @@ TEST(NewIRCompier, CompileGroupOps) { } TEST(RuntimeDialect, CompilerAndRun) { - // Step 1: Construct ir::Program + // Step 1: Construct pir::Program auto prog_info = BuildProgram(); - std::shared_ptr<::ir::Program> program = std::get<0>(prog_info); + std::shared_ptr<::pir::Program> program = std::get<0>(prog_info); EXPECT_EQ(program->block()->size(), 6u); - // Step 2: Compiler New ir::Program into Runtime Program + // Step 2: Compiler New pir::Program into Runtime Program auto target = cinn::common::DefaultNVGPUTarget(); auto scope = cinn::hlir::framework::BuildScope(target, *program); ASSERT_EQ(scope->var_names().size(), 6u); @@ -155,7 +155,7 @@ TEST(RuntimeDialect, CompilerAndRun) { auto runtime_program = ir_compiler.Build(); // Step 3: Convert into cinn::dialect::RuntimeDialect - std::shared_ptr<::ir::Program> ir_runtime_program = + std::shared_ptr<::pir::Program> ir_runtime_program = cinn::hlir::framework::ConvertToRuntimeDialect(*runtime_program); // Step 4: Run cinn::dialect::RuntimeDialect diff --git a/test/cpp/ir/control_flow_dialect/CMakeLists.txt b/test/cpp/pir/control_flow_dialect/CMakeLists.txt similarity index 74% rename from 
test/cpp/ir/control_flow_dialect/CMakeLists.txt rename to test/cpp/pir/control_flow_dialect/CMakeLists.txt index 5f2a864f9942e..fa6b0a5ae7fca 100644 --- a/test/cpp/ir/control_flow_dialect/CMakeLists.txt +++ b/test/cpp/pir/control_flow_dialect/CMakeLists.txt @@ -3,6 +3,6 @@ cc_test_old( SRCS if_op_test.cc DEPS - ir - pd_dialect + pir + pd_op_dialect gtest) diff --git a/test/cpp/ir/control_flow_dialect/if_op_test.cc b/test/cpp/pir/control_flow_dialect/if_op_test.cc similarity index 56% rename from test/cpp/ir/control_flow_dialect/if_op_test.cc rename to test/cpp/pir/control_flow_dialect/if_op_test.cc index 8d0d962b5e791..f4a7a7790866d 100644 --- a/test/cpp/ir/control_flow_dialect/if_op_test.cc +++ b/test/cpp/pir/control_flow_dialect/if_op_test.cc @@ -14,45 +14,45 @@ #include #include -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_op.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_op.h" -#include "paddle/ir/core/builder.h" -#include "paddle/ir/core/builtin_op.h" -#include "paddle/ir/core/program.h" -#include "paddle/ir/dialect/control_flow/ir/cf_dialect.h" -#include "paddle/ir/dialect/control_flow/ir/cf_ops.h" +#include "paddle/fluid/pir/dialect/operator/ir/manual_op.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_dialect.h" +#include "paddle/fluid/pir/dialect/operator/ir/pd_op.h" +#include "paddle/pir/core/builder.h" +#include "paddle/pir/core/builtin_op.h" +#include "paddle/pir/core/program.h" +#include "paddle/pir/dialect/control_flow/ir/cf_dialect.h" +#include "paddle/pir/dialect/control_flow/ir/cf_ops.h" TEST(if_op_test, base) { - ir::IrContext* ctx = ir::IrContext::Instance(); - ctx->GetOrRegisterDialect(); - ctx->GetOrRegisterDialect(); + pir::IrContext* ctx = pir::IrContext::Instance(); + ctx->GetOrRegisterDialect(); + ctx->GetOrRegisterDialect(); - ir::Program program(ctx); - ir::Block* block = program.block(); - ir::Builder builder(ctx, block); + pir::Program program(ctx); + pir::Block* block = program.block(); + pir::Builder builder(ctx, block); auto full_op = builder.Build( std::vector{1}, true, phi::DataType::BOOL); auto if_op = builder.Build( - full_op.out(), std::vector{builder.bool_type()}); + full_op.out(), std::vector{builder.bool_type()}); - ir::Block* true_block = if_op.true_block(); + pir::Block* true_block = if_op.true_block(); builder.SetInsertionPointToStart(true_block); auto full_op_1 = builder.Build( std::vector{2}, true, phi::DataType::BOOL); - builder.Build(std::vector{full_op_1.out()}); + builder.Build(std::vector{full_op_1.out()}); - ir::Block* false_block = if_op.false_block(); + pir::Block* false_block = if_op.false_block(); builder.SetInsertionPointToStart(false_block); auto full_op_2 = builder.Build( std::vector{3}, true, phi::DataType::BOOL); - builder.Build(std::vector{full_op_2.out()}); + builder.Build(std::vector{full_op_2.out()}); std::stringstream ss; program.Print(ss); diff --git a/test/cpp/ir/core/CMakeLists.txt b/test/cpp/pir/core/CMakeLists.txt similarity index 71% rename from test/cpp/ir/core/CMakeLists.txt rename to test/cpp/pir/core/CMakeLists.txt index 14ea9dc13725d..b57a9891e63aa 100644 --- a/test/cpp/ir/core/CMakeLists.txt +++ b/test/cpp/pir/core/CMakeLists.txt @@ -3,21 +3,21 @@ cc_test_old( SRCS type_test.cc DEPS - ir + pir gtest - pd_dialect) -cc_test_old(ir_attribute_test SRCS ir_attribute_test.cc DEPS ir gtest) -cc_test_old(ir_value_test SRCS ir_value_test.cc DEPS ir gtest) -cc_test_old(ir_op_test SRCS ir_op_test.cc DEPS ir gtest) 
-cc_test_old(ir_region_test SRCS ir_region_test.cc DEPS ir gtest) -cc_test_old(ir_builder_test SRCS ir_builder_test.cc DEPS ir gtest) + pd_op_dialect) +cc_test_old(ir_attribute_test SRCS ir_attribute_test.cc DEPS pir gtest) +cc_test_old(ir_value_test SRCS ir_value_test.cc DEPS pir gtest) +cc_test_old(ir_op_test SRCS ir_op_test.cc DEPS pir gtest) +cc_test_old(ir_region_test SRCS ir_region_test.cc DEPS pir gtest) +cc_test_old(ir_builder_test SRCS ir_builder_test.cc DEPS pir gtest) cc_test_old( ir_program_test SRCS ir_program_test.cc DEPS - pd_dialect - ir + pd_op_dialect + pir phi gtest) @@ -26,8 +26,8 @@ cc_test_old( SRCS ir_infershape_test.cc DEPS - pd_dialect - ir + pd_op_dialect + pir phi gtest) @@ -38,10 +38,10 @@ cc_test_old( DEPS pd_op_to_kernel_pass program_translator - pd_dialect + pd_op_dialect pd_kernel_dialect phi_kernel_adaptor - ir + pir phi gtest) @@ -50,8 +50,8 @@ cc_test_old( SRCS scalar_attribute_test.cc DEPS - pd_dialect - ir + pd_op_dialect + pir gtest) file( @@ -75,8 +75,8 @@ cc_test_old( DEPS program_translator gtest - pd_dialect - ir) + pd_op_dialect + pir) cc_test_old( add_dialect_parser_test @@ -84,8 +84,8 @@ cc_test_old( add_dialect_parser_test.cc DEPS gtest - pd_dialect - ir) + pd_op_dialect + pir) cc_test_old( ir_parser_test @@ -93,19 +93,19 @@ cc_test_old( ir_parser_test.cc DEPS gtest - pd_dialect - ir) + pd_op_dialect + pir) -cc_test_old(ir_op_info_test SRCS op_info_test.cc DEPS gtest ir) +cc_test_old(ir_op_info_test SRCS op_info_test.cc DEPS gtest pir) cc_test_old( ir_op_yaml_info_parser_test SRCS op_yaml_info_parser_test.cc DEPS gtest - pd_dialect + pd_op_dialect pd_interface - ir) + pir) cc_test_old( ir_type_converter_test @@ -114,8 +114,8 @@ cc_test_old( DEPS gtest program_translator - pd_dialect - ir) + pd_op_dialect + pir) cc_test_old( block_operand_test @@ -124,4 +124,4 @@ cc_test_old( DEPS test_dialect gtest - ir) + pir) diff --git a/test/cpp/pir/core/TestParserText.txt b/test/cpp/pir/core/TestParserText.txt new file mode 100644 index 0000000000000..95c26c61501d1 --- /dev/null +++ b/test/cpp/pir/core/TestParserText.txt @@ -0,0 +1,43 @@ + +//CHECK attribute +(String)sdfgs.sdsd + +//CHECK type +f32 + +//CHECK type +pd_op.tensor<256xf32> + +//CHECK program +{ + (%0) = "builtin.get_parameter" () {parameter_name:(String)conv2d_0.w_0} : () -> pd_op.tensor<64x3x7x7xf32> + (%1) = "pd_op.feed" () {col:(Int32)0,is_persisable:(Array)[false],name:(String)data,stop_gradient:(Array)[true]} : () -> pd_op.tensor<-1x3x224x224xf32> + (%2) = "pd_op.conv2d" (%1, %0) {data_format:(String)NCHW,dilations:(Array)[(Int32)1,(Int32)1],groups:(Int32)1,is_persisable:(Array)[false],padding_algorithm:(String)EXPLICIT,paddings:(Array)[(Int32)3,(Int32)3],stop_gradient:(Array)[false],strides:(Array)[(Int32)2,(Int32)2]} : (pd_op.tensor<-1x3x224x224xf32>, pd_op.tensor<64x3x7x7xf32>) -> pd_op.tensor<-1x64x112x112xf32> +} + +//CHECK attribute +(Array)[(pd_op.DataType)bool,(pd_op.DataType)float32,(pd_op.DataType)float64, +(pd_op.DataType)complex64,(pd_op.DataType)complex128,(pd_op.DataType)Undefined, +(pd_op.DataType)Undefined,(pd_op.DataType)Undefined,(pd_op.DataType)Undefined, +(pd_op.DataType)bfloat16,(pd_op.DataType)uint8,(pd_op.DataType)uint32,(pd_op.DataType)int8, +(pd_op.DataType)uint16,(pd_op.DataType)int16,(pd_op.DataType)int32,(pd_op.DataType)uint64,(pd_op.DataType)int64] + + +//CHECK attribute +(Array)[(pd_op.Place)Place(gpu:0),(pd_op.Place)Place(gpu_pinned),(pd_op.Place)Place(gpu_pinned), 
+(pd_op.Place)Place(xpu:0),(pd_op.Place)Place(ipu:0),(pd_op.Place)Place(:0),(pd_op.Place)Place(cpu)] + + +//CHECK attribute +(Array)[(pd_op.DataLayout)NHWC,(pd_op.DataLayout)STRIDED,(pd_op.DataLayout)NCHW,(pd_op.DataLayout)Undefined(AnyLayout), +(pd_op.DataLayout)ONEDNN,(pd_op.DataLayout)SPARSE_COO,(pd_op.DataLayout)SPARSE_CSR,(pd_op.DataLayout)NDHWC,(pd_op.DataLayout)NCDHW, +(pd_op.DataLayout)PSTRING_UNION] + +//CHECK attribute +(Array)[(Double)1,(Int64)0,(String)1] + +//CHECK type +vec[bf16,f64,b,i8,u8,i16,c64,c128] + +//CHECK attribute +(String)1 diff --git a/test/cpp/ir/core/add_dialect_parser_test.cc b/test/cpp/pir/core/add_dialect_parser_test.cc similarity index 64% rename from test/cpp/ir/core/add_dialect_parser_test.cc rename to test/cpp/pir/core/add_dialect_parser_test.cc index 9bc39bb8d967c..7ab160e2595d6 100644 --- a/test/cpp/ir/core/add_dialect_parser_test.cc +++ b/test/cpp/pir/core/add_dialect_parser_test.cc @@ -18,27 +18,27 @@ #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/ir_adaptor/translator/translate.h" -#include "paddle/ir/core/attribute.h" -#include "paddle/ir/core/attribute_base.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/builtin_attribute_storage.h" -#include "paddle/ir/core/builtin_dialect.h" -#include "paddle/ir/core/dialect.h" -#include "paddle/ir/core/ir_parser.h" -#include "paddle/ir/core/utils.h" - -using PaddleDialect = paddle::dialect::PaddleDialect; -using AttributeStorage = ir::AttributeStorage; - -class TestParserDialect : public ir::Dialect { +#include "paddle/pir/core/attribute.h" +#include "paddle/pir/core/attribute_base.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/builtin_attribute_storage.h" +#include "paddle/pir/core/builtin_dialect.h" +#include "paddle/pir/core/dialect.h" +#include "paddle/pir/core/ir_parser.h" +#include "paddle/pir/core/utils.h" + +using OperatorDialect = paddle::dialect::OperatorDialect; +using AttributeStorage = pir::AttributeStorage; + +class TestParserDialect : public pir::Dialect { public: - explicit TestParserDialect(ir::IrContext* context); + explicit TestParserDialect(pir::IrContext* context); static const char* name() { return "tp"; } - void PrintAttribute(ir::Attribute attr, std::ostream& os) const; + void PrintAttribute(pir::Attribute attr, std::ostream& os) const; - ir::Attribute ParseAttribute(ir::IrParser& parser); // NOLINT + pir::Attribute ParseAttribute(pir::IrParser& parser); // NOLINT private: void initialize(); @@ -49,7 +49,7 @@ IR_DEFINE_EXPLICIT_TYPE_ID(TestParserDialect); DECLARE_BASE_TYPE_ATTRIBUTE_STORAGE(CharAttributeStorage, char); -class CharAttribute : public ir::Attribute { +class CharAttribute : public pir::Attribute { public: using Attribute::Attribute; @@ -57,7 +57,7 @@ class CharAttribute : public ir::Attribute { char data() const; - static CharAttribute Parse(ir::IrParser& parser) { // NOLINT + static CharAttribute Parse(pir::IrParser& parser) { // NOLINT std::string char_val = parser.ConsumeToken().val_; return CharAttribute::get(parser.ctx, char_val[0]); } @@ -71,19 +71,19 @@ void TestParserDialect::initialize() { RegisterAttributes(); } char CharAttribute::data() const { return storage()->data(); } -TestParserDialect::TestParserDialect(ir::IrContext* context) - : ir::Dialect(name(), context, ir::TypeId::get()) { +TestParserDialect::TestParserDialect(pir::IrContext* context) + : pir::Dialect(name(), context, pir::TypeId::get()) { initialize(); } -void 
TestParserDialect::PrintAttribute(ir::Attribute attr, +void TestParserDialect::PrintAttribute(pir::Attribute attr, std::ostream& os) const { auto byte_attr = attr.dyn_cast(); os << "(tp.char)" << byte_attr.data(); } -ir::Attribute TestParserDialect::ParseAttribute( - ir::IrParser& parser) { // NOLINT +pir::Attribute TestParserDialect::ParseAttribute( + pir::IrParser& parser) { // NOLINT std::string type_name = parser.ConsumeToken().val_; std::string parenthesis_token_val = parser.ConsumeToken().val_; IR_ENFORCE(parenthesis_token_val == ")", @@ -93,19 +93,19 @@ ir::Attribute TestParserDialect::ParseAttribute( } TEST(IrParserTest, AddAttribute) { - ir::IrContext* ctx = ir::IrContext::Instance(); - ctx->GetOrRegisterDialect(); - ctx->GetOrRegisterDialect(); + pir::IrContext* ctx = pir::IrContext::Instance(); + ctx->GetOrRegisterDialect(); + ctx->GetOrRegisterDialect(); ctx->GetOrRegisterDialect(); std::string op_str = " (%0) = \"builtin.get_parameter\" () " "{parameter_name:(String)conv2d_0.w_0,test:(tp.char)a} : () -> " - "pd.tensor<64x3x7x7xf32>"; + "pd_op.tensor<64x3x7x7xf32>"; std::stringstream ss; ss << op_str; - ir::IrParser* parser = new ir::IrParser(ctx, ss); - ir::Operation* op = parser->ParseOperation(); + pir::IrParser* parser = new pir::IrParser(ctx, ss); + pir::Operation* op = parser->ParseOperation(); std::stringstream ssp; op->Print(ssp); delete parser; diff --git a/test/cpp/ir/core/block_operand_test.cc b/test/cpp/pir/core/block_operand_test.cc similarity index 73% rename from test/cpp/ir/core/block_operand_test.cc rename to test/cpp/pir/core/block_operand_test.cc index f2b74e9781a3f..eac4cc302e439 100644 --- a/test/cpp/ir/core/block_operand_test.cc +++ b/test/cpp/pir/core/block_operand_test.cc @@ -14,34 +14,34 @@ #include -#include "paddle/ir/core/builder.h" -#include "paddle/ir/core/program.h" +#include "paddle/pir/core/builder.h" +#include "paddle/pir/core/program.h" -#include "test/cpp/ir/tools/test_dialect.h" -#include "test/cpp/ir/tools/test_op.h" +#include "test/cpp/pir/tools/test_dialect.h" +#include "test/cpp/pir/tools/test_op.h" TEST(block_operand_test, type_block) { - ir::IrContext ctx; + pir::IrContext ctx; ctx.GetOrRegisterDialect(); - ir::Program program(&ctx); - ir::Block* block = program.block(); + pir::Program program(&ctx); + pir::Block* block = program.block(); - ir::Builder builder(&ctx, block); + pir::Builder builder(&ctx, block); test::RegionOp region_op = builder.Build(); auto& region = region_op->region(0); - ir::Block* block_1 = new ir::Block(); - ir::Block* block_2 = new ir::Block(); - ir::Block* block_3 = new ir::Block(); + pir::Block* block_1 = new pir::Block(); + pir::Block* block_2 = new pir::Block(); + pir::Block* block_3 = new pir::Block(); region.push_back(block_1); region.push_back(block_2); region.push_back(block_3); builder.SetInsertionPointToEnd(block_1); auto op1 = - builder.Build(std::vector{}, block_2); + builder.Build(std::vector{}, block_2); EXPECT_TRUE(block_2->HasOneUse()); EXPECT_FALSE(block_2->use_empty()); @@ -56,7 +56,7 @@ TEST(block_operand_test, type_block) { builder.SetInsertionPointToEnd(block_3); auto op3 = - builder.Build(std::vector{}, block_1); + builder.Build(std::vector{}, block_1); block_operand = op3->block_operand(0); block_operand.set_source(block_2); EXPECT_EQ(block_2, block_operand.source()); diff --git a/test/cpp/ir/core/ir_attribute_test.cc b/test/cpp/pir/core/ir_attribute_test.cc similarity index 51% rename from test/cpp/ir/core/ir_attribute_test.cc rename to test/cpp/pir/core/ir_attribute_test.cc index 
291b64a7233cb..1242e19b50faa 100644 --- a/test/cpp/ir/core/ir_attribute_test.cc +++ b/test/cpp/pir/core/ir_attribute_test.cc @@ -15,21 +15,21 @@ #include #include -#include "paddle/ir/core/attribute.h" -#include "paddle/ir/core/attribute_base.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/builtin_dialect.h" -#include "paddle/ir/core/builtin_type.h" -#include "paddle/ir/core/dialect.h" -#include "paddle/ir/core/ir_context.h" +#include "paddle/pir/core/attribute.h" +#include "paddle/pir/core/attribute_base.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/builtin_dialect.h" +#include "paddle/pir/core/builtin_type.h" +#include "paddle/pir/core/dialect.h" +#include "paddle/pir/core/ir_context.h" class AttributeA {}; IR_DECLARE_EXPLICIT_TYPE_ID(AttributeA) IR_DEFINE_EXPLICIT_TYPE_ID(AttributeA) -struct FakeDialect : ir::Dialect { - explicit FakeDialect(ir::IrContext *context) - : ir::Dialect(name(), context, ir::TypeId::get()) {} +struct FakeDialect : pir::Dialect { + explicit FakeDialect(pir::IrContext *context) + : pir::Dialect(name(), context, pir::TypeId::get()) {} static const char *name() { return "fake"; } }; IR_DECLARE_EXPLICIT_TYPE_ID(FakeDialect) @@ -37,37 +37,37 @@ IR_DEFINE_EXPLICIT_TYPE_ID(FakeDialect) TEST(attribute_test, attribute_base) { // Test 1: Test the function of IrContext to register Dialect. - ir::IrContext *ctx = ir::IrContext::Instance(); - ir::Dialect *fake_dialect = ctx->GetOrRegisterDialect(); + pir::IrContext *ctx = pir::IrContext::Instance(); + pir::Dialect *fake_dialect = ctx->GetOrRegisterDialect(); // Test 2: Test the get method of AbstractType. - ir::TypeId a_id = ir::TypeId::get(); - ir::AbstractAttribute abstract_attribute_a = - ir::AbstractAttribute::get(a_id, *fake_dialect); + pir::TypeId a_id = pir::TypeId::get(); + pir::AbstractAttribute abstract_attribute_a = + pir::AbstractAttribute::get(a_id, *fake_dialect); EXPECT_EQ(abstract_attribute_a.type_id(), a_id); // Test 3: Test the constructor of AbstractStorage. - ir::AttributeStorage storage_a(&abstract_attribute_a); + pir::AttributeStorage storage_a(&abstract_attribute_a); EXPECT_EQ(storage_a.abstract_attribute().type_id(), abstract_attribute_a.type_id()); } TEST(attribute_test, built_in_attribute) { - ir::IrContext *ctx = ir::IrContext::Instance(); + pir::IrContext *ctx = pir::IrContext::Instance(); // Test 1: Test the parametric built-in attribute of IrContext. std::string str_tmp = "string_a"; - ir::Attribute string_attr_1 = ir::StrAttribute::get(ctx, str_tmp); - ir::Attribute string_attr_2 = ir::StrAttribute::get(ctx, str_tmp); + pir::Attribute string_attr_1 = pir::StrAttribute::get(ctx, str_tmp); + pir::Attribute string_attr_2 = pir::StrAttribute::get(ctx, str_tmp); EXPECT_EQ(string_attr_1, string_attr_2); - EXPECT_EQ(ir::StrAttribute::classof(string_attr_1), 1); + EXPECT_EQ(pir::StrAttribute::classof(string_attr_1), 1); // Test 2: Test isa and dyn_cast. 
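The assertions that follow also lose their template arguments in this rendering; restored, Test 2 reads approximately as below (based on the `pir::StrAttribute`/`pir::TypeAttribute` API visible elsewhere in this patch):

```cpp
// "Test 2" with the template parameters spelled out.
pir::IrContext *ctx = pir::IrContext::Instance();
pir::Attribute string_attr = pir::StrAttribute::get(ctx, "string_a");
EXPECT_TRUE(string_attr.isa<pir::StrAttribute>());
pir::StrAttribute casted = string_attr.dyn_cast<pir::StrAttribute>();
EXPECT_EQ(casted.size(), 8u);  // "string_a" has eight characters

pir::Int32Type i32_type = pir::Int32Type::get(ctx);
pir::Attribute type_attr = pir::TypeAttribute::get(ctx, i32_type);
EXPECT_TRUE(type_attr.isa<pir::TypeAttribute>());
EXPECT_EQ(type_attr.dyn_cast<pir::TypeAttribute>().data().type_id(),
          i32_type.type_id());
```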
- EXPECT_EQ(string_attr_1.isa(), true); - ir::StrAttribute string_attr_cast_1 = - string_attr_1.dyn_cast(); - EXPECT_EQ(string_attr_cast_1.isa(), true); + EXPECT_EQ(string_attr_1.isa(), true); + pir::StrAttribute string_attr_cast_1 = + string_attr_1.dyn_cast(); + EXPECT_EQ(string_attr_cast_1.isa(), true); EXPECT_EQ(string_attr_cast_1.size() == 8, 1); - ir::Int32Type i32_type = ir::Int32Type::get(ctx); - ir::Attribute type_attr = ir::TypeAttribute::get(ctx, i32_type); - EXPECT_TRUE(type_attr.isa()); - EXPECT_EQ(type_attr.dyn_cast().data().type_id(), + pir::Int32Type i32_type = pir::Int32Type::get(ctx); + pir::Attribute type_attr = pir::TypeAttribute::get(ctx, i32_type); + EXPECT_TRUE(type_attr.isa()); + EXPECT_EQ(type_attr.dyn_cast().data().type_id(), i32_type.type_id()); } diff --git a/test/cpp/pir/core/ir_builder_test.cc b/test/cpp/pir/core/ir_builder_test.cc new file mode 100644 index 0000000000000..e3705d08c7ef9 --- /dev/null +++ b/test/cpp/pir/core/ir_builder_test.cc @@ -0,0 +1,54 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include + +#include "paddle/pir/core/builder.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/builtin_type.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/program.h" + +TEST(builder_test, type_api) { + pir::IrContext ctx; + pir::Builder builder(&ctx); + EXPECT_EQ(pir::UInt8Type::get(&ctx), builder.uint8_type()); + EXPECT_EQ(pir::Int8Type::get(&ctx), builder.int8_type()); + EXPECT_EQ(pir::VectorType::get(&ctx, std::vector()), + builder.vec_type({})); + EXPECT_EQ(pir::BFloat16Type::get(&ctx), builder.bfloat16_type()); + EXPECT_EQ(pir::Float32Type::get(&ctx), builder.float32_type()); + EXPECT_EQ(pir::Float64Type::get(&ctx), builder.float64_type()); + EXPECT_EQ(pir::IndexType::get(&ctx), builder.index_type()); + EXPECT_EQ(pir::Int16Type::get(&ctx), builder.int16_type()); + EXPECT_EQ(pir::BoolType::get(&ctx), builder.bool_type()); + EXPECT_EQ(pir::Complex64Type::get(&ctx), builder.complex64_type()); + EXPECT_EQ(pir::Complex128Type::get(&ctx), builder.complex128_type()); +} + +TEST(builder_test, attribute_api) { + pir::IrContext ctx; + pir::Builder builder(&ctx); + EXPECT_EQ(pir::StrAttribute::get(&ctx, "test"), builder.str_attr("test")); + EXPECT_EQ(pir::BoolAttribute::get(&ctx, true), builder.bool_attr(true)); + EXPECT_EQ(pir::FloatAttribute::get(&ctx, 0.2f), builder.float_attr(0.2f)); + EXPECT_EQ(pir::DoubleAttribute::get(&ctx, 2.0), builder.double_attr(2.0)); + EXPECT_EQ(pir::Int32Attribute::get(&ctx, 2), builder.int32_attr(2)); + EXPECT_EQ(pir::Int64Attribute::get(&ctx, 2), builder.int64_attr(2)); + EXPECT_EQ(pir::ArrayAttribute::get(&ctx, std::vector()), + builder.array_attr({})); + EXPECT_EQ(pir::PointerAttribute::get(&ctx, nullptr), + builder.pointer_attr(nullptr)); +} diff --git a/test/cpp/ir/core/ir_exe_test.cc b/test/cpp/pir/core/ir_exe_test.cc similarity index 71% rename from test/cpp/ir/core/ir_exe_test.cc rename 
to test/cpp/pir/core/ir_exe_test.cc index e36c99fb2e4b1..6ce9d39172a20 100644 --- a/test/cpp/ir/core/ir_exe_test.cc +++ b/test/cpp/pir/core/ir_exe_test.cc @@ -14,20 +14,20 @@ #include -#include "paddle/fluid/ir/dialect/paddle_dialect/interface/op_yaml_info.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/utils/op_yaml_info_util.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/utils/utils.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/builtin_dialect.h" -#include "paddle/ir/core/builtin_op.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/program.h" -#include "paddle/ir/core/utils.h" +#include "paddle/fluid/pir/dialect/operator/interface/op_yaml_info.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_dialect.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_type.h" +#include "paddle/fluid/pir/dialect/operator/utils/op_yaml_info_util.h" +#include "paddle/fluid/pir/dialect/operator/utils/utils.h" #include "paddle/phi/core/meta_tensor.h" #include "paddle/phi/infermeta/binary.h" #include "paddle/phi/kernels/elementwise_add_kernel.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/builtin_dialect.h" +#include "paddle/pir/core/builtin_op.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/program.h" +#include "paddle/pir/core/utils.h" #include "paddle/fluid/framework/scope.h" #include "paddle/fluid/framework/tensor.h" @@ -40,12 +40,12 @@ #include "paddle/fluid/platform/init.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_attribute.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_attribute.h" -#include "paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_adaptor.h" -#include "paddle/fluid/ir/transforms/pd_op_to_kernel_pass.h" -#include "paddle/ir/core/attribute.h" +#include "paddle/fluid/pir/phi_kernel_adaptor/phi_kernel_adaptor.h" +#include "paddle/fluid/pir/transforms/pd_op_to_kernel_pass.h" #include "paddle/phi/core/kernel_registry.h" +#include "paddle/pir/core/attribute.h" PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT); PD_DECLARE_KERNEL(full_int_array, CPU, ALL_LAYOUT); @@ -56,32 +56,33 @@ bool simple_cmp(float a, float b) { return std::abs((a - b) / a) < 1e-5; } TEST(program_test, program) { // Prepare ir env - ir::IrContext* ctx = ir::IrContext::Instance(); - ctx->GetOrRegisterDialect(); - ir::Program program(ctx); - ir::Builder builder(ctx, program.block()); - ir::Block* block = program.block(); + pir::IrContext* ctx = pir::IrContext::Instance(); + ctx->GetOrRegisterDialect(); + pir::Program program(ctx); + pir::Builder builder(ctx, program.block()); + pir::Block* block = program.block(); // Def: A = paddle::dialect::UniformOp(std::vector shape, // phi::DataType dtype, float min, float max, int seed, phi::Place place) - ir::AttributeMap uniform1_attributes; + pir::AttributeMap uniform1_attributes; uniform1_attributes.insert({"shape", paddle::dialect::IntArrayAttribute::get( - ir::IrContext::Instance(), + pir::IrContext::Instance(), phi::IntArray(std::vector{2, 2}))}); uniform1_attributes.insert( {"dtype", - paddle::dialect::DataTypeAttribute::get(ir::IrContext::Instance(), + paddle::dialect::DataTypeAttribute::get(pir::IrContext::Instance(), phi::DataType::FLOAT32)}); uniform1_attributes.insert( - {"min", ir::FloatAttribute::get(ir::IrContext::Instance(), 0.0)}); + {"min", pir::FloatAttribute::get(pir::IrContext::Instance(), 0.0)}); 
uniform1_attributes.insert( - {"max", ir::FloatAttribute::get(ir::IrContext::Instance(), 1.0)}); + {"max", pir::FloatAttribute::get(pir::IrContext::Instance(), 1.0)}); uniform1_attributes.insert( - {"seed", ir::Int32Attribute::get(ir::IrContext::Instance(), 2)}); - uniform1_attributes.insert({"place", - paddle::dialect::PlaceAttribute::get( - ir::IrContext::Instance(), phi::CPUPlace())}); + {"seed", pir::Int32Attribute::get(pir::IrContext::Instance(), 2)}); + uniform1_attributes.insert( + {"place", + paddle::dialect::PlaceAttribute::get(pir::IrContext::Instance(), + phi::CPUPlace())}); paddle::dialect::UniformOp uniform1 = builder.Build(uniform1_attributes); @@ -89,10 +90,11 @@ TEST(program_test, program) { true); EXPECT_EQ(block->size(), 4u); - ir::Attribute seed_attr = uniform1.attribute("seed"); - ir::Int32Attribute seed_attr1 = - uniform1.attribute("seed"); - EXPECT_EQ(seed_attr.dyn_cast().data(), seed_attr1.data()); + pir::Attribute seed_attr = uniform1.attribute("seed"); + pir::Int32Attribute seed_attr1 = + uniform1.attribute("seed"); + EXPECT_EQ(seed_attr.dyn_cast().data(), + seed_attr1.data()); // Def: B = paddle::dialect::UniformOp(...) paddle::dialect::UniformOp uniform2 = @@ -106,7 +108,7 @@ TEST(program_test, program) { true); EXPECT_EQ(block->size(), 8u); - // Def: C = paddle::dialect::AddOp(ir::OpResult x_, ir::OpResult y_) + // Def: C = paddle::dialect::AddOp(pir::OpResult x_, pir::OpResult y_) paddle::dialect::AddOp add = builder.Build( uniform1->result(0), uniform2->result(0)); EXPECT_EQ(add->result(0).type().isa(), @@ -135,28 +137,28 @@ TEST(program_test, program) { TEST(program_test, mutable_attribute) { // Prepare ir env - ir::IrContext* ctx = ir::IrContext::Instance(); - ctx->GetOrRegisterDialect(); - ir::Program program(ctx); - ir::Builder builder = ir::Builder(ctx, program.block()); - ir::Block* block = program.block(); + pir::IrContext* ctx = pir::IrContext::Instance(); + ctx->GetOrRegisterDialect(); + pir::Program program(ctx); + pir::Builder builder = pir::Builder(ctx, program.block()); + pir::Block* block = program.block(); // Def FullOp paddle::dialect::FullIntArrayOp full_shape_op = builder.Build( std::vector{2, 2}, phi::DataType::INT64, phi::CPUPlace()); - ir::OpResult shape_ = full_shape_op->result(0); + pir::OpResult shape_ = full_shape_op->result(0); // Generate scalar mutable attribute: min paddle::dialect::FullOp full_min_op = builder.Build( std::vector{1}, 0.0, phi::DataType::FLOAT32, phi::CPUPlace()); - ir::OpResult min_ = full_min_op->result(0); + pir::OpResult min_ = full_min_op->result(0); // Generate scalar mutable attribute: max paddle::dialect::FullOp full_max_op = builder.Build( std::vector{1}, 1.0, phi::DataType::FLOAT32, phi::CPUPlace()); - ir::OpResult max_ = full_max_op->result(0); + pir::OpResult max_ = full_max_op->result(0); - // Def: static void Build(ir::Builder &builder, ir::OperationArgument - // &argument, ir::OpResult shape_, ir::OpResult min_, ir::OpResult max_, + // Def: static void Build(pir::Builder &builder, pir::OperationArgument + // &argument, pir::OpResult shape_, pir::OpResult min_, pir::OpResult max_, // phi::DataType dtype, int seed, phi::Place place={}); paddle::dialect::UniformOp uniform1 = builder.Build( @@ -173,7 +175,7 @@ TEST(program_test, mutable_attribute) { true); EXPECT_EQ(block->size(), 5u); - // Def: C = paddle::dialect::AddOp(ir::OpResult x_, ir::OpResult y_) + // Def: C = paddle::dialect::AddOp(pir::OpResult x_, pir::OpResult y_) paddle::dialect::AddOp add = builder.Build( uniform1->result(0), 
uniform2->result(0)); EXPECT_EQ(add->result(0).type().isa(), diff --git a/test/cpp/ir/core/ir_infershape_test.cc b/test/cpp/pir/core/ir_infershape_test.cc similarity index 72% rename from test/cpp/ir/core/ir_infershape_test.cc rename to test/cpp/pir/core/ir_infershape_test.cc index e39a69ac573f9..589e3b87bebe0 100644 --- a/test/cpp/ir/core/ir_infershape_test.cc +++ b/test/cpp/pir/core/ir_infershape_test.cc @@ -14,14 +14,14 @@ #include -#include "paddle/ir/core/block.h" -#include "paddle/ir/core/builder.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/builtin_type.h" -#include "paddle/ir/core/dialect.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/op_base.h" -#include "paddle/ir/core/region.h" +#include "paddle/pir/core/block.h" +#include "paddle/pir/core/builder.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/builtin_type.h" +#include "paddle/pir/core/dialect.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/op_base.h" +#include "paddle/pir/core/region.h" #include "paddle/fluid/framework/scope.h" #include "paddle/fluid/framework/tensor.h" @@ -32,14 +32,14 @@ #include "paddle/phi/core/kernel_context.h" #include "paddle/phi/core/kernel_factory.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/interface/infermeta.h" +#include "paddle/fluid/pir/dialect/operator/interface/infermeta.h" #include "paddle/fluid/platform/init.h" #include "paddle/phi/core/infermeta_utils.h" #include "paddle/phi/infermeta/nullary.h" // Define op class OperationTest - : public ir::Op { + : public pir::Op { public: using Op::Op; static const char *name() { return "test.operation2"; } @@ -59,10 +59,10 @@ const char *OperationTest::attributes_name[attributes_num] = { // NOLINT "op2_attr2"}; // Define a dialect, op1 and op2 will be registered by this dialect. -class TestDialect : public ir::Dialect { +class TestDialect : public pir::Dialect { public: - explicit TestDialect(ir::IrContext *context) - : ir::Dialect(name(), context, ir::TypeId::get()) { + explicit TestDialect(pir::IrContext *context) + : pir::Dialect(name(), context, pir::TypeId::get()) { initialize(); } static const char *name() { return "test"; } @@ -74,19 +74,19 @@ IR_DECLARE_EXPLICIT_TYPE_ID(TestDialect) IR_DEFINE_EXPLICIT_TYPE_ID(TestDialect) TEST(infershape_test, infershape_test) { - ir::IrContext *ctx = ir::IrContext::Instance(); - ir::Dialect *test_dialect = ctx->GetOrRegisterDialect(); + pir::IrContext *ctx = pir::IrContext::Instance(); + pir::Dialect *test_dialect = ctx->GetOrRegisterDialect(); EXPECT_EQ(test_dialect != nullptr, true); // (2) Get registered operations. 
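The lookup and dispatch that follow amount to the steps below once the elided template arguments are restored; InferMeta is resolved through the interface the op registered with its dialect:

```cpp
// Sketch of the infershape_test dispatch path.
pir::IrContext *ctx = pir::IrContext::Instance();
pir::OpInfo op_info = ctx->GetRegisteredOpInfo(OperationTest::name());

std::vector<pir::OpResult> op_inputs = {};
std::vector<pir::Type> op_output_types = {pir::Float32Type::get(ctx)};
pir::Operation *op =
    pir::Operation::Create(op_inputs, {}, op_output_types, op_info);

// dyn_cast yields a handle wrapping the InferMeta function pointer the
// dialect registered for this op.
paddle::dialect::InferMetaInterface interface =
    op->dyn_cast<paddle::dialect::InferMetaInterface>();
```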
std::string op_name = OperationTest::name(); - ir::OpInfo op_info = ctx->GetRegisteredOpInfo(op_name); + pir::OpInfo op_info = ctx->GetRegisteredOpInfo(op_name); - std::vector op_inputs = {}; - std::vector op_output_types = {ir::Float32Type::get(ctx)}; - ir::Operation *op = - ir::Operation::Create(op_inputs, {}, op_output_types, op_info); + std::vector op_inputs = {}; + std::vector op_output_types = {pir::Float32Type::get(ctx)}; + pir::Operation *op = + pir::Operation::Create(op_inputs, {}, op_output_types, op_info); paddle::dialect::InferMetaInterface interface = op->dyn_cast(); diff --git a/test/cpp/ir/core/ir_op_test.cc b/test/cpp/pir/core/ir_op_test.cc similarity index 52% rename from test/cpp/ir/core/ir_op_test.cc rename to test/cpp/pir/core/ir_op_test.cc index 48f54c63230e0..d0c35151d6be1 100644 --- a/test/cpp/ir/core/ir_op_test.cc +++ b/test/cpp/pir/core/ir_op_test.cc @@ -15,24 +15,24 @@ #include #include -#include "paddle/ir/core/block.h" -#include "paddle/ir/core/builder.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/builtin_op.h" -#include "paddle/ir/core/builtin_type.h" -#include "paddle/ir/core/dialect.h" -#include "paddle/ir/core/enforce.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/ir_printer.h" -#include "paddle/ir/core/op_base.h" -#include "paddle/ir/core/program.h" -#include "paddle/ir/core/region.h" +#include "paddle/pir/core/block.h" +#include "paddle/pir/core/builder.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/builtin_op.h" +#include "paddle/pir/core/builtin_type.h" +#include "paddle/pir/core/dialect.h" +#include "paddle/pir/core/enforce.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/ir_printer.h" +#include "paddle/pir/core/op_base.h" +#include "paddle/pir/core/program.h" +#include "paddle/pir/core/region.h" /// \brief Define built-in Trait, derived from OpTraitBase. -class ReadOnlyTrait : public ir::OpTraitBase { +class ReadOnlyTrait : public pir::OpTraitBase { public: - explicit ReadOnlyTrait(ir::Operation *op) - : ir::OpTraitBase(op) {} + explicit ReadOnlyTrait(pir::Operation *op) + : pir::OpTraitBase(op) {} }; IR_DECLARE_EXPLICIT_TYPE_ID(ReadOnlyTrait) IR_DEFINE_EXPLICIT_TYPE_ID(ReadOnlyTrait) @@ -41,17 +41,17 @@ IR_DEFINE_EXPLICIT_TYPE_ID(ReadOnlyTrait) /// Models need to be defined within the class. Concept defines abstract /// interface functions, and Model is a template class that defines the specific /// implementation of interface functions based on template parameters. 
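Because the template heads of the class this comment describes are stripped above, here is its approximate skeleton with them restored; the body of `InferShape` is condensed to the dispatch step:

```cpp
// Skeleton of InferShapeInterface: Concept erases the concrete op type
// behind a function pointer; Model instantiates it for one ConcreteOp.
class InferShapeInterface
    : public pir::OpInterfaceBase<InferShapeInterface> {
 public:
  struct Concept {
    explicit Concept(void (*infer_shape)(pir::Operation *))
        : infer_shape_(infer_shape) {}
    void (*infer_shape_)(pir::Operation *);
  };

  template <typename ConcreteOp>
  struct Model : public Concept {
    static void InferShape(pir::Operation *op) {
      ConcreteOp(op).InferShape();  // dispatch to the concrete op
    }
    Model() : Concept(InferShape) {}
  };

  InferShapeInterface(pir::Operation *op, Concept *impl)
      : pir::OpInterfaceBase<InferShapeInterface>(op), impl_(impl) {}

  void InferShape() { impl_->infer_shape_(operation()); }

 private:
  Concept *impl_;
};
```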
-class InferShapeInterface : public ir::OpInterfaceBase { +class InferShapeInterface : public pir::OpInterfaceBase { public: struct Concept { - explicit Concept(void (*infer_shape)(ir::Operation *)) + explicit Concept(void (*infer_shape)(pir::Operation *)) : infer_shape_(infer_shape) {} - void (*infer_shape_)(ir::Operation *); + void (*infer_shape_)(pir::Operation *); }; template struct Model : public Concept { - static void InferShape(ir::Operation *op) { + static void InferShape(pir::Operation *op) { ConcreteOp concret_op = ConcreteOp(op); if (concret_op == nullptr) throw("concret_op is nullptr"); concret_op.InferShape(); @@ -60,8 +60,8 @@ class InferShapeInterface : public ir::OpInterfaceBase { Model() : Concept(InferShape) {} }; - InferShapeInterface(ir::Operation *op, Concept *impl) - : ir::OpInterfaceBase(op), impl_(impl) {} + InferShapeInterface(pir::Operation *op, Concept *impl) + : pir::OpInterfaceBase(op), impl_(impl) {} void InferShape() { impl_->infer_shape_(operation()); } @@ -71,20 +71,21 @@ class InferShapeInterface : public ir::OpInterfaceBase { IR_DECLARE_EXPLICIT_TYPE_ID(InferShapeInterface) IR_DEFINE_EXPLICIT_TYPE_ID(InferShapeInterface) -ir::AttributeMap CreateAttributeMap(std::vector attribute_names, - std::vector attributes) { - ir::IrContext *ctx = ir::IrContext::Instance(); - ir::AttributeMap attr_map; +pir::AttributeMap CreateAttributeMap( + const std::vector &attribute_names, + const std::vector &attributes) { + pir::IrContext *ctx = pir::IrContext::Instance(); + pir::AttributeMap attr_map; for (size_t i = 0; i < attribute_names.size(); i++) { - ir::Attribute attr_value = ir::StrAttribute::get(ctx, attributes[i]); + pir::Attribute attr_value = pir::StrAttribute::get(ctx, attributes[i]); attr_map.insert( - std::pair(attribute_names[i], attr_value)); + std::pair(attribute_names[i], attr_value)); } return attr_map; } // Define op1. -class Operation1 : public ir::Op { +class Operation1 : public pir::Op { public: using Op::Op; static const char *name() { return "test.operation1"; } @@ -93,20 +94,20 @@ class Operation1 : public ir::Op { void Verify() { auto &attributes = this->attributes(); if (attributes.count("op1_attr1") == 0 || - !attributes.at("op1_attr1").isa()) { + !attributes.at("op1_attr1").isa()) { throw("Type of attribute: parameter_name is not right."); } if (attributes.count("op1_attr2") == 0 || - !attributes.at("op1_attr2").isa()) { + !attributes.at("op1_attr2").isa()) { throw("Type of attribute: parameter_name is not right."); } } - static void Build(const ir::Builder &builder, - ir::OperationArgument &argument) { // NOLINT - std::vector inputs = {}; - std::vector output_types = { - ir::Float32Type::get(builder.ir_context())}; - std::unordered_map attributes = + static void Build(const pir::Builder &builder, + pir::OperationArgument &argument) { // NOLINT + std::vector inputs = {}; + std::vector output_types = { + pir::Float32Type::get(builder.ir_context())}; + std::unordered_map attributes = CreateAttributeMap({"op1_attr1", "op1_attr2"}, {"op1_attr1", "op1_attr2"}); argument.AddOperands(inputs.begin(), inputs.end()); @@ -123,7 +124,7 @@ IR_DEFINE_EXPLICIT_TYPE_ID(Operation1) // Define op2. 
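In the declaration that follows, the variadic arguments of `pir::Op` are likewise elided; op2 attaches its trait and interface through that template list, which is exactly what the later `HasTrait`/`HasInterface` checks exercise:

```cpp
// Restored shape of the op2 declaration: traits and interfaces ride
// along as extra template arguments to pir::Op.
class Operation2
    : public pir::Op<Operation2, ReadOnlyTrait, InferShapeInterface> {
 public:
  using Op::Op;
  static const char *name() { return "test.operation2"; }
  // attributes_name, Verify(), InferShape() as in the diff below
};
```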
class Operation2 - : public ir::Op { + : public pir::Op { public: using Op::Op; static const char *name() { return "test.operation2"; } @@ -132,11 +133,11 @@ class Operation2 void Verify() { auto &attributes = this->attributes(); if (attributes.count("op2_attr1") == 0 || - (!attributes.at("op2_attr1").isa())) { + (!attributes.at("op2_attr1").isa())) { throw("Type of attribute: parameter_name is not right."); } if (attributes.count("op2_attr2") == 0 || - (!attributes.at("op2_attr2").isa())) { + (!attributes.at("op2_attr2").isa())) { throw("Type of attribute: parameter_name is not right."); } } @@ -149,16 +150,16 @@ IR_DECLARE_EXPLICIT_TYPE_ID(Operation2) IR_DEFINE_EXPLICIT_TYPE_ID(Operation2) // Define a dialect, op1 and op2 will be registered by this dialect. -class TestDialect : public ir::Dialect { +class TestDialect : public pir::Dialect { public: - explicit TestDialect(ir::IrContext *context) - : ir::Dialect(name(), context, ir::TypeId::get()) { + explicit TestDialect(pir::IrContext *context) + : pir::Dialect(name(), context, pir::TypeId::get()) { initialize(); } static const char *name() { return "test"; } - void PrintOperation(ir::Operation *op, - ir::IrPrinter &printer) const override { + void PrintOperation(pir::Operation *op, + pir::IrPrinter &printer) const override { printer.PrintOpResult(op); printer.os << " ="; @@ -174,16 +175,16 @@ IR_DEFINE_EXPLICIT_TYPE_ID(TestDialect) TEST(op_test, op_test) { // (1) Register Dialect, Operation1, Operation2 into IrContext. - ir::IrContext *ctx = ir::IrContext::Instance(); - ir::Dialect *test_dialect = ctx->GetOrRegisterDialect(); + pir::IrContext *ctx = pir::IrContext::Instance(); + pir::Dialect *test_dialect = ctx->GetOrRegisterDialect(); EXPECT_EQ(test_dialect != nullptr, true); // (2) Get registered operations. std::string op1_name = Operation1::name(); - ir::OpInfo op1_info = ctx->GetRegisteredOpInfo(op1_name); + pir::OpInfo op1_info = ctx->GetRegisteredOpInfo(op1_name); EXPECT_TRUE(op1_info); std::string op2_name = Operation2::name(); - ir::OpInfo op2_info = ctx->GetRegisteredOpInfo(op2_name); + pir::OpInfo op2_info = ctx->GetRegisteredOpInfo(op2_name); EXPECT_TRUE(op2_info); EXPECT_EQ(op1_info.HasTrait(), false); EXPECT_EQ(op1_info.HasInterface(), false); @@ -191,14 +192,14 @@ TEST(op_test, op_test) { EXPECT_EQ(op2_info.HasInterface(), true); // (3) Test uses for op. - std::vector op_inputs = {}; - std::vector op_output_types = {ir::Float32Type::get(ctx)}; - ir::Operation *op2 = - ir::Operation::Create(op_inputs, - CreateAttributeMap({"op2_attr1", "op2_attr2"}, - {"op2_attr1", "op2_attr2"}), - op_output_types, - op2_info); + std::vector op_inputs = {}; + std::vector op_output_types = {pir::Float32Type::get(ctx)}; + pir::Operation *op2 = + pir::Operation::Create(op_inputs, + CreateAttributeMap({"op2_attr1", "op2_attr2"}, + {"op2_attr1", "op2_attr2"}), + op_output_types, + op2_info); ReadOnlyTrait trait = op2->dyn_cast(); EXPECT_EQ(trait.operation(), op2); @@ -211,37 +212,37 @@ TEST(op_test, op_test) { TEST(op_test, region_test) { // (1) Register Dialect, Operation1, Operation2 into IrContext. - ir::IrContext *ctx = ir::IrContext::Instance(); - ir::Dialect *test_dialect = ctx->GetOrRegisterDialect(); + pir::IrContext *ctx = pir::IrContext::Instance(); + pir::Dialect *test_dialect = ctx->GetOrRegisterDialect(); EXPECT_EQ(test_dialect != nullptr, true); // (2) Get registered operations. 
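region_test then assembles an operation that owns a region through `pir::OperationArgument`; with the elided element types restored, the core steps are:

```cpp
// Sketch of creating an op that owns one region.
pir::IrContext *ctx = pir::IrContext::Instance();
pir::OpInfo op2_info = ctx->GetRegisteredOpInfo(Operation2::name());

pir::OperationArgument argument(op2_info);
argument.attributes = CreateAttributeMap({"op2_attr1", "op2_attr2"},
                                         {"op2_attr1", "op2_attr2"});
argument.output_types = {pir::Float32Type::get(ctx)};
argument.num_regions = 1;
pir::Operation *op3 = pir::Operation::Create(std::move(argument));

pir::Region &region = op3->region(0);
region.push_back(new pir::Block());  // the region owns its blocks
op3->Destroy();                      // destroying the op frees them
```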
- ir::OpInfo op1_info = ctx->GetRegisteredOpInfo(Operation1::name()); - ir::OpInfo op2_info = ctx->GetRegisteredOpInfo(Operation2::name()); - - ir::Operation *op1 = - ir::Operation::Create({}, - CreateAttributeMap({"op1_attr1", "op1_attr2"}, - {"op1_attr1", "op1_attr2"}), - {ir::Float32Type::get(ctx)}, - op1_info); - ir::Operation *op1_2 = - ir::Operation::Create({}, - CreateAttributeMap({"op1_attr1", "op1_attr2"}, - {"op1_attr1", "op1_attr2"}), - {ir::Float32Type::get(ctx)}, - op1_info); - - ir::OperationArgument argument(op2_info); + pir::OpInfo op1_info = ctx->GetRegisteredOpInfo(Operation1::name()); + pir::OpInfo op2_info = ctx->GetRegisteredOpInfo(Operation2::name()); + + pir::Operation *op1 = + pir::Operation::Create({}, + CreateAttributeMap({"op1_attr1", "op1_attr2"}, + {"op1_attr1", "op1_attr2"}), + {pir::Float32Type::get(ctx)}, + op1_info); + pir::Operation *op1_2 = + pir::Operation::Create({}, + CreateAttributeMap({"op1_attr1", "op1_attr2"}, + {"op1_attr1", "op1_attr2"}), + {pir::Float32Type::get(ctx)}, + op1_info); + + pir::OperationArgument argument(op2_info); argument.attributes = CreateAttributeMap({"op2_attr1", "op2_attr2"}, {"op2_attr1", "op2_attr2"}); - argument.output_types = {ir::Float32Type::get(ctx)}; + argument.output_types = {pir::Float32Type::get(ctx)}; argument.num_regions = 1; - ir::Operation *op3 = ir::Operation::Create(std::move(argument)); - // argument.regions.emplace_back(std::make_unique()); + pir::Operation *op3 = pir::Operation::Create(std::move(argument)); + // argument.regions.emplace_back(std::make_unique()); - ir::Region ®ion = op3->region(0); + pir::Region ®ion = op3->region(0); EXPECT_EQ(region.empty(), true); // (3) Test custom operation printer @@ -249,35 +250,35 @@ TEST(op_test, region_test) { op1->Print(ss); EXPECT_EQ(ss.str(), " (%0) = \"test.operation1\" ()"); - region.push_back(new ir::Block()); - region.push_front(new ir::Block()); - region.insert(region.begin(), new ir::Block()); - ir::Block *block = region.front(); + region.push_back(new pir::Block()); + region.push_front(new pir::Block()); + region.insert(region.begin(), new pir::Block()); + pir::Block *block = region.front(); block->push_front(op1); block->insert(block->begin(), op1_2); op3->Destroy(); } TEST(op_test, module_op_death) { - ir::IrContext *ctx = ir::IrContext::Instance(); - ir::OpInfo op_info = ctx->GetRegisteredOpInfo(ir::ModuleOp::name()); + pir::IrContext *ctx = pir::IrContext::Instance(); + pir::OpInfo op_info = ctx->GetRegisteredOpInfo(pir::ModuleOp::name()); - std::vector inputs{ir::OpResult()}; - ir::AttributeMap attrs{{"program", ir::Int32Attribute::get(ctx, 1)}}; - std::vector output_types = {ir::Float32Type::get(ctx)}; + std::vector inputs{pir::OpResult()}; + pir::AttributeMap attrs{{"program", pir::Int32Attribute::get(ctx, 1)}}; + std::vector output_types = {pir::Float32Type::get(ctx)}; - EXPECT_THROW(ir::Operation::Create(inputs, {}, {}, op_info), - ir::IrNotMetException); - EXPECT_THROW(ir::Operation::Create({}, attrs, {}, op_info), - ir::IrNotMetException); - EXPECT_THROW(ir::Operation::Create({}, {}, output_types, op_info), - ir::IrNotMetException); + EXPECT_THROW(pir::Operation::Create(inputs, {}, {}, op_info), + pir::IrNotMetException); + EXPECT_THROW(pir::Operation::Create({}, attrs, {}, op_info), + pir::IrNotMetException); + EXPECT_THROW(pir::Operation::Create({}, {}, output_types, op_info), + pir::IrNotMetException); - ir::Program program(ctx); + pir::Program program(ctx); EXPECT_EQ(program.module_op().program(), &program); 
EXPECT_EQ(program.module_op().ir_context(), ctx); program.module_op()->set_attribute("program", - ir::PointerAttribute::get(ctx, &program)); + pir::PointerAttribute::get(ctx, &program)); } diff --git a/test/cpp/ir/core/ir_parser_test.cc b/test/cpp/pir/core/ir_parser_test.cc similarity index 76% rename from test/cpp/ir/core/ir_parser_test.cc rename to test/cpp/pir/core/ir_parser_test.cc index 39abf960583e0..15f6f1cfd5017 100644 --- a/test/cpp/ir/core/ir_parser_test.cc +++ b/test/cpp/pir/core/ir_parser_test.cc @@ -21,18 +21,18 @@ #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/ir_adaptor/translator/translate.h" -#include "paddle/ir/core/attribute.h" -#include "paddle/ir/core/attribute_base.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/builtin_attribute_storage.h" -#include "paddle/ir/core/builtin_dialect.h" -#include "paddle/ir/core/dialect.h" -#include "paddle/ir/core/ir_parser.h" -#include "paddle/ir/core/ir_printer.h" -#include "paddle/ir/core/utils.h" +#include "paddle/pir/core/attribute.h" +#include "paddle/pir/core/attribute_base.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/builtin_attribute_storage.h" +#include "paddle/pir/core/builtin_dialect.h" +#include "paddle/pir/core/dialect.h" +#include "paddle/pir/core/ir_parser.h" +#include "paddle/pir/core/ir_printer.h" +#include "paddle/pir/core/utils.h" -using PaddleDialect = paddle::dialect::PaddleDialect; -using AttributeStorage = ir::AttributeStorage; +using OperatorDialect = paddle::dialect::OperatorDialect; +using AttributeStorage = pir::AttributeStorage; enum TestType { AttributeTest = 0, @@ -59,7 +59,7 @@ class ParserTest { public: explicit ParserTest(std::ifstream& test_text) : test_text(test_text) {} TestTask* GetTestTask(); - bool ConsumeTestTask(TestTask* test_task, ir::IrContext* ctx); + bool ConsumeTestTask(TestTask* test_task, pir::IrContext* ctx); }; TestTask* ParserTest::GetTestTask() { @@ -92,13 +92,13 @@ TestTask* ParserTest::GetTestTask() { return nullptr; } -bool ParserTest::ConsumeTestTask(TestTask* test_task, ir::IrContext* ctx) { +bool ParserTest::ConsumeTestTask(TestTask* test_task, pir::IrContext* ctx) { std::string test_info = test_task->test_info; TestType test_type = test_task->test_type; - std::unique_ptr printer; - std::unique_ptr parser; + std::unique_ptr printer; + std::unique_ptr parser; std::stringstream is(test_info); - parser.reset(new ir::IrParser(ctx, is)); + parser.reset(new pir::IrParser(ctx, is)); std::vector before_parser_tokens; while (parser->PeekToken().token_type_ != EOF_) { before_parser_tokens.push_back(parser->ConsumeToken().val_); @@ -106,16 +106,16 @@ bool ParserTest::ConsumeTestTask(TestTask* test_task, ir::IrContext* ctx) { std::stringstream is_par(test_info); std::stringstream os; if (test_type == AttributeTest) { - auto attr = ir::Attribute::Parse(is_par, ctx); + auto attr = pir::Attribute::Parse(is_par, ctx); attr.Print(os); } else if (test_type == ProgramTest) { - auto program = ir::Program::Parse(is_par, ctx); + auto program = pir::Program::Parse(is_par, ctx); program->Print(os); } else if (test_type == TypeTest) { - auto type = ir::Type::Parse(is_par, ctx); + auto type = pir::Type::Parse(is_par, ctx); type.Print(os); } - parser.reset(new ir::IrParser(ctx, os)); + parser.reset(new pir::IrParser(ctx, os)); std::vector after_parser_tokens; while (parser->PeekToken().token_type_ != EOF_) { auto str = parser->ConsumeToken().val_; @@ -136,9 +136,9 @@ bool 
ParserTest::ConsumeTestTask(TestTask* test_task, ir::IrContext* ctx) { } TEST(IrParserTest, TestParserByFile) { - ir::IrContext* ctx = ir::IrContext::Instance(); - ctx->GetOrRegisterDialect(); - ctx->GetOrRegisterDialect(); + pir::IrContext* ctx = pir::IrContext::Instance(); + ctx->GetOrRegisterDialect(); + ctx->GetOrRegisterDialect(); std::ifstream is("TestParserText.txt"); EXPECT_TRUE(is.is_open()); ParserTest parser_test(is); diff --git a/test/cpp/ir/core/ir_program_test.cc b/test/cpp/pir/core/ir_program_test.cc similarity index 61% rename from test/cpp/ir/core/ir_program_test.cc rename to test/cpp/pir/core/ir_program_test.cc index c7729ae89fde8..b4a2ebc2522dc 100644 --- a/test/cpp/ir/core/ir_program_test.cc +++ b/test/cpp/pir/core/ir_program_test.cc @@ -16,37 +16,37 @@ #include -#include "paddle/fluid/ir/dialect/paddle_dialect/interface/op_yaml_info.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/utils/utils.h" -#include "paddle/ir/core/block.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/builtin_dialect.h" -#include "paddle/ir/core/builtin_op.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/program.h" -#include "paddle/ir/core/utils.h" +#include "paddle/fluid/pir/dialect/operator/interface/op_yaml_info.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_dialect.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_type.h" +#include "paddle/fluid/pir/dialect/operator/utils/utils.h" #include "paddle/phi/core/meta_tensor.h" #include "paddle/phi/infermeta/binary.h" #include "paddle/phi/kernels/elementwise_add_kernel.h" +#include "paddle/pir/core/block.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/builtin_dialect.h" +#include "paddle/pir/core/builtin_op.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/program.h" +#include "paddle/pir/core/utils.h" // NOTE(zhangbo9674): File pd_op.h is generated by op_gen.py, see details in -// paddle/fluid/ir/dialect/CMakeLists.txt. -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_op.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/transforms/param_to_variable.h" +// paddle/fluid/pir/dialect/CMakeLists.txt. 
+#include "paddle/fluid/pir/dialect/operator/ir/pd_op.h" +#include "paddle/fluid/pir/dialect/operator/transforms/param_to_variable.h" -class AddOp : public ir::Op { +class AddOp : public pir::Op { public: using Op::Op; static const char *name() { return "test.add"; } static constexpr const char **attributes_name = nullptr; static constexpr uint32_t attributes_num = 0; void Verify(); - static void Build(ir::Builder &builder, // NOLINT - ir::OperationArgument &argument, // NOLINT - ir::OpResult l_operand, - ir::OpResult r_operand, - ir::Type sum_type); + static void Build(pir::Builder &builder, // NOLINT + pir::OperationArgument &argument, // NOLINT + pir::OpResult l_operand, + pir::OpResult r_operand, + pir::Type sum_type); }; void AddOp::Verify() { if (num_operands() != 2) { @@ -56,11 +56,11 @@ void AddOp::Verify() { throw("The size of outputs must be equal to 1."); } } -void AddOp::Build(ir::Builder &, - ir::OperationArgument &argument, - ir::OpResult l_operand, - ir::OpResult r_operand, - ir::Type sum_type) { +void AddOp::Build(pir::Builder &, + pir::OperationArgument &argument, + pir::OpResult l_operand, + pir::OpResult r_operand, + pir::Type sum_type) { argument.AddOperand(l_operand); argument.AddOperand(r_operand); argument.AddOutput(sum_type); @@ -70,44 +70,44 @@ IR_DEFINE_EXPLICIT_TYPE_ID(AddOp) TEST(program_test, program) { // (1) Init environment. - ir::IrContext *ctx = ir::IrContext::Instance(); - ir::Dialect *builtin_dialect = - ctx->GetOrRegisterDialect(); + pir::IrContext *ctx = pir::IrContext::Instance(); + pir::Dialect *builtin_dialect = + ctx->GetOrRegisterDialect(); builtin_dialect->RegisterOp(); - ir::Dialect *paddle_dialect = - ctx->GetOrRegisterDialect(); + pir::Dialect *paddle_dialect = + ctx->GetOrRegisterDialect(); // (2) Create an empty program object - ir::Program program(ctx); + pir::Program program(ctx); // (3) Create a float32 DenseTensor Parameter and save into Program - ir::Type fp32_dtype = ir::Float32Type::get(ctx); + pir::Type fp32_dtype = pir::Float32Type::get(ctx); phi::DDim dims = {2, 2}; phi::DataLayout data_layout = phi::DataLayout::NCHW; phi::LoD lod = {{0, 1, 2}}; size_t offset = 0; - ir::Type dense_tensor_dtype = paddle::dialect::DenseTensorType::get( + pir::Type dense_tensor_dtype = paddle::dialect::DenseTensorType::get( ctx, fp32_dtype, dims, data_layout, lod, offset); std::vector data_a = {1, 2, 3, 4}; - std::unique_ptr parameter_a = - std::make_unique(reinterpret_cast(data_a.data()), - 4 * sizeof(float), - dense_tensor_dtype); + std::unique_ptr parameter_a = + std::make_unique(reinterpret_cast(data_a.data()), + 4 * sizeof(float), + dense_tensor_dtype); program.SetParameter("a", std::move(parameter_a)); EXPECT_EQ(program.parameters_num() == 1, true); std::vector data_b = {5, 6, 7, 8}; - std::unique_ptr parameter_b = - std::make_unique(reinterpret_cast(data_b.data()), - 4 * sizeof(float), - dense_tensor_dtype); + std::unique_ptr parameter_b = + std::make_unique(reinterpret_cast(data_b.data()), + 4 * sizeof(float), + dense_tensor_dtype); program.SetParameter("b", std::move(parameter_b)); EXPECT_EQ(program.parameters_num() == 2, true); // (4) Def a = GetParameterOp("a"), and create DenseTensor for a. 
- ir::Builder builder(ctx, program.block()); - auto op1 = builder.Build("a", dense_tensor_dtype); + pir::Builder builder(ctx, program.block()); + auto op1 = builder.Build("a", dense_tensor_dtype); EXPECT_EQ(&program, op1->GetParentProgram()); EXPECT_EQ(op1->result(0).type().dialect().id(), paddle_dialect->id()); @@ -128,7 +128,7 @@ TEST(program_test, program) { } // (5) Def b = GetParameterOp("b"), and create DenseTensor for b. - auto op2 = builder.Build("b", dense_tensor_dtype); + auto op2 = builder.Build("b", dense_tensor_dtype); EXPECT_EQ(op2->result(0).type().dialect().id(), paddle_dialect->id()); Interface *b_interface = @@ -175,14 +175,14 @@ TEST(program_test, program) { EXPECT_EQ(std::get<0>(interface.GetOpInfo())[0].name == "x", true); // (8) Def SetParameterOp(c, "c") - auto op4 = builder.Build(op3->result(0), "c"); + auto op4 = builder.Build(op3->result(0), "c"); EXPECT_EQ(op4->operand(0).type().dialect().id(), paddle_dialect->id()); Interface *c_interface = op4->operand(0).type().dialect().GetRegisteredInterface(); - // ir::Parameter *parameter_c = + // pir::Parameter *parameter_c = // c_interface->VariableToParameter(variable_c.get()); - std::unique_ptr parameter_c = + std::unique_ptr parameter_c = c_interface->VariableToParameter(variable_c.get()); EXPECT_EQ(parameter_c->type(), dense_tensor_dtype); for (int64_t i = 0; i < dst_tensor->numel(); i++) { @@ -206,55 +206,55 @@ TEST(program_test, program) { TEST(program_test, slice_combine_test) { // (1) Init environment. - ir::IrContext *ctx = ir::IrContext::Instance(); - ctx->GetOrRegisterDialect(); + pir::IrContext *ctx = pir::IrContext::Instance(); + ctx->GetOrRegisterDialect(); // (2) Create an empty program object - ir::Program program(ctx); - // ir::Program *program = new ir::Program(); + pir::Program program(ctx); + // pir::Program *program = new pir::Program(); EXPECT_EQ(program.block()->empty(), true); // (3) Create a float32 DenseTensor Parameter and save into Program - ir::Type fp32_dtype = ir::Float32Type::get(ctx); + pir::Type fp32_dtype = pir::Float32Type::get(ctx); // (4) Def a = GetParameterOp("a") - std::string op1_name = ir::GetParameterOp::name(); - ir::OpInfo op1_info = ctx->GetRegisteredOpInfo(op1_name); - std::unordered_map op1_attribute{ - {"parameter_name", ir::StrAttribute::get(ctx, "a")}}; - ir::Operation *op1 = - ir::Operation::Create({}, op1_attribute, {fp32_dtype}, op1_info); + std::string op1_name = pir::GetParameterOp::name(); + pir::OpInfo op1_info = ctx->GetRegisteredOpInfo(op1_name); + std::unordered_map op1_attribute{ + {"parameter_name", pir::StrAttribute::get(ctx, "a")}}; + pir::Operation *op1 = + pir::Operation::Create({}, op1_attribute, {fp32_dtype}, op1_info); program.block()->push_back(op1); // (5) Def b = Constant("b") - std::string op2_name = std::string(ir::ConstantOp::name()); - ir::OpInfo op2_info = ctx->GetRegisteredOpInfo(op2_name); - ir::AttributeMap attr_map; - attr_map.insert(std::pair( - "value", ir::FloatAttribute::get(ctx, 2.0))); - ir::Operation *op2 = - ir::Operation::Create({}, attr_map, {fp32_dtype}, op2_info); + std::string op2_name = std::string(pir::ConstantOp::name()); + pir::OpInfo op2_info = ctx->GetRegisteredOpInfo(op2_name); + pir::AttributeMap attr_map; + attr_map.insert(std::pair( + "value", pir::FloatAttribute::get(ctx, 2.0))); + pir::Operation *op2 = + pir::Operation::Create({}, attr_map, {fp32_dtype}, op2_info); program.block()->push_back(op2); // (6) Def combine_op = CombineOp("a", "b") - std::string combine_op_name = std::string(ir::CombineOp::name()); - 
ir::OpInfo combine_op_info = ctx->GetRegisteredOpInfo(combine_op_name); - ir::Type output_type = - ir::VectorType::get(ctx, std::vector({fp32_dtype, fp32_dtype})); - ir::Operation *combine_op = ir::Operation::Create( + std::string combine_op_name = std::string(pir::CombineOp::name()); + pir::OpInfo combine_op_info = ctx->GetRegisteredOpInfo(combine_op_name); + pir::Type output_type = pir::VectorType::get( + ctx, std::vector({fp32_dtype, fp32_dtype})); + pir::Operation *combine_op = pir::Operation::Create( {op1->result(0), op2->result(0)}, {}, {output_type}, combine_op_info); - ir::CombineOp combine_op_type = combine_op->dyn_cast(); + pir::CombineOp combine_op_type = combine_op->dyn_cast(); EXPECT_TRUE(combine_op_type.out()); program.block()->push_back(combine_op); // (7) Def slice_op = SliceOp(combine_op, 0) - std::string slice_op_name = std::string(ir::SliceOp::name()); - ir::OpInfo slice_op_info = ctx->GetRegisteredOpInfo(slice_op_name); - ir::Attribute index_attr = ir::Int32Attribute::get(ctx, 0); - ir::Operation *slice_op = ir::Operation::Create({combine_op->result(0)}, - {{"index", index_attr}}, - {fp32_dtype}, - slice_op_info); + std::string slice_op_name = std::string(pir::SliceOp::name()); + pir::OpInfo slice_op_info = ctx->GetRegisteredOpInfo(slice_op_name); + pir::Attribute index_attr = pir::Int32Attribute::get(ctx, 0); + pir::Operation *slice_op = pir::Operation::Create({combine_op->result(0)}, + {{"index", index_attr}}, + {fp32_dtype}, + slice_op_info); program.block()->push_back(slice_op); // (8) Traverse Program @@ -262,14 +262,14 @@ TEST(program_test, slice_combine_test) { } TEST(program_test, builder) { - ir::IrContext *ctx = ir::IrContext::Instance(); - ctx->GetOrRegisterDialect(); - ir::Program program(ctx); - ir::Builder builder = ir::Builder(ctx, program.block()); + pir::IrContext *ctx = pir::IrContext::Instance(); + ctx->GetOrRegisterDialect(); + pir::Program program(ctx); + pir::Builder builder = pir::Builder(ctx, program.block()); paddle::dialect::FullOp full_op = builder.Build( std::vector{2, 2}, 1.5, phi::DataType::FLOAT32, phi::CPUPlace()); - ir::Type full_op_output = full_op->result(0).type(); + pir::Type full_op_output = full_op->result(0).type(); EXPECT_EQ(program.block()->size(), 1u); EXPECT_EQ(program.block()->back(), full_op.operation()); EXPECT_EQ(full_op.num_operands(), 0u); @@ -284,8 +284,8 @@ TEST(program_test, builder) { EXPECT_EQ(dim == 2, true); } - ir::ConstantOp constant = builder.Build( - ir::Int32Attribute::get(ctx, 2), ir::Int32Type::get(ctx)); + pir::ConstantOp constant = builder.Build( + pir::Int32Attribute::get(ctx, 2), pir::Int32Type::get(ctx)); EXPECT_EQ(program.block()->size() == 2, true); - EXPECT_EQ(constant.value().dyn_cast().data() == 2, true); + EXPECT_EQ(constant.value().dyn_cast().data() == 2, true); } diff --git a/test/cpp/ir/core/ir_region_test.cc b/test/cpp/pir/core/ir_region_test.cc similarity index 50% rename from test/cpp/ir/core/ir_region_test.cc rename to test/cpp/pir/core/ir_region_test.cc index c14e9da420901..ea829ea4b7639 100644 --- a/test/cpp/ir/core/ir_region_test.cc +++ b/test/cpp/pir/core/ir_region_test.cc @@ -14,41 +14,43 @@ #include -#include "paddle/ir/core/block.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/builtin_dialect.h" -#include "paddle/ir/core/builtin_op.h" -#include "paddle/ir/core/builtin_type.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/program.h" -#include "paddle/ir/core/utils.h" +#include "paddle/pir/core/block.h" +#include 
"paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/builtin_dialect.h" +#include "paddle/pir/core/builtin_op.h" +#include "paddle/pir/core/builtin_type.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/program.h" +#include "paddle/pir/core/utils.h" TEST(region, erase_op_test) { // (1) Init environment. - ir::IrContext* ctx = ir::IrContext::Instance(); + pir::IrContext* ctx = pir::IrContext::Instance(); // (2) Create an empty program object - ir::Program program(ctx); - ir::Builder builder = ir::Builder(ctx, program.block()); + pir::Program program(ctx); + pir::Builder builder = pir::Builder(ctx, program.block()); // (3) Def a = ConstantOp("2.0"); b = ConstantOp("2.0"); - ir::FloatAttribute fp_attr = builder.float_attr(2.0f); - ir::Float32Type fp32_type = builder.float32_type(); - ir::OpResult a = builder.Build(fp_attr, fp32_type)->result(0); - ir::OpResult b = builder.Build(fp_attr, fp32_type)->result(0); + pir::FloatAttribute fp_attr = builder.float_attr(2.0f); + pir::Float32Type fp32_type = builder.float32_type(); + pir::OpResult a = + builder.Build(fp_attr, fp32_type)->result(0); + pir::OpResult b = + builder.Build(fp_attr, fp32_type)->result(0); // (6) Def c = CombineOp(a, b) - builder.Build(std::vector{a, b}); + builder.Build(std::vector{a, b}); - // Test ir::Block::erase - ir::Block* block = program.block(); + // Test pir::Block::erase + pir::Block* block = program.block(); EXPECT_EQ(block->size(), 3u); block->erase(*(block->back())); EXPECT_EQ(block->size(), 2u); - // Test ir::Region::erase - ir::Region& region = program.module_op()->region(0); - region.push_back(new ir::Block()); + // Test pir::Region::erase + pir::Region& region = program.module_op()->region(0); + region.push_back(new pir::Block()); EXPECT_EQ(region.size(), 2u); region.erase(region.begin()); EXPECT_EQ(region.size(), 1u); diff --git a/test/cpp/ir/core/ir_type_converter_test.cc b/test/cpp/pir/core/ir_type_converter_test.cc similarity index 63% rename from test/cpp/ir/core/ir_type_converter_test.cc rename to test/cpp/pir/core/ir_type_converter_test.cc index 4370d79d5b6d5..d5c77d7550d2b 100644 --- a/test/cpp/ir/core/ir_type_converter_test.cc +++ b/test/cpp/pir/core/ir_type_converter_test.cc @@ -18,19 +18,19 @@ #include "paddle/fluid/framework/convert_utils.h" #include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/framework/var_desc.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/utils/utils.h" #include "paddle/fluid/ir_adaptor/translator/type_translator.h" -#include "paddle/ir/core/builtin_dialect.h" -#include "paddle/ir/core/builtin_type.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/type.h" +#include "paddle/fluid/pir/dialect/operator/utils/utils.h" +#include "paddle/pir/core/builtin_dialect.h" +#include "paddle/pir/core/builtin_type.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/type.h" template void test_parameterless_type() { - ir::IrContext* ctx = ir::IrContext::Instance(); - ctx->GetOrRegisterDialect(); + pir::IrContext* ctx = pir::IrContext::Instance(); + ctx->GetOrRegisterDialect(); - ir::Type type = IR_TYPE::get(ctx); + pir::Type type = IR_TYPE::get(ctx); std::stringstream ss; ss << type; EXPECT_GT(ss.str().size(), 0u); @@ -41,7 +41,7 @@ void test_parameterless_type() { auto& type_translator = paddle::translator::TypeTranslator::instance(); paddle::framework::VarDesc empty_var_desc("empty"); auto proto_type = paddle::framework::TransToProtoVarType(phi_type); - ir::Type final_type = 
type_translator[proto_type](ctx, empty_var_desc); + pir::Type final_type = type_translator[proto_type](ctx, empty_var_desc); EXPECT_EQ(type, final_type); } @@ -52,25 +52,25 @@ void test_parameterless_type_helper() { } TEST(TypeConverterTest, paramterless_type) { - test_parameterless_type_helper(); + test_parameterless_type_helper(); } void test_index_type() { - ir::IrContext* ctx = ir::IrContext::Instance(); - ctx->GetOrRegisterDialect(); + pir::IrContext* ctx = pir::IrContext::Instance(); + ctx->GetOrRegisterDialect(); - ir::Type type = ir::IndexType::get(ctx); + pir::Type type = pir::IndexType::get(ctx); std::stringstream ss; ss << type; EXPECT_GT(ss.str().size(), 0u); @@ -80,7 +80,7 @@ void test_index_type() { auto& type_translator = paddle::translator::TypeTranslator::instance(); paddle::framework::VarDesc empty_var_desc("empty"); auto proto_type = paddle::framework::TransToProtoVarType(phi_type); - ir::Type final_type = type_translator[proto_type](ctx, empty_var_desc); + pir::Type final_type = type_translator[proto_type](ctx, empty_var_desc); EXPECT_EQ(paddle::dialect::TransToIrDataType(phi_type), final_type); } diff --git a/test/cpp/ir/core/ir_value_test.cc b/test/cpp/pir/core/ir_value_test.cc similarity index 51% rename from test/cpp/ir/core/ir_value_test.cc rename to test/cpp/pir/core/ir_value_test.cc index fb7fcfd6fdda1..cd8a299e59df5 100644 --- a/test/cpp/ir/core/ir_value_test.cc +++ b/test/cpp/pir/core/ir_value_test.cc @@ -14,73 +14,73 @@ #include -#include "paddle/ir/core/attribute.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/builtin_type.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/operation.h" +#include "paddle/pir/core/attribute.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/builtin_type.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/operation.h" // This unittest is used to test the construction interfaces of value class and // operation. The constructed test scenario is: a = OP1(); b = OP2(); c = OP3(a, // b); d, e, f, g, h, i, j = OP4(a, c); -ir::AttributeMap CreateAttributeMap(std::string attribute_name, - std::string attribute) { - ir::IrContext *ctx = ir::IrContext::Instance(); - ir::Attribute attr_value = ir::StrAttribute::get(ctx, attribute); - ir::AttributeMap attr_map; +pir::AttributeMap CreateAttributeMap(std::string attribute_name, + std::string attribute) { + pir::IrContext *ctx = pir::IrContext::Instance(); + pir::Attribute attr_value = pir::StrAttribute::get(ctx, attribute); + pir::AttributeMap attr_map; attr_map.insert( - std::pair(attribute_name, attr_value)); + std::pair(attribute_name, attr_value)); return attr_map; } TEST(value_test, value_test) { - ir::IrContext *ctx = ir::IrContext::Instance(); + pir::IrContext *ctx = pir::IrContext::Instance(); // 1. Construct OP1: a = OP1() - std::vector op1_inputs = {}; - std::vector op1_output_types = {ir::Float32Type::get(ctx)}; - ir::Operation *op1 = - ir::Operation::Create(op1_inputs, - CreateAttributeMap("op1_name", "op1_attr"), - op1_output_types, - ir::OpInfo()); + std::vector op1_inputs = {}; + std::vector op1_output_types = {pir::Float32Type::get(ctx)}; + pir::Operation *op1 = + pir::Operation::Create(op1_inputs, + CreateAttributeMap("op1_name", "op1_attr"), + op1_output_types, + pir::OpInfo()); op1->Print(std::cout); - ir::OpResult a = op1->result(0); + pir::OpResult a = op1->result(0); EXPECT_TRUE(a.use_empty()); // 2. 
Construct OP2: b = OP2(); - std::vector op2_inputs = {}; - std::vector op2_output_types = {ir::Float32Type::get(ctx)}; - ir::Operation *op2 = - ir::Operation::Create(op2_inputs, - CreateAttributeMap("op2_name", "op2_attr"), - op2_output_types, - ir::OpInfo()); + std::vector op2_inputs = {}; + std::vector op2_output_types = {pir::Float32Type::get(ctx)}; + pir::Operation *op2 = + pir::Operation::Create(op2_inputs, + CreateAttributeMap("op2_name", "op2_attr"), + op2_output_types, + pir::OpInfo()); op2->Print(std::cout); - ir::OpResult b = op2->result(0); + pir::OpResult b = op2->result(0); EXPECT_TRUE(b.use_empty()); // 3. Construct OP3: c = OP3(a, b); - std::vector op3_inputs{a, b}; - std::vector op3_output_types = {ir::Float32Type::get(ctx)}; - ir::Operation *op3 = - ir::Operation::Create(op3_inputs, - CreateAttributeMap("op3_name", "op3_attr"), - op3_output_types, - ir::OpInfo()); + std::vector op3_inputs{a, b}; + std::vector op3_output_types = {pir::Float32Type::get(ctx)}; + pir::Operation *op3 = + pir::Operation::Create(op3_inputs, + CreateAttributeMap("op3_name", "op3_attr"), + op3_output_types, + pir::OpInfo()); EXPECT_TRUE(op1->result(0).HasOneUse()); EXPECT_TRUE(op2->result(0).HasOneUse()); op3->Print(std::cout); - ir::OpResult c = op3->result(0); + pir::OpResult c = op3->result(0); // 4. Construct OP4: d, e, f, g, h, i, j = OP4(a, c); - std::vector op4_inputs = {a, c}; - std::vector op4_output_types; + std::vector op4_inputs = {a, c}; + std::vector op4_output_types; for (size_t i = 0; i < 7; i++) { - op4_output_types.push_back(ir::Float32Type::get(ctx)); + op4_output_types.push_back(pir::Float32Type::get(ctx)); } - ir::Operation *op4 = - ir::Operation::Create(op4_inputs, - CreateAttributeMap("op4_name", "op4_attr"), - op4_output_types, - ir::OpInfo()); + pir::Operation *op4 = + pir::Operation::Create(op4_inputs, + CreateAttributeMap("op4_name", "op4_attr"), + op4_output_types, + pir::OpInfo()); op4->Print(std::cout); // Test 1: @@ -90,17 +90,17 @@ TEST(value_test, value_test) { EXPECT_EQ(op4->result(6).GetDefiningOp(), op4); // Test 2: op1_first_output -> op4_first_input - ir::OpResult op1_first_output = op1->result(0); - ir::OpOperand op4_first_input = op4->operand(0); + pir::OpResult op1_first_output = op1->result(0); + pir::OpOperand op4_first_input = op4->operand(0); EXPECT_EQ(op1_first_output.first_use(), op4_first_input); - ir::OpOperand op3_first_input = op3->operand(0); + pir::OpOperand op3_first_input = op3->operand(0); EXPECT_EQ(op4_first_input.next_use(), op3_first_input); EXPECT_EQ(op3_first_input.next_use(), nullptr); // Test 3: Value iterator - using my_iterator = ir::Value::UseIterator; + using my_iterator = pir::Value::UseIterator; my_iterator iter = op1->result(0).use_begin(); EXPECT_EQ(iter.owner(), op4); ++iter; @@ -109,7 +109,7 @@ TEST(value_test, value_test) { // Test 4: Value Replace Use // a = OP1(); b = OP2(); c = OP3(a, b); d, e, f, g, h, i, j = OP4(a, c); // - c.ReplaceUsesWithIf(b, [](ir::OpOperand) { return true; }); + c.ReplaceUsesWithIf(b, [](pir::OpOperand) { return true; }); EXPECT_EQ(op4->operand_source(1), b); EXPECT_TRUE(c.use_empty()); diff --git a/test/cpp/ir/core/op_info_test.cc b/test/cpp/pir/core/op_info_test.cc similarity index 51% rename from test/cpp/ir/core/op_info_test.cc rename to test/cpp/pir/core/op_info_test.cc index 3e91f357daf6a..d02566237876a 100644 --- a/test/cpp/ir/core/op_info_test.cc +++ b/test/cpp/pir/core/op_info_test.cc @@ -14,33 +14,33 @@ #include -#include "paddle/ir/core/block.h" -#include "paddle/ir/core/builder.h" 
-#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/builtin_op.h" -#include "paddle/ir/core/builtin_type.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/program.h" -#include "paddle/ir/core/verify.h" +#include "paddle/pir/core/block.h" +#include "paddle/pir/core/builder.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/builtin_op.h" +#include "paddle/pir/core/builtin_type.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/program.h" +#include "paddle/pir/core/verify.h" TEST(ir_op_info_test, op_op_info_test) { - ir::IrContext* context = ir::IrContext::Instance(); - ir::Program program(context); + pir::IrContext* context = pir::IrContext::Instance(); + pir::Program program(context); - ir::Block* block = program.block(); - ir::Builder builder(context, block); - builder.Build(ir::Int32Attribute::get(context, 5), - ir::Int32Type::get(context)); + pir::Block* block = program.block(); + pir::Builder builder(context, block); + builder.Build(pir::Int32Attribute::get(context, 5), + pir::Int32Type::get(context)); - ir::Operation* op = block->back(); + pir::Operation* op = block->back(); - EXPECT_EQ(block->end(), ++ir::Block::iterator(*op)); + EXPECT_EQ(block->end(), ++pir::Block::iterator(*op)); auto& info_map = context->registered_op_info_map(); EXPECT_FALSE(info_map.empty()); void* info_1 = op->info().AsOpaquePointer(); - auto info_2 = ir::OpInfo::RecoverFromOpaquePointer(info_1); + auto info_2 = pir::OpInfo::RecoverFromOpaquePointer(info_1); EXPECT_EQ(op->info(), info_2); - ir::Verify(program.module_op()); + pir::Verify(program.module_op()); } diff --git a/test/cpp/ir/core/op_yaml_info_parser_test.cc b/test/cpp/pir/core/op_yaml_info_parser_test.cc similarity index 61% rename from test/cpp/ir/core/op_yaml_info_parser_test.cc rename to test/cpp/pir/core/op_yaml_info_parser_test.cc index 3abdf0a72cd30..d269400fb13e3 100644 --- a/test/cpp/ir/core/op_yaml_info_parser_test.cc +++ b/test/cpp/pir/core/op_yaml_info_parser_test.cc @@ -14,30 +14,30 @@ #include -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_attribute.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/utils/op_yaml_info_parser.h" -#include "paddle/ir/core/builder.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/program.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_attribute.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_dialect.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_type.h" +#include "paddle/fluid/pir/dialect/operator/utils/op_yaml_info_parser.h" +#include "paddle/pir/core/builder.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/program.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/interface/op_yaml_info.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/builtin_dialect.h" -#include "paddle/ir/core/builtin_op.h" +#include "paddle/fluid/pir/dialect/operator/interface/op_yaml_info.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/builtin_dialect.h" +#include "paddle/pir/core/builtin_op.h" -#include "paddle/ir/core/utils.h" +#include "paddle/pir/core/utils.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_op.h" +#include "paddle/fluid/pir/dialect/operator/ir/pd_op.h" TEST(ir_op_info_test, op_op_info_test) { - ir::IrContext* ctx = ir::IrContext::Instance(); - ir::Program 
program(ctx); + pir::IrContext* ctx = pir::IrContext::Instance(); + pir::Program program(ctx); - ctx->GetOrRegisterDialect(); + ctx->GetOrRegisterDialect(); - ir::Builder builder(ctx, program.block()); + pir::Builder builder(ctx, program.block()); auto uniform1 = builder.Build(std::vector{2, 2}, @@ -55,7 +55,7 @@ TEST(ir_op_info_test, op_op_info_test) { paddle::dialect::OpYamlInfoParser op_yaml_info_parser(op_info_res); - EXPECT_EQ((op_yaml_info_parser.AttrTypeName("seed") == "ir::Int32Attribute"), + EXPECT_EQ((op_yaml_info_parser.AttrTypeName("seed") == "pir::Int32Attribute"), true); EXPECT_EQ(op_yaml_info_parser.IsTensorAttribute(0), true); diff --git a/test/cpp/ir/core/program_translator_test.cc b/test/cpp/pir/core/program_translator_test.cc similarity index 75% rename from test/cpp/ir/core/program_translator_test.cc rename to test/cpp/pir/core/program_translator_test.cc index 0441860ed1d7c..114ad1961a8b2 100644 --- a/test/cpp/ir/core/program_translator_test.cc +++ b/test/cpp/pir/core/program_translator_test.cc @@ -25,17 +25,17 @@ #include "paddle/fluid/framework/framework.pb.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/framework/program_desc.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.h" #include "paddle/fluid/ir_adaptor/translator/translate.h" #include "paddle/fluid/ir_adaptor/translator/utils.h" -#include "paddle/ir/core/builtin_dialect.h" -#include "paddle/ir/core/dialect.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/ir_parser.h" -#include "paddle/ir/core/ir_printer.h" -#include "paddle/ir/core/program.h" - -using PaddleDialect = paddle::dialect::PaddleDialect; +#include "paddle/fluid/pir/dialect/operator/ir/op_dialect.h" +#include "paddle/pir/core/builtin_dialect.h" +#include "paddle/pir/core/dialect.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/ir_parser.h" +#include "paddle/pir/core/ir_printer.h" +#include "paddle/pir/core/program.h" + +using OperatorDialect = paddle::dialect::OperatorDialect; using ProgramDesc = paddle::framework::ProgramDesc; using BlockDesc = paddle::framework::BlockDesc; using OpDesc = paddle::framework::OpDesc; @@ -53,13 +53,13 @@ ProgramDesc load_from_file(const std::string &file_name) { return ProgramDesc(buffer); } -TEST(PaddleDialectTest, MainProgram) { +TEST(OperatorDialectTest, MainProgram) { auto p = load_from_file("resnet50_main.prog"); EXPECT_EQ(p.Size(), 1u); - ir::IrContext *ctx = ir::IrContext::Instance(); - ctx->GetOrRegisterDialect(); - ctx->GetOrRegisterDialect(); + pir::IrContext *ctx = pir::IrContext::Instance(); + ctx->GetOrRegisterDialect(); + ctx->GetOrRegisterDialect(); auto program = paddle::TranslateLegacyProgramToProgram(p); std::stringstream ss; @@ -72,13 +72,13 @@ TEST(PaddleDialectTest, MainProgram) { EXPECT_GT(ss.str().size(), 0u); } -TEST(PaddleDialectTest, StartupProgram) { +TEST(OperatorDialectTest, StartupProgram) { auto p = load_from_file("resnet50_startup.prog"); EXPECT_EQ(p.Size(), 1u); - ir::IrContext *ctx = ir::IrContext::Instance(); - ctx->GetOrRegisterDialect(); - ctx->GetOrRegisterDialect(); + pir::IrContext *ctx = pir::IrContext::Instance(); + ctx->GetOrRegisterDialect(); + ctx->GetOrRegisterDialect(); auto program = paddle::TranslateLegacyProgramToProgram(p); size_t op_size = program->block()->size(); @@ -94,7 +94,7 @@ TEST(PaddleDialectTest, StartupProgram) { TEST(RegisterInfoTest, MainProgram) { auto p = load_from_file("resnet50_startup.prog"); - ir::IrContext *ctx = ir::IrContext::Instance(); + pir::IrContext *ctx = 
pir::IrContext::Instance(); auto unregistered_ops = paddle::translator::CheckUnregisteredOperation(ctx, p); @@ -113,14 +113,14 @@ TEST(RegisterInfoTest, MainProgram) { TEST(IrParserTest, MainProgram) { auto p = load_from_file("resnet50_main.prog"); EXPECT_EQ(p.Size(), 1u); - ir::IrContext *ctx = ir::IrContext::Instance(); - ctx->GetOrRegisterDialect(); - ctx->GetOrRegisterDialect(); + pir::IrContext *ctx = pir::IrContext::Instance(); + ctx->GetOrRegisterDialect(); + ctx->GetOrRegisterDialect(); auto program = paddle::TranslateLegacyProgramToProgram(p); std::stringstream ss; program->Print(ss); - std::unique_ptr parser_program = ir::Program::Parse(ss, ctx); + std::unique_ptr parser_program = pir::Program::Parse(ss, ctx); std::stringstream ssp; parser_program->Print(ssp); @@ -130,14 +130,14 @@ TEST(IrParserTest, MainProgram) { TEST(IrParserTest, StartupProgram) { auto p = load_from_file("resnet50_startup.prog"); EXPECT_EQ(p.Size(), 1u); - ir::IrContext *ctx = ir::IrContext::Instance(); - ctx->GetOrRegisterDialect(); - ctx->GetOrRegisterDialect(); + pir::IrContext *ctx = pir::IrContext::Instance(); + ctx->GetOrRegisterDialect(); + ctx->GetOrRegisterDialect(); auto program = paddle::TranslateLegacyProgramToProgram(p); std::stringstream ss; program->Print(ss); - std::unique_ptr parser_program = ir::Program::Parse(ss, ctx); + std::unique_ptr parser_program = pir::Program::Parse(ss, ctx); std::stringstream ssp; parser_program->Print(ssp); diff --git a/test/cpp/ir/core/scalar_attribute_test.cc b/test/cpp/pir/core/scalar_attribute_test.cc similarity index 54% rename from test/cpp/ir/core/scalar_attribute_test.cc rename to test/cpp/pir/core/scalar_attribute_test.cc index 649d9cd0cd3d2..e15ebfad84585 100644 --- a/test/cpp/ir/core/scalar_attribute_test.cc +++ b/test/cpp/pir/core/scalar_attribute_test.cc @@ -14,42 +14,42 @@ #include -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_attribute.h" -#include "paddle/ir/core/attribute.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/builtin_dialect.h" -#include "paddle/ir/core/dialect.h" -#include "paddle/ir/core/ir_context.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_attribute.h" +#include "paddle/pir/core/attribute.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/builtin_dialect.h" +#include "paddle/pir/core/dialect.h" +#include "paddle/pir/core/ir_context.h" using ScalarAttribute = paddle::dialect::ScalarAttribute; TEST(ScalarTest, base) { - ir::IrContext *ctx = ir::IrContext::Instance(); + pir::IrContext *ctx = pir::IrContext::Instance(); - ir::Attribute bool_scalar = ir::BoolAttribute::get(ctx, false); + pir::Attribute bool_scalar = pir::BoolAttribute::get(ctx, false); EXPECT_TRUE(bool_scalar.isa()); - EXPECT_TRUE(bool_scalar.isa()); - ir::BoolAttribute pure_bool = bool_scalar.dyn_cast(); + EXPECT_TRUE(bool_scalar.isa()); + pir::BoolAttribute pure_bool = bool_scalar.dyn_cast(); EXPECT_TRUE(pure_bool.isa()); ScalarAttribute scalar_from_bool = bool_scalar.dyn_cast(); - EXPECT_TRUE(scalar_from_bool.isa()); - EXPECT_NO_THROW(scalar_from_bool.dyn_cast()); + EXPECT_TRUE(scalar_from_bool.isa()); + EXPECT_NO_THROW(scalar_from_bool.dyn_cast()); } TEST(ScalarTest, test_classof) { - ir::IrContext *ctx = ir::IrContext::Instance(); - ir::Attribute bool_scalar = ir::BoolAttribute::get(ctx, false); + pir::IrContext *ctx = pir::IrContext::Instance(); + pir::Attribute bool_scalar = pir::BoolAttribute::get(ctx, false); EXPECT_TRUE(bool_scalar.isa()); - ir::Attribute float_scalar = 
ir::FloatAttribute::get(ctx, 1.0f); + pir::Attribute float_scalar = pir::FloatAttribute::get(ctx, 1.0f); EXPECT_TRUE(float_scalar.isa()); - ir::Attribute double_scalar = ir::DoubleAttribute::get(ctx, 1.0); + pir::Attribute double_scalar = pir::DoubleAttribute::get(ctx, 1.0); EXPECT_TRUE(double_scalar.isa()); - ir::Attribute int32_scalar = ir::Int32Attribute::get(ctx, 1); + pir::Attribute int32_scalar = pir::Int32Attribute::get(ctx, 1); EXPECT_TRUE(int32_scalar.isa()); - ir::Attribute int64_scalar = ir::Int64Attribute::get(ctx, 1l); + pir::Attribute int64_scalar = pir::Int64Attribute::get(ctx, 1l); EXPECT_TRUE(int64_scalar.isa()); } diff --git a/test/cpp/ir/core/type_test.cc b/test/cpp/pir/core/type_test.cc similarity index 51% rename from test/cpp/ir/core/type_test.cc rename to test/cpp/pir/core/type_test.cc index 2def5aa3d1741..45aca1d784202 100644 --- a/test/cpp/ir/core/type_test.cc +++ b/test/cpp/pir/core/type_test.cc @@ -15,16 +15,16 @@ #include #include -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h" -#include "paddle/ir/core/builtin_dialect.h" -#include "paddle/ir/core/builtin_type.h" -#include "paddle/ir/core/dialect.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/type.h" -#include "paddle/ir/core/type_base.h" -#include "paddle/ir/core/type_name.h" -#include "paddle/ir/core/utils.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_dialect.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_type.h" +#include "paddle/pir/core/builtin_dialect.h" +#include "paddle/pir/core/builtin_type.h" +#include "paddle/pir/core/dialect.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/type.h" +#include "paddle/pir/core/type_base.h" +#include "paddle/pir/core/type_name.h" +#include "paddle/pir/core/utils.h" class TypeA {}; IR_DECLARE_EXPLICIT_TYPE_ID(TypeA) @@ -37,14 +37,14 @@ IR_DEFINE_EXPLICIT_TYPE_ID(TypeB) TEST(type_test, type_id) { // Test 1: Test construct TypeId by TypeId::get() and overloaded operator== // method. - ir::TypeId a_id = ir::TypeId::get(); - ir::TypeId a_other_id = ir::TypeId::get(); - ir::TypeId b_id = ir::TypeId::get(); + pir::TypeId a_id = pir::TypeId::get(); + pir::TypeId a_other_id = pir::TypeId::get(); + pir::TypeId b_id = pir::TypeId::get(); EXPECT_EQ(a_id, a_other_id); EXPECT_NE(a_id, b_id); // Test 2: Test the hash function of TypeId. - std::unordered_map type_id_register; + std::unordered_map type_id_register; type_id_register.emplace(a_id, &a_id); type_id_register.emplace(b_id, &b_id); for (auto kv : type_id_register) { @@ -53,9 +53,9 @@ TEST(type_test, type_id) { } // Define a FakeDialect without registering any types. -struct FakeDialect : ir::Dialect { - explicit FakeDialect(ir::IrContext *context) - : ir::Dialect(name(), context, ir::TypeId::get()) {} +struct FakeDialect : pir::Dialect { + explicit FakeDialect(pir::IrContext *context) + : pir::Dialect(name(), context, pir::TypeId::get()) {} static const char *name() { return "fake"; } }; IR_DECLARE_EXPLICIT_TYPE_ID(FakeDialect) @@ -63,114 +63,115 @@ IR_DEFINE_EXPLICIT_TYPE_ID(FakeDialect) TEST(type_test, type_base) { // Test 1: Test the function of IrContext to register Dialect. - ir::IrContext *ctx = ir::IrContext::Instance(); - ir::Dialect *fake_dialect = ctx->GetOrRegisterDialect(); + pir::IrContext *ctx = pir::IrContext::Instance(); + pir::Dialect *fake_dialect = ctx->GetOrRegisterDialect(); // Test 2: Test the get method of AbstractType. 
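For orientation before the hunk resumes: type_test.cc relies on pir::TypeId::get<T>() handing back one stable key per C++ type, which is what lets TypeId serve as the std::unordered_map key above. A standalone sketch of that contract; MyTag is a made-up type, and the type_id.h include path is an assumption based on the layout this patch introduces:

#include <cassert>

#include "paddle/pir/core/type_id.h"  // path assumed, see lead-in

struct MyTag {};
IR_DECLARE_EXPLICIT_TYPE_ID(MyTag)
IR_DEFINE_EXPLICIT_TYPE_ID(MyTag)

void TypeIdIsPerType() {
  pir::TypeId id1 = pir::TypeId::get<MyTag>();
  pir::TypeId id2 = pir::TypeId::get<MyTag>();
  assert(id1 == id2);  // one key per C++ type, hence hashable and comparable
}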
- ir::TypeId a_id = ir::TypeId::get(); - ir::AbstractType abstract_type_a = ir::AbstractType::get(a_id, *fake_dialect); + pir::TypeId a_id = pir::TypeId::get(); + pir::AbstractType abstract_type_a = + pir::AbstractType::get(a_id, *fake_dialect); EXPECT_EQ(abstract_type_a.type_id(), a_id); // Test 3: Test the constructor of TypeStorage. - ir::TypeStorage storage_a(&abstract_type_a); + pir::TypeStorage storage_a(&abstract_type_a); EXPECT_EQ(storage_a.abstract_type().type_id(), abstract_type_a.type_id()); } TEST(type_test, built_in_type) { // Test the interfaces of class Type: judgment, type_id, abstract_type, // classof. - ir::IrContext *ctx = ir::IrContext::Instance(); + pir::IrContext *ctx = pir::IrContext::Instance(); // Test 1: Test the parameterless built-in type of IrContext. - ir::Type bfp16_1 = ir::BFloat16Type::get(ctx); - ir::Type bfp16_2 = ir::BFloat16Type::get(ctx); + pir::Type bfp16_1 = pir::BFloat16Type::get(ctx); + pir::Type bfp16_2 = pir::BFloat16Type::get(ctx); EXPECT_EQ(bfp16_1, bfp16_2); EXPECT_EQ(bfp16_1.type_id(), bfp16_2.type_id()); EXPECT_EQ(&bfp16_1.abstract_type(), - &ir::AbstractType::lookup(bfp16_1.type_id(), ctx)); - EXPECT_EQ(ir::BFloat16Type::classof(bfp16_1), 1); + &pir::AbstractType::lookup(bfp16_1.type_id(), ctx)); + EXPECT_EQ(pir::BFloat16Type::classof(bfp16_1), 1); - ir::Type index_1 = ir::IndexType::get(ctx); - ir::Type index_2 = ir::IndexType::get(ctx); + pir::Type index_1 = pir::IndexType::get(ctx); + pir::Type index_2 = pir::IndexType::get(ctx); EXPECT_EQ(index_1, index_2); EXPECT_EQ(index_1.type_id(), index_2.type_id()); EXPECT_EQ(&index_1.abstract_type(), - &ir::AbstractType::lookup(index_1.type_id(), ctx)); - EXPECT_EQ(ir::IndexType::classof(index_1), 1); + &pir::AbstractType::lookup(index_1.type_id(), ctx)); + EXPECT_EQ(pir::IndexType::classof(index_1), 1); - ir::Type fp16_1 = ir::Float16Type::get(ctx); - ir::Type fp16_2 = ir::Float16Type::get(ctx); + pir::Type fp16_1 = pir::Float16Type::get(ctx); + pir::Type fp16_2 = pir::Float16Type::get(ctx); EXPECT_EQ(fp16_1, fp16_2); EXPECT_EQ(fp16_1.type_id(), fp16_2.type_id()); EXPECT_EQ(&fp16_1.abstract_type(), - &ir::AbstractType::lookup(fp16_1.type_id(), ctx)); - EXPECT_EQ(ir::Float16Type::classof(fp16_1), 1); + &pir::AbstractType::lookup(fp16_1.type_id(), ctx)); + EXPECT_EQ(pir::Float16Type::classof(fp16_1), 1); - ir::Type fp32_1 = ir::Float32Type::get(ctx); - ir::Type fp32_2 = ir::Float32Type::get(ctx); + pir::Type fp32_1 = pir::Float32Type::get(ctx); + pir::Type fp32_2 = pir::Float32Type::get(ctx); EXPECT_EQ(fp32_1, fp32_2); EXPECT_EQ(fp32_1.type_id(), fp32_2.type_id()); EXPECT_EQ(&fp32_1.abstract_type(), - &ir::AbstractType::lookup(fp32_1.type_id(), ctx)); - EXPECT_EQ(ir::Float32Type::classof(fp32_1), 1); + &pir::AbstractType::lookup(fp32_1.type_id(), ctx)); + EXPECT_EQ(pir::Float32Type::classof(fp32_1), 1); - ir::Type fp64_1 = ir::Float64Type::get(ctx); - ir::Type fp64_2 = ir::Float64Type::get(ctx); + pir::Type fp64_1 = pir::Float64Type::get(ctx); + pir::Type fp64_2 = pir::Float64Type::get(ctx); EXPECT_EQ(fp64_1, fp64_2); EXPECT_EQ(fp64_1.type_id(), fp64_2.type_id()); EXPECT_EQ(&fp64_1.abstract_type(), - &ir::AbstractType::lookup(fp64_1.type_id(), ctx)); - EXPECT_EQ(ir::Float64Type::classof(fp64_1), 1); + &pir::AbstractType::lookup(fp64_1.type_id(), ctx)); + EXPECT_EQ(pir::Float64Type::classof(fp64_1), 1); - ir::Type int16_1 = ir::Int16Type::get(ctx); - ir::Type int16_2 = ir::Int16Type::get(ctx); + pir::Type int16_1 = pir::Int16Type::get(ctx); + pir::Type int16_2 = pir::Int16Type::get(ctx); 
EXPECT_EQ(int16_1, int16_2); EXPECT_EQ(int16_1.type_id(), int16_2.type_id()); EXPECT_EQ(&int16_1.abstract_type(), - &ir::AbstractType::lookup(int16_1.type_id(), ctx)); - EXPECT_EQ(ir::Int16Type::classof(int16_1), 1); + &pir::AbstractType::lookup(int16_1.type_id(), ctx)); + EXPECT_EQ(pir::Int16Type::classof(int16_1), 1); - ir::Type int32_1 = ir::Int32Type::get(ctx); - ir::Type int32_2 = ir::Int32Type::get(ctx); + pir::Type int32_1 = pir::Int32Type::get(ctx); + pir::Type int32_2 = pir::Int32Type::get(ctx); EXPECT_EQ(int32_1, int32_2); EXPECT_EQ(int32_1.type_id(), int32_2.type_id()); EXPECT_EQ(&int32_1.abstract_type(), - &ir::AbstractType::lookup(int32_1.type_id(), ctx)); - EXPECT_EQ(ir::Int32Type::classof(int32_1), 1); + &pir::AbstractType::lookup(int32_1.type_id(), ctx)); + EXPECT_EQ(pir::Int32Type::classof(int32_1), 1); - ir::Type int64_1 = ir::Int64Type::get(ctx); - ir::Type int64_2 = ir::Int64Type::get(ctx); + pir::Type int64_1 = pir::Int64Type::get(ctx); + pir::Type int64_2 = pir::Int64Type::get(ctx); EXPECT_EQ(int64_1, int64_2); EXPECT_EQ(int64_1.type_id(), int64_2.type_id()); EXPECT_EQ(&int64_1.abstract_type(), - &ir::AbstractType::lookup(int64_1.type_id(), ctx)); - EXPECT_EQ(ir::Int64Type::classof(int64_1), 1); + &pir::AbstractType::lookup(int64_1.type_id(), ctx)); + EXPECT_EQ(pir::Int64Type::classof(int64_1), 1); // Test 2: Test isa and dyn_cast. - EXPECT_EQ(fp16_1.isa(), true); - EXPECT_EQ(fp16_1.isa(), false); - EXPECT_EQ(fp16_1.isa(), true); + EXPECT_EQ(fp16_1.isa(), true); + EXPECT_EQ(fp16_1.isa(), false); + EXPECT_EQ(fp16_1.isa(), true); // Test 3: Test VectorType - std::vector vec_type = {int32_1, int64_1}; - ir::Type vector_type = ir::VectorType::get(ctx, vec_type); - EXPECT_EQ(vector_type.isa(), true); - EXPECT_EQ(vector_type.dyn_cast().size() == 2, true); - EXPECT_EQ(vector_type.dyn_cast()[0].isa(), + std::vector vec_type = {int32_1, int64_1}; + pir::Type vector_type = pir::VectorType::get(ctx, vec_type); + EXPECT_EQ(vector_type.isa(), true); + EXPECT_EQ(vector_type.dyn_cast().size() == 2, true); + EXPECT_EQ(vector_type.dyn_cast()[0].isa(), true); - EXPECT_EQ(vector_type.dyn_cast()[1].isa(), + EXPECT_EQ(vector_type.dyn_cast()[1].isa(), true); } // Customize a parameterized TypeStorage IntegerTypeStorage. -struct IntegerTypeStorage : public ir::TypeStorage { +struct IntegerTypeStorage : public pir::TypeStorage { IntegerTypeStorage(unsigned width, unsigned signedness) : width_(width), signedness_(signedness) {} using ParamKey = std::pair; static std::size_t HashValue(const ParamKey &key) { - return ir::hash_combine(std::hash()(std::get<0>(key)), - std::hash()(std::get<1>(key))); + return pir::hash_combine(std::hash()(std::get<0>(key)), + std::hash()(std::get<1>(key))); } bool operator==(const ParamKey &key) const { @@ -189,7 +190,7 @@ struct IntegerTypeStorage : public ir::TypeStorage { // Customize a parameterized type: IntegerType, storage type is // IntegerTypeStorage. -class IntegerType : public ir::Type { +class IntegerType : public pir::Type { public: using Type::Type; DECLARE_TYPE_UTILITY_FUNCTOR(IntegerType, IntegerTypeStorage); @@ -198,9 +199,9 @@ IR_DECLARE_EXPLICIT_TYPE_ID(IntegerType) IR_DEFINE_EXPLICIT_TYPE_ID(IntegerType) // Customize a Dialect IntegerDialect, registration type of IntegerType. 
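For orientation: once the dialect defined just below registers IntegerType, the ParamKey/HashValue/operator== trio on IntegerTypeStorage drives uniquing, so equal parameters must come back as the identical cached handle. A sketch of that contract, mirroring TEST(type_test, custom_type_dialect) further down (illustrative only; the helper name is made up):

#include <cassert>

void IntegerTypeIsUniqued(pir::IrContext *ctx) {
  ctx->GetOrRegisterDialect<IntegerDialect>();   // dialect defined below
  pir::Type i1_a = IntegerType::get(ctx, 1, 0);  // width = 1, unsigned
  pir::Type i1_b = IntegerType::get(ctx, 1, 0);
  assert(i1_a == i1_b);                          // same storage instance
  assert(i1_a != IntegerType::get(ctx, 8, 0));   // different ParamKey
}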
-struct IntegerDialect : ir::Dialect { - explicit IntegerDialect(ir::IrContext *context) - : ir::Dialect(name(), context, ir::TypeId::get()) { +struct IntegerDialect : pir::Dialect { + explicit IntegerDialect(pir::IrContext *context) + : pir::Dialect(name(), context, pir::TypeId::get()) { RegisterType(); } static const char *name() { return "integer"; } @@ -209,41 +210,41 @@ IR_DECLARE_EXPLICIT_TYPE_ID(IntegerDialect) IR_DEFINE_EXPLICIT_TYPE_ID(IntegerDialect) TEST(type_test, custom_type_dialect) { - ir::IrContext *ctx = ir::IrContext::Instance(); - ctx->GetOrRegisterDialect(); + pir::IrContext *ctx = pir::IrContext::Instance(); + ctx->GetOrRegisterDialect(); // Test 1: Test the function of IrContext to register Dialect. ctx->GetOrRegisterDialect(); - ir::Type int1_1 = IntegerType::get(ctx, 1, 0); - ir::Type int1_2 = IntegerType::get(ctx, 1, 0); + pir::Type int1_1 = IntegerType::get(ctx, 1, 0); + pir::Type int1_2 = IntegerType::get(ctx, 1, 0); EXPECT_EQ(int1_1, int1_2); - ir::Type int8 = IntegerType::get(ctx, 8, 0); + pir::Type int8 = IntegerType::get(ctx, 8, 0); EXPECT_NE(int8, int1_2); // Test 2: Test Dialect interfaces EXPECT_EQ(ctx, int8.ir_context()); - EXPECT_EQ(int8.dialect().id(), ir::TypeId::get()); + EXPECT_EQ(int8.dialect().id(), pir::TypeId::get()); - std::vector dialect_list = ctx->GetRegisteredDialects(); + std::vector dialect_list = ctx->GetRegisteredDialects(); EXPECT_EQ(dialect_list.size() == 4, 1); // integer, builtin, fake - ir::Dialect *dialect_builtin1 = ctx->GetRegisteredDialect("builtin"); - ir::Dialect *dialect_builtin2 = - ctx->GetRegisteredDialect(); + pir::Dialect *dialect_builtin1 = ctx->GetRegisteredDialect("builtin"); + pir::Dialect *dialect_builtin2 = + ctx->GetRegisteredDialect(); EXPECT_EQ(dialect_builtin1, dialect_builtin2); - ir::Dialect *dialect_integer1 = ctx->GetRegisteredDialect("integer"); - ir::Dialect *dialect_integer2 = ctx->GetRegisteredDialect(); + pir::Dialect *dialect_integer1 = ctx->GetRegisteredDialect("integer"); + pir::Dialect *dialect_integer2 = ctx->GetRegisteredDialect(); EXPECT_EQ(dialect_integer1, dialect_integer2); } -TEST(type_test, pd_dialect) { - ir::IrContext *ctx = ir::IrContext::Instance(); - ctx->GetOrRegisterDialect(); - ir::Type fp32_dtype = ir::Float32Type::get(ctx); +TEST(type_test, pd_op_dialect) { + pir::IrContext *ctx = pir::IrContext::Instance(); + ctx->GetOrRegisterDialect(); + pir::Type fp32_dtype = pir::Float32Type::get(ctx); phi::DDim dims = {2, 2}; phi::DataLayout data_layout = phi::DataLayout::NCHW; phi::LoD lod = {{0, 1, 2}}; @@ -251,7 +252,7 @@ TEST(type_test, pd_dialect) { paddle::dialect::SelectedRowsType select_rows_dtype = paddle::dialect::SelectedRowsType::get( ctx, fp32_dtype, dims, data_layout, lod, offset); - EXPECT_EQ(select_rows_dtype.dtype().isa(), true); + EXPECT_EQ(select_rows_dtype.dtype().isa(), true); EXPECT_EQ(select_rows_dtype.dims(), dims); EXPECT_EQ(select_rows_dtype.data_layout(), data_layout); EXPECT_EQ(select_rows_dtype.lod(), lod); @@ -263,6 +264,6 @@ class TestClass {}; } // namespace TestNamespace TEST(type_test, get_type_name) { - auto name = ir::get_type_name(); + auto name = pir::get_type_name(); EXPECT_EQ(name, "TestNamespace::TestClass"); } diff --git a/test/cpp/ir/kernel_dialect/CMakeLists.txt b/test/cpp/pir/kernel_dialect/CMakeLists.txt similarity index 97% rename from test/cpp/ir/kernel_dialect/CMakeLists.txt rename to test/cpp/pir/kernel_dialect/CMakeLists.txt index 47aad616fd783..ea8477dbe3970 100644 --- a/test/cpp/ir/kernel_dialect/CMakeLists.txt +++ 
b/test/cpp/pir/kernel_dialect/CMakeLists.txt @@ -8,6 +8,6 @@ cc_test_old( pd_kernel_dialect phi_kernel_adaptor pd_trait - ir + pir phi gtest) diff --git a/test/cpp/ir/kernel_dialect/ir_kernel_dialect_pass_test.cc b/test/cpp/pir/kernel_dialect/ir_kernel_dialect_pass_test.cc similarity index 66% rename from test/cpp/ir/kernel_dialect/ir_kernel_dialect_pass_test.cc rename to test/cpp/pir/kernel_dialect/ir_kernel_dialect_pass_test.cc index 22442c40555d0..97aad7062292c 100644 --- a/test/cpp/ir/kernel_dialect/ir_kernel_dialect_pass_test.cc +++ b/test/cpp/pir/kernel_dialect/ir_kernel_dialect_pass_test.cc @@ -19,22 +19,16 @@ #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/framework/variable.h" #include "paddle/fluid/framework/variable_helper.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/interface/op_yaml_info.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_attribute.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/utils/utils.h" -#include "paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_dialect.h" -#include "paddle/fluid/ir/dialect/paddle_kernel_dialect/ir/kernel_op.h" -#include "paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_adaptor.h" -#include "paddle/fluid/ir/transforms/pd_op_to_kernel_pass.h" +#include "paddle/fluid/pir/dialect/kernel/ir/kernel_dialect.h" +#include "paddle/fluid/pir/dialect/kernel/ir/kernel_op.h" +#include "paddle/fluid/pir/dialect/operator/interface/op_yaml_info.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_attribute.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_dialect.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_type.h" +#include "paddle/fluid/pir/dialect/operator/utils/utils.h" +#include "paddle/fluid/pir/phi_kernel_adaptor/phi_kernel_adaptor.h" +#include "paddle/fluid/pir/transforms/pd_op_to_kernel_pass.h" #include "paddle/fluid/platform/init.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/builtin_dialect.h" -#include "paddle/ir/core/builtin_op.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/program.h" -#include "paddle/ir/core/utils.h" #include "paddle/phi/common/data_type.h" #include "paddle/phi/common/place.h" #include "paddle/phi/core/kernel_context.h" @@ -43,6 +37,12 @@ #include "paddle/phi/core/meta_tensor.h" #include "paddle/phi/infermeta/binary.h" #include "paddle/phi/kernels/elementwise_add_kernel.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/builtin_dialect.h" +#include "paddle/pir/core/builtin_op.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/program.h" +#include "paddle/pir/core/utils.h" PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT); PD_DECLARE_KERNEL(full_int_array, CPU, ALL_LAYOUT); @@ -53,12 +53,12 @@ bool simple_cmp(float a, float b) { return std::abs((a - b) / a) < 1e-5; } TEST(program_test, program) { // (1) Init environment. 
- ir::IrContext* ctx = ir::IrContext::Instance(); - ir::Program program((ctx)); + pir::IrContext* ctx = pir::IrContext::Instance(); + pir::Program program((ctx)); - ctx->GetOrRegisterDialect(); + ctx->GetOrRegisterDialect(); - ir::Builder builder = ir::Builder(ctx, program.block()); + pir::Builder builder = pir::Builder(ctx, program.block()); paddle::dialect::FullOp op1 = builder.Build( std::vector{2, 2}, 1.0, phi::DataType::FLOAT32, phi::CPUPlace()); @@ -92,7 +92,7 @@ TEST(program_test, program) { ->front() ->dyn_cast() .op_name(), - "pd.full"); + "pd_op.full"); EXPECT_EQ(kernel_program->block() ->front() ->dyn_cast() @@ -108,12 +108,12 @@ TEST(program_test, program) { TEST(dialect_attr, attr) { // (1) Init environment. - ir::IrContext* ctx = ir::IrContext::Instance(); - ir::Program program((ctx)); + pir::IrContext* ctx = pir::IrContext::Instance(); + pir::Program program((ctx)); - ctx->GetOrRegisterDialect(); + ctx->GetOrRegisterDialect(); auto kernel_dialect = - ctx->GetOrRegisterDialect(); + ctx->GetOrRegisterDialect(); phi::KernelKey kernel_key( phi::Backend::CPU, phi::DataLayout::ALL_LAYOUT, phi::DataType::FLOAT32); @@ -128,42 +128,42 @@ TEST(dialect_attr, attr) { true); } -ir::AttributeMap CreateAttributeMap(std::vector attribute_names, - std::vector attributes, - std::string attr_name, - phi::KernelKey kernel_key) { - ir::IrContext* ctx = ir::IrContext::Instance(); - ir::AttributeMap attr_map; +pir::AttributeMap CreateAttributeMap(std::vector attribute_names, + std::vector attributes, + std::string attr_name, + phi::KernelKey kernel_key) { + pir::IrContext* ctx = pir::IrContext::Instance(); + pir::AttributeMap attr_map; for (size_t i = 0; i < attribute_names.size(); i++) { - ir::Attribute attr_value = ir::StrAttribute::get(ctx, attributes[i]); + pir::Attribute attr_value = pir::StrAttribute::get(ctx, attributes[i]); attr_map.insert( - std::pair(attribute_names[i], attr_value)); + std::pair(attribute_names[i], attr_value)); } auto attr = paddle::dialect::KernelAttribute::get(ctx, kernel_key); - attr_map.insert(std::pair(attr_name, attr)); + attr_map.insert(std::pair(attr_name, attr)); return attr_map; } TEST(kernel_dialect, legacy_op_test) { // (1) Init environment. 
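For orientation before this test's body: the program_test assertions above pin down what the lowering now housed in paddle/fluid/pir/transforms/pd_op_to_kernel_pass.h produces, namely kernel-dialect ops that record the source op's name in their "op_name" attribute, spelled with the renamed pd_op prefix. A condensed sketch of the sequence being tested; PhiKernelOp as the cast target and the kernel_name() check are assumptions, not assertions copied from this patch:

// program already holds a pd_op.full built with pir::Builder (see above).
auto kernel_program = paddle::dialect::PdOpToKernelPass(&program);
auto kernel_op = kernel_program->block()
                     ->front()
                     ->dyn_cast<paddle::dialect::PhiKernelOp>();  // assumed
assert(kernel_op.op_name() == "pd_op.full");  // "pd.full" before the rename
assert(kernel_op.kernel_name() == "full");    // phi kernel chosen by lowering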
- ir::IrContext* ctx = ir::IrContext::Instance(); - ir::Program program((ctx)); + pir::IrContext* ctx = pir::IrContext::Instance(); + pir::Program program((ctx)); - ctx->GetOrRegisterDialect(); + ctx->GetOrRegisterDialect(); phi::KernelKey kernel_key( phi::Backend::CPU, phi::DataLayout::ALL_LAYOUT, phi::DataType::FLOAT32); - ir::OpInfo kernel_op_info = + pir::OpInfo kernel_op_info = ctx->GetRegisteredOpInfo(paddle::dialect::LegacyKernelOp::name()); - ir::OperationArgument argument(kernel_op_info); + pir::OperationArgument argument(kernel_op_info); argument.attributes = CreateAttributeMap({"op_name", "kernel_name"}, - {"pd.kernel_op", "kernel_op"}, + {"pd_op.kernel_op", "kernel_op"}, "kernel_key", kernel_key); - ir::Operation* op = ir::Operation::Create(std::move(argument)); - EXPECT_EQ("pd.kernel_op", + pir::Operation* op = pir::Operation::Create(std::move(argument)); + EXPECT_EQ("pd_op.kernel_op", op->dyn_cast().op_name()); EXPECT_EQ("kernel_op", op->dyn_cast().kernel_name()); diff --git a/test/cpp/ir/pass/CMakeLists.txt b/test/cpp/pir/pass/CMakeLists.txt similarity index 79% rename from test/cpp/ir/pass/CMakeLists.txt rename to test/cpp/pir/pass/CMakeLists.txt index 061b1e8c9e2fb..be68cdab344e7 100644 --- a/test/cpp/ir/pass/CMakeLists.txt +++ b/test/cpp/pir/pass/CMakeLists.txt @@ -3,7 +3,7 @@ cc_test_old( SRCS pass_manager_test.cc DEPS - ir - pd_dialect + pir + pd_op_dialect phi gtest) diff --git a/test/cpp/ir/pass/pass_manager_test.cc b/test/cpp/pir/pass/pass_manager_test.cc similarity index 77% rename from test/cpp/ir/pass/pass_manager_test.cc rename to test/cpp/pir/pass/pass_manager_test.cc index 38af7d7a3021f..ac1b8a6c6d9f3 100644 --- a/test/cpp/ir/pass/pass_manager_test.cc +++ b/test/cpp/pir/pass/pass_manager_test.cc @@ -16,23 +16,23 @@ #include "glog/logging.h" // NOTE(zhangbo9674): File pd_op.h is generated by op_gen.py, see details in -// paddle/fluid/ir/dialect/CMakeLists.txt. -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_op.h" - -#include "paddle/fluid/ir/dialect/paddle_dialect/interface/op_yaml_info.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/utils/utils.h" -#include "paddle/ir/core/builtin_dialect.h" -#include "paddle/ir/core/builtin_op.h" -#include "paddle/ir/core/builtin_type.h" -#include "paddle/ir/core/dialect.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/op_base.h" -#include "paddle/ir/core/operation.h" -#include "paddle/ir/pass/pass.h" -#include "paddle/ir/pass/pass_manager.h" +// paddle/fluid/pir/dialect/CMakeLists.txt. 
+#include "paddle/fluid/pir/dialect/operator/ir/pd_op.h" + +#include "paddle/fluid/pir/dialect/operator/interface/op_yaml_info.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_dialect.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_type.h" +#include "paddle/fluid/pir/dialect/operator/utils/utils.h" #include "paddle/phi/kernels/elementwise_add_kernel.h" +#include "paddle/pir/core/builtin_dialect.h" +#include "paddle/pir/core/builtin_op.h" +#include "paddle/pir/core/builtin_type.h" +#include "paddle/pir/core/dialect.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/op_base.h" +#include "paddle/pir/core/operation.h" +#include "paddle/pir/pass/pass.h" +#include "paddle/pir/pass/pass_manager.h" #ifndef _WIN32 class TestAnalysis1 {}; @@ -44,7 +44,7 @@ IR_DECLARE_EXPLICIT_TYPE_ID(TestAnalysis2) IR_DEFINE_EXPLICIT_TYPE_ID(TestAnalysis2) TEST(pass_manager, PreservedAnalyses) { - ir::detail::PreservedAnalyses pa; + pir::detail::PreservedAnalyses pa; CHECK_EQ(pa.IsNone(), true); CHECK_EQ(pa.IsPreserved(), false); @@ -63,18 +63,18 @@ TEST(pass_manager, PreservedAnalyses) { } #endif -class AddOp : public ir::Op { +class AddOp : public pir::Op { public: using Op::Op; static const char *name() { return "test.add"; } static constexpr const char **attributes_name = nullptr; static constexpr uint32_t attributes_num = 0; void Verify(); - static void Build(ir::Builder &builder, // NOLINT - ir::OperationArgument &argument, // NOLINT - ir::OpResult l_operand, - ir::OpResult r_operand, - ir::Type sum_type); + static void Build(pir::Builder &builder, // NOLINT + pir::OperationArgument &argument, // NOLINT + pir::OpResult l_operand, + pir::OpResult r_operand, + pir::Type sum_type); }; void AddOp::Verify() { if (num_operands() != 2) { @@ -84,11 +84,11 @@ void AddOp::Verify() { throw("The size of outputs must be equal to 1."); } } -void AddOp::Build(ir::Builder &, - ir::OperationArgument &argument, - ir::OpResult l_operand, - ir::OpResult r_operand, - ir::Type sum_type) { +void AddOp::Build(pir::Builder &, + pir::OperationArgument &argument, + pir::OpResult l_operand, + pir::OpResult r_operand, + pir::Type sum_type) { argument.AddOperand(l_operand); argument.AddOperand(r_operand); argument.AddOutput(sum_type); @@ -97,7 +97,7 @@ IR_DECLARE_EXPLICIT_TYPE_ID(AddOp) IR_DEFINE_EXPLICIT_TYPE_ID(AddOp) struct CountOpAnalysis { - explicit CountOpAnalysis(ir::Operation *container_op) { + explicit CountOpAnalysis(pir::Operation *container_op) { IR_ENFORCE(container_op->num_regions() > 0, "op must be a container with zero or multiple regions."); @@ -120,17 +120,17 @@ struct CountOpAnalysis { IR_DECLARE_EXPLICIT_TYPE_ID(CountOpAnalysis) IR_DEFINE_EXPLICIT_TYPE_ID(CountOpAnalysis) -class TestPass : public ir::Pass { +class TestPass : public pir::Pass { public: - TestPass() : ir::Pass("TestPass", 1) {} - void Run(ir::Operation *op) override { + TestPass() : pir::Pass("TestPass", 1) {} + void Run(pir::Operation *op) override { auto count_op_analysis = analysis_manager().GetAnalysis(); pass_state().preserved_analyses.Preserve(); CHECK_EQ(pass_state().preserved_analyses.IsPreserved(), true); CHECK_EQ(count_op_analysis.count, 11); - auto module_op = op->dyn_cast(); + auto module_op = op->dyn_cast(); CHECK_EQ(module_op.operation(), op); CHECK_EQ(module_op.name(), module_op->name()); LOG(INFO) << "In " << pass_info().name << ": " << module_op->name() @@ -141,12 +141,12 @@ class TestPass : public ir::Pass { false); } - bool CanApplyOn(ir::Operation *op) const override { + bool CanApplyOn(pir::Operation 
*op) const override { return op->name() == "builtin.module" && op->num_regions() > 0; } }; -void BuildProgram(ir::Builder &builder) { // NOLINT +void BuildProgram(pir::Builder &builder) { // NOLINT paddle::dialect::FullOp full_input_op = builder.Build(std::vector{4, 3, 16, 16}, 1.5, @@ -204,25 +204,25 @@ void BuildProgram(ir::Builder &builder) { // NOLINT } TEST(pass_manager, PassManager) { - ir::IrContext *ctx = ir::IrContext::Instance(); - ctx->GetOrRegisterDialect(); - ir::Program program(ctx); - ir::Builder builder = ir::Builder(ctx, program.block()); + pir::IrContext *ctx = pir::IrContext::Instance(); + ctx->GetOrRegisterDialect(); + pir::Program program(ctx); + pir::Builder builder = pir::Builder(ctx, program.block()); BuildProgram(builder); EXPECT_EQ(program.block()->size(), 11u); // (9) Test pass manager for program. - ir::PassManager pm(ctx); + pir::PassManager pm(ctx); pm.AddPass(std::make_unique()); // pm.EnableIRPrinting(); - pm.EnableIRPrinting(std::make_unique( - [](ir::Pass *pass, ir::Operation *op) { + pm.EnableIRPrinting(std::make_unique( + [](pir::Pass *pass, pir::Operation *op) { return pass->name() == "TestPass"; }, - [](ir::Pass *pass, ir::Operation *op) { + [](pir::Pass *pass, pir::Operation *op) { return pass->name() == "TestPass"; }, true, diff --git a/test/cpp/ir/pattern_rewrite/CMakeLists.txt b/test/cpp/pir/pattern_rewrite/CMakeLists.txt similarity index 73% rename from test/cpp/ir/pattern_rewrite/CMakeLists.txt rename to test/cpp/pir/pattern_rewrite/CMakeLists.txt index 2023cc0cf413f..b65aca7e62656 100644 --- a/test/cpp/ir/pattern_rewrite/CMakeLists.txt +++ b/test/cpp/pir/pattern_rewrite/CMakeLists.txt @@ -1,5 +1,5 @@ -set(PATTERN_REWRITE_TEST_DEPS _constant_folding_pass - transform_general_functions gtest pd_dialect ir) +set(PATTERN_REWRITE_TEST_DEPS + _constant_folding_pass transform_general_functions gtest pd_op_dialect pir) if(WITH_DISTRIBUTE) set(PATTERN_REWRITE_TEST_DEPS ${PATTERN_REWRITE_TEST_DEPS} fleet_executor) diff --git a/test/cpp/ir/pattern_rewrite/pattern_rewrite_test.cc b/test/cpp/pir/pattern_rewrite/pattern_rewrite_test.cc similarity index 69% rename from test/cpp/ir/pattern_rewrite/pattern_rewrite_test.cc rename to test/cpp/pir/pattern_rewrite/pattern_rewrite_test.cc index a86055523b521..985d00c4b0d1e 100644 --- a/test/cpp/ir/pattern_rewrite/pattern_rewrite_test.cc +++ b/test/cpp/pir/pattern_rewrite/pattern_rewrite_test.cc @@ -20,45 +20,45 @@ #include #include -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_attribute.h" -#include "paddle/fluid/ir/transforms/constant_folding_pass.h" -#include "paddle/fluid/ir/transforms/transform_general_functions.h" -#include "paddle/ir/core/builder.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/builtin_dialect.h" -#include "paddle/ir/core/builtin_op.h" -#include "paddle/ir/core/cast_utils.h" -#include "paddle/ir/core/dialect.h" -#include "paddle/ir/core/enforce.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/op_info.h" -#include "paddle/ir/core/parameter.h" -#include "paddle/ir/core/program.h" -#include "paddle/ir/core/value.h" -#include "paddle/ir/pass/pass.h" -#include "paddle/ir/pass/pass_manager.h" -#include "paddle/ir/pattern_rewrite/frozen_rewrite_pattern_set.h" -#include "paddle/ir/pattern_rewrite/pattern_applicator.h" -#include "paddle/ir/pattern_rewrite/pattern_match.h" -#include "paddle/ir/pattern_rewrite/pattern_rewrite_driver.h" -#include "paddle/ir/transforms/dead_code_elimination_pass.h" -#include 
"paddle/ir/transforms/reorder_block_ops_pass.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_attribute.h" +#include "paddle/fluid/pir/transforms/constant_folding_pass.h" +#include "paddle/fluid/pir/transforms/transform_general_functions.h" #include "paddle/phi/core/kernel_registry.h" +#include "paddle/pir/core/builder.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/builtin_dialect.h" +#include "paddle/pir/core/builtin_op.h" +#include "paddle/pir/core/cast_utils.h" +#include "paddle/pir/core/dialect.h" +#include "paddle/pir/core/enforce.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/op_info.h" +#include "paddle/pir/core/parameter.h" +#include "paddle/pir/core/program.h" +#include "paddle/pir/core/value.h" +#include "paddle/pir/pass/pass.h" +#include "paddle/pir/pass/pass_manager.h" +#include "paddle/pir/pattern_rewrite/frozen_rewrite_pattern_set.h" +#include "paddle/pir/pattern_rewrite/pattern_applicator.h" +#include "paddle/pir/pattern_rewrite/pattern_match.h" +#include "paddle/pir/pattern_rewrite/pattern_rewrite_driver.h" +#include "paddle/pir/transforms/dead_code_elimination_pass.h" +#include "paddle/pir/transforms/reorder_block_ops_pass.h" // NOTE(zhangbo9674): File pd_op.h is generated by op_gen.py, see details in -// paddle/fluid/ir/dialect/CMakeLists.txt. -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_op.h" +// paddle/fluid/pir/dialect/CMakeLists.txt. +#include "paddle/fluid/pir/dialect/operator/ir/pd_op.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_dialect.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_type.h" #include "paddle/phi/core/ddim.h" // build Conv2dFusionOp -#include "paddle/fluid/ir/dialect/paddle_dialect/interface/infermeta.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/interface/op_yaml_info.h" -#include "paddle/ir/core/op_base.h" +#include "paddle/fluid/pir/dialect/operator/interface/infermeta.h" +#include "paddle/fluid/pir/dialect/operator/interface/op_yaml_info.h" #include "paddle/phi/api/lib/utils/allocator.h" #include "paddle/phi/infermeta/multiary.h" +#include "paddle/pir/core/op_base.h" PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT); PD_DECLARE_KERNEL(add, CPU, ALL_LAYOUT); @@ -73,7 +73,7 @@ PD_DECLARE_KERNEL(conv2d, CPU, ALL_LAYOUT); PD_DECLARE_KERNEL(transpose, CPU, ALL_LAYOUT); // Define op1. -class Operation1 : public ir::Op { +class Operation1 : public pir::Op { public: using Op::Op; static const char *name() { return "test.Operation1"; } @@ -86,11 +86,11 @@ class Operation1 : public ir::Op { void Operation1::Verify() { auto &attributes = this->attributes(); if (attributes.count("op2_attr1") == 0 || - (!attributes.at("op2_attr1").isa())) { + (!attributes.at("op2_attr1").isa())) { throw("Type of attribute: parameter_name is not right."); } if (attributes.count("op2_attr2") == 0 || - (!attributes.at("op2_attr2").isa())) { + (!attributes.at("op2_attr2").isa())) { throw("Type of attribute: parameter_name is not right."); } } @@ -101,10 +101,10 @@ IR_DECLARE_EXPLICIT_TYPE_ID(Operation1) IR_DEFINE_EXPLICIT_TYPE_ID(Operation1) // Define a dialect, op1 and op2 will be registered by this dialect. 
-class TestDialect : public ir::Dialect { +class TestDialect : public pir::Dialect { public: - explicit TestDialect(ir::IrContext *context) - : ir::Dialect(name(), context, ir::TypeId::get()) { + explicit TestDialect(pir::IrContext *context) + : pir::Dialect(name(), context, pir::TypeId::get()) { initialize(); } static const char *name() { return "test"; } @@ -116,28 +116,28 @@ IR_DECLARE_EXPLICIT_TYPE_ID(TestDialect) IR_DEFINE_EXPLICIT_TYPE_ID(TestDialect) // TODO(wilber): Add logical when ir support erase, replace or update. -class TestPatternRewrite : public ir::OpRewritePattern { +class TestPatternRewrite : public pir::OpRewritePattern { public: - using ir::OpRewritePattern::OpRewritePattern; + using pir::OpRewritePattern::OpRewritePattern; - void Rewrite(Operation1 op, ir::PatternRewriter &rewriter) const override {} + void Rewrite(Operation1 op, pir::PatternRewriter &rewriter) const override {} bool Match(Operation1 op) const override { return false; } }; -class TestPatternRewrite2 : public ir::OpRewritePattern { +class TestPatternRewrite2 : public pir::OpRewritePattern { public: - using ir::OpRewritePattern::OpRewritePattern; + using pir::OpRewritePattern::OpRewritePattern; bool MatchAndRewrite( Operation1 op, - ir::PatternRewriter &rewriter) const override { // NOLINT + pir::PatternRewriter &rewriter) const override { // NOLINT return false; } }; TEST(PatternRewrite, PatternBenefit) { - ir::PatternBenefit benefit1(1); + pir::PatternBenefit benefit1(1); EXPECT_EQ(benefit1.benefit(), 1U); - ir::PatternBenefit benefit2(2); + pir::PatternBenefit benefit2(2); EXPECT_EQ(benefit2.benefit(), 2U); EXPECT_TRUE(benefit2 > benefit1); @@ -145,17 +145,17 @@ TEST(PatternRewrite, PatternBenefit) { EXPECT_TRUE(benefit1 < benefit2); EXPECT_TRUE(benefit1 <= benefit2); EXPECT_TRUE(benefit1 != benefit2); - ir::PatternBenefit benefit3(2); + pir::PatternBenefit benefit3(2); EXPECT_TRUE(benefit2 == benefit3); } TEST(RewritePattern, RewritePatternSet) { - ir::IrContext *ctx = ir::IrContext::Instance(); - ctx->GetOrRegisterDialect(); + pir::IrContext *ctx = pir::IrContext::Instance(); + ctx->GetOrRegisterDialect(); auto *test_dialect = ctx->GetOrRegisterDialect(); test_dialect->RegisterOp(); - ir::RewritePatternSet ps(ctx); + pir::RewritePatternSet ps(ctx); ps.Add(ctx, 1); EXPECT_EQ(ps.native_patterns().size(), 1U); EXPECT_TRUE(ps.native_patterns().back()->debug_labels().empty()); @@ -175,31 +175,31 @@ TEST(RewritePattern, RewritePatternSet) { // TODO(wilber): Add actual case. // TEST(PatternRewrite, PatternApplicator) { -// ir::IrContext *ctx = ir::IrContext::Instance(); -// ctx->GetOrRegisterDialect(); +// pir::IrContext *ctx = pir::IrContext::Instance(); +// ctx->GetOrRegisterDialect(); // auto *test_dialect = ctx->GetOrRegisterDialect(); // test_dialect->RegisterOp(); -// ir::RewritePatternSet ps(ctx); +// pir::RewritePatternSet ps(ctx); // ps.Add(ctx, 2); -// ir::FrozenRewritePatternSet frozen_set(std::move(ps)); -// ir::PatternApplicator applicator(frozen_set); +// pir::FrozenRewritePatternSet frozen_set(std::move(ps)); +// pir::PatternApplicator applicator(frozen_set); // applicator.ApplyDefaultCostModel(); // } // // TODO(wilber): Add actual case. 
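For orientation before the next test: the pieces above compose in a fixed order. A pattern subclasses pir::OpRewritePattern<OpT>, is added to a pir::RewritePatternSet with a benefit, and the set is frozen before any driver applies it. A condensed sketch reusing TestDialect and TestPatternRewrite from this file (illustrative; the helper name is made up):

void BuildFrozenPatterns() {
  pir::IrContext *ctx = pir::IrContext::Instance();
  auto *test_dialect = ctx->GetOrRegisterDialect<TestDialect>();
  test_dialect->RegisterOp<Operation1>();  // the op the pattern matches
  pir::RewritePatternSet ps(ctx);
  ps.Add<TestPatternRewrite>(ctx, /*benefit=*/1);
  pir::FrozenRewritePatternSet frozen(std::move(ps));
  // frozen now indexes the pattern under Operation1's OpInfo.
}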
TEST(PatternRewrite, FrozenRewritePatternSet) { - ir::FrozenRewritePatternSet frozen_set; + pir::FrozenRewritePatternSet frozen_set; EXPECT_TRUE(frozen_set.match_any_op_native_patterns().empty()); EXPECT_TRUE(frozen_set.op_specific_native_patterns().empty()); - ir::IrContext *ctx = ir::IrContext::Instance(); - ctx->GetOrRegisterDialect(); + pir::IrContext *ctx = pir::IrContext::Instance(); + ctx->GetOrRegisterDialect(); auto *test_dialect = ctx->GetOrRegisterDialect(); test_dialect->RegisterOp(); - ir::RewritePatternSet ps(ctx); + pir::RewritePatternSet ps(ctx); ps.Add(ctx, 2); - ir::FrozenRewritePatternSet frozen_set2(std::move(ps)); + pir::FrozenRewritePatternSet frozen_set2(std::move(ps)); EXPECT_TRUE(frozen_set2.match_any_op_native_patterns().empty()); const auto &pattern_maps = frozen_set2.op_specific_native_patterns(); EXPECT_EQ(pattern_maps.size(), 1U); @@ -208,13 +208,13 @@ TEST(PatternRewrite, FrozenRewritePatternSet) { } class RedundantTransposeFusePattern - : public ir::OpRewritePattern { + : public pir::OpRewritePattern { public: - using ir::OpRewritePattern::OpRewritePattern; + using pir::OpRewritePattern::OpRewritePattern; bool MatchAndRewrite(paddle::dialect::TransposeOp op, - ir::PatternRewriter &rewriter) const override { - auto prev_op = ir::GetDefiningOpForInput(op, 0); + pir::PatternRewriter &rewriter) const override { + auto prev_op = pir::GetDefiningOpForInput(op, 0); std::vector axis_last = GetAxis(op); auto prev_trans_op = prev_op->dyn_cast(); if (prev_trans_op) { @@ -224,7 +224,7 @@ class RedundantTransposeFusePattern auto new_perm = GetPerm(axis_first, axis_last); rewriter.SetInsertionPoint(op); auto new_transpose_op = rewriter.Build( - ir::GetDefiningOpForInput(prev_trans_op, 0)->result(0), new_perm); + pir::GetDefiningOpForInput(prev_trans_op, 0)->result(0), new_perm); rewriter.ReplaceOp(op, {new_transpose_op.out()}); return true; } @@ -234,10 +234,10 @@ class RedundantTransposeFusePattern private: std::vector GetAxis(paddle::dialect::TransposeOp op) const { - auto array_attr = op.attribute("perm").AsVector(); + auto array_attr = op.attribute("perm").AsVector(); std::vector axis(array_attr.size()); for (size_t i = 0; i < array_attr.size(); ++i) { - axis[i] = array_attr[i].dyn_cast().data(); + axis[i] = array_attr[i].dyn_cast().data(); } return axis; } @@ -258,53 +258,55 @@ class RedundantTransposeFusePattern }; class Conv2dBnFusePattern - : public ir::OpRewritePattern { + : public pir::OpRewritePattern { public: - using ir::OpRewritePattern::OpRewritePattern; + using pir::OpRewritePattern::OpRewritePattern; bool MatchAndRewrite( paddle::dialect::BatchNormOp op, - ir::PatternRewriter &rewriter) const override { // NOLINT + pir::PatternRewriter &rewriter) const override { // NOLINT // The next op should be batch_norm. 
paddle::dialect::Conv2dOp conv2d_op = - ir::GetDefiningOpForInput(op, 0)->dyn_cast(); + pir::GetDefiningOpForInput(op, 0) + ->dyn_cast(); if (!conv2d_op) return false; - ir::OpResult conv2d_out = conv2d_op.out(); + pir::OpResult conv2d_out = conv2d_op.out(); if (!conv2d_out.HasOneUse()) return false; - ir::Value conv2d_filter = conv2d_op.filter(); + pir::Value conv2d_filter = conv2d_op.filter(); - // ir::GetParameterOp filter_parameter_op = - // conv2d_filter.GetDefiningOp()->dyn_cast(); + // pir::GetParameterOp filter_parameter_op = + // conv2d_filter.GetDefiningOp()->dyn_cast(); // if (!filter_parameter_op) return false; - ir::OpResult conv2d_filter_result = conv2d_filter.dyn_cast(); + pir::OpResult conv2d_filter_result = + conv2d_filter.dyn_cast(); IR_ENFORCE(conv2d_filter_result); - ir::Value bn_input = op.x(); + pir::Value bn_input = op.x(); IR_ENFORCE(bn_input == conv2d_out); - ir::Value bn_mean = op.mean(); - ir::Value bn_variance = op.variance(); - ir::Value bn_scale = op.scale(); - ir::Value bn_bias = op.bias(); + pir::Value bn_mean = op.mean(); + pir::Value bn_variance = op.variance(); + pir::Value bn_scale = op.scale(); + pir::Value bn_bias = op.bias(); // --- deal with filter --- rewriter.SetInsertionPoint(op); phi::DDim bn_variance_shape = bn_variance.type().dyn_cast().dims(); - float epsilon = op.attribute("epsilon").data(); + float epsilon = op.attribute("epsilon").data(); paddle::dialect::FullOp full_op = rewriter.Build( phi::vectorize(bn_variance_shape), epsilon); paddle::dialect::AddOp add_op = rewriter.Build( - bn_variance.dyn_cast(), full_op.out()); + bn_variance.dyn_cast(), full_op.out()); paddle::dialect::SqrtOp sqrt_op = rewriter.Build(add_op.out()); paddle::dialect::DivideOp div_op = rewriter.Build( - bn_scale.dyn_cast(), sqrt_op.out()); + bn_scale.dyn_cast(), sqrt_op.out()); // reshape scale - phi::DDim conv2d_filter_shape = ir::GetShapeFromValue(conv2d_filter); + phi::DDim conv2d_filter_shape = pir::GetShapeFromValue(conv2d_filter); phi::DDim bn_scale_shape = bn_scale.type().dyn_cast().dims(); std::vector bn_scale_new_shape(conv2d_filter_shape.size(), 1); @@ -319,23 +321,24 @@ class Conv2dBnFusePattern auto conv2d_attributes = conv2d_op->attributes(); auto new_conv2d_op = rewriter.Build( - conv2d_op.input().dyn_cast(), + conv2d_op.input().dyn_cast(), mul_op.out(), conv2d_attributes); // --- deal with bias --- paddle::dialect::MultiplyOp mul_bias_op = rewriter.Build( - bn_mean.dyn_cast(), div_op.out()); + bn_mean.dyn_cast(), div_op.out()); // new bias --> sub_op.out() paddle::dialect::SubtractOp sub_op = rewriter.Build( - bn_bias.dyn_cast(), mul_bias_op.out()); + bn_bias.dyn_cast(), mul_bias_op.out()); // reshape new bias - phi::DDim new_conv2d_out_shape = ir::GetShapeFromValue(new_conv2d_op.out()); + phi::DDim new_conv2d_out_shape = + pir::GetShapeFromValue(new_conv2d_op.out()); std::vector new_bias_new_shape(new_conv2d_out_shape.size(), 1); std::string data_format = - new_conv2d_op.attribute("data_format").AsString(); + new_conv2d_op.attribute("data_format").AsString(); IR_ENFORCE(data_format == "NCHW", "Only support NCHW now."); new_bias_new_shape[1] = new_conv2d_out_shape[1]; paddle::dialect::ReshapeOp reshape_bias_op = @@ -354,21 +357,21 @@ class Conv2dBnFusePattern namespace paddle { namespace dialect { -class Conv2dFusionOpTest : public ir::Op { +class Conv2dFusionOpTest : public pir::Op { public: using Op::Op; - static const char *name() { return "pd.conv2d_fusion_test"; } + static const char *name() { return "pd_op.conv2d_fusion_test"; } static const char 
*attributes_name[10]; // NOLINT static constexpr uint32_t attributes_num = 10; static OpInfoTuple GetOpInfo(); - static void Build(ir::Builder &builder, // NOLINT - ir::OperationArgument &argument, // NOLINT - ir::OpResult input_, - ir::OpResult filter_, - ir::OpResult bias_, - ir::OpResult residual_, + static void Build(pir::Builder &builder, // NOLINT + pir::OperationArgument &argument, // NOLINT + pir::OpResult input_, + pir::OpResult filter_, + pir::OpResult bias_, + pir::OpResult residual_, const std::vector &strides, const std::vector &paddings_t, std::string padding_algorithm, @@ -380,21 +383,21 @@ class Conv2dFusionOpTest : public ir::Op &channels, int user_workspace_size); - static void Build(ir::Builder &builder, // NOLINT - ir::OperationArgument &argument, // NOLINT - ir::OpResult input_, - ir::OpResult filter_, - ir::OpResult bias_, - ir::OpResult residual_, - ir::AttributeMap attributes); + static void Build(pir::Builder &builder, // NOLINT + pir::OperationArgument &argument, // NOLINT + pir::OpResult input_, + pir::OpResult filter_, + pir::OpResult bias_, + pir::OpResult residual_, + pir::AttributeMap attributes); void Verify(); - ir::Value input() { return operand_source(0); } - ir::Value filter() { return operand_source(1); } - ir::Value bias() { return operand_source(2); } - ir::Value residual() { return operand_source(3); } - ir::OpResult output() { return result(0); } - ir::OpResult outputs() { return result(1); } - ir::Attribute attribute(const std::string &name) { + pir::Value input() { return operand_source(0); } + pir::Value filter() { return operand_source(1); } + pir::Value bias() { return operand_source(2); } + pir::Value residual() { return operand_source(3); } + pir::OpResult output() { return result(0); } + pir::OpResult outputs() { return result(1); } + pir::Attribute attribute(const std::string &name) { { PADDLE_ENFORCE( attributes().count(name) > 0, @@ -454,22 +457,24 @@ OpInfoTuple Conv2dFusionOpTest::GetOpInfo() { false, true)}; std::vector attributes = { - OpAttributeInfo("strides", "ir::ArrayAttribute", ""), OpAttributeInfo( - "paddings_t", "ir::ArrayAttribute", ""), - OpAttributeInfo("padding_algorithm", "ir::StrAttribute", ""), + "strides", "pir::ArrayAttribute", ""), OpAttributeInfo( - "dilations_t", "ir::ArrayAttribute", ""), - OpAttributeInfo("groups", "ir::Int32Attribute", ""), - OpAttributeInfo("data_format", "ir::StrAttribute", ""), - OpAttributeInfo("activation", "ir::StrAttribute", ""), - OpAttributeInfo("exhaustive_search", "ir::BoolAttribute", ""), - OpAttributeInfo("channels", "ir::ArrayAttribute", ""), - OpAttributeInfo("user_workspace_size", "ir::Int32Attribute", "")}; + "paddings_t", "pir::ArrayAttribute", ""), + OpAttributeInfo("padding_algorithm", "pir::StrAttribute", ""), + OpAttributeInfo( + "dilations_t", "pir::ArrayAttribute", ""), + OpAttributeInfo("groups", "pir::Int32Attribute", ""), + OpAttributeInfo("data_format", "pir::StrAttribute", ""), + OpAttributeInfo("activation", "pir::StrAttribute", ""), + OpAttributeInfo("exhaustive_search", "pir::BoolAttribute", ""), + OpAttributeInfo( + "channels", "pir::ArrayAttribute", ""), + OpAttributeInfo("user_workspace_size", "pir::Int32Attribute", "")}; std::vector outputs = { OpOutputInfo("output", "paddle::dialect::DenseTensorType", false, false), OpOutputInfo("outputs", - "ir::VectorType", + "pir::VectorType", false, false)}; paddle::dialect::OpRunTimeInfo run_time_info = @@ -512,132 +517,132 @@ OpInfoTuple Conv2dFusionOpTest::GetOpInfo() { inputs, attributes, outputs, run_time_info, 
"conv2d_fusion_test"); } -void Conv2dFusionOpTest::Build(ir::Builder &builder, - ir::OperationArgument &argument, - ir::OpResult input_, - ir::OpResult filter_, - ir::OpResult bias_, - ir::OpResult residual_, - ir::AttributeMap attributes) { +void Conv2dFusionOpTest::Build(pir::Builder &builder, + pir::OperationArgument &argument, + pir::OpResult input_, + pir::OpResult filter_, + pir::OpResult bias_, + pir::OpResult residual_, + pir::AttributeMap attributes) { std::vector strides; for (size_t i = 0; - i < attributes.at("strides").dyn_cast().size(); + i < attributes.at("strides").dyn_cast().size(); i++) { strides.push_back(attributes.at("strides") - .dyn_cast() + .dyn_cast() .at(i) - .dyn_cast() + .dyn_cast() .data()); } std::vector paddings_t; for (size_t i = 0; - i < attributes.at("paddings_t").dyn_cast().size(); + i < attributes.at("paddings_t").dyn_cast().size(); i++) { paddings_t.push_back(attributes.at("paddings_t") - .dyn_cast() + .dyn_cast() .at(i) - .dyn_cast() + .dyn_cast() .data()); } std::string padding_algorithm = attributes.at("padding_algorithm") - .dyn_cast() + .dyn_cast() .AsString(); std::vector dilations_t; for (size_t i = 0; - i < attributes.at("dilations_t").dyn_cast().size(); + i < attributes.at("dilations_t").dyn_cast().size(); i++) { dilations_t.push_back(attributes.at("dilations_t") - .dyn_cast() + .dyn_cast() .at(i) - .dyn_cast() + .dyn_cast() .data()); } - int groups = attributes.at("groups").dyn_cast().data(); + int groups = attributes.at("groups").dyn_cast().data(); std::string data_format = - attributes.at("data_format").dyn_cast().AsString(); + attributes.at("data_format").dyn_cast().AsString(); std::string activation = - attributes.at("activation").dyn_cast().AsString(); + attributes.at("activation").dyn_cast().AsString(); bool exhaustive_search = - attributes.at("exhaustive_search").dyn_cast().data(); + attributes.at("exhaustive_search").dyn_cast().data(); std::vector channels; for (size_t i = 0; - i < attributes.at("channels").dyn_cast().size(); + i < attributes.at("channels").dyn_cast().size(); i++) { channels.push_back(attributes.at("channels") - .dyn_cast() + .dyn_cast() .at(i) - .dyn_cast() + .dyn_cast() .data()); } int user_workspace_size = attributes.at("user_workspace_size") - .dyn_cast() + .dyn_cast() .data(); VLOG(4) << "Builder construction inputs"; - std::vector argument_inputs = { + std::vector argument_inputs = { input_, filter_, bias_, residual_}; argument.AddOperands(argument_inputs.begin(), argument_inputs.end()); VLOG(4) << "Builder construction attributes"; - std::vector vec_strides; + std::vector vec_strides; for (auto stride : strides) { - ir::Attribute attr_strides = - ir::Int32Attribute::get(ir::IrContext::Instance(), stride); + pir::Attribute attr_strides = + pir::Int32Attribute::get(pir::IrContext::Instance(), stride); vec_strides.push_back(attr_strides); } - ir::Attribute attr_strides = - ir::ArrayAttribute::get(ir::IrContext::Instance(), vec_strides); + pir::Attribute attr_strides = + pir::ArrayAttribute::get(pir::IrContext::Instance(), vec_strides); argument.AddAttribute("strides", attr_strides); - std::vector vec_paddings_t; + std::vector vec_paddings_t; for (auto padding : paddings_t) { - ir::Attribute attr_paddings_t = - ir::Int32Attribute::get(ir::IrContext::Instance(), padding); + pir::Attribute attr_paddings_t = + pir::Int32Attribute::get(pir::IrContext::Instance(), padding); vec_paddings_t.push_back(attr_paddings_t); } - ir::Attribute attr_paddings_t = - ir::ArrayAttribute::get(ir::IrContext::Instance(), vec_paddings_t); + 
pir::Attribute attr_paddings_t = + pir::ArrayAttribute::get(pir::IrContext::Instance(), vec_paddings_t); argument.AddAttribute("paddings_t", attr_paddings_t); - ir::Attribute attr_padding_algorithm = - ir::StrAttribute::get(ir::IrContext::Instance(), padding_algorithm); + pir::Attribute attr_padding_algorithm = + pir::StrAttribute::get(pir::IrContext::Instance(), padding_algorithm); argument.AddAttribute("padding_algorithm", attr_padding_algorithm); - std::vector vec_dilations_t; + std::vector vec_dilations_t; for (auto dilation : dilations_t) { - ir::Attribute attr_dilations_t = - ir::Int32Attribute::get(ir::IrContext::Instance(), dilation); + pir::Attribute attr_dilations_t = + pir::Int32Attribute::get(pir::IrContext::Instance(), dilation); vec_dilations_t.push_back(attr_dilations_t); } - ir::Attribute attr_dilations_t = - ir::ArrayAttribute::get(ir::IrContext::Instance(), vec_dilations_t); + pir::Attribute attr_dilations_t = + pir::ArrayAttribute::get(pir::IrContext::Instance(), vec_dilations_t); argument.AddAttribute("dilations_t", attr_dilations_t); - ir::Attribute attr_groups = - ir::Int32Attribute::get(ir::IrContext::Instance(), groups); + pir::Attribute attr_groups = + pir::Int32Attribute::get(pir::IrContext::Instance(), groups); argument.AddAttribute("groups", attr_groups); - ir::Attribute attr_data_format = - ir::StrAttribute::get(ir::IrContext::Instance(), data_format); + pir::Attribute attr_data_format = + pir::StrAttribute::get(pir::IrContext::Instance(), data_format); argument.AddAttribute("data_format", attr_data_format); - ir::Attribute attr_activation = - ir::StrAttribute::get(ir::IrContext::Instance(), activation); + pir::Attribute attr_activation = + pir::StrAttribute::get(pir::IrContext::Instance(), activation); argument.AddAttribute("activation", attr_activation); - ir::Attribute attr_exhaustive_search = - ir::BoolAttribute::get(ir::IrContext::Instance(), exhaustive_search); + pir::Attribute attr_exhaustive_search = + pir::BoolAttribute::get(pir::IrContext::Instance(), exhaustive_search); argument.AddAttribute("exhaustive_search", attr_exhaustive_search); - std::vector vec_channels; + std::vector vec_channels; for (auto channel : channels) { - ir::Attribute attr_channels = - ir::Int32Attribute::get(ir::IrContext::Instance(), channel); + pir::Attribute attr_channels = + pir::Int32Attribute::get(pir::IrContext::Instance(), channel); vec_channels.push_back(attr_channels); } - ir::Attribute attr_channels = - ir::ArrayAttribute::get(ir::IrContext::Instance(), vec_channels); + pir::Attribute attr_channels = + pir::ArrayAttribute::get(pir::IrContext::Instance(), vec_channels); argument.AddAttribute("channels", attr_channels); - ir::Attribute attr_user_workspace_size = - ir::Int32Attribute::get(ir::IrContext::Instance(), user_workspace_size); + pir::Attribute attr_user_workspace_size = + pir::Int32Attribute::get(pir::IrContext::Instance(), user_workspace_size); argument.AddAttribute("user_workspace_size", attr_user_workspace_size); VLOG(4) << "Builder construction outputs"; @@ -734,9 +739,9 @@ void Conv2dFusionOpTest::Build(ir::Builder &builder, &meta_output, phi::MetaConfig()); - std::vector argument_outputs; + std::vector argument_outputs; auto output_dense_tensor_type = paddle::dialect::DenseTensorType::get( - ir::IrContext::Instance(), + pir::IrContext::Instance(), TransToIrDataType(dense_output.dtype()), dense_output.dims(), dense_output.layout(), @@ -746,18 +751,18 @@ void Conv2dFusionOpTest::Build(ir::Builder &builder, 
argument_outputs.push_back(output_dense_tensor_type); - std::vector outputs_types; + std::vector outputs_types; for (size_t i = 0; i < static_cast(channels.size()); i++) { outputs_types.push_back(paddle::dialect::DenseTensorType::get( - ir::IrContext::Instance(), + pir::IrContext::Instance(), TransToIrDataType(vec_dense_outputs[i].dtype()), vec_dense_outputs[i].dims(), vec_dense_outputs[i].layout(), vec_dense_outputs[i].lod(), vec_dense_outputs[i].offset())); } - ir::Type outputs_vector_type = - ir::VectorType::get(ir::IrContext::Instance(), outputs_types); + pir::Type outputs_vector_type = + pir::VectorType::get(pir::IrContext::Instance(), outputs_types); argument_outputs.push_back(outputs_vector_type); argument.AddOutputs(argument_outputs.begin(), argument_outputs.end()); } @@ -801,86 +806,87 @@ void Conv2dFusionOpTest::Verify() { { auto &attributes = this->attributes(); PADDLE_ENFORCE(attributes.count("strides") > 0 && - attributes.at("strides").isa(), + attributes.at("strides").isa(), phi::errors::PreconditionNotMet( "Type of attribute: strides is not right.")); for (size_t i = 0; - i < attributes.at("strides").dyn_cast().size(); + i < attributes.at("strides").dyn_cast().size(); i++) { PADDLE_ENFORCE(attributes.at("strides") - .dyn_cast() + .dyn_cast() .at(i) - .isa(), + .isa(), phi::errors::PreconditionNotMet( "Type of attribute: strides is not right.")); } PADDLE_ENFORCE(attributes.count("paddings_t") > 0 && - attributes.at("paddings_t").isa(), + attributes.at("paddings_t").isa(), phi::errors::PreconditionNotMet( "Type of attribute: paddings_t is not right.")); for (size_t i = 0; - i < attributes.at("paddings_t").dyn_cast().size(); + i < attributes.at("paddings_t").dyn_cast().size(); i++) { PADDLE_ENFORCE(attributes.at("paddings_t") - .dyn_cast() + .dyn_cast() .at(i) - .isa(), + .isa(), phi::errors::PreconditionNotMet( "Type of attribute: paddings_t is not right.")); } PADDLE_ENFORCE( attributes.count("padding_algorithm") > 0 && - attributes.at("padding_algorithm").isa(), + attributes.at("padding_algorithm").isa(), phi::errors::PreconditionNotMet( "Type of attribute: padding_algorithm is not right.")); PADDLE_ENFORCE(attributes.count("dilations_t") > 0 && - attributes.at("dilations_t").isa(), + attributes.at("dilations_t").isa(), phi::errors::PreconditionNotMet( "Type of attribute: dilations_t is not right.")); for (size_t i = 0; - i < attributes.at("dilations_t").dyn_cast().size(); + i < + attributes.at("dilations_t").dyn_cast().size(); i++) { PADDLE_ENFORCE(attributes.at("dilations_t") - .dyn_cast() + .dyn_cast() .at(i) - .isa(), + .isa(), phi::errors::PreconditionNotMet( "Type of attribute: dilations_t is not right.")); } PADDLE_ENFORCE(attributes.count("groups") > 0 && - attributes.at("groups").isa(), + attributes.at("groups").isa(), phi::errors::PreconditionNotMet( "Type of attribute: groups is not right.")); PADDLE_ENFORCE(attributes.count("data_format") > 0 && - attributes.at("data_format").isa(), + attributes.at("data_format").isa(), phi::errors::PreconditionNotMet( "Type of attribute: data_format is not right.")); PADDLE_ENFORCE(attributes.count("activation") > 0 && - attributes.at("activation").isa(), + attributes.at("activation").isa(), phi::errors::PreconditionNotMet( "Type of attribute: activation is not right.")); PADDLE_ENFORCE( attributes.count("exhaustive_search") > 0 && - attributes.at("exhaustive_search").isa(), + attributes.at("exhaustive_search").isa(), phi::errors::PreconditionNotMet( "Type of attribute: exhaustive_search is not right.")); 
PADDLE_ENFORCE(attributes.count("channels") > 0 && - attributes.at("channels").isa(), + attributes.at("channels").isa(), phi::errors::PreconditionNotMet( "Type of attribute: channels is not right.")); for (size_t i = 0; - i < attributes.at("channels").dyn_cast().size(); + i < attributes.at("channels").dyn_cast().size(); i++) { PADDLE_ENFORCE(attributes.at("channels") - .dyn_cast() + .dyn_cast() .at(i) - .isa(), + .isa(), phi::errors::PreconditionNotMet( "Type of attribute: channels is not right.")); } PADDLE_ENFORCE( attributes.count("user_workspace_size") > 0 && - attributes.at("user_workspace_size").isa(), + attributes.at("user_workspace_size").isa(), phi::errors::PreconditionNotMet( "Type of attribute: user_workspace_size is not right.")); } @@ -897,7 +903,7 @@ void Conv2dFusionOpTest::Verify() { phi::errors::PreconditionNotMet( "Type validation failed for the 0th output.")); auto output_1_type = (*this)->result(1).type(); - if (auto vec_type = output_1_type.dyn_cast()) { + if (auto vec_type = output_1_type.dyn_cast()) { for (size_t i = 0; i < vec_type.size(); i++) { PADDLE_ENFORCE(vec_type[i].isa(), phi::errors::PreconditionNotMet( @@ -922,10 +928,10 @@ void Conv2dFusionOpTest::InferMeta(phi::InferMetaContext *infer_meta) { IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::Conv2dFusionOpTest) IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::Conv2dFusionOpTest) -class Conv2dFusionTestDialect : public ir::Dialect { +class Conv2dFusionTestDialect : public pir::Dialect { public: - explicit Conv2dFusionTestDialect(ir::IrContext *context) - : ir::Dialect(name(), context, ir::TypeId::get()) { + explicit Conv2dFusionTestDialect(pir::IrContext *context) + : pir::Dialect(name(), context, pir::TypeId::get()) { initialize(); } static const char *name() { return "con2d fusion test"; } @@ -937,30 +943,32 @@ IR_DECLARE_EXPLICIT_TYPE_ID(Conv2dFusionTestDialect) IR_DEFINE_EXPLICIT_TYPE_ID(Conv2dFusionTestDialect) class Conv2dAddFusePattern - : public ir::OpRewritePattern { + : public pir::OpRewritePattern { public: - using ir::OpRewritePattern::OpRewritePattern; + using pir::OpRewritePattern::OpRewritePattern; bool MatchAndRewrite( paddle::dialect::AddOp op, - ir::PatternRewriter &rewriter) const override { // NOLINT + pir::PatternRewriter &rewriter) const override { // NOLINT // The next op should be add. 
paddle::dialect::Conv2dOp conv2d_op = - ir::GetDefiningOpForInput(op, 0)->dyn_cast(); + pir::GetDefiningOpForInput(op, 0) + ->dyn_cast(); if (!conv2d_op) return false; - ir::OpResult conv2d_out = conv2d_op.out(); + pir::OpResult conv2d_out = conv2d_op.out(); if (!conv2d_out.HasOneUse()) return false; - ir::Value conv2d_filter = conv2d_op.filter(); + pir::Value conv2d_filter = conv2d_op.filter(); - ir::OpResult conv2d_filter_result = conv2d_filter.dyn_cast(); + pir::OpResult conv2d_filter_result = + conv2d_filter.dyn_cast(); IR_ENFORCE(conv2d_filter_result); - ir::Value add_input = op.x(); + pir::Value add_input = op.x(); IR_ENFORCE(add_input == conv2d_out); - ir::Value y = op.y(); - ir::OpResult bias = y.dyn_cast(); + pir::Value y = op.y(); + pir::OpResult bias = y.dyn_cast(); auto conv2d_attributes = conv2d_op.attributes(); std::vector conv2d_fusion_attrStr = {"strides", "paddings_t", @@ -972,7 +980,7 @@ class Conv2dAddFusePattern "exhaustive_search", "channels", "user_workspace_size"}; - std::vector con2d_fusing_attr = { + std::vector con2d_fusing_attr = { conv2d_attributes.at("strides"), conv2d_attributes.at("paddings"), conv2d_attributes.at("padding_algorithm"), @@ -981,32 +989,32 @@ class Conv2dAddFusePattern conv2d_attributes.at("data_format"), rewriter.str_attr("identity"), rewriter.bool_attr(true), - rewriter.array_attr(std::vector{}), + rewriter.array_attr(std::vector{}), rewriter.int32_attr(0)}; - ir::AttributeMap conv2d_fusion_attributes; + pir::AttributeMap conv2d_fusion_attributes; for (size_t i = 0; i < conv2d_fusion_attrStr.size(); ++i) { conv2d_fusion_attributes[conv2d_fusion_attrStr[i]] = con2d_fusing_attr[i]; } - ir::OpResult tmpResidual; + pir::OpResult tmpResidual; auto conv2d_fuse_op = rewriter.Build( - ir::GetDefiningOpForInput(conv2d_op, 0)->result(0), + pir::GetDefiningOpForInput(conv2d_op, 0)->result(0), conv2d_filter_result, bias, tmpResidual, conv2d_fusion_attributes); - rewriter.ReplaceOp(op, std::vector{conv2d_fuse_op.output()}); + rewriter.ReplaceOp(op, std::vector{conv2d_fuse_op.output()}); return true; } }; -class TestPass : public ir::Pass { +class TestPass : public pir::Pass { public: - TestPass() : ir::Pass("TestPass", 1) {} + TestPass() : pir::Pass("TestPass", 1) {} - bool Initialize(ir::IrContext *context) override { - ir::RewritePatternSet ps(context); + bool Initialize(pir::IrContext *context) override { + pir::RewritePatternSet ps(context); ps.Add(context); auto conv_bn_pattern = std::make_unique( context, @@ -1024,26 +1032,26 @@ class TestPass : public ir::Pass { LOG(INFO) << "--- " << op_info.name(); } ps.Add(std::move(conv_bn_pattern)); - patterns_ = ir::FrozenRewritePatternSet(std::move(ps)); + patterns_ = pir::FrozenRewritePatternSet(std::move(ps)); return true; } - void Run(ir::Operation *op) override { - ir::GreedyRewriteConfig cfg; + void Run(pir::Operation *op) override { + pir::GreedyRewriteConfig cfg; cfg.use_top_down_traversal = true; cfg.max_iterations = 10; - ir::ApplyPatternsGreedily(op->region(0), patterns_, cfg); + pir::ApplyPatternsGreedily(op->region(0), patterns_, cfg); } - bool CanApplyOn(ir::Operation *op) const override { + bool CanApplyOn(pir::Operation *op) const override { return op->name() == "builtin.module" && op->num_regions() > 0; } private: - ir::FrozenRewritePatternSet patterns_; + pir::FrozenRewritePatternSet patterns_; }; -void BuildProgram(ir::Builder &builder) { // NOLINT +void BuildProgram(pir::Builder &builder) { // NOLINT paddle::dialect::FullOp full_input_op = builder.Build(std::vector{4, 3, 16, 16}, 1.5, @@ 
-1102,28 +1110,28 @@ void BuildProgram(ir::Builder &builder) { // NOLINT // TODO(wilber): Add a normal test. TEST(pattern_rewrite, Patterns) { - ir::IrContext *ctx = ir::IrContext::Instance(); + pir::IrContext *ctx = pir::IrContext::Instance(); auto *test_dialect = ctx->GetOrRegisterDialect(); test_dialect->RegisterOp(); - ctx->GetOrRegisterDialect(); - ir::Program program(ctx); - ir::Builder builder = ir::Builder(ctx, program.block()); + ctx->GetOrRegisterDialect(); + pir::Program program(ctx); + pir::Builder builder = pir::Builder(ctx, program.block()); BuildProgram(builder); EXPECT_EQ(program.block()->size(), 11u); - ir::PassManager pm(ctx); + pir::PassManager pm(ctx); pm.AddPass(std::make_unique()); // pm.AddPass(ir::CreateConstantFoldingPass()); - pm.AddPass(ir::CreateDeadCodeEliminationPass()); - pm.AddPass(ir::CreateReorderBlockOpsPass()); + pm.AddPass(pir::CreateDeadCodeEliminationPass()); + pm.AddPass(pir::CreateReorderBlockOpsPass()); pm.EnablePassTiming(); pm.EnableIRPrinting(); - // pm.EnableIRPrinting(std::make_unique( - // [](ir::Pass *pass, ir::Operation *op) { + // pm.EnableIRPrinting(std::make_unique( + // [](pir::Pass *pass, pir::Operation *op) { // return pass->name() == "ConstantFoldingPass"; // }, - // [](ir::Pass *pass, ir::Operation *op) { + // [](pir::Pass *pass, pir::Operation *op) { // return pass->name() == "ConstantFoldingPass"; // }, // true, diff --git a/test/cpp/ir/shape_dialect/CMakeLists.txt b/test/cpp/pir/shape_dialect/CMakeLists.txt similarity index 77% rename from test/cpp/ir/shape_dialect/CMakeLists.txt rename to test/cpp/pir/shape_dialect/CMakeLists.txt index ae3e3d63d52bd..73c635713f99d 100644 --- a/test/cpp/ir/shape_dialect/CMakeLists.txt +++ b/test/cpp/pir/shape_dialect/CMakeLists.txt @@ -3,6 +3,6 @@ cc_test_old( SRCS symbolic_op_test.cc DEPS - pd_dialect - ir + pd_op_dialect + pir gtest) diff --git a/test/cpp/pir/shape_dialect/symbolic_op_test.cc b/test/cpp/pir/shape_dialect/symbolic_op_test.cc new file mode 100644 index 0000000000000..f916650376fbe --- /dev/null +++ b/test/cpp/pir/shape_dialect/symbolic_op_test.cc @@ -0,0 +1,371 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
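The same migration carries the pass plumbing over: TEST(pattern_rewrite, Patterns) above now drives pir::PassManager with pir::CreateDeadCodeEliminationPass and pir::CreateReorderBlockOpsPass. Condensed from the TestPass definition earlier in the file, the shape of a pass under the renamed API is as follows (a sketch; GreedyFusePass is an illustrative name, and it assumes the pir pass headers expose exactly the classes the test already uses):

class GreedyFusePass : public pir::Pass {
 public:
  GreedyFusePass() : pir::Pass("GreedyFusePass", /*opt_level=*/1) {}

  bool Initialize(pir::IrContext *context) override {
    pir::RewritePatternSet ps(context);
    // Patterns are registered here, e.g. ps.Add<Conv2dAddFusePattern>(context).
    patterns_ = pir::FrozenRewritePatternSet(std::move(ps));
    return true;
  }

  void Run(pir::Operation *op) override {
    pir::GreedyRewriteConfig cfg;
    cfg.use_top_down_traversal = true;
    cfg.max_iterations = 10;
    pir::ApplyPatternsGreedily(op->region(0), patterns_, cfg);
  }

  bool CanApplyOn(pir::Operation *op) const override {
    return op->name() == "builtin.module" && op->num_regions() > 0;
  }

 private:
  pir::FrozenRewritePatternSet patterns_;
};

// Driven exactly as in the test:
//   pir::PassManager pm(pir::IrContext::Instance());
//   pm.AddPass(std::make_unique<GreedyFusePass>());
//   pm.AddPass(pir::CreateDeadCodeEliminationPass());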
+ +#include +#include +#include "paddle/fluid/pir/dialect/operator/ir/op_dialect.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_type.h" +#include "paddle/pir/core/block.h" +#include "paddle/pir/core/builder.h" +#include "paddle/pir/core/builtin_type.h" +#include "paddle/pir/core/dialect.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/program.h" +#include "paddle/pir/dialect/shape/ir/shape_dialect.h" +#include "paddle/pir/dialect/shape/ir/shape_op.h" +#include "paddle/pir/dialect/shape/utils/shape_utils.h" + +TEST(assist_struct_test, symbolic_dim) { + pir::IrContext *ctx = pir::IrContext::Instance(); + pir::Program program(ctx); + ctx->GetOrRegisterDialect(); + pir::Builder builder = pir::Builder(ctx, program.block()); + pir::dialect::SymbolicDim symDim = builder.Build( + "S0", 10, false, false, false, false); + pir::dialect::SymbolicDim symDim_ = builder.Build( + "S1", 10, false, false, false, false); + EXPECT_EQ(symDim.getValue(), 10); + EXPECT_EQ(symDim.getSymName(), "S0"); + EXPECT_FALSE(symDim.getKnownNegativeOne()); + EXPECT_FALSE(symDim.getKnownNonSizeOne()); + EXPECT_FALSE(symDim.getKnownNonSizeZero()); + EXPECT_FALSE(symDim.getKnownNonNegative()); + + EXPECT_FALSE(symDim.isDynamic()); + EXPECT_TRUE(symDim.merge(symDim_)); + + symDim.updateValue(20); + symDim.updateSymName("S2"); + symDim.updateKnownNegativeOne(true); + symDim.updateKnownNonSizeOne(true); + symDim.updateKnownNonSizeZero(true); + symDim.updateKnownNonNegative(true); + + EXPECT_FALSE(symDim.merge(symDim_)); + + EXPECT_EQ(symDim.getValue(), 20); + EXPECT_EQ(symDim.getSymName(), "S2"); + EXPECT_TRUE(symDim.getKnownNegativeOne()); + EXPECT_TRUE(symDim.getKnownNonSizeOne()); + EXPECT_TRUE(symDim.getKnownNonSizeZero()); + EXPECT_TRUE(symDim.getKnownNonNegative()); +} + +TEST(assist_struct_test, symbolic_dim_product) { + pir::IrContext *ctx = pir::IrContext::Instance(); + pir::Program program(ctx); + ctx->GetOrRegisterDialect(); + pir::Builder builder = pir::Builder(ctx, program.block()); + pir::dialect::SymbolicDim symDim = builder.Build( + "S0", -100000, false, false, false, false); + pir::SymbolicDimProduct symDimProduct; + pir::SymbolicDimProduct symDimProduct_; + symDimProduct.symbols.push_back(symDim); + symDimProduct.factor *= 10; + EXPECT_EQ(symDimProduct.factor, 10); + EXPECT_NE(symDimProduct, symDimProduct_); + EXPECT_FALSE(symDimProduct.empty()); +} + +TEST(assist_struct_test, symbolic_dim_table) { + pir::IrContext *ctx = pir::IrContext::Instance(); + pir::Program program(ctx); + ctx->GetOrRegisterDialect(); + pir::Builder builder = pir::Builder(ctx, program.block()); + pir::dialect::SymbolicDim symDim = builder.Build( + "S0", 10, false, false, false, false); + + pir::SymbolTable symbolTable(program.module_op()); + EXPECT_EQ(symbolTable.insert(symDim), "S0"); + EXPECT_EQ(symbolTable.lookup("S0"), symDim); + EXPECT_EQ(symbolTable.getOp(), program.module_op()); + EXPECT_FALSE(symbolTable.lookup("S1")); +} + +TEST(assist_struct_test, symbolic_dim_mgr_simple) { + /******************************************************/ + /* Mgr simple version, only SymbolicDim related func. 
*/ + /******************************************************/ + pir::IrContext *ctx = pir::IrContext::Instance(); + pir::Program program(ctx); + ctx->GetOrRegisterDialect(); + ctx->GetOrRegisterDialect(); + + pir::SymbolicDimMgr symDimMgr(program.module_op()); + pir::dialect::SymbolicDim symDimS0 = symDimMgr.newSymbolicDim(); + pir::dialect::SymbolicDim symDimS1 = symDimMgr.newSymbolicDim(); + pir::dialect::SymbolicDim symDimC10 = symDimMgr.newConstantSymbolicDim(10); + symDimMgr.mapSymbolicDimEqual(symDimS0, symDimS1); + + pir::Attribute attr_value = pir::StrAttribute::get(ctx, "op_attr"); + pir::AttributeMap attr_map; + attr_map.insert(std::pair("op", attr_value)); + std::vector op_inputs = {}; + + pir::Type fp32_dtype = pir::Float32Type::get(ctx); + phi::DDim dims = {-100000, 2}; + phi::DataLayout data_layout = phi::DataLayout::NCHW; + phi::LoD lod = {{0, 1, 2}}; + size_t offset = 0; + std::vector op_output_types = { + paddle::dialect::DenseTensorType::get( + ctx, fp32_dtype, dims, data_layout, lod, offset)}; + pir::Operation *op = pir::Operation::Create( + op_inputs, attr_map, op_output_types, pir::OpInfo()); + pir::Value res = op->result(0); + + std::vector symDimVec = + symDimMgr.createSymbolicDimsForRankedValue(res); + + EXPECT_EQ(symDimS0.getSymName(), "S0"); + EXPECT_EQ(symDimS1.getSymName(), "S1"); + EXPECT_EQ(symDimS1.getValue(), -100000); + EXPECT_EQ(symDimC10.getSymName(), "C10"); + EXPECT_EQ(symDimC10.getValue(), 10); + EXPECT_EQ(symDimVec[0].getSymName(), "S2"); + EXPECT_EQ(symDimVec[1].getSymName(), "C2"); + EXPECT_EQ(symDimMgr.symbolTable().lookup("S0"), + symDimS0); + EXPECT_EQ(symDimMgr.symbolTable().lookup("C10"), + symDimC10); + EXPECT_EQ(symDimMgr.getRootSymbolicDim(symDimS1), symDimS0); + EXPECT_TRUE(symDimMgr.isSymbolicDimEqual(symDimS0, symDimS1)); + EXPECT_FALSE(symDimMgr.isSymbolicDimEqual(symDimS0, symDimC10)); +} + +TEST(assist_struct_test, symbolic_dim_mgr_complex) { + /***************************************************************/ + /* Mgr with constraintOp, and SymbolicDimProduct related func. 
*/
+  /***************************************************************/
+  pir::IrContext *ctx = pir::IrContext::Instance();
+  pir::Program program(ctx);
+  ctx->GetOrRegisterDialect<pir::dialect::ShapeDialect>();
+  ctx->GetOrRegisterDialect<paddle::dialect::OperatorDialect>();
+  pir::Builder builder = pir::Builder(ctx, program.block());
+
+  pir::dialect::SymbolicDim symDimS0 = builder.Build<pir::dialect::SymbolicDim>(
+      "S0", -100000, false, false, true, true);
+  pir::dialect::SymbolicDim symDimS1 = builder.Build<pir::dialect::SymbolicDim>(
+      "S1", -100000, false, false, true, true);
+  pir::dialect::SymbolicDim symDimS2 = builder.Build<pir::dialect::SymbolicDim>(
+      "S2", -100000, false, false, true, true);
+  pir::dialect::SymbolicDim symDimS3 = builder.Build<pir::dialect::SymbolicDim>(
+      "S3", -100000, false, false, true, true);
+  pir::dialect::SymbolicDim symDimS4 = builder.Build<pir::dialect::SymbolicDim>(
+      "S4", -100000, false, false, true, true);
+  pir::dialect::SymbolicDim symDimS5 = builder.Build<pir::dialect::SymbolicDim>(
+      "S5", -100000, false, false, true, true);
+  pir::dialect::SymbolicDim symDimS6 = builder.Build<pir::dialect::SymbolicDim>(
+      "S6", -100000, false, false, true, true);
+  pir::dialect::SymbolicDim symDimS7 = builder.Build<pir::dialect::SymbolicDim>(
+      "S7", -100000, false, false, true, true);
+  pir::dialect::SymbolicDim symDimS8 = builder.Build<pir::dialect::SymbolicDim>(
+      "S8", -100000, false, false, true, true);
+  pir::dialect::SymbolicDim symDimS9 = builder.Build<pir::dialect::SymbolicDim>(
+      "S9", -100000, false, false, true, true);
+  pir::dialect::SymbolicDim symDimS10 =
+      builder.Build<pir::dialect::SymbolicDim>(
+          "S10", -100000, false, false, true, true);
+  pir::dialect::SymbolicDim symDimS11 =
+      builder.Build<pir::dialect::SymbolicDim>(
+          "S11", -100000, false, false, true, true);
+  pir::dialect::SymbolicDim symDimS12 =
+      builder.Build<pir::dialect::SymbolicDim>(
+          "S12", -100000, false, false, true, false);
+  pir::dialect::SymbolicDim symDimC10 =
+      builder.Build<pir::dialect::SymbolicDim>(
+          "C10", 10, true, false, true, true);
+  pir::dialect::SymbolicDim symDimC20 =
+      builder.Build<pir::dialect::SymbolicDim>(
+          "C20", 20, true, false, true, true);
+
+  pir::OpResult dimOpS0 = builder.Build<pir::dialect::DimOp>("S0").out();
+  pir::OpResult dimOpS1 = builder.Build<pir::dialect::DimOp>("S1").out();
+  pir::OpResult dimOpS2 = builder.Build<pir::dialect::DimOp>("S2").out();
+  pir::OpResult dimOpS3 = builder.Build<pir::dialect::DimOp>("S3").out();
+  pir::OpResult dimOpS4 = builder.Build<pir::dialect::DimOp>("S4").out();
+  pir::OpResult dimOpS5 = builder.Build<pir::dialect::DimOp>("S5").out();
+  pir::OpResult dimOpS6 = builder.Build<pir::dialect::DimOp>("S6").out();
+  pir::OpResult dimOpS7 = builder.Build<pir::dialect::DimOp>("S7").out();
+  pir::OpResult dimOpS8 = builder.Build<pir::dialect::DimOp>("S8").out();
+  pir::OpResult dimOpS9 = builder.Build<pir::dialect::DimOp>("S9").out();
+  pir::OpResult dimOpS10 = builder.Build<pir::dialect::DimOp>("S10").out();
+  pir::OpResult dimOpS11 = builder.Build<pir::dialect::DimOp>("S11").out();
+  pir::OpResult dimOpC10 = builder.Build<pir::dialect::DimOp>("C10").out();
+  pir::OpResult dimOpC20 = builder.Build<pir::dialect::DimOp>("C20").out();
+  pir::OpResult constant =
+      builder
+          .Build<pir::ConstantOp>(pir::Int32Attribute::get(ctx, 2),
+                                  pir::Int32Type::get(ctx))
+          ->result(0);
+
+  // Mark S1 == S2.
+  builder.Build<pir::dialect::TieProductEqualOp>(
+      2, 2, std::vector<pir::Value>{constant, dimOpS1, dimOpS2, constant});
+  // Mark S0 * S1 == S2 * S3, to check S0 == S3.
+  builder.Build<pir::dialect::TieProductEqualOp>(
+      2, 2, std::vector<pir::Value>{dimOpS0, dimOpS1, dimOpS2, dimOpS3});
+  // Mark S4 * S0 * S1 == S2 * S3 * S5, to check S4 == S5.
+  builder.Build<pir::dialect::TieProductEqualOp>(
+      3,
+      3,
+      std::vector<pir::Value>{
+          dimOpS4, dimOpS0, dimOpS1, dimOpS2, dimOpS3, dimOpS5});
+  // To check S6 == C10 * C20.
+  builder.Build<pir::dialect::TieProductEqualOp>(
+      1, 2, std::vector<pir::Value>{dimOpS6, dimOpC10, dimOpC20});
+  // Mark C10 * S0 * S1 == S2 * S3 * S7, to check C10 == S7.
+  builder.Build<pir::dialect::TieProductEqualOp>(
+      3,
+      3,
+      std::vector<pir::Value>{
+          dimOpC10, dimOpS0, dimOpS1, dimOpS2, dimOpS3, dimOpS7});
+
+  // Mark S8 * S9 == S10 * S11, for the not-fully-simplified product case.
+  builder.Build<pir::dialect::TieProductEqualOp>(
+      2, 2, std::vector<pir::Value>{dimOpS8, dimOpS9, dimOpS10, dimOpS11});
+
+  pir::SymbolicDimMgr symDimMgr(program.module_op());
+
+  symDimMgr.load();
+
+  // To check indirect equality: S1 * S4 == S2 * S5.
+  pir::SymbolicDimProduct symDimProductLhs;
+  pir::SymbolicDimProduct symDimProductRhs;
+
+  symDimProductLhs.symbols.push_back(symDimS1);
+  symDimProductLhs.symbols.push_back(symDimS4);
+
+  symDimProductRhs.symbols.push_back(symDimS2);
+  symDimProductRhs.symbols.push_back(symDimS5);
+
+  // For the incompletely simplified product check:
+  // S8 * S9 * S12 == S10 * S11 * S12.
+  pir::SymbolicDimProduct symDimProductLhs_;
+  pir::SymbolicDimProduct symDimProductRhs_;
+
+  symDimProductLhs_.symbols.push_back(symDimS8);
+  symDimProductLhs_.symbols.push_back(symDimS9);
+  symDimProductLhs_.symbols.push_back(symDimS12);
+
+  symDimProductRhs_.symbols.push_back(symDimS10);
+  symDimProductRhs_.symbols.push_back(symDimS11);
+  symDimProductRhs_.symbols.push_back(symDimS12);
+
+  // To check simplifySymbolicDimProduct: {factor = 1, Sym = {S7}} => {factor =
+  // 10}.
+  pir::SymbolicDimProduct symDimProductS7;
+  symDimProductS7.symbols.push_back(symDimS7);
+  pir::SymbolicDimProduct simplifiedProductS7 =
+      symDimMgr.simplifySymbolicDimProduct(symDimProductS7);
+
+  // To check simplifySymbolicDimProductPair: X * Y * Y, Y * Y * Z => X, Z.
+  pir::SymbolicDimProduct symDimProductPairLhs;
+  pir::SymbolicDimProduct symDimProductPairRhs;
+  pir::SymbolicDimProduct newLhs, newRhs;
+  symDimProductPairLhs.symbols.push_back(symDimS4);
+  symDimProductPairLhs.symbols.push_back(symDimS1);
+  symDimProductPairLhs.symbols.push_back(symDimS2);
+  symDimProductPairRhs.symbols.push_back(symDimS1);
+  symDimProductPairRhs.symbols.push_back(symDimS2);
+  symDimProductPairRhs.symbols.push_back(symDimS3);
+
+  std::tie(newLhs, newRhs) = symDimMgr.simplifySymbolicDimProductPair(
+      symDimProductPairLhs, symDimProductPairRhs);
+
+  // To check symbolicDimProductDivide: {S4 * S1 * C20} / {S1 * C10} => {factor
+  // = 2, Sym = {S4}}.
+  pir::SymbolicDimProduct symDimProductDivLhs;
+  pir::SymbolicDimProduct symDimProductDivRhs;
+  symDimProductDivLhs.symbols.push_back(symDimS4);
+  symDimProductDivLhs.symbols.push_back(symDimS1);
+  symDimProductDivLhs.symbols.push_back(symDimC20);
+  symDimProductDivRhs.symbols.push_back(symDimS1);
+  symDimProductDivRhs.symbols.push_back(symDimC10);
+
+  pir::SymbolicDimProduct *divRes = symDimMgr.symbolicDimProductDivide(
+      symDimProductDivLhs, symDimProductDivRhs);
+
+  EXPECT_TRUE(symDimMgr.isSymbolicDimEqual(symDimS1, symDimS2));
+  EXPECT_TRUE(symDimMgr.isSymbolicDimEqual(symDimS0, symDimS3));
+  EXPECT_TRUE(symDimMgr.isSymbolicDimEqual(symDimS4, symDimS5));
+  EXPECT_EQ(symDimS6.getValue(), 200);
+  EXPECT_EQ(symDimMgr.symbolTable().lookup<pir::dialect::SymbolicDim>("C20"),
+            symDimC20);
+  EXPECT_EQ(symDimS7.getValue(), symDimC10.getValue());
+  EXPECT_EQ(simplifiedProductS7.factor, 10);
+  EXPECT_EQ(simplifiedProductS7.symbols.size(), static_cast<size_t>(0));
+  EXPECT_EQ(newLhs.symbols.size(), static_cast<size_t>(1));
+  EXPECT_EQ(newRhs.symbols.size(), static_cast<size_t>(1));
+  EXPECT_EQ(newLhs.symbols[0], symDimMgr.getRootSymbolicDim(symDimS4));
+  EXPECT_EQ(newRhs.symbols[0], symDimMgr.getRootSymbolicDim(symDimS3));
+  EXPECT_EQ(divRes->factor, 2);
+  EXPECT_EQ(divRes->symbols.size(), static_cast<size_t>(1));
+  EXPECT_EQ(divRes->symbols[0], symDimMgr.getRootSymbolicDim(symDimS4));
+  EXPECT_TRUE(
symDimMgr.isSymbolicDimProductEqual(symDimProductLhs, symDimProductRhs)); + EXPECT_TRUE(symDimMgr.isSymbolicDimProductEqual(symDimProductLhs_, + symDimProductRhs_)); +} + +TEST(assist_struct_test, dim) { + pir::IrContext *ctx = pir::IrContext::Instance(); + pir::Program program(ctx); + ctx->GetOrRegisterDialect(); + pir::Builder builder = pir::Builder(ctx, program.block()); + + pir::dialect::DimOp dimOp = builder.Build("S0"); + pir::OpResult res = dimOp.out(); + EXPECT_EQ(dimOp.getName(), "S0"); + dimOp.setName("S1"); + EXPECT_EQ(dimOp.getName(), "S1"); + EXPECT_EQ(res.GetDefiningOp(), dimOp.operation()); + EXPECT_EQ(res.type(), pir::IndexType::get(ctx)); +} + +TEST(assist_struct_test, tie_product_equal) { + pir::IrContext *ctx = pir::IrContext::Instance(); + pir::Program program(ctx); + ctx->GetOrRegisterDialect(); + pir::Builder builder = pir::Builder(ctx, program.block()); + pir::SymbolTable symbolTable(program.module_op()); + + pir::OpResult dimOp0 = builder.Build("S0").out(); + pir::OpResult dimOp1 = builder.Build("S1").out(); + pir::OpResult dimOp2 = builder.Build("S2").out(); + pir::OpResult dimOp3 = builder.Build("S3").out(); + pir::OpResult dimOp4 = builder.Build("S4").out(); + + pir::dialect::TieProductEqualOp tie_product_equal = + builder.Build( + 2, + 3, + std::vector{dimOp0, dimOp1, dimOp2, dimOp3, dimOp4}); + + std::vector lhs = tie_product_equal.getLhs(); + std::vector rhs = tie_product_equal.getRhs(); + + std::vector lhs_ref{dimOp0, dimOp1}; + std::vector rhs_ref{dimOp2, dimOp3, dimOp4}; + + EXPECT_EQ(symbolTable.insert(tie_product_equal), "tie_product_equal"); + EXPECT_EQ( + symbolTable.lookup("tie_product_equal") + .size(), + static_cast(1)); + EXPECT_EQ(symbolTable.lookup( + "tie_product_equal")[0], + tie_product_equal); + EXPECT_EQ(lhs, lhs_ref); + EXPECT_EQ(rhs, rhs_ref); +} diff --git a/test/cpp/ir/tools/CMakeLists.txt b/test/cpp/pir/tools/CMakeLists.txt similarity index 83% rename from test/cpp/ir/tools/CMakeLists.txt rename to test/cpp/pir/tools/CMakeLists.txt index 58179d87e0d88..64e5b97243620 100644 --- a/test/cpp/ir/tools/CMakeLists.txt +++ b/test/cpp/pir/tools/CMakeLists.txt @@ -1,4 +1,4 @@ cc_library( test_dialect SRCS test_dialect.cc test_op.cc - DEPS ir) + DEPS pir) diff --git a/test/cpp/ir/tools/test_dialect.cc b/test/cpp/pir/tools/test_dialect.cc similarity index 90% rename from test/cpp/ir/tools/test_dialect.cc rename to test/cpp/pir/tools/test_dialect.cc index c16b9be067663..bf94e8db3dce1 100644 --- a/test/cpp/ir/tools/test_dialect.cc +++ b/test/cpp/pir/tools/test_dialect.cc @@ -11,8 +11,8 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
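Before the tooling renames that follow: reduced to its core, the manager flow exercised by the new symbolic_op_test.cc is short. A minimal sketch using only calls that appear in the tests above (it assumes a pir::Program named program with the shape dialect registered, as in each TEST body):

// Union-find over symbolic dimensions, scoped to the module op.
pir::SymbolicDimMgr mgr(program.module_op());
pir::dialect::SymbolicDim s0 = mgr.newSymbolicDim();             // "S0"
pir::dialect::SymbolicDim s1 = mgr.newSymbolicDim();             // "S1"
pir::dialect::SymbolicDim c10 = mgr.newConstantSymbolicDim(10);  // "C10"

mgr.mapSymbolicDimEqual(s0, s1);  // record S0 == S1

// Queries resolve through the root symbol:
//   mgr.getRootSymbolicDim(s1)       -> s0
//   mgr.isSymbolicDimEqual(s0, s1)   -> true
//   mgr.isSymbolicDimEqual(s0, c10)  -> false

// Products of symbols compare after simplification:
pir::SymbolicDimProduct lhs, rhs;
lhs.symbols.push_back(s0);
rhs.symbols.push_back(s1);
bool products_equal = mgr.isSymbolicDimProductEqual(lhs, rhs);  // true here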
-#include "test/cpp/ir/tools/test_dialect.h" -#include "test/cpp/ir/tools/test_op.h" +#include "test/cpp/pir/tools/test_dialect.h" +#include "test/cpp/pir/tools/test_op.h" namespace test { void TestDialect::initialize() { RegisterOps(); } } // namespace test diff --git a/test/cpp/ir/tools/test_dialect.h b/test/cpp/pir/tools/test_dialect.h similarity index 80% rename from test/cpp/ir/tools/test_dialect.h rename to test/cpp/pir/tools/test_dialect.h index 4403719458e4b..8b259c5563c4b 100644 --- a/test/cpp/ir/tools/test_dialect.h +++ b/test/cpp/pir/tools/test_dialect.h @@ -14,13 +14,13 @@ #pragma once -#include "paddle/ir/core/dialect.h" +#include "paddle/pir/core/dialect.h" namespace test { -class TestDialect : public ir::Dialect { +class TestDialect : public pir::Dialect { public: - explicit TestDialect(ir::IrContext *context) - : ir::Dialect(name(), context, ir::TypeId::get()) { + explicit TestDialect(pir::IrContext *context) + : pir::Dialect(name(), context, pir::TypeId::get()) { initialize(); } static const char *name() { return "test"; } diff --git a/test/cpp/ir/tools/test_op.cc b/test/cpp/pir/tools/test_op.cc similarity index 79% rename from test/cpp/ir/tools/test_op.cc rename to test/cpp/pir/tools/test_op.cc index 40dc46c0b8e14..9802f8827cf6f 100644 --- a/test/cpp/ir/tools/test_op.cc +++ b/test/cpp/pir/tools/test_op.cc @@ -11,10 +11,10 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -#include "test/cpp/ir/tools/test_op.h" +#include "test/cpp/pir/tools/test_op.h" namespace test { -void RegionOp::Build(ir::Builder &builder, ir::OperationArgument &argument) { +void RegionOp::Build(pir::Builder &builder, pir::OperationArgument &argument) { argument.num_regions = 1; } void RegionOp::Verify() const { @@ -24,10 +24,10 @@ void RegionOp::Verify() const { num_regions); } -void BranchOp::Build(ir::Builder &builder, // NOLINT - ir::OperationArgument &argument, - const std::vector &target_operands, - ir::Block *target) { +void BranchOp::Build(pir::Builder &builder, // NOLINT + pir::OperationArgument &argument, + const std::vector &target_operands, + pir::Block *target) { argument.AddOperands(target_operands.begin(), target_operands.end()); argument.AddSuccessor(target); } diff --git a/test/cpp/ir/tools/test_op.h b/test/cpp/pir/tools/test_op.h similarity index 70% rename from test/cpp/ir/tools/test_op.h rename to test/cpp/pir/tools/test_op.h index 1462a9555cb07..9e0f9f1e933b2 100644 --- a/test/cpp/ir/tools/test_op.h +++ b/test/cpp/pir/tools/test_op.h @@ -14,37 +14,37 @@ #pragma once -#include "paddle/ir/core/builder.h" -#include "paddle/ir/core/op_base.h" +#include "paddle/pir/core/builder.h" +#include "paddle/pir/core/op_base.h" namespace test { /// /// \brief TestRegionOp /// -class RegionOp : public ir::Op { +class RegionOp : public pir::Op { public: using Op::Op; static const char *name() { return "test.region"; } static constexpr uint32_t attributes_num = 0; static constexpr const char **attributes_name = nullptr; - static void Build(ir::Builder &builder, // NOLINT - ir::OperationArgument &argument); // NOLINT + static void Build(pir::Builder &builder, // NOLINT + pir::OperationArgument &argument); // NOLINT void Verify() const; }; /// /// \brief TestBranchOp /// -class BranchOp : public ir::Op { +class BranchOp : public pir::Op { public: using Op::Op; static const char *name() { return "test.branch"; } static constexpr uint32_t attributes_num = 0; static 
constexpr const char **attributes_name = nullptr; - static void Build(ir::Builder &builder, // NOLINT - ir::OperationArgument &argument, // NOLINT - const std::vector &target_operands, - ir::Block *target); + static void Build(pir::Builder &builder, // NOLINT + pir::OperationArgument &argument, // NOLINT + const std::vector &target_operands, + pir::Block *target); void Verify() const; }; diff --git a/test/cpp/prim/CMakeLists.txt b/test/cpp/prim/CMakeLists.txt index e1ae6d843c96a..efe5f0a635597 100644 --- a/test/cpp/prim/CMakeLists.txt +++ b/test/cpp/prim/CMakeLists.txt @@ -68,5 +68,5 @@ if(NOT WIN32) cc_test( test_vjp_new_ir SRCS test_vjp.cc - DEPS phi_kernel_adaptor pd_dialect ir) + DEPS phi_kernel_adaptor pd_op_dialect pir) endif() diff --git a/test/cpp/prim/test_vjp.cc b/test/cpp/prim/test_vjp.cc index 496bb1de1891a..0a8ddc10a1617 100644 --- a/test/cpp/prim/test_vjp.cc +++ b/test/cpp/prim/test_vjp.cc @@ -16,19 +16,19 @@ #include "paddle/fluid/framework/new_executor/new_ir_interpreter.h" #include "paddle/fluid/framework/new_executor/standalone_executor.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/api_builder.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_op.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h" -#include "paddle/fluid/ir/dialect/paddle_dialect/utils/utils.h" -#include "paddle/fluid/ir/transforms/pd_op_to_kernel_pass.h" +#include "paddle/fluid/pir/dialect/operator/ir/api_builder.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_dialect.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_type.h" +#include "paddle/fluid/pir/dialect/operator/ir/pd_op.h" +#include "paddle/fluid/pir/dialect/operator/utils/utils.h" +#include "paddle/fluid/pir/transforms/pd_op_to_kernel_pass.h" #include "paddle/fluid/platform/init_phi.h" -#include "paddle/ir/core/block.h" -#include "paddle/ir/core/builtin_attribute.h" -#include "paddle/ir/core/builtin_op.h" -#include "paddle/ir/core/ir_context.h" -#include "paddle/ir/core/program.h" -#include "paddle/ir/core/utils.h" +#include "paddle/pir/core/block.h" +#include "paddle/pir/core/builtin_attribute.h" +#include "paddle/pir/core/builtin_op.h" +#include "paddle/pir/core/ir_context.h" +#include "paddle/pir/core/program.h" +#include "paddle/pir/core/utils.h" DECLARE_FILE_SYMBOLS(kernel_dialect); @@ -43,12 +43,12 @@ namespace paddle { namespace framework { TEST(VJP, TanhBackwardTest) { - ir::IrContext* ctx = ir::IrContext::Instance(); - ctx->GetOrRegisterDialect(); - ir::Program program((ctx)); + pir::IrContext* ctx = pir::IrContext::Instance(); + ctx->GetOrRegisterDialect(); + pir::Program program((ctx)); paddle::dialect::APIBuilder::Instance().SetProgram(&program); - std::shared_ptr builder = + std::shared_ptr builder = paddle::dialect::APIBuilder::Instance().GetBuilder(); paddle::dialect::FullOp op1 = builder->Build( std::vector{1}, 1.0, phi::DataType::FLOAT32, phi::CPUPlace()); @@ -59,9 +59,9 @@ TEST(VJP, TanhBackwardTest) { std::vector{1}, 2.0, phi::DataType::FLOAT32, phi::CPUPlace()); std::vector> stop_gradients{{false}}; - std::vector> out_grads{{op3.out()}}; + std::vector> out_grads{{op3.out()}}; - ir::OpInfo op2_info = ctx->GetRegisteredOpInfo("pd.tanh"); + pir::OpInfo op2_info = ctx->GetRegisteredOpInfo("pd_op.tanh"); auto tanh_vjp_interface_impl = op2_info.GetInterfaceImpl(); tanh_vjp_interface_impl->vjp_(op2.operation(), out_grads, stop_gradients); @@ -98,12 +98,12 @@ TEST(VJP, TanhBackwardTest) { } TEST(VJP, Tanh_BackwardTest) { - 
ir::IrContext* ctx = ir::IrContext::Instance(); - ctx->GetOrRegisterDialect(); - ir::Program program((ctx)); + pir::IrContext* ctx = pir::IrContext::Instance(); + ctx->GetOrRegisterDialect(); + pir::Program program((ctx)); paddle::dialect::APIBuilder::Instance().SetProgram(&program); - std::shared_ptr builder = + std::shared_ptr builder = paddle::dialect::APIBuilder::Instance().GetBuilder(); paddle::dialect::FullOp op1 = builder->Build( std::vector{1}, 1.0, phi::DataType::FLOAT32, phi::CPUPlace()); @@ -114,9 +114,9 @@ TEST(VJP, Tanh_BackwardTest) { std::vector{1}, 2.0, phi::DataType::FLOAT32, phi::CPUPlace()); std::vector> stop_gradients{{false}}; - std::vector> out_grads{{op3.out()}}; + std::vector> out_grads{{op3.out()}}; - ir::OpInfo op2_info = ctx->GetRegisteredOpInfo("pd.tanh_"); + pir::OpInfo op2_info = ctx->GetRegisteredOpInfo("pd_op.tanh_"); auto tanh_vjp_interface_impl = op2_info.GetInterfaceImpl(); tanh_vjp_interface_impl->vjp_(op2.operation(), out_grads, stop_gradients); @@ -153,12 +153,12 @@ TEST(VJP, Tanh_BackwardTest) { } TEST(VJP, MeanBackwardTest) { - ir::IrContext* ctx = ir::IrContext::Instance(); - ctx->GetOrRegisterDialect(); - ir::Program program((ctx)); + pir::IrContext* ctx = pir::IrContext::Instance(); + ctx->GetOrRegisterDialect(); + pir::Program program((ctx)); paddle::dialect::APIBuilder::Instance().SetProgram(&program); - std::shared_ptr builder = + std::shared_ptr builder = paddle::dialect::APIBuilder::Instance().GetBuilder(); paddle::dialect::FullOp op1 = builder->Build( std::vector{2, 2}, 2.0, phi::DataType::FLOAT32, phi::CPUPlace()); @@ -169,9 +169,9 @@ TEST(VJP, MeanBackwardTest) { std::vector{}, 1.0, phi::DataType::FLOAT32, phi::CPUPlace()); std::vector> stop_gradients{{false}}; - std::vector> out_grads{{op3.out()}}; + std::vector> out_grads{{op3.out()}}; - ir::OpInfo op2_info = ctx->GetRegisteredOpInfo("pd.mean"); + pir::OpInfo op2_info = ctx->GetRegisteredOpInfo("pd_op.mean"); auto mean_vjp_interface_impl = op2_info.GetInterfaceImpl(); mean_vjp_interface_impl->vjp_(op2.operation(), out_grads, stop_gradients); @@ -210,25 +210,25 @@ TEST(VJP, MeanBackwardTest) { } TEST(VJP, ConcatBackwardTest) { - ir::IrContext* ctx = ir::IrContext::Instance(); - ctx->GetOrRegisterDialect(); - ir::Program program((ctx)); + pir::IrContext* ctx = pir::IrContext::Instance(); + ctx->GetOrRegisterDialect(); + pir::Program program((ctx)); paddle::dialect::APIBuilder::Instance().SetProgram(&program); - std::shared_ptr builder = + std::shared_ptr builder = paddle::dialect::APIBuilder::Instance().GetBuilder(); paddle::dialect::FullOp op1 = builder->Build( std::vector{1, 2}, 2.0, phi::DataType::FLOAT32, phi::CPUPlace()); - std::vector combine_input{{op1.out(), op1.out()}}; - ir::CombineOp op2 = builder->Build(combine_input); + std::vector combine_input{{op1.out(), op1.out()}}; + pir::CombineOp op2 = builder->Build(combine_input); paddle::dialect::ConcatOp op3 = builder->Build(op2.out(), 0); paddle::dialect::FullOp op4 = builder->Build( std::vector{2, 2}, 1.0, phi::DataType::FLOAT32, phi::CPUPlace()); std::vector> stop_gradients{{false, false}}; - std::vector> out_grads{{op4.out()}}; - ir::OpInfo op2_info = ctx->GetRegisteredOpInfo("pd.concat"); + std::vector> out_grads{{op4.out()}}; + pir::OpInfo op2_info = ctx->GetRegisteredOpInfo("pd_op.concat"); auto concat_vjp_interface_impl = op2_info.GetInterfaceImpl(); concat_vjp_interface_impl->vjp_(op3.operation(), out_grads, stop_gradients); @@ -273,12 +273,12 @@ TEST(VJP, ConcatBackwardTest) { } TEST(VJP, AddBackwardTest) { - 
ir::IrContext* ctx = ir::IrContext::Instance(); - ctx->GetOrRegisterDialect(); - ir::Program program((ctx)); + pir::IrContext* ctx = pir::IrContext::Instance(); + ctx->GetOrRegisterDialect(); + pir::Program program((ctx)); paddle::dialect::APIBuilder::Instance().SetProgram(&program); - std::shared_ptr builder = + std::shared_ptr builder = paddle::dialect::APIBuilder::Instance().GetBuilder(); paddle::dialect::FullOp op1 = builder->Build( std::vector{1}, 2.0, phi::DataType::FLOAT32, phi::CPUPlace()); @@ -291,9 +291,9 @@ TEST(VJP, AddBackwardTest) { std::vector{1}, 1.0, phi::DataType::FLOAT32, phi::CPUPlace()); std::vector> stop_gradients{{false}, {false}}; - std::vector> out_grads{{op4.out()}}; + std::vector> out_grads{{op4.out()}}; - ir::OpInfo op3_info = ctx->GetRegisteredOpInfo("pd.add"); + pir::OpInfo op3_info = ctx->GetRegisteredOpInfo("pd_op.add"); auto add_vjp_interface_impl = op3_info.GetInterfaceImpl(); add_vjp_interface_impl->vjp_(op3.operation(), out_grads, stop_gradients); @@ -338,12 +338,12 @@ TEST(VJP, AddBackwardTest) { } TEST(VJP, Add_BackwardTest) { - ir::IrContext* ctx = ir::IrContext::Instance(); - ctx->GetOrRegisterDialect(); - ir::Program program((ctx)); + pir::IrContext* ctx = pir::IrContext::Instance(); + ctx->GetOrRegisterDialect(); + pir::Program program((ctx)); paddle::dialect::APIBuilder::Instance().SetProgram(&program); - std::shared_ptr builder = + std::shared_ptr builder = paddle::dialect::APIBuilder::Instance().GetBuilder(); paddle::dialect::FullOp op1 = builder->Build( std::vector{1}, 2.0, phi::DataType::FLOAT32, phi::CPUPlace()); @@ -356,9 +356,9 @@ TEST(VJP, Add_BackwardTest) { std::vector{1}, 1.0, phi::DataType::FLOAT32, phi::CPUPlace()); std::vector> stop_gradients{{false}, {false}}; - std::vector> out_grads{{op4.out()}}; + std::vector> out_grads{{op4.out()}}; - ir::OpInfo op3_info = ctx->GetRegisteredOpInfo("pd.add_"); + pir::OpInfo op3_info = ctx->GetRegisteredOpInfo("pd_op.add_"); auto add_inplace_vjp_interface_impl = op3_info.GetInterfaceImpl(); add_inplace_vjp_interface_impl->vjp_( @@ -404,11 +404,11 @@ TEST(VJP, Add_BackwardTest) { } TEST(VJP, SplitBackwardTest) { - ir::IrContext* ctx = ir::IrContext::Instance(); - ir::Program program((ctx)); + pir::IrContext* ctx = pir::IrContext::Instance(); + pir::Program program((ctx)); paddle::dialect::APIBuilder::Instance().SetProgram(&program); - std::shared_ptr builder = + std::shared_ptr builder = paddle::dialect::APIBuilder::Instance().GetBuilder(); paddle::dialect::FullOp op1 = builder->Build( std::vector{2, 2}, 2.0, phi::DataType::FLOAT32, phi::CPUPlace()); @@ -416,14 +416,14 @@ TEST(VJP, SplitBackwardTest) { paddle::dialect::SplitOp op2 = builder->Build( op1.out(), std::vector{1, 1}, 0); - ir::SplitOp op3 = builder->Build(op2.out()); + pir::SplitOp op3 = builder->Build(op2.out()); paddle::dialect::FullOp op4 = builder->Build( std::vector{1, 2}, 1.0, phi::DataType::FLOAT32, phi::CPUPlace()); std::vector> stop_gradients{{false}, {true}, {true}}; - std::vector> out_grads{{op3.result(0), op4.out()}}; - ir::OpInfo op2_info = ctx->GetRegisteredOpInfo("pd.split"); + std::vector> out_grads{{op3.result(0), op4.out()}}; + pir::OpInfo op2_info = ctx->GetRegisteredOpInfo("pd_op.split"); auto concat_vjp_interface_impl = op2_info.GetInterfaceImpl(); diff --git a/test/ir/new_ir/test_build_op.py b/test/ir/new_ir/test_build_op.py index 16bc1adb0628e..1e07d0afb29d3 100644 --- a/test/ir/new_ir/test_build_op.py +++ b/test/ir/new_ir/test_build_op.py @@ -43,14 +43,14 @@ def test_build_mean_op(self): 
paddle.framework.set_flags({"FLAGS_enable_new_ir_api": True}) with paddle.ir.core.program_guard(newir_program): out = paddle.mean(tanh_out) - self.assertEqual(out.get_defining_op().name(), "pd.mean") + self.assertEqual(out.get_defining_op().name(), "pd_op.mean") self.assertEqual( out.get_defining_op() .operands()[0] .source() .get_defining_op() .name(), - "pd.tanh", + "pd_op.tanh", ) paddle.framework.set_flags({"FLAGS_enable_new_ir_api": False}) @@ -64,7 +64,7 @@ def test_build_add_n_op(self): out1 = paddle.mean(tanh_out) out2 = paddle.mean(tanh_out) out = paddle.add_n([out1, out2]) - self.assertEqual(out.get_defining_op().name(), "pd.add_n") + self.assertEqual(out.get_defining_op().name(), "pd_op.add_n") self.assertEqual( out.get_defining_op() .operands()[0] @@ -97,7 +97,7 @@ def test_insertion_point(self): print(newir_program) self.assertEqual( - tanh_operand.source().get_defining_op().name(), "pd.mean" + tanh_operand.source().get_defining_op().name(), "pd_op.mean" ) paddle.framework.set_flags({"FLAGS_enable_new_ir_api": False}) @@ -109,7 +109,7 @@ def test_build_concat_op(self): paddle.framework.set_flags({"FLAGS_enable_new_ir_api": True}) with paddle.ir.core.program_guard(newir_program): out = paddle.concat([tanh_out, tanh_out], 0) - self.assertEqual(out.get_defining_op().name(), "pd.concat") + self.assertEqual(out.get_defining_op().name(), "pd_op.concat") self.assertEqual( out.get_defining_op() .operands()[0] @@ -136,7 +136,7 @@ def test_build_split_op(self): .source() .get_defining_op() .name(), - "pd.split", + "pd_op.split", ) paddle.framework.set_flags({"FLAGS_enable_new_ir_api": False}) diff --git a/test/ir/new_ir/test_ir_backward.py b/test/ir/new_ir/test_ir_backward.py index 70eabedc41b89..c551ef4111dda 100644 --- a/test/ir/new_ir/test_ir_backward.py +++ b/test/ir/new_ir/test_ir_backward.py @@ -46,15 +46,17 @@ def test_grad(self): out2 = paddle.mean(tanh_out) input_grad = grad(out, input, out2) - self.assertEqual(out.get_defining_op().name(), "pd.mean") - self.assertEqual(input_grad[0].get_defining_op().name(), "pd.tanh_grad") + self.assertEqual(out.get_defining_op().name(), "pd_op.mean") + self.assertEqual( + input_grad[0].get_defining_op().name(), "pd_op.tanh_grad" + ) self.assertEqual( out.get_defining_op() .operands()[0] .source() .get_defining_op() .name(), - "pd.tanh", + "pd_op.tanh", ) paddle.framework.set_flags({"FLAGS_enable_new_ir_api": False}) @@ -68,8 +70,10 @@ def test_full(self): out = paddle.mean(tanh_out) input_grad = grad(out, input) - self.assertEqual(newir_program.block().ops[-3].name(), "pd.full") - self.assertEqual(input_grad[0].get_defining_op().name(), "pd.tanh_grad") + self.assertEqual(newir_program.block().ops[-3].name(), "pd_op.full") + self.assertEqual( + input_grad[0].get_defining_op().name(), "pd_op.tanh_grad" + ) self.assertEqual( input_grad[0] .get_defining_op() @@ -77,7 +81,7 @@ def test_full(self): .source() .get_defining_op() .name(), - "pd.mean_grad", + "pd_op.mean_grad", ) paddle.framework.set_flags({"FLAGS_enable_new_ir_api": False}) @@ -91,7 +95,7 @@ def test_no_grad_set(self): out = paddle.mean(tanh_out) input_grad = grad(out, input, no_grad_vars=[input]) - self.assertEqual(newir_program.block().ops[-1].name(), "pd.mean") + self.assertEqual(newir_program.block().ops[-1].name(), "pd_op.mean") paddle.framework.set_flags({"FLAGS_enable_new_ir_api": False}) def test_split(self): @@ -105,16 +109,16 @@ def test_split(self): input_grad = grad(out, input) ops_name = [ - "pd.data", - "pd.tanh", - "pd.full_int_array", - "pd.full", - "pd.split", + 
"pd_op.data", + "pd_op.tanh", + "pd_op.full_int_array", + "pd_op.full", + "pd_op.split", "builtin.split", - "pd.full", + "pd_op.full", "builtin.combine", - "pd.concat", - "pd.tanh_grad", + "pd_op.concat", + "pd_op.tanh_grad", ] for i, op in enumerate(newir_program.block().ops): self.assertEqual(op.name(), ops_name[i]) @@ -151,7 +155,7 @@ def test_add_n(self): out = paddle.mean(add_out) input_grad = grad(out, input_x) - self.assertEqual(newir_program.block().ops[-1].name(), "pd.add_n") + self.assertEqual(newir_program.block().ops[-1].name(), "pd_op.add_n") self.assertEqual( newir_program.block().ops[-2].name(), "builtin.combine" ) @@ -168,25 +172,25 @@ def test_concat(self): input_grad = grad(out, input_x) ops_name = [ - "pd.data", - "pd.data", - "pd.tanh", - "pd.tanh", - "pd.add", + "pd_op.data", + "pd_op.data", + "pd_op.tanh", + "pd_op.tanh", + "pd_op.add", "builtin.combine", - "pd.full", - "pd.concat", - "pd.full", + "pd_op.full", + "pd_op.concat", + "pd_op.full", "builtin.combine", - "pd.concat_grad", + "pd_op.concat_grad", "builtin.split", "builtin.combine", - "pd.add_n", - "pd.add_grad", - "pd.tanh_grad", - "pd.tanh_grad", + "pd_op.add_n", + "pd_op.add_grad", + "pd_op.tanh_grad", + "pd_op.tanh_grad", "builtin.combine", - "pd.add_n", + "pd_op.add_n", ] for i, op in enumerate(newir_program.block().ops): self.assertEqual(op.name(), ops_name[i]) diff --git a/test/ir/new_ir/test_ir_pybind.py b/test/ir/new_ir/test_ir_pybind.py index 63e9703cedf9f..d177d4a261315 100644 --- a/test/ir/new_ir/test_ir_pybind.py +++ b/test/ir/new_ir/test_ir_pybind.py @@ -81,10 +81,10 @@ def test_value(self): ) self.assertEqual(matmul_op.result(0).shape, [4, 4]) self.assertEqual( - matmul_op.results()[0].get_defining_op().name(), "pd.matmul" + matmul_op.results()[0].get_defining_op().name(), "pd_op.matmul" ) self.assertEqual( - matmul_op.result(0).get_defining_op().name(), "pd.matmul" + matmul_op.result(0).get_defining_op().name(), "pd_op.matmul" ) matmul_op.result(0).stop_gradient = True self.assertEqual(matmul_op.result(0).stop_gradient, True) @@ -111,12 +111,13 @@ def test_value(self): self.assertEqual(add_op.operands()[0].source(), matmul_op.results()[0]) self.assertEqual( - tanh_op.operands()[0].source().get_defining_op().name(), "pd.add" + tanh_op.operands()[0].source().get_defining_op().name(), "pd_op.add" ) add_op.replace_all_uses_with(matmul_op.results()) self.assertEqual( - tanh_op.operands()[0].source().get_defining_op().name(), "pd.matmul" + tanh_op.operands()[0].source().get_defining_op().name(), + "pd_op.matmul", ) self.assertEqual(add_op.result(0).use_empty(), True) diff --git a/test/ir/new_ir/test_ir_vjp.py b/test/ir/new_ir/test_ir_vjp.py index c770153738d2b..0169d269bba6e 100644 --- a/test/ir/new_ir/test_ir_vjp.py +++ b/test/ir/new_ir/test_ir_vjp.py @@ -45,7 +45,7 @@ def test_tanh_vjp1(self): with paddle.ir.core.program_guard(newir_program): grad_outs = call_vjp(tanh_op, out_grads, stop_gradients) self.assertEqual( - grad_outs[0][0].get_defining_op().name(), "pd.tanh_grad" + grad_outs[0][0].get_defining_op().name(), "pd_op.tanh_grad" ) self.assertEqual( grad_outs[0][0] @@ -54,7 +54,7 @@ def test_tanh_vjp1(self): .source() .get_defining_op() .name(), - "pd.tanh", + "pd_op.tanh", ) self.assertEqual( grad_outs[0][0] @@ -63,7 +63,7 @@ def test_tanh_vjp1(self): .source() .get_defining_op() .name(), - "pd.full", + "pd_op.full", ) self.assertEqual(len(newir_program.block().ops), 4) @@ -97,7 +97,7 @@ def test_mean_vjp1(self): with paddle.ir.core.program_guard(newir_program): grad_outs = call_vjp(mean_op, 
             grad_outs = call_vjp(mean_op, out_grads, stop_gradients)
         self.assertEqual(
-            grad_outs[0][0].get_defining_op().name(), "pd.mean_grad"
+            grad_outs[0][0].get_defining_op().name(), "pd_op.mean_grad"
         )
         self.assertEqual(
             grad_outs[0][0]
@@ -106,7 +106,7 @@ def test_mean_vjp1(self):
             .source()
             .get_defining_op()
             .name(),
-            "pd.data",
+            "pd_op.data",
         )
         self.assertEqual(
             grad_outs[0][0]
@@ -115,7 +115,7 @@ def test_mean_vjp1(self):
             .source()
             .get_defining_op()
             .name(),
-            "pd.full",
+            "pd_op.full",
         )
         self.assertEqual(len(newir_program.block().ops), 4)
diff --git a/test/ir/new_ir/test_pass_manager.py b/test/ir/new_ir/test_pass_manager.py
index 81184eb08e8a8..fc8c8288d46b6 100644
--- a/test/ir/new_ir/test_pass_manager.py
+++ b/test/ir/new_ir/test_pass_manager.py
@@ -48,7 +48,7 @@ def test_op(self):
         new_program = ir.translate_to_new_ir(main_program.desc)
         op_names = [op.name() for op in new_program.block().ops]
         # print(op_names)
-        self.assertTrue('pd.uniform' in op_names)
+        self.assertTrue('pd_op.uniform' in op_names)
         pm = ir.PassManager()
         pm.add_pass(
             'dead_code_elimination'
@@ -59,7 +59,7 @@ def test_op(self):
         self.assertEqual(pm.passes(), ['dead_code_elimination'])
         self.assertFalse(pm.empty())
         self.assertTrue(
-            'pd.uniform' not in op_names
+            'pd_op.uniform' not in op_names
         )  # uniform is elimited because its output is not used
diff --git a/test/prim/new_ir_prim/test_decomp_op.py b/test/prim/new_ir_prim/test_decomp_op.py
index 413008f814f7f..306d94e51b11d 100644
--- a/test/prim/new_ir_prim/test_decomp_op.py
+++ b/test/prim/new_ir_prim/test_decomp_op.py
@@ -56,14 +56,14 @@ def test_build_op(self):
         self.assertEqual(
             op_name_list,
             [
-                'pd.data',
-                'pd.matmul',
-                'pd.add',
-                'pd.full_int_array',
-                'pd.sum',
-                'pd.full',
-                'pd.divide',
-                'pd.tanh',
+                'pd_op.data',
+                'pd_op.matmul',
+                'pd_op.add',
+                'pd_op.full_int_array',
+                'pd_op.sum',
+                'pd_op.full',
+                'pd_op.divide',
+                'pd_op.tanh',
             ],
         )
diff --git a/test/prim/new_ir_prim/test_prim_custom_vjp.py b/test/prim/new_ir_prim/test_prim_custom_vjp.py
index 6cd0527ff6438..480761d66b721 100644
--- a/test/prim/new_ir_prim/test_prim_custom_vjp.py
+++ b/test/prim/new_ir_prim/test_prim_custom_vjp.py
@@ -73,13 +73,15 @@ def base_net(self, flag=None):
                 op.name() for op in main_program.block().ops
             ]
             assert (
-                "pd.gelu" in whole_ops_before
-                and "pd.gelu_grad" not in whole_ops_before
+                "pd_op.gelu" in whole_ops_before
+                and "pd_op.gelu_grad" not in whole_ops_before
             )
             core._set_prim_forward_enabled(True)
-            [res2] = decompose(main_program, [res2], whitelist={"pd.gelu"})
+            [res2] = decompose(
+                main_program, [res2], whitelist={"pd_op.gelu"}
+            )
             whole_ops_after = [op.name() for op in main_program.block().ops]
-            assert "pd.gelu" not in whole_ops_after
+            assert "pd_op.gelu" not in whole_ops_after
             core._set_prim_forward_enabled(False)
             exe = paddle.static.Executor()
diff --git a/test/prim/new_ir_prim/test_prim_program.py b/test/prim/new_ir_prim/test_prim_program.py
index c4cc0187b1ad8..00545ddbc57cf 100644
--- a/test/prim/new_ir_prim/test_prim_program.py
+++ b/test/prim/new_ir_prim/test_prim_program.py
@@ -58,17 +58,26 @@ def base_net(self, flag=None):
         whole_ops = [op.name() for op in main_program.block().ops]
         if flag == "forward":
             core._set_prim_forward_enabled(False)
-            assert 'pd.mean' not in whole_ops and 'pd.divide_grad' in whole_ops
+            assert (
+                'pd_op.mean' not in whole_ops
+                and 'pd_op.divide_grad' in whole_ops
+            )
         elif flag == "backward":
             core._set_prim_backward_enabled(False)
-            assert 'pd.mean' in whole_ops and 'pd.divide_grad' not in whole_ops
+            assert (
+                'pd_op.mean' in whole_ops
+                and 'pd_op.divide_grad' not in whole_ops
+            )
         elif flag == "all":
             core._set_prim_all_enabled(False)
             assert (
-                'pd.mean' not in whole_ops and 'pd.divide_grad' not in whole_ops
+                'pd_op.mean' not in whole_ops
+                and 'pd_op.divide_grad' not in whole_ops
             )
         else:
-            assert 'pd.mean' in whole_ops and 'pd.divide_grad' in whole_ops
+            assert (
+                'pd_op.mean' in whole_ops and 'pd_op.divide_grad' in whole_ops
+            )
 
         return fwd, dx, dy
 
     def test_prim_forward(self):
diff --git a/test/prim/new_ir_prim/test_prim_simpnet.py b/test/prim/new_ir_prim/test_prim_simpnet.py
index 505152354b986..8ac186fa6b9e8 100644
--- a/test/prim/new_ir_prim/test_prim_simpnet.py
+++ b/test/prim/new_ir_prim/test_prim_simpnet.py
@@ -79,7 +79,8 @@ def base_net(self, flag=None):
         if flag == "all":
             core._set_prim_all_enabled(False)
             assert (
-                'pd.gelu' not in whole_ops and 'pd.divide_grad' not in whole_ops
+                'pd_op.gelu' not in whole_ops
+                and 'pd_op.divide_grad' not in whole_ops
             )
         return outs
diff --git a/test/prim/new_ir_prim/test_vjp_prim.py b/test/prim/new_ir_prim/test_vjp_prim.py
index c6244892bfa5a..45b1087ea84db 100644
--- a/test/prim/new_ir_prim/test_vjp_prim.py
+++ b/test/prim/new_ir_prim/test_vjp_prim.py
@@ -77,27 +77,27 @@ def test_divide_grad_prim_case1(self):
         self.assertEqual(reshape_op2.result(0), grad_outs[0][0])
         self.assertEqual(reshape_op1.result(0), grad_outs[1][0])
         all_op_names = [
-            "pd.full",
-            "pd.full",
-            "pd.full",
-            "pd.divide",
-            "pd.full",
-            "pd.elementwise_pow",
-            "pd.divide",
-            "pd.full",
-            "pd.scale",
-            "pd.multiply",
-            "pd.full_int_array",
-            "pd.sum",
-            "pd.full_int_array",
-            "pd.reshape",
-            "pd.full",
-            "pd.divide",
-            "pd.multiply",
-            "pd.full_int_array",
-            "pd.sum",
-            "pd.full_int_array",
-            "pd.reshape",
+            "pd_op.full",
+            "pd_op.full",
+            "pd_op.full",
+            "pd_op.divide",
+            "pd_op.full",
+            "pd_op.elementwise_pow",
+            "pd_op.divide",
+            "pd_op.full",
+            "pd_op.scale",
+            "pd_op.multiply",
+            "pd_op.full_int_array",
+            "pd_op.sum",
+            "pd_op.full_int_array",
+            "pd_op.reshape",
+            "pd_op.full",
+            "pd_op.divide",
+            "pd_op.multiply",
+            "pd_op.full_int_array",
+            "pd_op.sum",
+            "pd_op.full_int_array",
+            "pd_op.reshape",
         ]
         for idx, op in enumerate(newir_program.block().ops):
             self.assertEqual(op.name(), all_op_names[idx])
@@ -115,10 +115,10 @@ def test_divide_grad_no_prim(self):
             grad_outs = call_vjp(divide_op, out_grads, stop_gradients)
         self.assertEqual(len(grad_outs), 2)
         self.assertEqual(
-            grad_outs[0][0].get_defining_op().name(), "pd.divide_grad"
+            grad_outs[0][0].get_defining_op().name(), "pd_op.divide_grad"
         )
         self.assertEqual(
-            grad_outs[1][0].get_defining_op().name(), "pd.divide_grad"
+            grad_outs[1][0].get_defining_op().name(), "pd_op.divide_grad"
         )
         self.assertEqual(len(newir_program.block().ops), 5)
@@ -138,14 +138,14 @@ def test_sum_grad_prim(self):
         self.assertEqual(expand_op.result(0), grad_outs[0][0])
         self.assertEqual(grad_outs[1][0], None)
         all_op_names = [
-            "pd.full",
-            "pd.full",
-            "pd.full_int_array",
-            "pd.sum",
-            "pd.full_int_array",
-            "pd.reshape",
-            "pd.full_int_array",
-            "pd.expand",
+            "pd_op.full",
+            "pd_op.full",
+            "pd_op.full_int_array",
+            "pd_op.sum",
+            "pd_op.full_int_array",
+            "pd_op.reshape",
+            "pd_op.full_int_array",
+            "pd_op.expand",
         ]
         for idx, op in enumerate(newir_program.block().ops):
             self.assertEqual(op.name(), all_op_names[idx])
@@ -163,7 +163,7 @@ def test_sum_grad_no_prim(self):
             grad_outs = call_vjp(sum_op, out_grads, stop_gradients)
         self.assertEqual(len(grad_outs), 2)
         self.assertEqual(
-            grad_outs[0][0].get_defining_op().name(), "pd.sum_grad"
"pd_op.sum_grad" ) self.assertEqual(grad_outs[1][0], None) self.assertEqual(len(newir_program.block().ops), 5) diff --git a/tools/check_file_diff_approvals.sh b/tools/check_file_diff_approvals.sh index d467ae1cc4671..04d02ade22f8a 100644 --- a/tools/check_file_diff_approvals.sh +++ b/tools/check_file_diff_approvals.sh @@ -230,10 +230,10 @@ for API_FILE in ${API_FILES[*]}; do fi done -DEPS_PHI_IN_IR=`git diff --name-only upstream/$BRANCH | grep -E "paddle/ir/" | grep "CMakeList" |xargs -r git diff -U0 upstream/$BRANCH --| grep "^\+" | grep "phi" || true` +DEPS_PHI_IN_IR=`git diff --name-only upstream/$BRANCH | grep -E "paddle/pir/" | grep "CMakeList" |xargs -r git diff -U0 upstream/$BRANCH --| grep "^\+" | grep "phi" || true` echo "DEPS_PHI_IN_IR:${DEPS_PHI_IN_IR}" if [ "${DEPS_PHI_IN_IR}" ] && [ "${DEPS_PHI_IN_IR}" != "" ]; then - echo_line="You must have one RD (Aurelius84, phlrain, zhangbo9674, winter-wang) approval for the CMakeLists.txt with DEPS phi* in paddle/ir directory.\n" + echo_line="You must have one RD (Aurelius84, phlrain, zhangbo9674, winter-wang) approval for the CMakeLists.txt with DEPS phi* in paddle/pir directory.\n" check_approval 1 Aurelius84 phlrain zhangbo9674 winter-wang fi FILTER=`git diff --name-only upstream/develop | grep -v "tools/"` diff --git a/tools/coverage/paddle_coverage.sh b/tools/coverage/paddle_coverage.sh index 016e076e7d5ad..7f2b80a9a2427 100644 --- a/tools/coverage/paddle_coverage.sh +++ b/tools/coverage/paddle_coverage.sh @@ -49,10 +49,10 @@ function gen_full_html_report() { '/paddle/paddle/fluid/recordio/*' \ '/paddle/paddle/fluid/string/*' \ '/paddle/paddle/fluid/eager/*' \ - '/paddle/paddle/fluid/ir/*' \ + '/paddle/paddle/fluid/pir/*' \ '/paddle/paddle/fluid/ir_adaptor/*' \ '/paddle/paddle/phi/*' \ - '/paddle/paddle/ir/*' \ + '/paddle/paddle/pir/*' \ '/paddle/paddle/utils/*' \ -o coverage-full.tmp \ --rc lcov_branch_coverage=0