Skip to content

Commit

Permalink
[IR] Refine Builder (#54052)
Browse files Browse the repository at this point in the history
* refine code

* delete some unused code

* refine code of build

* refine code of build

* add block

* refine builder

* refine code

* refine code by comment

* fix compiler bug
  • Loading branch information
zhangbo9674 authored May 25, 2023
1 parent 1549fbd commit 3143d8b
Show file tree
Hide file tree
Showing 18 changed files with 288 additions and 135 deletions.
74 changes: 47 additions & 27 deletions paddle/fluid/dialect/legacy_pd_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -25,40 +25,60 @@ namespace dialect {
class className : public ir::Op<className> { \
public: \
static const char *name() { return OPNAME(op_name); } \
static const char **attributes_name; \
static constexpr const char **attributes_name = nullptr; \
static constexpr uint32_t attributes_num = 0; \
static void verify(const std::vector<ir::OpResult> &inputs, \
const std::vector<ir::Type> &outputs, \
const ir::AttributeMap &attributes) { \
LOG(WARNING) << "This is a fake verify"; \
} \
}; \
const char **className::attributes_name = nullptr;
};

REIGSTER_EMPTY_OP(conv2d, Conv2DOp);
REIGSTER_EMPTY_OP(feed, FeedOp);
REIGSTER_EMPTY_OP(batch_norm, BatchNormOp);
REIGSTER_EMPTY_OP(batch_norm_, BatchNormOp_);
REIGSTER_EMPTY_OP(elementwise_add, ElementwiseAddOp);
REIGSTER_EMPTY_OP(pool2d, Pool2DOp);
REIGSTER_EMPTY_OP(flatten_contiguous_range, FlattenContiguousRangeOp);
REIGSTER_EMPTY_OP(matmul_v2, MatmulV2Op);
REIGSTER_EMPTY_OP(reshape2, Reshape2Op);
REIGSTER_EMPTY_OP(softmax_with_cross_entropy, SoftmaxWithCrossEntropyOp);
REIGSTER_EMPTY_OP(reduce_mean, ReduceMeanOp);
REIGSTER_EMPTY_OP(top_k_v2, TopKV2Op);
REIGSTER_EMPTY_OP(fill_constant, FillConstantOp);
REIGSTER_EMPTY_OP(reduce_mean_grad, ReduceMeanGradOp);
REIGSTER_EMPTY_OP(softmax_with_cross_entropy_grad,
SoftmaxWithCrossEntropyGradOp);
REIGSTER_EMPTY_OP(elementwise_add_grad, ElementwiseAddGradOp);
REIGSTER_EMPTY_OP(matmul_v2_grad, MatmulV2GradOp);
REIGSTER_EMPTY_OP(flatten_contiguous_range_grad, FlattenContiguousRangeGradOp);
REIGSTER_EMPTY_OP(pool2d_grad, Pool2DGradOp);
REIGSTER_EMPTY_OP(batch_norm_grad, BatchNormGradOp);
REIGSTER_EMPTY_OP(conv2d_grad, Conv2DGradOp);
REIGSTER_EMPTY_OP(sum, SumOp);
REIGSTER_EMPTY_OP(fetch_v2, FetchV2Op);
// TODO(zhangbo): These empty registrations are temporary; remove each one
// once the corresponding operator is properly defined.
REIGSTER_EMPTY_OP(conv2d, Conv2DOp); // To be customized: conv2d
REIGSTER_EMPTY_OP(feed, FeedOp); // To be customized: feed
REIGSTER_EMPTY_OP(batch_norm, BatchNormOp); // To be customized: batch_norm
REIGSTER_EMPTY_OP(batch_norm_, BatchNormOp_); // To be customized: batch_norm_
REIGSTER_EMPTY_OP(elementwise_add,
ElementwiseAddOp); // To be customized: add (elementwise_add)
REIGSTER_EMPTY_OP(pool2d, Pool2DOp); // To be customized: pool2d
REIGSTER_EMPTY_OP(
flatten_contiguous_range,
FlattenContiguousRangeOp); // flatten (flatten_contiguous_range)
REIGSTER_EMPTY_OP(matmul_v2,
MatmulV2Op); // To be customized: matmul (matmul_v2)
REIGSTER_EMPTY_OP(reshape2, Reshape2Op); // To be customized: reshape
REIGSTER_EMPTY_OP(softmax_with_cross_entropy,
SoftmaxWithCrossEntropyOp); // cross_entropy_with_softmax
// (softmax_with_cross_entropy)
REIGSTER_EMPTY_OP(reduce_mean,
ReduceMeanOp); // To be customized: mean (reduce_mean)
REIGSTER_EMPTY_OP(top_k_v2, TopKV2Op); // topk (top_k_v2)
REIGSTER_EMPTY_OP(fill_constant,
FillConstantOp); // To be customized: full (fill_constant)
REIGSTER_EMPTY_OP(reduce_mean_grad,
ReduceMeanGradOp); // To be customized: reduce_mean_grad
REIGSTER_EMPTY_OP(
softmax_with_cross_entropy_grad,
SoftmaxWithCrossEntropyGradOp); // cross_entropy_with_softmax_grad
// (softmax_with_cross_entropy_grad)
REIGSTER_EMPTY_OP(
elementwise_add_grad,
ElementwiseAddGradOp); // To be customized: add_grad (elementwise_add_grad)
REIGSTER_EMPTY_OP(
matmul_v2_grad,
MatmulV2GradOp); // To be customized: matmul_grad (matmul_v2_grad)
REIGSTER_EMPTY_OP(
flatten_contiguous_range_grad,
FlattenContiguousRangeGradOp); // flatten_grad
// (flatten_contiguous_range_grad)
REIGSTER_EMPTY_OP(pool2d_grad, Pool2DGradOp); // To be customized: pool2d_grad
REIGSTER_EMPTY_OP(batch_norm_grad,
BatchNormGradOp); // To be customized: batch_norm_grad
REIGSTER_EMPTY_OP(conv2d_grad, Conv2DGradOp); // To be customized: conv2d_grad
REIGSTER_EMPTY_OP(sum, SumOp); // To be customized: sum(reduce_sum)
REIGSTER_EMPTY_OP(fetch_v2, FetchV2Op); // To be customized: fetch_v2

} // namespace dialect
} // namespace paddle
4 changes: 0 additions & 4 deletions paddle/fluid/dialect/pd_attribute.h
Original file line number Diff line number Diff line change
Expand Up @@ -19,10 +19,6 @@

namespace paddle {
namespace dialect {
#define GET_PD_DIALECT_ATTRIBUTE_LIST \
IntArrayAttribute, ScalarAttribute, DataTypeAttribute, PlaceAttribute, \
DataLayoutAttribute

class IntArrayAttribute : public ir::Attribute {
public:
using Attribute::Attribute;
Expand Down
20 changes: 10 additions & 10 deletions paddle/fluid/dialect/pd_dialect.cc
Original file line number Diff line number Diff line change
Expand Up @@ -29,12 +29,12 @@
namespace paddle {
namespace dialect {
std::shared_ptr<paddle::framework::Variable>
ParameterConvertInterface::ParameterToVariable(ir::Parameter* parameter) {
ParameterConvertInterface::ParameterToVariable(ir::Parameter *parameter) {
if (parameter->type().isa<DenseTensorType>()) {
VLOG(4) << "Convert a DenseTensor Parameter to a variable.";
std::shared_ptr<paddle::framework::Variable> var =
std::make_shared<paddle::framework::Variable>();
phi::DenseTensor* tensor = var->GetMutable<phi::DenseTensor>();
phi::DenseTensor *tensor = var->GetMutable<phi::DenseTensor>();
// Init DenseTensor
auto dim = parameter->type().dyn_cast<DenseTensorType>().dim();
phi::DenseTensorMeta meta(
Expand All @@ -46,7 +46,7 @@ ParameterConvertInterface::ParameterToVariable(ir::Parameter* parameter) {
parameter->type().dyn_cast<DenseTensorType>().lod(),
parameter->type().dyn_cast<DenseTensorType>().offset());
tensor->set_meta(meta);
paddle::platform::DeviceContext* dev_ctx =
paddle::platform::DeviceContext *dev_ctx =
paddle::platform::DeviceContextPool::Instance().Get(
paddle::platform::CPUPlace());
dev_ctx->Alloc(tensor,
Expand All @@ -62,11 +62,11 @@ ParameterConvertInterface::ParameterToVariable(ir::Parameter* parameter) {
}

std::unique_ptr<ir::Parameter> ParameterConvertInterface::VariableToParameter(
paddle::framework::Variable* var) {
paddle::framework::Variable *var) {
if (var->IsType<phi::DenseTensor>()) {
phi::DenseTensor* tensor = var->GetMutable<phi::DenseTensor>();
phi::DenseTensor *tensor = var->GetMutable<phi::DenseTensor>();
// Get Meta
ir::IrContext* ctx = ir::IrContext::Instance();
ir::IrContext *ctx = ir::IrContext::Instance();
ir::Type data_type = TransToIrDataType(tensor->dtype(), ctx);
DenseTensorTypeStorage::Dim dims(tensor->dims().size());
std::copy(tensor->dims().Get(),
Expand All @@ -76,7 +76,7 @@ std::unique_ptr<ir::Parameter> ParameterConvertInterface::VariableToParameter(
TransToIrDataLayout(tensor->layout());
DenseTensorTypeStorage::LoD lod = tensor->lod();
size_t offset = tensor->meta().offset;
void* data = tensor->data();
void *data = tensor->data();
ir::Type dense_tensor_type =
DenseTensorType::get(ctx, data_type, dims, data_layout, lod, offset);
return std::make_unique<ir::Parameter>(
Expand All @@ -88,7 +88,7 @@ std::unique_ptr<ir::Parameter> ParameterConvertInterface::VariableToParameter(
}
}

PaddleDialect::PaddleDialect(ir::IrContext* context)
PaddleDialect::PaddleDialect(ir::IrContext *context)
: ir::Dialect(name(), context, ir::TypeId::get<PaddleDialect>()) {
initialize();
}
Expand Down Expand Up @@ -136,11 +136,11 @@ void PaddleDialect::initialize() {
FetchV2Op>();
}

void PaddleDialect::PrintType(ir::Type type, std::ostream& os) {
void PaddleDialect::PrintType(ir::Type type, std::ostream &os) {
DenseTensorType tensor_type = type.dyn_cast<DenseTensorType>();

os << "tensor<";
auto& dims = tensor_type.dim();
auto &dims = tensor_type.dim();
for (auto d : dims) {
os << d;
os << "x";
Expand Down
2 changes: 0 additions & 2 deletions paddle/fluid/dialect/pd_type.h
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,6 @@

namespace paddle {
namespace dialect {
#define GET_PD_DIALECT_TYPE_LIST paddle::dialect::DenseTensorType

///
/// \brief Define built-in parametric types.
///
Expand Down
26 changes: 26 additions & 0 deletions paddle/ir/block.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/ir/block.h"

namespace ir {
Block::~Block() { clear(); }

void Block::clear() {
while (!empty()) {
ops_.back()->destroy();
ops_.pop_back();
}
}
} // namespace ir
54 changes: 54 additions & 0 deletions paddle/ir/block.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <list>
#include "paddle/ir/operation.h"

namespace ir {
class Block {
public:
using iterator = std::list<Operation *>::iterator;
using reverse_iterator = std::list<Operation *>::reverse_iterator;

Block() = default;
~Block();

bool empty() const { return ops_.empty(); }
size_t size() const { return ops_.size(); }

iterator begin() { return ops_.begin(); }
iterator end() { return ops_.end(); }
reverse_iterator rbegin() { return ops_.rbegin(); }
reverse_iterator rend() { return ops_.rend(); }

Operation *back() { return ops_.back(); }
Operation *front() { return ops_.front(); }
void push_back(Operation *op) { ops_.push_back(op); }
void push_front(Operation *op) { ops_.push_front(op); }
std::list<Operation *>::iterator insert(
std::list<Operation *>::const_iterator iterator, Operation *op) {
return ops_.insert(iterator, op);
}
void clear();

private:
Block(Block &) = delete;
void operator=(Block &) = delete;

private:
std::list<Operation *> ops_; // owned
};
} // namespace ir
41 changes: 41 additions & 0 deletions paddle/ir/builder.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/ir/builder.h"

namespace ir {
/// Inserts `op` at the current insertion point and returns it.
/// If no block is attached, the op is returned without being inserted.
Operation *Builder::insert(Operation *op) {
  if (!block_) {
    LOG(WARNING) << "Builder's Block is nullptr, insert failed.";
    return op;
  }
  block_->insert(insert_point_, op);
  return op;
}

/// Create an operation given the fields represented as an OperationState,
/// and insert it at the current insertion point.
Operation *Builder::create(const OperationArgument &argument) {
  Operation *op = Operation::create(argument);
  return insert(op);
}

/// Creates an operation from the individual fields (inputs, output types,
/// attributes, op info) and inserts it at the current insertion point.
Operation *Builder::create(const std::vector<ir::OpResult> &inputs,
                           const std::vector<ir::Type> &output_types,
                           const AttributeMap &attribute,
                           ir::OpInfo op_info) {
  return create(OperationArgument(op_info, inputs, output_types, attribute));
}

} // namespace ir
40 changes: 32 additions & 8 deletions paddle/ir/builder.h
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,9 @@

#include <list>

#include "paddle/ir/block.h"
#include "paddle/ir/operation.h"
#include "paddle/ir/program.h"

namespace ir {
///
Expand All @@ -25,25 +27,47 @@ namespace ir {
///
class Builder {
public:
explicit Builder(IrContext *context) : context_(context) {}
explicit Builder(Operation *op) : Builder(op->ir_context()) {}
explicit Builder(IrContext *context,
Block *block,
Block::iterator insert_point)
: context_(context), block_(block), insert_point_(insert_point) {}

static Builder AtBlockBegin(IrContext *context, Block *block) {
return Builder(context, block, block->begin());
}

static Builder AtBlockEnd(IrContext *context, Block *block) {
return Builder(context, block, block->end());
}

IrContext *context() const { return context_; }

Block *block() const { return block_; }

Operation *insert(Operation *op);

/// Creates an operation given the fields represented as an OperationState.
Operation *create(const OperationArgument &argument);

/// Creates an operation with the given fields.
Operation *create(const std::vector<ir::OpResult> &inputs,
const std::vector<ir::Type> &output_types,
const AttributeMap &attribute,
ir::OpInfo op_info);

/// Create an operation of specific op type at the current insertion point.
template <typename OpTy, typename... Args>
OpTy create(Args &&...args) {
OperationArgument argument(context_->GetRegisteredOpInfo(OpTy::name()));
OpTy::build(*this, argument, std::forward<Args>(args)...);
Operation *op = Operation::create(argument);
Operation *op = create(argument);
return op->dyn_cast<OpTy>();
}

private:
IrContext *context_;
// The current op list this builder is inserting into.
// After the design of the block data structure is completed,
// this member will be replaced by the block.
std::list<Operation *> *op_list_ = nullptr;
Block *block_ = nullptr;
// The insertion point within the list that this builder is inserting before.
std::list<Operation *>::iterator insertPoint;
Block::iterator insert_point_;
};
} // namespace ir
7 changes: 0 additions & 7 deletions paddle/ir/builtin_attribute.h
Original file line number Diff line number Diff line change
Expand Up @@ -19,13 +19,6 @@
#include "paddle/ir/utils.h"

namespace ir {
///
/// \brief All built-in attributes.
///
#define GET_BUILT_IN_ATTRIBUTE_LIST \
StrAttribute, BoolAttribute, FloatAttribute, DoubleAttribute, \
Int32_tAttribute, Int64_tAttribute, ArrayAttribute

class StrAttribute : public Attribute {
public:
using Attribute::Attribute;
Expand Down
9 changes: 0 additions & 9 deletions paddle/ir/builtin_type.h
Original file line number Diff line number Diff line change
Expand Up @@ -18,15 +18,6 @@
#include "paddle/ir/type.h"

namespace ir {
///
/// \brief This macro is used to get a list of all built-in types in this file.
/// The built-in Dialect will use this macro to quickly register all built-in
/// types.
///
#define GET_BUILT_IN_TYPE_LIST \
BFloat16Type, Float16Type, Float32Type, Float64Type, Int8Type, Int16Type, \
Int32Type, Int64Type, BoolType, VectorType

///
/// \brief Define built-in parameterless types. Please add the necessary
/// interface functions for built-in types through the macro
Expand Down
Loading

0 comments on commit 3143d8b

Please sign in to comment.