Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[IR] Type system stage4: Add some built-in types and type conversion methods #51112

Merged
merged 16 commits into from
Mar 14, 2023
Merged
34 changes: 34 additions & 0 deletions paddle/ir/builtin_type.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/ir/builtin_type.h"

namespace ir {

// Each accessor below simply exposes one of the five parameters that are
// cached in the underlying DenseTensorTypeStorage instance.

const ir::Type &DenseTensorType::dtype() const {
  auto &&param_storage = storage();
  return param_storage->dtype_;
}

const ir::DenseTensorTypeStorage::Dim &DenseTensorType::dim() const {
  auto &&param_storage = storage();
  return param_storage->dims_;
}

const ir::DenseTensorTypeStorage::DataLayout &DenseTensorType::data_layout()
    const {
  auto &&param_storage = storage();
  return param_storage->layout_;
}

const ir::DenseTensorTypeStorage::LoD &DenseTensorType::lod() const {
  auto &&param_storage = storage();
  return param_storage->lod_;
}

const size_t &DenseTensorType::offset() const {
  auto &&param_storage = storage();
  return param_storage->offset_;
}
}  // namespace ir
75 changes: 72 additions & 3 deletions paddle/ir/builtin_type.h
Original file line number Diff line number Diff line change
Expand Up @@ -14,22 +14,44 @@

#pragma once

#include "paddle/ir/builtin_type_storage.h"
#include "paddle/ir/type.h"

namespace ir {
///
/// \brief This macro is used to get a list of all built-in types in this file.
/// The built-in Dialect will use this macro to quickly register all built-in
/// types.
///
#define GET_BUILT_IN_TYPE_LIST ir::Float32Type, ir::Int32Type
#define GET_BUILT_IN_TYPE_LIST \
ir::Float16Type, ir::Float32Type, ir::Float64Type, ir::Int16Type, \
ir::Int32Type, ir::Int64Type, ir::DenseTensorType

///
/// \brief Definitions of built-in type classes. The built-in type object get
/// method is as follows:
/// \brief Define built-in parameterless types. Please add the necessary
/// interface functions for built-in types through the macro
/// DECLARE_TYPE_UTILITY_FUNCTOR.
///
/// NOTE(zhangbo9674): If you need to directly
/// cache the object of this built-in type in IrContext, please overload the get
/// method, and construct and cache the object in IrContext. For the specific
/// implementation method, please refer to Float16Type.
///
/// The built-in type object get method is as follows:
/// \code{cpp}
/// ir::IrContext *ctx = ir::IrContext::Instance();
/// Type fp32 = Float32Type::get(ctx);
/// \endcode
///
///
/// \brief Built-in 16-bit floating-point type. It carries no parameters, so
/// it reuses the plain ir::TypeStorage. Per the NOTE above, its get method is
/// overloaded so the object can be constructed and cached in IrContext.
///
class Float16Type : public ir::Type {
public:
// Inherit the constructors of ir::Type.
using Type::Type;

// Adds the interface functions required of a built-in type (see the macro
// documentation at the top of this file).
DECLARE_TYPE_UTILITY_FUNCTOR(Float16Type, ir::TypeStorage);

// Returns the Float16Type object cached in the given IrContext
// (implementation lives in a .cc file not shown here).
static Float16Type get(ir::IrContext *context);
};

class Float32Type : public ir::Type {
public:
using Type::Type;
Expand All @@ -39,6 +61,24 @@ class Float32Type : public ir::Type {
static Float32Type get(ir::IrContext *context);
};

///
/// \brief Built-in 64-bit floating-point type (parameterless; reuses the
/// plain ir::TypeStorage).
///
class Float64Type : public ir::Type {
public:
// Inherit the constructors of ir::Type.
using Type::Type;

// Adds the interface functions required of a built-in type.
DECLARE_TYPE_UTILITY_FUNCTOR(Float64Type, ir::TypeStorage);

// Overloaded get: obtains the object constructed/cached in IrContext
// (see the NOTE above; implementation lives in a .cc file not shown here).
static Float64Type get(ir::IrContext *context);
};

///
/// \brief Built-in 16-bit signed integer type (parameterless; reuses the
/// plain ir::TypeStorage).
///
class Int16Type : public ir::Type {
public:
// Inherit the constructors of ir::Type.
using Type::Type;

// Adds the interface functions required of a built-in type.
DECLARE_TYPE_UTILITY_FUNCTOR(Int16Type, ir::TypeStorage);

// Overloaded get: obtains the object constructed/cached in IrContext
// (see the NOTE above; implementation lives in a .cc file not shown here).
static Int16Type get(ir::IrContext *context);
};

class Int32Type : public ir::Type {
public:
using Type::Type;
Expand All @@ -48,4 +88,33 @@ class Int32Type : public ir::Type {
static Int32Type get(ir::IrContext *context);
};

///
/// \brief Built-in 64-bit signed integer type (parameterless; reuses the
/// plain ir::TypeStorage).
///
class Int64Type : public ir::Type {
public:
// Inherit the constructors of ir::Type.
using Type::Type;

// Adds the interface functions required of a built-in type.
DECLARE_TYPE_UTILITY_FUNCTOR(Int64Type, ir::TypeStorage);

// Overloaded get: obtains the object constructed/cached in IrContext
// (see the NOTE above; implementation lives in a .cc file not shown here).
static Int64Type get(ir::IrContext *context);
};

///
/// \brief Define built-in parametric types.
///
class DenseTensorType : public ir::Type {
public:
// Inherit the constructors of ir::Type.
using Type::Type;

// Parametric type: uses the dedicated DenseTensorTypeStorage, which caches
// dtype/dims/layout/lod/offset.
DECLARE_TYPE_UTILITY_FUNCTOR(DenseTensorType, DenseTensorTypeStorage);

/// \brief Element data type of the tensor.
const ir::Type &dtype() const;

/// \brief Tensor shape, one extent per dimension (vector<int64_t>).
const ir::DenseTensorTypeStorage::Dim &dim() const;

/// \brief Memory layout tag (NCHW, NHWC, ...), mirroring phi's DataLayout.
const ir::DenseTensorTypeStorage::DataLayout &data_layout() const;

/// \brief LoD information — presumably the level-of-detail structure of
/// phi's DenseTensor; verify against paddle/phi.
const ir::DenseTensorTypeStorage::LoD &lod() const;

/// \brief Stored offset parameter.
// NOTE(review): returning `const size_t&` for a scalar is unusual;
// returning by value would be more idiomatic — confirm before changing.
const size_t &offset() const;
};

} // namespace ir
156 changes: 156 additions & 0 deletions paddle/ir/builtin_type_storage.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,156 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <cstdint>
#include <functional>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>

#include "paddle/ir/type.h"

namespace std {
///
/// \brief Enable hashing std::vector<T> instances, so that Dim and LoD can be
/// hashed with std::hash inside DenseTensorTypeStorage::HashValue.
///
/// NOTE(review): the standard only permits specializing std::hash for
/// program-defined types; std::hash<std::vector<int64_t>> is technically
/// undefined behavior per [namespace.std]. Consider a custom hasher functor
/// in namespace ir instead — kept as-is here to preserve callers.
///
template <typename T>
struct hash<std::vector<T>> {
  std::size_t operator()(const std::vector<T> &vec) const {
    std::size_t seed = 0;
    // Boost-style hash_combine fold over every element.
    for (const T &element : vec) {
      seed ^= std::hash<T>()(element) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
    }
    return seed;
  }
};

}  // namespace std

namespace ir {
///
/// \brief Define the parametric TypeStorage for DenseTensorType.
///
/// NOTE(zhangbo9674): The derived TypeStorage class needs to implement the
/// following methods: (1) declare ParamKey, (2) define a Construct method,
/// (3) define a HashValue method, (4) overload operator==.
///
struct DenseTensorTypeStorage : public ir::TypeStorage {
///
/// \brief It is consistent with the DataLayout defined by Phi operator
/// library. See the file for details: paddle/phi/common/layout.h.
///
enum class DataLayout : unsigned int {
UNDEFINED = 0,
NHWC,
NCHW,
NCDHW,
NDHWC,
ONEDNN,
SPARSE_COO,
SPARSE_CSR,
PSTRING_UNION,

NUM_DATA_LAYOUTS,

// See Note [ Why we need ALL in basic kernel key member? ]
ALL_LAYOUT = UNDEFINED,

// Note: Unify phi DataLayout and fluid::framework::DataLayout,
// for compatible with fluid DataLayout, here need prefix `k`
kNHWC = NHWC,
kNCHW = NCHW,
kMKLDNN = ONEDNN, // all layouts supported by ONEDNN internally
kNDHWC = NDHWC,
kNCDHW = NCDHW,
};

using Dim = std::vector<int64_t>;

using LoD = std::vector<std::vector<size_t>>;

///
/// \brief Declare ParamKey according to parameter type.
///
using ParamKey = std::tuple<ir::Type, Dim, DataLayout, LoD, size_t>;

DenseTensorTypeStorage(
ir::Type dtype, Dim dims, DataLayout layout, LoD lod, size_t offset)
: dtype_(dtype),
dims_(dims),
layout_(layout),
lod_(lod),
offset_(offset) {}

///
/// \brief Each derived TypeStorage must define a Construc method, which
/// StorageManager uses to construct a derived TypeStorage.
///
static DenseTensorTypeStorage *Construct(ParamKey key) {
return new DenseTensorTypeStorage(std::get<0>(key),
std::get<1>(key),
std::get<2>(key),
std::get<3>(key),
std::get<4>(key));
}

///
/// \brief Each derived TypeStorage must provide a HashValue method.
///
static std::size_t HashValue(const ParamKey &key) {
std::size_t hash_value = 0;
// hash dtype
hash_value =
hash_combine(hash_value, std::hash<ir::Type>()(std::get<0>(key)));
// hash dims
hash_value = hash_combine(hash_value, std::hash<Dim>()(std::get<1>(key)));
// hash layout
hash_value =
hash_combine(hash_value,
std::hash<std::underlying_type<DataLayout>::type>()(
static_cast<std::underlying_type<DataLayout>::type>(
std::get<2>(key))));
// hash lod
hash_value = hash_combine(hash_value, std::hash<LoD>()(std::get<3>(key)));
// hash offset
hash_value =
hash_combine(hash_value, std::hash<size_t>()(std::get<4>(key)));
return hash_value;
}

///
/// \brief Each derived TypeStorage needs to overload operator==.
///
bool operator==(const ParamKey &key) const {
return ParamKey(dtype_, dims_, layout_, lod_, offset_) == key;
}

ParamKey GetAsKey() const {
return ParamKey(dtype_, dims_, layout_, lod_, offset_);
}

///
/// \brief DenseTensorTypeStorage include five parameters: dims, dtype,
/// layout, lod, offset.
///
ir::Type dtype_;
Dim dims_;
DataLayout layout_;
LoD lod_;
size_t offset_;

private:
static std::size_t hash_combine(std::size_t lhs, std::size_t rhs) {
return lhs ^= rhs + 0x9e3779b9 + (lhs << 6) + (lhs >> 2);
}
};

} // namespace ir
Loading